1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <linux/uaccess.h>
24#include <linux/types.h>
25#include <linux/fcntl.h>
26#include <linux/socket.h>
27#include <linux/sockios.h>
28#include <linux/in.h>
29#include <linux/errno.h>
30#include <linux/timer.h>
31#include <linux/mm.h>
32#include <linux/inet.h>
33#include <linux/netdevice.h>
34#include <net/snmp.h>
35#include <net/ip.h>
36#include <net/ipv6.h>
37#include <net/icmp.h>
38#include <net/protocol.h>
39#include <linux/skbuff.h>
40#include <linux/proc_fs.h>
41#include <linux/export.h>
42#include <net/sock.h>
43#include <net/ping.h>
44#include <net/udp.h>
45#include <net/route.h>
46#include <net/inet_common.h>
47#include <net/checksum.h>
48
49
/* Global hash table of ping sockets, keyed by ICMP ident (see ping_hashfn). */
static struct ping_table ping_table;

/* Last auto-assigned ident; ping_v4_get_port() resumes its search after it. */
static u16 ping_port_rover;
53
/*
 * Map an ICMP ident to a hash bucket, mixing in the network namespace
 * so idents from different namespaces spread over different buckets.
 */
static inline int ping_hashfn(struct net *net, unsigned int num, unsigned int mask)
{
	int bucket = (num + net_hash_mix(net)) & mask;

	pr_debug("hash(%d) = %d\n", num, bucket);
	return bucket;
}
61
62static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table,
63 struct net *net, unsigned int num)
64{
65 return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)];
66}
67
/*
 * Claim an ICMP "ident" (the ping socket's notion of a port) and hash
 * the socket into ping_table.
 *
 * ident == 0 requests automatic assignment: scan the full 16-bit space
 * starting just after ping_port_rover for an ident not present in its
 * hash slot.  A non-zero ident is accepted unless another socket
 * already holds it without both sides having SO_REUSEADDR set.
 *
 * Returns 0 on success, 1 on failure (the .get_port convention).
 */
static int ping_v4_get_port(struct sock *sk, unsigned short ident)
{
	struct hlist_nulls_node *node;
	struct hlist_nulls_head *hlist;
	struct inet_sock *isk, *isk2;
	struct sock *sk2 = NULL;

	isk = inet_sk(sk);
	write_lock_bh(&ping_table.lock);
	if (ident == 0) {
		u32 i;
		u16 result = ping_port_rover + 1;

		for (i = 0; i < (1L << 16); i++, result++) {
			if (!result)
				result++; /* 0 means "auto"; never assign it */
			hlist = ping_hashslot(&ping_table, sock_net(sk),
					result);
			ping_portaddr_for_each_entry(sk2, node, hlist) {
				isk2 = inet_sk(sk2);

				if (isk2->inet_num == result)
					goto next_port;
			}

			/* free ident found; remember where to resume next time */
			ping_port_rover = ident = result;
			break;
next_port:
			;
		}
		if (i >= (1L << 16))
			goto fail;	/* whole ident space exhausted */
	} else {
		hlist = ping_hashslot(&ping_table, sock_net(sk), ident);
		ping_portaddr_for_each_entry(sk2, node, hlist) {
			isk2 = inet_sk(sk2);

			/* conflict unless both sockets allow address reuse */
			if ((isk2->inet_num == ident) &&
			    (sk2 != sk) &&
			    (!sk2->sk_reuse || !sk->sk_reuse))
				goto fail;
		}
	}

	pr_debug("found port/ident = %d\n", ident);
	isk->inet_num = ident;
	if (sk_unhashed(sk)) {
		pr_debug("was not hashed\n");
		sock_hold(sk);	/* reference owned by the hash table */
		hlist_nulls_add_head(&sk->sk_nulls_node, hlist);
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	}
	write_unlock_bh(&ping_table.lock);
	return 0;

fail:
	write_unlock_bh(&ping_table.lock);
	return 1;
}
128
/*
 * .hash callback for ping_prot.  Ping sockets are hashed exclusively
 * through ping_v4_get_port(), so reaching this path is a bug.
 */
static void ping_v4_hash(struct sock *sk)
{
	pr_debug("ping_v4_hash(sk->port=%u)\n", inet_sk(sk)->inet_num);
	BUG();	/* must never be called */
}
134
/*
 * Remove a ping socket from ping_table, clear its ident/source port and
 * drop the reference the hash table held (taken in ping_v4_get_port()).
 */
static void ping_v4_unhash(struct sock *sk)
{
	struct inet_sock *isk = inet_sk(sk);

	pr_debug("ping_v4_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
	write_lock_bh(&ping_table.lock);
	if (sk_hashed(sk)) {
		hlist_nulls_del(&sk->sk_nulls_node);
		sk_nulls_node_init(&sk->sk_nulls_node);
		sock_put(sk);	/* release the hash table's reference */
		isk->inet_num = 0;
		isk->inet_sport = 0;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	}
	write_unlock_bh(&ping_table.lock);
}
151
/*
 * Find the ping socket matching an incoming packet by ident; @daddr is
 * matched against the socket's bound local address (if any) and @dif
 * against its bound device.  Returns the socket with a reference held
 * (caller must sock_put()), or NULL if no socket matches.
 */
static struct sock *ping_v4_lookup(struct net *net, __be32 saddr, __be32 daddr,
		u16 ident, int dif)
{
	struct hlist_nulls_head *hslot = ping_hashslot(&ping_table, net, ident);
	struct sock *sk = NULL;
	struct inet_sock *isk;
	struct hlist_nulls_node *hnode;

	pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n",
		 (int)ident, &daddr, dif);
	read_lock_bh(&ping_table.lock);

	ping_portaddr_for_each_entry(sk, hnode, hslot) {
		isk = inet_sk(sk);

		pr_debug("found: %p: num = %d, daddr = %pI4, dif = %d\n", sk,
			 (int)isk->inet_num, &isk->inet_rcv_saddr,
			 sk->sk_bound_dev_if);

		pr_debug("iterate\n");
		if (isk->inet_num != ident)
			continue;
		if (isk->inet_rcv_saddr && isk->inet_rcv_saddr != daddr)
			continue;
		if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
			continue;

		sock_hold(sk);	/* caller releases */
		goto exit;
	}

	sk = NULL;
exit:
	read_unlock_bh(&ping_table.lock);

	return sk;
}
189
190static void inet_get_ping_group_range_net(struct net *net, kgid_t *low,
191 kgid_t *high)
192{
193 kgid_t *data = net->ipv4.sysctl_ping_group_range;
194 unsigned int seq;
195
196 do {
197 seq = read_seqbegin(&net->ipv4_sysctl_local_ports.lock);
198
199 *low = data[0];
200 *high = data[1];
201 } while (read_seqretry(&net->ipv4_sysctl_local_ports.lock, seq));
202}
203
204
/*
 * .init callback: permit the socket only if one of the caller's group
 * IDs falls inside the ping_group_range sysctl window.
 *
 * Returns 0 when permitted, -EACCES otherwise.
 */
static int ping_init_sock(struct sock *sk)
{
	struct net *net = sock_net(sk);
	kgid_t group = current_egid();
	struct group_info *group_info;
	int i, j, count;
	kgid_t low, high;
	int ret = 0;

	inet_get_ping_group_range_net(net, &low, &high);
	/* fast path: effective gid is in range */
	if (gid_lte(low, group) && gid_lte(group, high))
		return 0;

	/* otherwise scan the supplementary groups, block by block */
	group_info = get_current_groups();
	count = group_info->ngroups;
	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min_t(int, NGROUPS_PER_BLOCK, count);
		for (j = 0; j < cp_count; j++) {
			kgid_t gid = group_info->blocks[i][j];
			if (gid_lte(low, gid) && gid_lte(gid, high))
				goto out_release_group;	/* allowed, ret == 0 */
		}

		count -= cp_count;
	}

	ret = -EACCES;

out_release_group:
	put_group_info(group_info);	/* drop ref from get_current_groups() */
	return ret;
}
237
238static void ping_close(struct sock *sk, long timeout)
239{
240 pr_debug("ping_close(sk=%p,sk->num=%u)\n",
241 inet_sk(sk), inet_sk(sk)->inet_num);
242 pr_debug("isk->refcnt = %d\n", sk->sk_refcnt.counter);
243
244 sk_common_release(sk);
245}
246
247
248
249
250
251
252static int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
253{
254 struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
255 struct inet_sock *isk = inet_sk(sk);
256 struct net *net = sock_net(sk);
257 unsigned short snum;
258 int chk_addr_ret;
259 int err;
260
261 if (addr_len < sizeof(struct sockaddr_in))
262 return -EINVAL;
263
264 pr_debug("ping_v4_bind(sk=%p,sa_addr=%08x,sa_port=%d)\n",
265 sk, addr->sin_addr.s_addr, ntohs(addr->sin_port));
266
267 chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
268 if (addr->sin_addr.s_addr == htonl(INADDR_ANY))
269 chk_addr_ret = RTN_LOCAL;
270
271 if ((net->ipv4_sysctl_ip_nonlocal_bind == 0 &&
272 isk->freebind == 0 && isk->transparent == 0 &&
273 chk_addr_ret != RTN_LOCAL) ||
274 chk_addr_ret == RTN_MULTICAST ||
275 chk_addr_ret == RTN_BROADCAST)
276 return -EADDRNOTAVAIL;
277
278 lock_sock(sk);
279
280 err = -EINVAL;
281 if (isk->inet_num != 0)
282 goto out;
283
284 err = -EADDRINUSE;
285 isk->inet_rcv_saddr = isk->inet_saddr = addr->sin_addr.s_addr;
286 snum = ntohs(addr->sin_port);
287 if (ping_v4_get_port(sk, snum) != 0) {
288 isk->inet_saddr = isk->inet_rcv_saddr = 0;
289 goto out;
290 }
291
292 pr_debug("after bind(): num = %d, daddr = %pI4, dif = %d\n",
293 (int)isk->inet_num,
294 &isk->inet_rcv_saddr,
295 (int)sk->sk_bound_dev_if);
296
297 err = 0;
298 if (isk->inet_rcv_saddr)
299 sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
300 if (snum)
301 sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
302 isk->inet_sport = htons(isk->inet_num);
303 isk->inet_daddr = 0;
304 isk->inet_dport = 0;
305 sk_dst_reset(sk);
306out:
307 release_sock(sk);
308 pr_debug("ping_v4_bind -> %d\n", err);
309 return err;
310}
311
312
313
314
315
/* Only plain ICMP_ECHO requests (code 0) may pass through ping sockets. */
static inline int ping_supported(int type, int code)
{
	return (type == ICMP_ECHO && code == 0) ? 1 : 0;
}
322
323
324
325
326
327
/* Forward declaration; the definition follows the send path below. */
static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
329
/*
 * Error handler called by the ICMP layer when an ICMP error (dest
 * unreachable, time exceeded, ...) quotes one of our echo requests.
 *
 * Maps the ICMP type/code to an errno, updates PMTU/redirect state,
 * and reports the error to the owning socket; with IP_RECVERR the
 * error is also queued on the socket's error queue.
 */
void ping_err(struct sk_buff *skb, u32 info)
{
	/* skb->data points at the quoted (original outgoing) IP header */
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
	struct inet_sock *inet_sock;
	int type = icmp_hdr(skb)->type;		/* outer error type */
	int code = icmp_hdr(skb)->code;		/* outer error code */
	struct net *net = dev_net(skb->dev);
	struct sock *sk;
	int harderr;
	int err;

	/* Only errors quoting one of our echo requests are of interest. */
	if (!ping_supported(icmph->type, icmph->code))
		return;

	pr_debug("ping_err(type=%04x,code=%04x,id=%04x,seq=%04x)\n", type,
		 code, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence));

	/* the quoted echo id identifies the sending socket */
	sk = ping_v4_lookup(net, iph->daddr, iph->saddr,
			    ntohs(icmph->un.echo.id), skb->dev->ifindex);
	if (sk == NULL) {
		pr_debug("no socket, dropping\n");
		return;	/* no socket for this error */
	}
	pr_debug("err on socket %p\n", sk);

	err = 0;
	harderr = 0;	/* hard errors are reported even without IP_RECVERR */
	inet_sock = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		/* not a real error, but ping wants to see it */
		err = EREMOTEIO;
		break;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* path MTU discovery */
			ipv4_sk_update_pmtu(skb, sk, info);
			if (inet_sock->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	case ICMP_REDIRECT:
		/* treated like SOURCE_QUENCH: informational */
		ipv4_sk_redirect(skb, sk);
		err = EREMOTEIO;
		break;
	}

	/*
	 * Without IP_RECVERR only hard errors on connected sockets are
	 * delivered; with it, every error is queued for recvmsg().
	 */
	if (!inet_sock->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ip_icmp_error(sk, skb, err, 0 /* no remote port */,
			      info, (u8 *)icmph);
	}
	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	sock_put(sk);	/* ref taken by ping_v4_lookup() */
}
415
416
417
418
419
/*
 * Scratch state passed as the "from" cookie to ip_append_data():
 * ping_getfrag() copies payload from @iov while accumulating the
 * checksum in @wcheck; the finished ICMP header @icmph is written
 * into the skb by ping_push_pending_frames().
 */
struct pingfakehdr {
	struct icmphdr icmph;	/* header to prepend at transmit time */
	struct iovec *iov;	/* user payload (ICMP header already consumed) */
	__wsum wcheck;		/* running payload checksum */
};
425
/*
 * Fragment-copy callback for ip_append_data().
 *
 * Offsets are relative to the start of the ICMP message.  The first
 * chunk (offset 0) leaves sizeof(struct icmphdr) output bytes untouched
 * (filled in later by ping_push_pending_frames()) and copies only the
 * payload; every copy folds the data into pfh->wcheck.  Returns 0 or
 * -EFAULT on a failed user copy.
 */
static int ping_getfrag(void *from, char *to,
		int offset, int fraglen, int odd, struct sk_buff *skb)
{
	struct pingfakehdr *pfh = (struct pingfakehdr *)from;

	if (offset == 0) {
		/* first fragment must have room for the ICMP header */
		if (fraglen < sizeof(struct icmphdr))
			BUG();
		if (csum_partial_copy_fromiovecend(to + sizeof(struct icmphdr),
			    pfh->iov, 0, fraglen - sizeof(struct icmphdr),
			    &pfh->wcheck))
			return -EFAULT;

		return 0;
	}
	/* later fragments always start past the ICMP header */
	if (offset < sizeof(struct icmphdr))
		BUG();
	if (csum_partial_copy_fromiovecend
			(to, pfh->iov, offset - sizeof(struct icmphdr),
			 fraglen, &pfh->wcheck))
		return -EFAULT;
	return 0;
}
449
/*
 * Finalize and transmit the queued datagram: fold the accumulated
 * payload checksum together with the ICMP header, write the completed
 * header into the skb and hand it to ip_push_pending_frames().
 */
static int ping_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
		struct flowi4 *fl4)
{
	struct sk_buff *skb = skb_peek(&sk->sk_write_queue);

	if (!skb)
		return 0;	/* nothing was queued */
	pfh->wcheck = csum_partial((char *)&pfh->icmph,
				   sizeof(struct icmphdr), pfh->wcheck);
	pfh->icmph.checksum = csum_fold(pfh->wcheck);
	memcpy(icmp_hdr(skb), &pfh->icmph, sizeof(struct icmphdr));
	skb->ip_summed = CHECKSUM_NONE;	/* checksum already computed here */
	return ip_push_pending_frames(sk, fl4);
}
464
/*
 * sendmsg() handler for IPv4 ping sockets.
 *
 * Validates that the message is a plain ICMP_ECHO request, resolves
 * the destination and route (honouring control messages and socket
 * options), then builds and transmits the datagram with the echo id
 * forced to the socket's bound ident.  Returns the number of payload
 * bytes sent or a negative errno.
 */
static int ping_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len)
{
	struct net *net = sock_net(sk);
	struct flowi4 fl4;
	struct inet_sock *inet = inet_sk(sk);
	struct ipcm_cookie ipc;
	struct icmphdr user_icmph;
	struct pingfakehdr pfh;
	struct rtable *rt = NULL;
	struct ip_options_data opt_copy;
	int free = 0;	/* set when ipc.opt was allocated by ip_cmsg_send() */
	__be32 saddr, daddr, faddr;
	u8 tos;
	int err;

	pr_debug("ping_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num);

	/* an IPv4 datagram is limited to 64K */
	if (len > 0xFFFF)
		return -EMSGSIZE;

	/* must carry at least a full ICMP header */
	if (len < sizeof(user_icmph))
		return -EINVAL;

	/* ICMP has no out-of-band data */
	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/*
	 * Fetch the ICMP header so type/code can be validated.  This
	 * consumes the header from the iovec; ping_getfrag() copies only
	 * the payload that follows it.
	 */
	if (memcpy_fromiovec((u8 *)&user_icmph, msg->msg_iov,
			     sizeof(struct icmphdr)))
		return -EFAULT;
	if (!ping_supported(user_icmph.type, user_icmph.code))
		return -EINVAL;

	/* destination: explicit sockaddr, or the connected peer */
	if (msg->msg_name) {
		struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET)
			return -EINVAL;
		daddr = usin->sin_addr.s_addr;
		/* no remote port for ICMP */
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->inet_daddr;
		/* no remote port for ICMP */
	}

	ipc.addr = inet->inet_saddr;
	ipc.opt = NULL;
	ipc.oif = sk->sk_bound_dev_if;
	ipc.tx_flags = 0;
	ipc.ttl = 0;
	ipc.tos = -1;	/* -1: no per-message TOS supplied (yet) */

	sock_tx_timestamp(sk, &ipc.tx_flags);

	/* apply per-message control data (IP_PKTINFO, IP_OPTIONS, ...) */
	if (msg->msg_controllen) {
		err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
		if (unlikely(err)) {
			kfree(ipc.opt);
			return err;
		}
		if (ipc.opt)
			free = 1;	/* we own this allocation */
	}
	if (!ipc.opt) {
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			/* copy so the options outlive the RCU section */
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	/* with source routing, route towards the first hop instead */
	if (ipc.opt && ipc.opt->opt.srr) {
		if (!daddr) {
			err = -EINVAL;
			goto out_free;
		}
		faddr = ipc.opt->opt.faddr;
	}
	tos = get_rttos(&ipc, inet);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
		tos |= RTO_ONLINK;	/* restrict routing to the local link */
	}

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
	} else if (!ipc.oif)
		ipc.oif = inet->uc_index;

	flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
			   RT_SCOPE_UNIVERSE, sk->sk_protocol,
			   inet_sk_flowi_flags(sk), faddr, saddr, 0, 0);

	security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
	rt = ip_route_output_flow(net, &fl4, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		if (err == -ENETUNREACH)
			IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
		goto out;
	}

	/* broadcast destinations require SO_BROADCAST */
	err = -EACCES;
	if ((rt->rt_flags & RTCF_BROADCAST) &&
	    !sock_flag(sk, SOCK_BROADCAST))
		goto out;

	if (msg->msg_flags & MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	if (!ipc.addr)
		ipc.addr = fl4.daddr;

	lock_sock(sk);

	/* rebuild the ICMP header; the id is forced to our bound ident */
	pfh.icmph.type = user_icmph.type;
	pfh.icmph.code = user_icmph.code;
	pfh.icmph.checksum = 0;
	pfh.icmph.un.echo.id = inet->inet_sport;
	pfh.icmph.un.echo.sequence = user_icmph.un.echo.sequence;
	pfh.iov = msg->msg_iov;
	pfh.wcheck = 0;

	err = ip_append_data(sk, &fl4, ping_getfrag, &pfh, len,
			     0, &ipc, &rt, msg->msg_flags);
	if (err)
		ip_flush_pending_frames(sk);
	else
		err = ping_push_pending_frames(sk, &pfh, &fl4);
	release_sock(sk);

out:
	ip_rt_put(rt);
out_free:
	if (free)
		kfree(ipc.opt);
	if (!err) {
		icmp_out_count(sock_net(sk), user_icmph.type);
		return len;
	}
	return err;

do_confirm:
	/* MSG_CONFIRM: confirm reachability; send nothing unless data given */
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(&rt->dst, &fl4.daddr);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
648
/*
 * recvmsg() handler: dequeue one datagram from the receive queue.
 *
 * Fills in the source address when the caller supplied msg_name and
 * honours MSG_ERRQUEUE/MSG_TRUNC.  Returns the number of bytes copied
 * or a negative errno.
 */
static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int noblock, int flags, int *addr_len)
{
	struct inet_sock *isk = inet_sk(sk);
	struct sk_buff *skb;
	int copied, err;

	pr_debug("ping_recvmsg(sk=%p,sk->num=%u)\n", isk, isk->inet_num);

	err = -EOPNOTSUPP;
	if (flags & MSG_OOB)
		goto out;

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len, addr_len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	copied = skb->len;
	if (copied > len) {
		/* datagram doesn't fit the buffer: truncate and flag it */
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err)
		goto done;

	sock_recv_timestamp(msg, sk, skb);

	/* copy the peer address back to the caller */
	if (msg->msg_name) {
		struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;

		sin->sin_family = AF_INET;
		sin->sin_port = 0 /* ICMP has no ports */;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);
	}
	if (isk->cmsg_flags)
		ip_cmsg_recv(msg, skb);
	err = copied;

done:
	skb_free_datagram(sk, skb);
out:
	pr_debug("ping_recvmsg -> %d\n", err);
	return err;
}
702
703static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
704{
705 pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n",
706 inet_sk(sk), inet_sk(sk)->inet_num, skb);
707 if (sock_queue_rcv_skb(sk, skb) < 0) {
708 kfree_skb(skb);
709 pr_debug("ping_queue_rcv_skb -> failed\n");
710 return -1;
711 }
712 return 0;
713}
714
715
716
717
718
719
/*
 * Receive entry point from the ICMP layer: deliver the packet to the
 * ping socket whose ident matches the echo id.  NOTE(review): the
 * caller is assumed to have already validated the ICMP header (see
 * icmp_rcv) — confirm against the dispatching code.
 *
 * The original skb is not consumed here: we queue an extra reference
 * via skb_get(), and the caller remains responsible for freeing skb.
 */
void ping_rcv(struct sk_buff *skb)
{
	struct sock *sk;
	struct net *net = dev_net(skb->dev);
	struct iphdr *iph = ip_hdr(skb);
	struct icmphdr *icmph = icmp_hdr(skb);
	__be32 saddr = iph->saddr;
	__be32 daddr = iph->daddr;

	pr_debug("ping_rcv(skb=%p,id=%04x,seq=%04x)\n",
		 skb, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence));

	/* push data back so skb->data points at the ICMP header */
	skb_push(skb, skb->data - (u8 *)icmph);

	sk = ping_v4_lookup(net, saddr, daddr, ntohs(icmph->un.echo.id),
			    skb->dev->ifindex);
	if (sk != NULL) {
		pr_debug("rcv on socket %p\n", sk);
		ping_queue_rcv_skb(sk, skb_get(skb));
		sock_put(sk);	/* ref taken by ping_v4_lookup() */
		return;
	}
	pr_debug("no socket, dropping\n");

	/* the caller frees the skb */
}
749
/*
 * Protocol callbacks for ping sockets.  Connect/disconnect and socket
 * options are reused from the generic IPv4 datagram / UDP code; only
 * send/receive, bind and (un)hashing are ping-specific.
 */
struct proto ping_prot = {
	.name = "PING",
	.owner = THIS_MODULE,
	.init = ping_init_sock,
	.close = ping_close,
	.connect = ip4_datagram_connect,
	.disconnect = udp_disconnect,
	.setsockopt = ip_setsockopt,
	.getsockopt = ip_getsockopt,
	.sendmsg = ping_sendmsg,
	.recvmsg = ping_recvmsg,
	.bind = ping_bind,
	.backlog_rcv = ping_queue_rcv_skb,
	.release_cb = ip4_datagram_release_cb,
	.hash = ping_v4_hash,		/* never called; BUG()s */
	.unhash = ping_v4_unhash,
	.get_port = ping_v4_get_port,
	.obj_size = sizeof(struct inet_sock),
};
EXPORT_SYMBOL(ping_prot);
770
771#ifdef CONFIG_PROC_FS
772
/*
 * Find the first socket in or after hash bucket @start belonging to the
 * seq file's network namespace, or NULL.  Runs under ping_table.lock,
 * taken in ping_seq_start().
 */
static struct sock *ping_get_first(struct seq_file *seq, int start)
{
	struct sock *sk;
	struct ping_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	for (state->bucket = start; state->bucket < PING_HTABLE_SIZE;
	     ++state->bucket) {
		struct hlist_nulls_node *node;
		struct hlist_nulls_head *hslot;

		hslot = &ping_table.hash[state->bucket];

		if (hlist_nulls_empty(hslot))
			continue;

		sk_nulls_for_each(sk, node, hslot) {
			/* skip sockets from other namespaces */
			if (net_eq(sock_net(sk), net))
				goto found;
		}
	}
	sk = NULL;
found:
	return sk;
}
798
799static struct sock *ping_get_next(struct seq_file *seq, struct sock *sk)
800{
801 struct ping_iter_state *state = seq->private;
802 struct net *net = seq_file_net(seq);
803
804 do {
805 sk = sk_nulls_next(sk);
806 } while (sk && (!net_eq(sock_net(sk), net)));
807
808 if (!sk)
809 return ping_get_first(seq, state->bucket + 1);
810 return sk;
811}
812
813static struct sock *ping_get_idx(struct seq_file *seq, loff_t pos)
814{
815 struct sock *sk = ping_get_first(seq, 0);
816
817 if (sk)
818 while (pos && (sk = ping_get_next(seq, sk)) != NULL)
819 --pos;
820 return pos ? NULL : sk;
821}
822
/*
 * seq_file start: take the table lock for the whole iteration (released
 * in ping_seq_stop()) and position at *pos.  Position 0 is the
 * SEQ_START_TOKEN header line, hence *pos - 1 when resuming.
 */
static void *ping_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ping_iter_state *state = seq->private;
	state->bucket = 0;

	read_lock_bh(&ping_table.lock);

	return *pos ? ping_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
}
832
833static void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos)
834{
835 struct sock *sk;
836
837 if (v == SEQ_START_TOKEN)
838 sk = ping_get_idx(seq, 0);
839 else
840 sk = ping_get_next(seq, v);
841
842 ++*pos;
843 return sk;
844}
845
/* seq_file stop: release the lock taken in ping_seq_start(). */
static void ping_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock_bh(&ping_table.lock);
}
850
/*
 * Print one socket as a /proc/net/icmp table row; *len receives the
 * number of characters written (via the trailing %n) so the caller can
 * pad the line to a fixed width.
 */
static void ping_format_sock(struct sock *sp, struct seq_file *f,
		int bucket, int *len)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);

	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
		   " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d%n",
		   bucket, src, srcp, dest, destp, sp->sk_state,
		   sk_wmem_alloc_get(sp),
		   sk_rmem_alloc_get(sp),
		   0, 0L, 0,	/* tr, tm->when, retrnsmt: unused for ping */
		   from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
		   0, sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   atomic_read(&sp->sk_drops), len);
}
871
872static int ping_seq_show(struct seq_file *seq, void *v)
873{
874 if (v == SEQ_START_TOKEN)
875 seq_printf(seq, "%-127s\n",
876 " sl local_address rem_address st tx_queue "
877 "rx_queue tr tm->when retrnsmt uid timeout "
878 "inode ref pointer drops");
879 else {
880 struct ping_iter_state *state = seq->private;
881 int len;
882
883 ping_format_sock(v, seq, state->bucket, &len);
884 seq_printf(seq, "%*s\n", 127 - len, "");
885 }
886 return 0;
887}
888
/* seq_file iteration callbacks for the /proc/net/icmp listing. */
static const struct seq_operations ping_seq_ops = {
	.show = ping_seq_show,
	.start = ping_seq_start,
	.next = ping_seq_next,
	.stop = ping_seq_stop,
};
895
/* Open handler: allocate per-reader iterator state (ping_iter_state). */
static int ping_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ping_seq_ops,
			    sizeof(struct ping_iter_state));
}
901
/* File operations for /proc/net/icmp. */
static const struct file_operations ping_seq_fops = {
	.open = ping_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
908
909static int ping_proc_register(struct net *net)
910{
911 struct proc_dir_entry *p;
912 int rc = 0;
913
914 p = proc_create("icmp", S_IRUGO, net->proc_net, &ping_seq_fops);
915 if (!p)
916 rc = -ENOMEM;
917 return rc;
918}
919
/* Remove the per-namespace /proc/net/icmp entry. */
static void ping_proc_unregister(struct net *net)
{
	remove_proc_entry("icmp", net->proc_net);
}
924
925
/* pernet init: register the proc entry for a new namespace. */
static int __net_init ping_proc_init_net(struct net *net)
{
	return ping_proc_register(net);
}
930
/* pernet exit: remove the proc entry when a namespace goes away. */
static void __net_exit ping_proc_exit_net(struct net *net)
{
	ping_proc_unregister(net);
}
935
/* Per-network-namespace setup/teardown for the proc interface. */
static struct pernet_operations ping_net_ops = {
	.init = ping_proc_init_net,
	.exit = ping_proc_exit_net,
};
940
/* Register the pernet proc hooks at boot. */
int __init ping_proc_init(void)
{
	return register_pernet_subsys(&ping_net_ops);
}
945
/* Unregister the pernet proc hooks (mirror of ping_proc_init()). */
void ping_proc_exit(void)
{
	unregister_pernet_subsys(&ping_net_ops);
}
950
951#endif
952
/*
 * One-time boot initialisation: empty every hash bucket (using the
 * bucket index as its nulls marker) and initialise the table lock.
 */
void __init ping_init(void)
{
	int i;

	for (i = 0; i < PING_HTABLE_SIZE; i++)
		INIT_HLIST_NULLS_HEAD(&ping_table.hash[i], i);
	rwlock_init(&ping_table.lock);
}
961