linux/net/core/sock.c
<<
>>
Prefs
   1/*
   2 * INET         An implementation of the TCP/IP protocol suite for the LINUX
   3 *              operating system.  INET is implemented using the  BSD Socket
   4 *              interface as the means of communication with the user level.
   5 *
   6 *              Generic socket support routines. Memory allocators, socket lock/release
   7 *              handler for protocols to use and generic option handler.
   8 *
   9 *
  10 * Authors:     Ross Biro
  11 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12 *              Florian La Roche, <flla@stud.uni-sb.de>
  13 *              Alan Cox, <A.Cox@swansea.ac.uk>
  14 *
  15 * Fixes:
  16 *              Alan Cox        :       Numerous verify_area() problems
  17 *              Alan Cox        :       Connecting on a connecting socket
  18 *                                      now returns an error for tcp.
  19 *              Alan Cox        :       sock->protocol is set correctly.
  20 *                                      and is not sometimes left as 0.
  21 *              Alan Cox        :       connect handles icmp errors on a
  22 *                                      connect properly. Unfortunately there
  23 *                                      is a restart syscall nasty there. I
  24 *                                      can't match BSD without hacking the C
  25 *                                      library. Ideas urgently sought!
  26 *              Alan Cox        :       Disallow bind() to addresses that are
  27 *                                      not ours - especially broadcast ones!!
  28 *              Alan Cox        :       Socket 1024 _IS_ ok for users. (fencepost)
  29 *              Alan Cox        :       sock_wfree/sock_rfree don't destroy sockets,
  30 *                                      instead they leave that for the DESTROY timer.
  31 *              Alan Cox        :       Clean up error flag in accept
  32 *              Alan Cox        :       TCP ack handling is buggy, the DESTROY timer
  33 *                                      was buggy. Put a remove_sock() in the handler
  34 *                                      for memory when we hit 0. Also altered the timer
  35 *                                      code. The ACK stuff can wait and needs major
  36 *                                      TCP layer surgery.
  37 *              Alan Cox        :       Fixed TCP ack bug, removed remove sock
  38 *                                      and fixed timer/inet_bh race.
  39 *              Alan Cox        :       Added zapped flag for TCP
  40 *              Alan Cox        :       Move kfree_skb into skbuff.c and tidied up surplus code
  41 *              Alan Cox        :       for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
  42 *              Alan Cox        :       kfree_s calls now are kfree_skbmem so we can track skb resources
  43 *              Alan Cox        :       Supports socket option broadcast now as does udp. Packet and raw need fixing.
  44 *              Alan Cox        :       Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
  45 *              Rick Sladkey    :       Relaxed UDP rules for matching packets.
  46 *              C.E.Hawkins     :       IFF_PROMISC/SIOCGHWADDR support
  47 *      Pauline Middelink       :       identd support
  48 *              Alan Cox        :       Fixed connect() taking signals I think.
  49 *              Alan Cox        :       SO_LINGER supported
  50 *              Alan Cox        :       Error reporting fixes
  51 *              Anonymous       :       inet_create tidied up (sk->reuse setting)
  52 *              Alan Cox        :       inet sockets don't set sk->type!
  53 *              Alan Cox        :       Split socket option code
  54 *              Alan Cox        :       Callbacks
  55 *              Alan Cox        :       Nagle flag for Charles & Johannes stuff
  56 *              Alex            :       Removed restriction on inet fioctl
  57 *              Alan Cox        :       Splitting INET from NET core
  58 *              Alan Cox        :       Fixed bogus SO_TYPE handling in getsockopt()
  59 *              Adam Caldwell   :       Missing return in SO_DONTROUTE/SO_DEBUG code
  60 *              Alan Cox        :       Split IP from generic code
  61 *              Alan Cox        :       New kfree_skbmem()
  62 *              Alan Cox        :       Make SO_DEBUG superuser only.
  63 *              Alan Cox        :       Allow anyone to clear SO_DEBUG
  64 *                                      (compatibility fix)
  65 *              Alan Cox        :       Added optimistic memory grabbing for AF_UNIX throughput.
  66 *              Alan Cox        :       Allocator for a socket is settable.
  67 *              Alan Cox        :       SO_ERROR includes soft errors.
  68 *              Alan Cox        :       Allow NULL arguments on some SO_ opts
  69 *              Alan Cox        :       Generic socket allocation to make hooks
  70 *                                      easier (suggested by Craig Metz).
  71 *              Michael Pall    :       SO_ERROR returns positive errno again
  72 *              Steve Whitehouse:       Added default destructor to free
  73 *                                      protocol private data.
  74 *              Steve Whitehouse:       Added various other default routines
  75 *                                      common to several socket families.
  76 *              Chris Evans     :       Call suser() check last on F_SETOWN
  77 *              Jay Schulist    :       Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
  78 *              Andi Kleen      :       Add sock_kmalloc()/sock_kfree_s()
  79 *              Andi Kleen      :       Fix write_space callback
  80 *              Chris Evans     :       Security fixes - signedness again
  81 *              Arnaldo C. Melo :       cleanups, use skb_queue_purge
  82 *
  83 * To Fix:
  84 *
  85 *
  86 *              This program is free software; you can redistribute it and/or
  87 *              modify it under the terms of the GNU General Public License
  88 *              as published by the Free Software Foundation; either version
  89 *              2 of the License, or (at your option) any later version.
  90 */
  91
  92#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  93
  94#include <linux/capability.h>
  95#include <linux/errno.h>
  96#include <linux/errqueue.h>
  97#include <linux/types.h>
  98#include <linux/socket.h>
  99#include <linux/in.h>
 100#include <linux/kernel.h>
 101#include <linux/module.h>
 102#include <linux/proc_fs.h>
 103#include <linux/seq_file.h>
 104#include <linux/sched.h>
 105#include <linux/timer.h>
 106#include <linux/string.h>
 107#include <linux/sockios.h>
 108#include <linux/net.h>
 109#include <linux/mm.h>
 110#include <linux/slab.h>
 111#include <linux/interrupt.h>
 112#include <linux/poll.h>
 113#include <linux/tcp.h>
 114#include <linux/init.h>
 115#include <linux/highmem.h>
 116#include <linux/user_namespace.h>
 117#include <linux/static_key.h>
 118#include <linux/memcontrol.h>
 119#include <linux/prefetch.h>
 120
 121#include <asm/uaccess.h>
 122
 123#include <linux/netdevice.h>
 124#include <net/protocol.h>
 125#include <linux/skbuff.h>
 126#include <net/net_namespace.h>
 127#include <net/request_sock.h>
 128#include <net/sock.h>
 129#include <linux/net_tstamp.h>
 130#include <net/xfrm.h>
 131#include <linux/ipsec.h>
 132#include <net/cls_cgroup.h>
 133#include <net/netprio_cgroup.h>
 134#include <linux/sock_diag.h>
 135
 136#include <linux/filter.h>
 137
 138#include <trace/events/sock.h>
 139
 140#ifdef CONFIG_INET
 141#include <net/tcp.h>
 142#endif
 143
 144#include <net/busy_poll.h>
 145
 146static DEFINE_MUTEX(proto_list_mutex);
 147static LIST_HEAD(proto_list);
 148
 149/**
 150 * sk_ns_capable - General socket capability test
 151 * @sk: Socket to use a capability on or through
 152 * @user_ns: The user namespace of the capability to use
 153 * @cap: The capability to use
 154 *
 155 * Test to see if the opener of the socket had when the socket was
 156 * created and the current process has the capability @cap in the user
 157 * namespace @user_ns.
 158 */
 159bool sk_ns_capable(const struct sock *sk,
 160                   struct user_namespace *user_ns, int cap)
 161{
 162        return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
 163                ns_capable(user_ns, cap);
 164}
 165EXPORT_SYMBOL(sk_ns_capable);
 166
 167/**
 168 * sk_capable - Socket global capability test
 169 * @sk: Socket to use a capability on or through
 170 * @cap: The global capability to use
 171 *
 172 * Test to see if the opener of the socket had when the socket was
 173 * created and the current process has the capability @cap in all user
 174 * namespaces.
 175 */
 176bool sk_capable(const struct sock *sk, int cap)
 177{
 178        return sk_ns_capable(sk, &init_user_ns, cap);
 179}
 180EXPORT_SYMBOL(sk_capable);
 181
 182/**
 183 * sk_net_capable - Network namespace socket capability test
 184 * @sk: Socket to use a capability on or through
 185 * @cap: The capability to use
 186 *
 187 * Test to see if the opener of the socket had when the socket was created
 188 * and the current process has the capability @cap over the network namespace
 189 * the socket is a member of.
 190 */
 191bool sk_net_capable(const struct sock *sk, int cap)
 192{
 193        return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
 194}
 195EXPORT_SYMBOL(sk_net_capable);
 196
 197
 198#ifdef CONFIG_MEMCG_KMEM
 199int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 200{
 201        struct proto *proto;
 202        int ret = 0;
 203
 204        mutex_lock(&proto_list_mutex);
 205        list_for_each_entry(proto, &proto_list, node) {
 206                if (proto->init_cgroup) {
 207                        ret = proto->init_cgroup(memcg, ss);
 208                        if (ret)
 209                                goto out;
 210                }
 211        }
 212
 213        mutex_unlock(&proto_list_mutex);
 214        return ret;
 215out:
 216        list_for_each_entry_continue_reverse(proto, &proto_list, node)
 217                if (proto->destroy_cgroup)
 218                        proto->destroy_cgroup(memcg);
 219        mutex_unlock(&proto_list_mutex);
 220        return ret;
 221}
 222
 223void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
 224{
 225        struct proto *proto;
 226
 227        mutex_lock(&proto_list_mutex);
 228        list_for_each_entry_reverse(proto, &proto_list, node)
 229                if (proto->destroy_cgroup)
 230                        proto->destroy_cgroup(memcg);
 231        mutex_unlock(&proto_list_mutex);
 232}
 233#endif
 234
 235/*
 236 * Each address family might have different locking rules, so we have
 237 * one slock key per address family:
 238 */
 239static struct lock_class_key af_family_keys[AF_MAX];
 240static struct lock_class_key af_family_slock_keys[AF_MAX];
 241
 242#if defined(CONFIG_MEMCG_KMEM)
 243struct static_key memcg_socket_limit_enabled;
 244EXPORT_SYMBOL(memcg_socket_limit_enabled);
 245#endif
 246
 247/*
 248 * Make lock validator output more readable. (we pre-construct these
 249 * strings build-time, so that runtime initialization of socket
 250 * locks is fast):
 251 */
 252static const char *const af_family_key_strings[AF_MAX+1] = {
 253  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
 254  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
 255  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
 256  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
 257  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
 258  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
 259  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
 260  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
 261  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
 262  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
 263  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
 264  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
 265  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
 266  "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_MAX"
 267};
 268static const char *const af_family_slock_key_strings[AF_MAX+1] = {
 269  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
 270  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
 271  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
 272  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
 273  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
 274  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
 275  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
 276  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
 277  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
 278  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
 279  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
 280  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
 281  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
 282  "slock-AF_NFC"   , "slock-AF_VSOCK"    ,"slock-AF_MAX"
 283};
 284static const char *const af_family_clock_key_strings[AF_MAX+1] = {
 285  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
 286  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
 287  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
 288  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
 289  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
 290  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
 291  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
 292  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
 293  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
 294  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
 295  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
 296  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
 297  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
 298  "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_MAX"
 299};
 300
 301/*
 302 * sk_callback_lock locking rules are per-address-family,
 303 * so split the lock classes by using a per-AF key:
 304 */
 305static struct lock_class_key af_callback_keys[AF_MAX];
 306
 307/* Take into consideration the size of the struct sk_buff overhead in the
 308 * determination of these values, since that is non-constant across
 309 * platforms.  This makes socket queueing behavior and performance
 310 * not depend upon such differences.
 311 */
 312#define _SK_MEM_PACKETS         256
 313#define _SK_MEM_OVERHEAD        SKB_TRUESIZE(256)
 314#define SK_WMEM_MAX             (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
 315#define SK_RMEM_MAX             (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
 316
 317/* Run time adjustable parameters. */
 318__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
 319EXPORT_SYMBOL(sysctl_wmem_max);
 320__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
 321EXPORT_SYMBOL(sysctl_rmem_max);
 322__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
 323__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
 324
 325/* Maximal space eaten by iovec or ancillary data plus some space */
 326int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
 327EXPORT_SYMBOL(sysctl_optmem_max);
 328
 329int sysctl_tstamp_allow_data __read_mostly = 1;
 330
 331struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
 332EXPORT_SYMBOL_GPL(memalloc_socks);
 333
 334/**
 335 * sk_set_memalloc - sets %SOCK_MEMALLOC
 336 * @sk: socket to set it on
 337 *
 338 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 339 * It's the responsibility of the admin to adjust min_free_kbytes
 340 * to meet the requirements
 341 */
 342void sk_set_memalloc(struct sock *sk)
 343{
 344        sock_set_flag(sk, SOCK_MEMALLOC);
 345        sk->sk_allocation |= __GFP_MEMALLOC;
 346        static_key_slow_inc(&memalloc_socks);
 347}
 348EXPORT_SYMBOL_GPL(sk_set_memalloc);
 349
 350void sk_clear_memalloc(struct sock *sk)
 351{
 352        sock_reset_flag(sk, SOCK_MEMALLOC);
 353        sk->sk_allocation &= ~__GFP_MEMALLOC;
 354        static_key_slow_dec(&memalloc_socks);
 355
 356        /*
 357         * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
 358         * progress of swapping. SOCK_MEMALLOC may be cleared while
 359         * it has rmem allocations due to the last swapfile being deactivated
 360         * but there is a risk that the socket is unusable due to exceeding
 361         * the rmem limits. Reclaim the reserves and obey rmem limits again.
 362         */
 363        sk_mem_reclaim(sk);
 364}
 365EXPORT_SYMBOL_GPL(sk_clear_memalloc);
 366
 367int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 368{
 369        int ret;
 370        unsigned long pflags = current->flags;
 371
 372        /* these should have been dropped before queueing */
 373        BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
 374
 375        current->flags |= PF_MEMALLOC;
 376        ret = sk->sk_backlog_rcv(sk, skb);
 377        tsk_restore_flags(current, pflags, PF_MEMALLOC);
 378
 379        return ret;
 380}
 381EXPORT_SYMBOL(__sk_backlog_rcv);
 382
 383static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
 384{
 385        struct timeval tv;
 386
 387        if (optlen < sizeof(tv))
 388                return -EINVAL;
 389        if (copy_from_user(&tv, optval, sizeof(tv)))
 390                return -EFAULT;
 391        if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
 392                return -EDOM;
 393
 394        if (tv.tv_sec < 0) {
 395                static int warned __read_mostly;
 396
 397                *timeo_p = 0;
 398                if (warned < 10 && net_ratelimit()) {
 399                        warned++;
 400                        pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
 401                                __func__, current->comm, task_pid_nr(current));
 402                }
 403                return 0;
 404        }
 405        *timeo_p = MAX_SCHEDULE_TIMEOUT;
 406        if (tv.tv_sec == 0 && tv.tv_usec == 0)
 407                return 0;
 408        if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
 409                *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
 410        return 0;
 411}
 412
 413static void sock_warn_obsolete_bsdism(const char *name)
 414{
 415        static int warned;
 416        static char warncomm[TASK_COMM_LEN];
 417        if (strcmp(warncomm, current->comm) && warned < 5) {
 418                strcpy(warncomm,  current->comm);
 419                pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
 420                        warncomm, name);
 421                warned++;
 422        }
 423}
 424
 425static bool sock_needs_netstamp(const struct sock *sk)
 426{
 427        switch (sk->sk_family) {
 428        case AF_UNSPEC:
 429        case AF_UNIX:
 430                return false;
 431        default:
 432                return true;
 433        }
 434}
 435
 436static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
 437{
 438        if (sk->sk_flags & flags) {
 439                sk->sk_flags &= ~flags;
 440                if (sock_needs_netstamp(sk) &&
 441                    !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
 442                        net_disable_timestamp();
 443        }
 444}
 445
 446
 447int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 448{
 449        int err;
 450        unsigned long flags;
 451        struct sk_buff_head *list = &sk->sk_receive_queue;
 452
 453        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
 454                atomic_inc(&sk->sk_drops);
 455                trace_sock_rcvqueue_full(sk, skb);
 456                return -ENOMEM;
 457        }
 458
 459        err = sk_filter(sk, skb);
 460        if (err)
 461                return err;
 462
 463        if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
 464                atomic_inc(&sk->sk_drops);
 465                return -ENOBUFS;
 466        }
 467
 468        skb->dev = NULL;
 469        skb_set_owner_r(skb, sk);
 470
 471        /* we escape from rcu protected region, make sure we dont leak
 472         * a norefcounted dst
 473         */
 474        skb_dst_force(skb);
 475
 476        spin_lock_irqsave(&list->lock, flags);
 477        sock_skb_set_dropcount(sk, skb);
 478        __skb_queue_tail(list, skb);
 479        spin_unlock_irqrestore(&list->lock, flags);
 480
 481        if (!sock_flag(sk, SOCK_DEAD))
 482                sk->sk_data_ready(sk);
 483        return 0;
 484}
 485EXPORT_SYMBOL(sock_queue_rcv_skb);
 486
 487int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
 488{
 489        int rc = NET_RX_SUCCESS;
 490
 491        if (sk_filter(sk, skb))
 492                goto discard_and_relse;
 493
 494        skb->dev = NULL;
 495
 496        if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
 497                atomic_inc(&sk->sk_drops);
 498                goto discard_and_relse;
 499        }
 500        if (nested)
 501                bh_lock_sock_nested(sk);
 502        else
 503                bh_lock_sock(sk);
 504        if (!sock_owned_by_user(sk)) {
 505                /*
 506                 * trylock + unlock semantics:
 507                 */
 508                mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
 509
 510                rc = sk_backlog_rcv(sk, skb);
 511
 512                mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
 513        } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
 514                bh_unlock_sock(sk);
 515                atomic_inc(&sk->sk_drops);
 516                goto discard_and_relse;
 517        }
 518
 519        bh_unlock_sock(sk);
 520out:
 521        sock_put(sk);
 522        return rc;
 523discard_and_relse:
 524        kfree_skb(skb);
 525        goto out;
 526}
 527EXPORT_SYMBOL(sk_receive_skb);
 528
 529struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
 530{
 531        struct dst_entry *dst = __sk_dst_get(sk);
 532
 533        if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
 534                sk_tx_queue_clear(sk);
 535                RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
 536                dst_release(dst);
 537                return NULL;
 538        }
 539
 540        return dst;
 541}
 542EXPORT_SYMBOL(__sk_dst_check);
 543
 544struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
 545{
 546        struct dst_entry *dst = sk_dst_get(sk);
 547
 548        if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
 549                sk_dst_reset(sk);
 550                dst_release(dst);
 551                return NULL;
 552        }
 553
 554        return dst;
 555}
 556EXPORT_SYMBOL(sk_dst_check);
 557
 558static int sock_setbindtodevice(struct sock *sk, char __user *optval,
 559                                int optlen)
 560{
 561        int ret = -ENOPROTOOPT;
 562#ifdef CONFIG_NETDEVICES
 563        struct net *net = sock_net(sk);
 564        char devname[IFNAMSIZ];
 565        int index;
 566
 567        /* Sorry... */
 568        ret = -EPERM;
 569        if (!ns_capable(net->user_ns, CAP_NET_RAW))
 570                goto out;
 571
 572        ret = -EINVAL;
 573        if (optlen < 0)
 574                goto out;
 575
 576        /* Bind this socket to a particular device like "eth0",
 577         * as specified in the passed interface name. If the
 578         * name is "" or the option length is zero the socket
 579         * is not bound.
 580         */
 581        if (optlen > IFNAMSIZ - 1)
 582                optlen = IFNAMSIZ - 1;
 583        memset(devname, 0, sizeof(devname));
 584
 585        ret = -EFAULT;
 586        if (copy_from_user(devname, optval, optlen))
 587                goto out;
 588
 589        index = 0;
 590        if (devname[0] != '\0') {
 591                struct net_device *dev;
 592
 593                rcu_read_lock();
 594                dev = dev_get_by_name_rcu(net, devname);
 595                if (dev)
 596                        index = dev->ifindex;
 597                rcu_read_unlock();
 598                ret = -ENODEV;
 599                if (!dev)
 600                        goto out;
 601        }
 602
 603        lock_sock(sk);
 604        sk->sk_bound_dev_if = index;
 605        sk_dst_reset(sk);
 606        release_sock(sk);
 607
 608        ret = 0;
 609
 610out:
 611#endif
 612
 613        return ret;
 614}
 615
 616static int sock_getbindtodevice(struct sock *sk, char __user *optval,
 617                                int __user *optlen, int len)
 618{
 619        int ret = -ENOPROTOOPT;
 620#ifdef CONFIG_NETDEVICES
 621        struct net *net = sock_net(sk);
 622        char devname[IFNAMSIZ];
 623
 624        if (sk->sk_bound_dev_if == 0) {
 625                len = 0;
 626                goto zero;
 627        }
 628
 629        ret = -EINVAL;
 630        if (len < IFNAMSIZ)
 631                goto out;
 632
 633        ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
 634        if (ret)
 635                goto out;
 636
 637        len = strlen(devname) + 1;
 638
 639        ret = -EFAULT;
 640        if (copy_to_user(optval, devname, len))
 641                goto out;
 642
 643zero:
 644        ret = -EFAULT;
 645        if (put_user(len, optlen))
 646                goto out;
 647
 648        ret = 0;
 649
 650out:
 651#endif
 652
 653        return ret;
 654}
 655
 656static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
 657{
 658        if (valbool)
 659                sock_set_flag(sk, bit);
 660        else
 661                sock_reset_flag(sk, bit);
 662}
 663
 664bool sk_mc_loop(struct sock *sk)
 665{
 666        if (dev_recursion_level())
 667                return false;
 668        if (!sk)
 669                return true;
 670        switch (sk->sk_family) {
 671        case AF_INET:
 672                return inet_sk(sk)->mc_loop;
 673#if IS_ENABLED(CONFIG_IPV6)
 674        case AF_INET6:
 675                return inet6_sk(sk)->mc_loop;
 676#endif
 677        }
 678        WARN_ON(1);
 679        return true;
 680}
 681EXPORT_SYMBOL(sk_mc_loop);
 682
 683/*
 684 *      This is meant for all protocols to use and covers goings on
 685 *      at the socket level. Everything here is generic.
 686 */
 687
 688int sock_setsockopt(struct socket *sock, int level, int optname,
 689                    char __user *optval, unsigned int optlen)
 690{
 691        struct sock *sk = sock->sk;
 692        int val;
 693        int valbool;
 694        struct linger ling;
 695        int ret = 0;
 696
 697        /*
 698         *      Options without arguments
 699         */
 700
 701        if (optname == SO_BINDTODEVICE)
 702                return sock_setbindtodevice(sk, optval, optlen);
 703
 704        if (optlen < sizeof(int))
 705                return -EINVAL;
 706
 707        if (get_user(val, (int __user *)optval))
 708                return -EFAULT;
 709
 710        valbool = val ? 1 : 0;
 711
 712        lock_sock(sk);
 713
 714        switch (optname) {
 715        case SO_DEBUG:
 716                if (val && !capable(CAP_NET_ADMIN))
 717                        ret = -EACCES;
 718                else
 719                        sock_valbool_flag(sk, SOCK_DBG, valbool);
 720                break;
 721        case SO_REUSEADDR:
 722                sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
 723                break;
 724        case SO_REUSEPORT:
 725                sk->sk_reuseport = valbool;
 726                break;
 727        case SO_TYPE:
 728        case SO_PROTOCOL:
 729        case SO_DOMAIN:
 730        case SO_ERROR:
 731                ret = -ENOPROTOOPT;
 732                break;
 733        case SO_DONTROUTE:
 734                sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
 735                break;
 736        case SO_BROADCAST:
 737                sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
 738                break;
 739        case SO_SNDBUF:
 740                /* Don't error on this BSD doesn't and if you think
 741                 * about it this is right. Otherwise apps have to
 742                 * play 'guess the biggest size' games. RCVBUF/SNDBUF
 743                 * are treated in BSD as hints
 744                 */
 745                val = min_t(u32, val, sysctl_wmem_max);
 746set_sndbuf:
 747                sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
 748                sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
 749                /* Wake up sending tasks if we upped the value. */
 750                sk->sk_write_space(sk);
 751                break;
 752
 753        case SO_SNDBUFFORCE:
 754                if (!capable(CAP_NET_ADMIN)) {
 755                        ret = -EPERM;
 756                        break;
 757                }
 758                goto set_sndbuf;
 759
 760        case SO_RCVBUF:
 761                /* Don't error on this BSD doesn't and if you think
 762                 * about it this is right. Otherwise apps have to
 763                 * play 'guess the biggest size' games. RCVBUF/SNDBUF
 764                 * are treated in BSD as hints
 765                 */
 766                val = min_t(u32, val, sysctl_rmem_max);
 767set_rcvbuf:
 768                sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
 769                /*
 770                 * We double it on the way in to account for
 771                 * "struct sk_buff" etc. overhead.   Applications
 772                 * assume that the SO_RCVBUF setting they make will
 773                 * allow that much actual data to be received on that
 774                 * socket.
 775                 *
 776                 * Applications are unaware that "struct sk_buff" and
 777                 * other overheads allocate from the receive buffer
 778                 * during socket buffer allocation.
 779                 *
 780                 * And after considering the possible alternatives,
 781                 * returning the value we actually used in getsockopt
 782                 * is the most desirable behavior.
 783                 */
 784                sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
 785                break;
 786
 787        case SO_RCVBUFFORCE:
 788                if (!capable(CAP_NET_ADMIN)) {
 789                        ret = -EPERM;
 790                        break;
 791                }
 792                goto set_rcvbuf;
 793
 794        case SO_KEEPALIVE:
 795#ifdef CONFIG_INET
 796                if (sk->sk_protocol == IPPROTO_TCP &&
 797                    sk->sk_type == SOCK_STREAM)
 798                        tcp_set_keepalive(sk, valbool);
 799#endif
 800                sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
 801                break;
 802
 803        case SO_OOBINLINE:
 804                sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
 805                break;
 806
 807        case SO_NO_CHECK:
 808                sk->sk_no_check_tx = valbool;
 809                break;
 810
 811        case SO_PRIORITY:
 812                if ((val >= 0 && val <= 6) ||
 813                    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 814                        sk->sk_priority = val;
 815                else
 816                        ret = -EPERM;
 817                break;
 818
 819        case SO_LINGER:
 820                if (optlen < sizeof(ling)) {
 821                        ret = -EINVAL;  /* 1003.1g */
 822                        break;
 823                }
 824                if (copy_from_user(&ling, optval, sizeof(ling))) {
 825                        ret = -EFAULT;
 826                        break;
 827                }
 828                if (!ling.l_onoff)
 829                        sock_reset_flag(sk, SOCK_LINGER);
 830                else {
 831#if (BITS_PER_LONG == 32)
 832                        if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
 833                                sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
 834                        else
 835#endif
 836                                sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
 837                        sock_set_flag(sk, SOCK_LINGER);
 838                }
 839                break;
 840
 841        case SO_BSDCOMPAT:
 842                sock_warn_obsolete_bsdism("setsockopt");
 843                break;
 844
 845        case SO_PASSCRED:
 846                if (valbool)
 847                        set_bit(SOCK_PASSCRED, &sock->flags);
 848                else
 849                        clear_bit(SOCK_PASSCRED, &sock->flags);
 850                break;
 851
 852        case SO_TIMESTAMP:
 853        case SO_TIMESTAMPNS:
 854                if (valbool)  {
 855                        if (optname == SO_TIMESTAMP)
 856                                sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
 857                        else
 858                                sock_set_flag(sk, SOCK_RCVTSTAMPNS);
 859                        sock_set_flag(sk, SOCK_RCVTSTAMP);
 860                        sock_enable_timestamp(sk, SOCK_TIMESTAMP);
 861                } else {
 862                        sock_reset_flag(sk, SOCK_RCVTSTAMP);
 863                        sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
 864                }
 865                break;
 866
 867        case SO_TIMESTAMPING:
 868                if (val & ~SOF_TIMESTAMPING_MASK) {
 869                        ret = -EINVAL;
 870                        break;
 871                }
 872
 873                if (val & SOF_TIMESTAMPING_OPT_ID &&
 874                    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
 875                        if (sk->sk_protocol == IPPROTO_TCP &&
 876                            sk->sk_type == SOCK_STREAM) {
 877                                if (sk->sk_state != TCP_ESTABLISHED) {
 878                                        ret = -EINVAL;
 879                                        break;
 880                                }
 881                                sk->sk_tskey = tcp_sk(sk)->snd_una;
 882                        } else {
 883                                sk->sk_tskey = 0;
 884                        }
 885                }
 886                sk->sk_tsflags = val;
 887                if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
 888                        sock_enable_timestamp(sk,
 889                                              SOCK_TIMESTAMPING_RX_SOFTWARE);
 890                else
 891                        sock_disable_timestamp(sk,
 892                                               (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
 893                break;
 894
 895        case SO_RCVLOWAT:
 896                if (val < 0)
 897                        val = INT_MAX;
 898                sk->sk_rcvlowat = val ? : 1;
 899                break;
 900
 901        case SO_RCVTIMEO:
 902                ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
 903                break;
 904
 905        case SO_SNDTIMEO:
 906                ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
 907                break;
 908
 909        case SO_ATTACH_FILTER:
 910                ret = -EINVAL;
 911                if (optlen == sizeof(struct sock_fprog)) {
 912                        struct sock_fprog fprog;
 913
 914                        ret = -EFAULT;
 915                        if (copy_from_user(&fprog, optval, sizeof(fprog)))
 916                                break;
 917
 918                        ret = sk_attach_filter(&fprog, sk);
 919                }
 920                break;
 921
 922        case SO_ATTACH_BPF:
 923                ret = -EINVAL;
 924                if (optlen == sizeof(u32)) {
 925                        u32 ufd;
 926
 927                        ret = -EFAULT;
 928                        if (copy_from_user(&ufd, optval, sizeof(ufd)))
 929                                break;
 930
 931                        ret = sk_attach_bpf(ufd, sk);
 932                }
 933                break;
 934
 935        case SO_DETACH_FILTER:
 936                ret = sk_detach_filter(sk);
 937                break;
 938
 939        case SO_LOCK_FILTER:
 940                if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
 941                        ret = -EPERM;
 942                else
 943                        sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
 944                break;
 945
 946        case SO_PASSSEC:
 947                if (valbool)
 948                        set_bit(SOCK_PASSSEC, &sock->flags);
 949                else
 950                        clear_bit(SOCK_PASSSEC, &sock->flags);
 951                break;
 952        case SO_MARK:
 953                if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 954                        ret = -EPERM;
 955                else
 956                        sk->sk_mark = val;
 957                break;
 958
 959        case SO_RXQ_OVFL:
 960                sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
 961                break;
 962
 963        case SO_WIFI_STATUS:
 964                sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
 965                break;
 966
 967        case SO_PEEK_OFF:
 968                if (sock->ops->set_peek_off)
 969                        ret = sock->ops->set_peek_off(sk, val);
 970                else
 971                        ret = -EOPNOTSUPP;
 972                break;
 973
 974        case SO_NOFCS:
 975                sock_valbool_flag(sk, SOCK_NOFCS, valbool);
 976                break;
 977
 978        case SO_SELECT_ERR_QUEUE:
 979                sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
 980                break;
 981
 982#ifdef CONFIG_NET_RX_BUSY_POLL
 983        case SO_BUSY_POLL:
 984                /* allow unprivileged users to decrease the value */
 985                if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
 986                        ret = -EPERM;
 987                else {
 988                        if (val < 0)
 989                                ret = -EINVAL;
 990                        else
 991                                sk->sk_ll_usec = val;
 992                }
 993                break;
 994#endif
 995
 996        case SO_MAX_PACING_RATE:
 997                sk->sk_max_pacing_rate = val;
 998                sk->sk_pacing_rate = min(sk->sk_pacing_rate,
 999                                         sk->sk_max_pacing_rate);
1000                break;
1001
1002        case SO_INCOMING_CPU:
1003                sk->sk_incoming_cpu = val;
1004                break;
1005
1006        default:
1007                ret = -ENOPROTOOPT;
1008                break;
1009        }
1010        release_sock(sk);
1011        return ret;
1012}
1013EXPORT_SYMBOL(sock_setsockopt);
1014
1015
1016static void cred_to_ucred(struct pid *pid, const struct cred *cred,
1017                          struct ucred *ucred)
1018{
1019        ucred->pid = pid_vnr(pid);
1020        ucred->uid = ucred->gid = -1;
1021        if (cred) {
1022                struct user_namespace *current_ns = current_user_ns();
1023
1024                ucred->uid = from_kuid_munged(current_ns, cred->euid);
1025                ucred->gid = from_kgid_munged(current_ns, cred->egid);
1026        }
1027}
1028
1029int sock_getsockopt(struct socket *sock, int level, int optname,
1030                    char __user *optval, int __user *optlen)
1031{
1032        struct sock *sk = sock->sk;
1033
1034        union {
1035                int val;
1036                struct linger ling;
1037                struct timeval tm;
1038        } v;
1039
1040        int lv = sizeof(int);
1041        int len;
1042
1043        if (get_user(len, optlen))
1044                return -EFAULT;
1045        if (len < 0)
1046                return -EINVAL;
1047
1048        memset(&v, 0, sizeof(v));
1049
1050        switch (optname) {
1051        case SO_DEBUG:
1052                v.val = sock_flag(sk, SOCK_DBG);
1053                break;
1054
1055        case SO_DONTROUTE:
1056                v.val = sock_flag(sk, SOCK_LOCALROUTE);
1057                break;
1058
1059        case SO_BROADCAST:
1060                v.val = sock_flag(sk, SOCK_BROADCAST);
1061                break;
1062
1063        case SO_SNDBUF:
1064                v.val = sk->sk_sndbuf;
1065                break;
1066
1067        case SO_RCVBUF:
1068                v.val = sk->sk_rcvbuf;
1069                break;
1070
1071        case SO_REUSEADDR:
1072                v.val = sk->sk_reuse;
1073                break;
1074
1075        case SO_REUSEPORT:
1076                v.val = sk->sk_reuseport;
1077                break;
1078
1079        case SO_KEEPALIVE:
1080                v.val = sock_flag(sk, SOCK_KEEPOPEN);
1081                break;
1082
1083        case SO_TYPE:
1084                v.val = sk->sk_type;
1085                break;
1086
1087        case SO_PROTOCOL:
1088                v.val = sk->sk_protocol;
1089                break;
1090
1091        case SO_DOMAIN:
1092                v.val = sk->sk_family;
1093                break;
1094
1095        case SO_ERROR:
1096                v.val = -sock_error(sk);
1097                if (v.val == 0)
1098                        v.val = xchg(&sk->sk_err_soft, 0);
1099                break;
1100
1101        case SO_OOBINLINE:
1102                v.val = sock_flag(sk, SOCK_URGINLINE);
1103                break;
1104
1105        case SO_NO_CHECK:
1106                v.val = sk->sk_no_check_tx;
1107                break;
1108
1109        case SO_PRIORITY:
1110                v.val = sk->sk_priority;
1111                break;
1112
1113        case SO_LINGER:
1114                lv              = sizeof(v.ling);
1115                v.ling.l_onoff  = sock_flag(sk, SOCK_LINGER);
1116                v.ling.l_linger = sk->sk_lingertime / HZ;
1117                break;
1118
1119        case SO_BSDCOMPAT:
1120                sock_warn_obsolete_bsdism("getsockopt");
1121                break;
1122
1123        case SO_TIMESTAMP:
1124                v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
1125                                !sock_flag(sk, SOCK_RCVTSTAMPNS);
1126                break;
1127
1128        case SO_TIMESTAMPNS:
1129                v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
1130                break;
1131
1132        case SO_TIMESTAMPING:
1133                v.val = sk->sk_tsflags;
1134                break;
1135
1136        case SO_RCVTIMEO:
1137                lv = sizeof(struct timeval);
1138                if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
1139                        v.tm.tv_sec = 0;
1140                        v.tm.tv_usec = 0;
1141                } else {
1142                        v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
1143                        v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
1144                }
1145                break;
1146
1147        case SO_SNDTIMEO:
1148                lv = sizeof(struct timeval);
1149                if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
1150                        v.tm.tv_sec = 0;
1151                        v.tm.tv_usec = 0;
1152                } else {
1153                        v.tm.tv_sec = sk->sk_sndtimeo / HZ;
1154                        v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
1155                }
1156                break;
1157
1158        case SO_RCVLOWAT:
1159                v.val = sk->sk_rcvlowat;
1160                break;
1161
1162        case SO_SNDLOWAT:
1163                v.val = 1;
1164                break;
1165
1166        case SO_PASSCRED:
1167                v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
1168                break;
1169
1170        case SO_PEERCRED:
1171        {
1172                struct ucred peercred;
1173                if (len > sizeof(peercred))
1174                        len = sizeof(peercred);
1175                cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
1176                if (copy_to_user(optval, &peercred, len))
1177                        return -EFAULT;
1178                goto lenout;
1179        }
1180
1181        case SO_PEERNAME:
1182        {
1183                char address[128];
1184
1185                if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
1186                        return -ENOTCONN;
1187                if (lv < len)
1188                        return -EINVAL;
1189                if (copy_to_user(optval, address, len))
1190                        return -EFAULT;
1191                goto lenout;
1192        }
1193
1194        /* Dubious BSD thing... Probably nobody even uses it, but
1195         * the UNIX standard wants it for whatever reason... -DaveM
1196         */
1197        case SO_ACCEPTCONN:
1198                v.val = sk->sk_state == TCP_LISTEN;
1199                break;
1200
1201        case SO_PASSSEC:
1202                v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1203                break;
1204
1205        case SO_PEERSEC:
1206                return security_socket_getpeersec_stream(sock, optval, optlen, len);
1207
1208        case SO_MARK:
1209                v.val = sk->sk_mark;
1210                break;
1211
1212        case SO_RXQ_OVFL:
1213                v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1214                break;
1215
1216        case SO_WIFI_STATUS:
1217                v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1218                break;
1219
1220        case SO_PEEK_OFF:
1221                if (!sock->ops->set_peek_off)
1222                        return -EOPNOTSUPP;
1223
1224                v.val = sk->sk_peek_off;
1225                break;
1226        case SO_NOFCS:
1227                v.val = sock_flag(sk, SOCK_NOFCS);
1228                break;
1229
1230        case SO_BINDTODEVICE:
1231                return sock_getbindtodevice(sk, optval, optlen, len);
1232
1233        case SO_GET_FILTER:
1234                len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
1235                if (len < 0)
1236                        return len;
1237
1238                goto lenout;
1239
1240        case SO_LOCK_FILTER:
1241                v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1242                break;
1243
1244        case SO_BPF_EXTENSIONS:
1245                v.val = bpf_tell_extensions();
1246                break;
1247
1248        case SO_SELECT_ERR_QUEUE:
1249                v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1250                break;
1251
1252#ifdef CONFIG_NET_RX_BUSY_POLL
1253        case SO_BUSY_POLL:
1254                v.val = sk->sk_ll_usec;
1255                break;
1256#endif
1257
1258        case SO_MAX_PACING_RATE:
1259                v.val = sk->sk_max_pacing_rate;
1260                break;
1261
1262        case SO_INCOMING_CPU:
1263                v.val = sk->sk_incoming_cpu;
1264                break;
1265
1266        default:
1267                /* We implement the SO_SNDLOWAT etc to not be settable
1268                 * (1003.1g 7).
1269                 */
1270                return -ENOPROTOOPT;
1271        }
1272
1273        if (len > lv)
1274                len = lv;
1275        if (copy_to_user(optval, &v, len))
1276                return -EFAULT;
1277lenout:
1278        if (put_user(len, optlen))
1279                return -EFAULT;
1280        return 0;
1281}
1282
1283/*
1284 * Initialize an sk_lock.
1285 *
1286 * (We also register the sk_lock with the lock validator.)
1287 */
1288static inline void sock_lock_init(struct sock *sk)
1289{
1290        sock_lock_init_class_and_name(sk,
1291                        af_family_slock_key_strings[sk->sk_family],
1292                        af_family_slock_keys + sk->sk_family,
1293                        af_family_key_strings[sk->sk_family],
1294                        af_family_keys + sk->sk_family);
1295}
1296
1297/*
1298 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
1299 * even temporarly, because of RCU lookups. sk_node should also be left as is.
1300 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
1301 */
1302static void sock_copy(struct sock *nsk, const struct sock *osk)
1303{
1304#ifdef CONFIG_SECURITY_NETWORK
1305        void *sptr = nsk->sk_security;
1306#endif
1307        memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1308
1309        memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1310               osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1311
1312#ifdef CONFIG_SECURITY_NETWORK
1313        nsk->sk_security = sptr;
1314        security_sk_clone(osk, nsk);
1315#endif
1316}
1317
1318void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
1319{
1320        unsigned long nulls1, nulls2;
1321
1322        nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
1323        nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
1324        if (nulls1 > nulls2)
1325                swap(nulls1, nulls2);
1326
1327        if (nulls1 != 0)
1328                memset((char *)sk, 0, nulls1);
1329        memset((char *)sk + nulls1 + sizeof(void *), 0,
1330               nulls2 - nulls1 - sizeof(void *));
1331        memset((char *)sk + nulls2 + sizeof(void *), 0,
1332               size - nulls2 - sizeof(void *));
1333}
1334EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
1335
1336static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1337                int family)
1338{
1339        struct sock *sk;
1340        struct kmem_cache *slab;
1341
1342        slab = prot->slab;
1343        if (slab != NULL) {
1344                sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1345                if (!sk)
1346                        return sk;
1347                if (priority & __GFP_ZERO) {
1348                        if (prot->clear_sk)
1349                                prot->clear_sk(sk, prot->obj_size);
1350                        else
1351                                sk_prot_clear_nulls(sk, prot->obj_size);
1352                }
1353        } else
1354                sk = kmalloc(prot->obj_size, priority);
1355
1356        if (sk != NULL) {
1357                kmemcheck_annotate_bitfield(sk, flags);
1358
1359                if (security_sk_alloc(sk, family, priority))
1360                        goto out_free;
1361
1362                if (!try_module_get(prot->owner))
1363                        goto out_free_sec;
1364                sk_tx_queue_clear(sk);
1365        }
1366
1367        return sk;
1368
1369out_free_sec:
1370        security_sk_free(sk);
1371out_free:
1372        if (slab != NULL)
1373                kmem_cache_free(slab, sk);
1374        else
1375                kfree(sk);
1376        return NULL;
1377}
1378
1379static void sk_prot_free(struct proto *prot, struct sock *sk)
1380{
1381        struct kmem_cache *slab;
1382        struct module *owner;
1383
1384        owner = prot->owner;
1385        slab = prot->slab;
1386
1387        security_sk_free(sk);
1388        if (slab != NULL)
1389                kmem_cache_free(slab, sk);
1390        else
1391                kfree(sk);
1392        module_put(owner);
1393}
1394
1395#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
1396void sock_update_netprioidx(struct sock *sk)
1397{
1398        if (in_interrupt())
1399                return;
1400
1401        sk->sk_cgrp_prioidx = task_netprioidx(current);
1402}
1403EXPORT_SYMBOL_GPL(sock_update_netprioidx);
1404#endif
1405
1406/**
1407 *      sk_alloc - All socket objects are allocated here
1408 *      @net: the applicable net namespace
1409 *      @family: protocol family
1410 *      @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1411 *      @prot: struct proto associated with this new sock instance
1412 *      @kern: is this to be a kernel socket?
1413 */
1414struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1415                      struct proto *prot, int kern)
1416{
1417        struct sock *sk;
1418
1419        sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
1420        if (sk) {
1421                sk->sk_family = family;
1422                /*
1423                 * See comment in struct sock definition to understand
1424                 * why we need sk_prot_creator -acme
1425                 */
1426                sk->sk_prot = sk->sk_prot_creator = prot;
1427                sock_lock_init(sk);
1428                sk->sk_net_refcnt = kern ? 0 : 1;
1429                if (likely(sk->sk_net_refcnt))
1430                        get_net(net);
1431                sock_net_set(sk, net);
1432                atomic_set(&sk->sk_wmem_alloc, 1);
1433
1434                sock_update_classid(sk);
1435                sock_update_netprioidx(sk);
1436        }
1437
1438        return sk;
1439}
1440EXPORT_SYMBOL(sk_alloc);
1441
1442void sk_destruct(struct sock *sk)
1443{
1444        struct sk_filter *filter;
1445
1446        if (sk->sk_destruct)
1447                sk->sk_destruct(sk);
1448
1449        filter = rcu_dereference_check(sk->sk_filter,
1450                                       atomic_read(&sk->sk_wmem_alloc) == 0);
1451        if (filter) {
1452                sk_filter_uncharge(sk, filter);
1453                RCU_INIT_POINTER(sk->sk_filter, NULL);
1454        }
1455
1456        sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
1457
1458        if (atomic_read(&sk->sk_omem_alloc))
1459                pr_debug("%s: optmem leakage (%d bytes) detected\n",
1460                         __func__, atomic_read(&sk->sk_omem_alloc));
1461
1462        if (sk->sk_peer_cred)
1463                put_cred(sk->sk_peer_cred);
1464        put_pid(sk->sk_peer_pid);
1465        if (likely(sk->sk_net_refcnt))
1466                put_net(sock_net(sk));
1467        sk_prot_free(sk->sk_prot_creator, sk);
1468}
1469
1470static void __sk_free(struct sock *sk)
1471{
1472        if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
1473                sock_diag_broadcast_destroy(sk);
1474        else
1475                sk_destruct(sk);
1476}
1477
1478void sk_free(struct sock *sk)
1479{
1480        /*
1481         * We subtract one from sk_wmem_alloc and can know if
1482         * some packets are still in some tx queue.
1483         * If not null, sock_wfree() will call __sk_free(sk) later
1484         */
1485        if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1486                __sk_free(sk);
1487}
1488EXPORT_SYMBOL(sk_free);
1489
1490static void sk_update_clone(const struct sock *sk, struct sock *newsk)
1491{
1492        if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1493                sock_update_memcg(newsk);
1494}
1495
1496/**
1497 *      sk_clone_lock - clone a socket, and lock its clone
1498 *      @sk: the socket to clone
1499 *      @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1500 *
1501 *      Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1502 */
1503struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1504{
1505        struct sock *newsk;
1506        bool is_charged = true;
1507
1508        newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
1509        if (newsk != NULL) {
1510                struct sk_filter *filter;
1511
1512                sock_copy(newsk, sk);
1513
1514                /* SANITY */
1515                if (likely(newsk->sk_net_refcnt))
1516                        get_net(sock_net(newsk));
1517                sk_node_init(&newsk->sk_node);
1518                sock_lock_init(newsk);
1519                bh_lock_sock(newsk);
1520                newsk->sk_backlog.head  = newsk->sk_backlog.tail = NULL;
1521                newsk->sk_backlog.len = 0;
1522
1523                atomic_set(&newsk->sk_rmem_alloc, 0);
1524                /*
1525                 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1526                 */
1527                atomic_set(&newsk->sk_wmem_alloc, 1);
1528                atomic_set(&newsk->sk_omem_alloc, 0);
1529                skb_queue_head_init(&newsk->sk_receive_queue);
1530                skb_queue_head_init(&newsk->sk_write_queue);
1531
1532                rwlock_init(&newsk->sk_callback_lock);
1533                lockdep_set_class_and_name(&newsk->sk_callback_lock,
1534                                af_callback_keys + newsk->sk_family,
1535                                af_family_clock_key_strings[newsk->sk_family]);
1536
1537                newsk->sk_dst_cache     = NULL;
1538                newsk->sk_wmem_queued   = 0;
1539                newsk->sk_forward_alloc = 0;
1540                newsk->sk_send_head     = NULL;
1541                newsk->sk_userlocks     = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1542
1543                sock_reset_flag(newsk, SOCK_DONE);
1544                skb_queue_head_init(&newsk->sk_error_queue);
1545
1546                filter = rcu_dereference_protected(newsk->sk_filter, 1);
1547                if (filter != NULL)
1548                        /* though it's an empty new sock, the charging may fail
1549                         * if sysctl_optmem_max was changed between creation of
1550                         * original socket and cloning
1551                         */
1552                        is_charged = sk_filter_charge(newsk, filter);
1553
1554                if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
1555                        /* It is still raw copy of parent, so invalidate
1556                         * destructor and make plain sk_free() */
1557                        newsk->sk_destruct = NULL;
1558                        bh_unlock_sock(newsk);
1559                        sk_free(newsk);
1560                        newsk = NULL;
1561                        goto out;
1562                }
1563
1564                newsk->sk_err      = 0;
1565                newsk->sk_priority = 0;
1566                newsk->sk_incoming_cpu = raw_smp_processor_id();
1567                atomic64_set(&newsk->sk_cookie, 0);
1568                /*
1569                 * Before updating sk_refcnt, we must commit prior changes to memory
1570                 * (Documentation/RCU/rculist_nulls.txt for details)
1571                 */
1572                smp_wmb();
1573                atomic_set(&newsk->sk_refcnt, 2);
1574
1575                /*
1576                 * Increment the counter in the same struct proto as the master
1577                 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1578                 * is the same as sk->sk_prot->socks, as this field was copied
1579                 * with memcpy).
1580                 *
1581                 * This _changes_ the previous behaviour, where
1582                 * tcp_create_openreq_child always was incrementing the
1583                 * equivalent to tcp_prot->socks (inet_sock_nr), so this have
1584                 * to be taken into account in all callers. -acme
1585                 */
1586                sk_refcnt_debug_inc(newsk);
1587                sk_set_socket(newsk, NULL);
1588                newsk->sk_wq = NULL;
1589
1590                sk_update_clone(sk, newsk);
1591
1592                if (newsk->sk_prot->sockets_allocated)
1593                        sk_sockets_allocated_inc(newsk);
1594
1595                if (sock_needs_netstamp(sk) &&
1596                    newsk->sk_flags & SK_FLAGS_TIMESTAMP)
1597                        net_enable_timestamp();
1598        }
1599out:
1600        return newsk;
1601}
1602EXPORT_SYMBOL_GPL(sk_clone_lock);
1603
1604void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1605{
1606        u32 max_segs = 1;
1607
1608        sk_dst_set(sk, dst);
1609        sk->sk_route_caps = dst->dev->features;
1610        if (sk->sk_route_caps & NETIF_F_GSO)
1611                sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
1612        sk->sk_route_caps &= ~sk->sk_route_nocaps;
1613        if (sk_can_gso(sk)) {
1614                if (dst->header_len) {
1615                        sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
1616                } else {
1617                        sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1618                        sk->sk_gso_max_size = dst->dev->gso_max_size;
1619                        max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
1620                }
1621        }
1622        sk->sk_gso_max_segs = max_segs;
1623}
1624EXPORT_SYMBOL_GPL(sk_setup_caps);
1625
1626/*
1627 *      Simple resource managers for sockets.
1628 */
1629
1630
1631/*
1632 * Write buffer destructor automatically called from kfree_skb.
1633 */
1634void sock_wfree(struct sk_buff *skb)
1635{
1636        struct sock *sk = skb->sk;
1637        unsigned int len = skb->truesize;
1638
1639        if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1640                /*
1641                 * Keep a reference on sk_wmem_alloc, this will be released
1642                 * after sk_write_space() call
1643                 */
1644                atomic_sub(len - 1, &sk->sk_wmem_alloc);
1645                sk->sk_write_space(sk);
1646                len = 1;
1647        }
1648        /*
1649         * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1650         * could not do because of in-flight packets
1651         */
1652        if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
1653                __sk_free(sk);
1654}
1655EXPORT_SYMBOL(sock_wfree);
1656
1657void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
1658{
1659        skb_orphan(skb);
1660        skb->sk = sk;
1661#ifdef CONFIG_INET
1662        if (unlikely(!sk_fullsock(sk))) {
1663                skb->destructor = sock_edemux;
1664                sock_hold(sk);
1665                return;
1666        }
1667#endif
1668        skb->destructor = sock_wfree;
1669        skb_set_hash_from_sk(skb, sk);
1670        /*
1671         * We used to take a refcount on sk, but following operation
1672         * is enough to guarantee sk_free() wont free this sock until
1673         * all in-flight packets are completed
1674         */
1675        atomic_add(skb->truesize, &sk->sk_wmem_alloc);
1676}
1677EXPORT_SYMBOL(skb_set_owner_w);
1678
1679void skb_orphan_partial(struct sk_buff *skb)
1680{
1681        /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
1682         * so we do not completely orphan skb, but transfert all
1683         * accounted bytes but one, to avoid unexpected reorders.
1684         */
1685        if (skb->destructor == sock_wfree
1686#ifdef CONFIG_INET
1687            || skb->destructor == tcp_wfree
1688#endif
1689                ) {
1690                atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
1691                skb->truesize = 1;
1692        } else {
1693                skb_orphan(skb);
1694        }
1695}
1696EXPORT_SYMBOL(skb_orphan_partial);
1697
1698/*
1699 * Read buffer destructor automatically called from kfree_skb.
1700 */
1701void sock_rfree(struct sk_buff *skb)
1702{
1703        struct sock *sk = skb->sk;
1704        unsigned int len = skb->truesize;
1705
1706        atomic_sub(len, &sk->sk_rmem_alloc);
1707        sk_mem_uncharge(sk, len);
1708}
1709EXPORT_SYMBOL(sock_rfree);
1710
1711/*
1712 * Buffer destructor for skbs that are not used directly in read or write
1713 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
1714 */
1715void sock_efree(struct sk_buff *skb)
1716{
1717        sock_put(skb->sk);
1718}
1719EXPORT_SYMBOL(sock_efree);
1720
1721kuid_t sock_i_uid(struct sock *sk)
1722{
1723        kuid_t uid;
1724
1725        read_lock_bh(&sk->sk_callback_lock);
1726        uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
1727        read_unlock_bh(&sk->sk_callback_lock);
1728        return uid;
1729}
1730EXPORT_SYMBOL(sock_i_uid);
1731
1732unsigned long sock_i_ino(struct sock *sk)
1733{
1734        unsigned long ino;
1735
1736        read_lock_bh(&sk->sk_callback_lock);
1737        ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1738        read_unlock_bh(&sk->sk_callback_lock);
1739        return ino;
1740}
1741EXPORT_SYMBOL(sock_i_ino);
1742
1743/*
1744 * Allocate a skb from the socket's send buffer.
1745 */
1746struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1747                             gfp_t priority)
1748{
1749        if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1750                struct sk_buff *skb = alloc_skb(size, priority);
1751                if (skb) {
1752                        skb_set_owner_w(skb, sk);
1753                        return skb;
1754                }
1755        }
1756        return NULL;
1757}
1758EXPORT_SYMBOL(sock_wmalloc);
1759
1760/*
1761 * Allocate a memory block from the socket's option memory buffer.
1762 */
1763void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1764{
1765        if ((unsigned int)size <= sysctl_optmem_max &&
1766            atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1767                void *mem;
1768                /* First do the add, to avoid the race if kmalloc
1769                 * might sleep.
1770                 */
1771                atomic_add(size, &sk->sk_omem_alloc);
1772                mem = kmalloc(size, priority);
1773                if (mem)
1774                        return mem;
1775                atomic_sub(size, &sk->sk_omem_alloc);
1776        }
1777        return NULL;
1778}
1779EXPORT_SYMBOL(sock_kmalloc);
1780
1781/* Free an option memory block. Note, we actually want the inline
1782 * here as this allows gcc to detect the nullify and fold away the
1783 * condition entirely.
1784 */
1785static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
1786                                  const bool nullify)
1787{
1788        if (WARN_ON_ONCE(!mem))
1789                return;
1790        if (nullify)
1791                kzfree(mem);
1792        else
1793                kfree(mem);
1794        atomic_sub(size, &sk->sk_omem_alloc);
1795}
1796
1797void sock_kfree_s(struct sock *sk, void *mem, int size)
1798{
1799        __sock_kfree_s(sk, mem, size, false);
1800}
1801EXPORT_SYMBOL(sock_kfree_s);
1802
1803void sock_kzfree_s(struct sock *sk, void *mem, int size)
1804{
1805        __sock_kfree_s(sk, mem, size, true);
1806}
1807EXPORT_SYMBOL(sock_kzfree_s);
1808
1809/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1810   I think, these locks should be removed for datagram sockets.
1811 */
1812static long sock_wait_for_wmem(struct sock *sk, long timeo)
1813{
1814        DEFINE_WAIT(wait);
1815
1816        sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1817        for (;;) {
1818                if (!timeo)
1819                        break;
1820                if (signal_pending(current))
1821                        break;
1822                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1823                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1824                if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1825                        break;
1826                if (sk->sk_shutdown & SEND_SHUTDOWN)
1827                        break;
1828                if (sk->sk_err)
1829                        break;
1830                timeo = schedule_timeout(timeo);
1831        }
1832        finish_wait(sk_sleep(sk), &wait);
1833        return timeo;
1834}
1835
1836
1837/*
1838 *      Generic send/receive buffer handlers
1839 */
1840
1841struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1842                                     unsigned long data_len, int noblock,
1843                                     int *errcode, int max_page_order)
1844{
1845        struct sk_buff *skb;
1846        long timeo;
1847        int err;
1848
1849        timeo = sock_sndtimeo(sk, noblock);
1850        for (;;) {
1851                err = sock_error(sk);
1852                if (err != 0)
1853                        goto failure;
1854
1855                err = -EPIPE;
1856                if (sk->sk_shutdown & SEND_SHUTDOWN)
1857                        goto failure;
1858
1859                if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
1860                        break;
1861
1862                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1863                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1864                err = -EAGAIN;
1865                if (!timeo)
1866                        goto failure;
1867                if (signal_pending(current))
1868                        goto interrupted;
1869                timeo = sock_wait_for_wmem(sk, timeo);
1870        }
1871        skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
1872                                   errcode, sk->sk_allocation);
1873        if (skb)
1874                skb_set_owner_w(skb, sk);
1875        return skb;
1876
1877interrupted:
1878        err = sock_intr_errno(timeo);
1879failure:
1880        *errcode = err;
1881        return NULL;
1882}
1883EXPORT_SYMBOL(sock_alloc_send_pskb);
1884
1885struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1886                                    int noblock, int *errcode)
1887{
1888        return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
1889}
1890EXPORT_SYMBOL(sock_alloc_send_skb);
1891
1892int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
1893                   struct sockcm_cookie *sockc)
1894{
1895        struct cmsghdr *cmsg;
1896
1897        for_each_cmsghdr(cmsg, msg) {
1898                if (!CMSG_OK(msg, cmsg))
1899                        return -EINVAL;
1900                if (cmsg->cmsg_level != SOL_SOCKET)
1901                        continue;
1902                switch (cmsg->cmsg_type) {
1903                case SO_MARK:
1904                        if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1905                                return -EPERM;
1906                        if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
1907                                return -EINVAL;
1908                        sockc->mark = *(u32 *)CMSG_DATA(cmsg);
1909                        break;
1910                default:
1911                        return -EINVAL;
1912                }
1913        }
1914        return 0;
1915}
1916EXPORT_SYMBOL(sock_cmsg_send);
1917
1918/* On 32bit arches, an skb frag is limited to 2^15 */
1919#define SKB_FRAG_PAGE_ORDER     get_order(32768)
1920
1921/**
1922 * skb_page_frag_refill - check that a page_frag contains enough room
1923 * @sz: minimum size of the fragment we want to get
1924 * @pfrag: pointer to page_frag
1925 * @gfp: priority for memory allocation
1926 *
1927 * Note: While this allocator tries to use high order pages, there is
1928 * no guarantee that allocations succeed. Therefore, @sz MUST be
1929 * less or equal than PAGE_SIZE.
1930 */
1931bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
1932{
1933        if (pfrag->page) {
1934                if (atomic_read(&pfrag->page->_count) == 1) {
1935                        pfrag->offset = 0;
1936                        return true;
1937                }
1938                if (pfrag->offset + sz <= pfrag->size)
1939                        return true;
1940                put_page(pfrag->page);
1941        }
1942
1943        pfrag->offset = 0;
1944        if (SKB_FRAG_PAGE_ORDER) {
1945                /* Avoid direct reclaim but allow kswapd to wake */
1946                pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
1947                                          __GFP_COMP | __GFP_NOWARN |
1948                                          __GFP_NORETRY,
1949                                          SKB_FRAG_PAGE_ORDER);
1950                if (likely(pfrag->page)) {
1951                        pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
1952                        return true;
1953                }
1954        }
1955        pfrag->page = alloc_page(gfp);
1956        if (likely(pfrag->page)) {
1957                pfrag->size = PAGE_SIZE;
1958                return true;
1959        }
1960        return false;
1961}
1962EXPORT_SYMBOL(skb_page_frag_refill);
1963
1964bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1965{
1966        if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
1967                return true;
1968
1969        sk_enter_memory_pressure(sk);
1970        sk_stream_moderate_sndbuf(sk);
1971        return false;
1972}
1973EXPORT_SYMBOL(sk_page_frag_refill);
1974
1975static void __lock_sock(struct sock *sk)
1976        __releases(&sk->sk_lock.slock)
1977        __acquires(&sk->sk_lock.slock)
1978{
1979        DEFINE_WAIT(wait);
1980
1981        for (;;) {
1982                prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1983                                        TASK_UNINTERRUPTIBLE);
1984                spin_unlock_bh(&sk->sk_lock.slock);
1985                schedule();
1986                spin_lock_bh(&sk->sk_lock.slock);
1987                if (!sock_owned_by_user(sk))
1988                        break;
1989        }
1990        finish_wait(&sk->sk_lock.wq, &wait);
1991}
1992
1993static void __release_sock(struct sock *sk)
1994        __releases(&sk->sk_lock.slock)
1995        __acquires(&sk->sk_lock.slock)
1996{
1997        struct sk_buff *skb = sk->sk_backlog.head;
1998
1999        do {
2000                sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
2001                bh_unlock_sock(sk);
2002
2003                do {
2004                        struct sk_buff *next = skb->next;
2005
2006                        prefetch(next);
2007                        WARN_ON_ONCE(skb_dst_is_noref(skb));
2008                        skb->next = NULL;
2009                        sk_backlog_rcv(sk, skb);
2010
2011                        /*
2012                         * We are in process context here with softirqs
2013                         * disabled, use cond_resched_softirq() to preempt.
2014                         * This is safe to do because we've taken the backlog
2015                         * queue private:
2016                         */
2017                        cond_resched_softirq();
2018
2019                        skb = next;
2020                } while (skb != NULL);
2021
2022                bh_lock_sock(sk);
2023        } while ((skb = sk->sk_backlog.head) != NULL);
2024
2025        /*
2026         * Doing the zeroing here guarantee we can not loop forever
2027         * while a wild producer attempts to flood us.
2028         */
2029        sk->sk_backlog.len = 0;
2030}
2031
2032/**
2033 * sk_wait_data - wait for data to arrive at sk_receive_queue
2034 * @sk:    sock to wait on
2035 * @timeo: for how long
2036 * @skb:   last skb seen on sk_receive_queue
2037 *
2038 * Now socket state including sk->sk_err is changed only under lock,
2039 * hence we may omit checks after joining wait queue.
2040 * We check receive queue before schedule() only as optimization;
2041 * it is very likely that release_sock() added new data.
2042 */
2043int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
2044{
2045        int rc;
2046        DEFINE_WAIT(wait);
2047
2048        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2049        sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2050        rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb);
2051        sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2052        finish_wait(sk_sleep(sk), &wait);
2053        return rc;
2054}
2055EXPORT_SYMBOL(sk_wait_data);
2056
2057/**
2058 *      __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
2059 *      @sk: socket
2060 *      @size: memory size to allocate
2061 *      @kind: allocation type
2062 *
2063 *      If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
2064 *      rmem allocation. This function assumes that protocols which have
2065 *      memory_pressure use sk_wmem_queued as write buffer accounting.
2066 */
2067int __sk_mem_schedule(struct sock *sk, int size, int kind)
2068{
2069        struct proto *prot = sk->sk_prot;
2070        int amt = sk_mem_pages(size);
2071        long allocated;
2072        int parent_status = UNDER_LIMIT;
2073
2074        sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
2075
2076        allocated = sk_memory_allocated_add(sk, amt, &parent_status);
2077
2078        /* Under limit. */
2079        if (parent_status == UNDER_LIMIT &&
2080                        allocated <= sk_prot_mem_limits(sk, 0)) {
2081                sk_leave_memory_pressure(sk);
2082                return 1;
2083        }
2084
2085        /* Under pressure. (we or our parents) */
2086        if ((parent_status > SOFT_LIMIT) ||
2087                        allocated > sk_prot_mem_limits(sk, 1))
2088                sk_enter_memory_pressure(sk);
2089
2090        /* Over hard limit (we or our parents) */
2091        if ((parent_status == OVER_LIMIT) ||
2092                        (allocated > sk_prot_mem_limits(sk, 2)))
2093                goto suppress_allocation;
2094
2095        /* guarantee minimum buffer size under pressure */
2096        if (kind == SK_MEM_RECV) {
2097                if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
2098                        return 1;
2099
2100        } else { /* SK_MEM_SEND */
2101                if (sk->sk_type == SOCK_STREAM) {
2102                        if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
2103                                return 1;
2104                } else if (atomic_read(&sk->sk_wmem_alloc) <
2105                           prot->sysctl_wmem[0])
2106                                return 1;
2107        }
2108
2109        if (sk_has_memory_pressure(sk)) {
2110                int alloc;
2111
2112                if (!sk_under_memory_pressure(sk))
2113                        return 1;
2114                alloc = sk_sockets_allocated_read_positive(sk);
2115                if (sk_prot_mem_limits(sk, 2) > alloc *
2116                    sk_mem_pages(sk->sk_wmem_queued +
2117                                 atomic_read(&sk->sk_rmem_alloc) +
2118                                 sk->sk_forward_alloc))
2119                        return 1;
2120        }
2121
2122suppress_allocation:
2123
2124        if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2125                sk_stream_moderate_sndbuf(sk);
2126
2127                /* Fail only if socket is _under_ its sndbuf.
2128                 * In this case we cannot block, so that we have to fail.
2129                 */
2130                if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2131                        return 1;
2132        }
2133
2134        trace_sock_exceed_buf_limit(sk, prot, allocated);
2135
2136        /* Alas. Undo changes. */
2137        sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
2138
2139        sk_memory_allocated_sub(sk, amt);
2140
2141        return 0;
2142}
2143EXPORT_SYMBOL(__sk_mem_schedule);
2144
2145/**
2146 *      __sk_mem_reclaim - reclaim memory_allocated
2147 *      @sk: socket
2148 *      @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
2149 */
2150void __sk_mem_reclaim(struct sock *sk, int amount)
2151{
2152        amount >>= SK_MEM_QUANTUM_SHIFT;
2153        sk_memory_allocated_sub(sk, amount);
2154        sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
2155
2156        if (sk_under_memory_pressure(sk) &&
2157            (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2158                sk_leave_memory_pressure(sk);
2159}
2160EXPORT_SYMBOL(__sk_mem_reclaim);
2161
2162
2163/*
2164 * Set of default routines for initialising struct proto_ops when
2165 * the protocol does not support a particular function. In certain
2166 * cases where it makes no sense for a protocol to have a "do nothing"
2167 * function, some default processing is provided.
2168 */
2169
2170int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2171{
2172        return -EOPNOTSUPP;
2173}
2174EXPORT_SYMBOL(sock_no_bind);
2175
2176int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
2177                    int len, int flags)
2178{
2179        return -EOPNOTSUPP;
2180}
2181EXPORT_SYMBOL(sock_no_connect);
2182
2183int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2184{
2185        return -EOPNOTSUPP;
2186}
2187EXPORT_SYMBOL(sock_no_socketpair);
2188
2189int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
2190{
2191        return -EOPNOTSUPP;
2192}
2193EXPORT_SYMBOL(sock_no_accept);
2194
2195int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
2196                    int *len, int peer)
2197{
2198        return -EOPNOTSUPP;
2199}
2200EXPORT_SYMBOL(sock_no_getname);
2201
2202unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
2203{
2204        return 0;
2205}
2206EXPORT_SYMBOL(sock_no_poll);
2207
2208int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2209{
2210        return -EOPNOTSUPP;
2211}
2212EXPORT_SYMBOL(sock_no_ioctl);
2213
2214int sock_no_listen(struct socket *sock, int backlog)
2215{
2216        return -EOPNOTSUPP;
2217}
2218EXPORT_SYMBOL(sock_no_listen);
2219
2220int sock_no_shutdown(struct socket *sock, int how)
2221{
2222        return -EOPNOTSUPP;
2223}
2224EXPORT_SYMBOL(sock_no_shutdown);
2225
2226int sock_no_setsockopt(struct socket *sock, int level, int optname,
2227                    char __user *optval, unsigned int optlen)
2228{
2229        return -EOPNOTSUPP;
2230}
2231EXPORT_SYMBOL(sock_no_setsockopt);
2232
2233int sock_no_getsockopt(struct socket *sock, int level, int optname,
2234                    char __user *optval, int __user *optlen)
2235{
2236        return -EOPNOTSUPP;
2237}
2238EXPORT_SYMBOL(sock_no_getsockopt);
2239
2240int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
2241{
2242        return -EOPNOTSUPP;
2243}
2244EXPORT_SYMBOL(sock_no_sendmsg);
2245
2246int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
2247                    int flags)
2248{
2249        return -EOPNOTSUPP;
2250}
2251EXPORT_SYMBOL(sock_no_recvmsg);
2252
2253int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2254{
2255        /* Mirror missing mmap method error code */
2256        return -ENODEV;
2257}
2258EXPORT_SYMBOL(sock_no_mmap);
2259
2260ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2261{
2262        ssize_t res;
2263        struct msghdr msg = {.msg_flags = flags};
2264        struct kvec iov;
2265        char *kaddr = kmap(page);
2266        iov.iov_base = kaddr + offset;
2267        iov.iov_len = size;
2268        res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2269        kunmap(page);
2270        return res;
2271}
2272EXPORT_SYMBOL(sock_no_sendpage);
2273
2274/*
2275 *      Default Socket Callbacks
2276 */
2277
2278static void sock_def_wakeup(struct sock *sk)
2279{
2280        struct socket_wq *wq;
2281
2282        rcu_read_lock();
2283        wq = rcu_dereference(sk->sk_wq);
2284        if (wq_has_sleeper(wq))
2285                wake_up_interruptible_all(&wq->wait);
2286        rcu_read_unlock();
2287}
2288
2289static void sock_def_error_report(struct sock *sk)
2290{
2291        struct socket_wq *wq;
2292
2293        rcu_read_lock();
2294        wq = rcu_dereference(sk->sk_wq);
2295        if (wq_has_sleeper(wq))
2296                wake_up_interruptible_poll(&wq->wait, POLLERR);
2297        sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
2298        rcu_read_unlock();
2299}
2300
2301static void sock_def_readable(struct sock *sk)
2302{
2303        struct socket_wq *wq;
2304
2305        rcu_read_lock();
2306        wq = rcu_dereference(sk->sk_wq);
2307        if (wq_has_sleeper(wq))
2308                wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
2309                                                POLLRDNORM | POLLRDBAND);
2310        sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
2311        rcu_read_unlock();
2312}
2313
2314static void sock_def_write_space(struct sock *sk)
2315{
2316        struct socket_wq *wq;
2317
2318        rcu_read_lock();
2319
2320        /* Do not wake up a writer until he can make "significant"
2321         * progress.  --DaveM
2322         */
2323        if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
2324                wq = rcu_dereference(sk->sk_wq);
2325                if (wq_has_sleeper(wq))
2326                        wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
2327                                                POLLWRNORM | POLLWRBAND);
2328
2329                /* Should agree with poll, otherwise some programs break */
2330                if (sock_writeable(sk))
2331                        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
2332        }
2333
2334        rcu_read_unlock();
2335}
2336
2337static void sock_def_destruct(struct sock *sk)
2338{
2339}
2340
2341void sk_send_sigurg(struct sock *sk)
2342{
2343        if (sk->sk_socket && sk->sk_socket->file)
2344                if (send_sigurg(&sk->sk_socket->file->f_owner))
2345                        sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
2346}
2347EXPORT_SYMBOL(sk_send_sigurg);
2348
2349void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2350                    unsigned long expires)
2351{
2352        if (!mod_timer(timer, expires))
2353                sock_hold(sk);
2354}
2355EXPORT_SYMBOL(sk_reset_timer);
2356
2357void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2358{
2359        if (del_timer(timer))
2360                __sock_put(sk);
2361}
2362EXPORT_SYMBOL(sk_stop_timer);
2363
2364void sock_init_data(struct socket *sock, struct sock *sk)
2365{
2366        skb_queue_head_init(&sk->sk_receive_queue);
2367        skb_queue_head_init(&sk->sk_write_queue);
2368        skb_queue_head_init(&sk->sk_error_queue);
2369
2370        sk->sk_send_head        =       NULL;
2371
2372        init_timer(&sk->sk_timer);
2373
2374        sk->sk_allocation       =       GFP_KERNEL;
2375        sk->sk_rcvbuf           =       sysctl_rmem_default;
2376        sk->sk_sndbuf           =       sysctl_wmem_default;
2377        sk->sk_state            =       TCP_CLOSE;
2378        sk_set_socket(sk, sock);
2379
2380        sock_set_flag(sk, SOCK_ZAPPED);
2381
2382        if (sock) {
2383                sk->sk_type     =       sock->type;
2384                sk->sk_wq       =       sock->wq;
2385                sock->sk        =       sk;
2386        } else
2387                sk->sk_wq       =       NULL;
2388
2389        rwlock_init(&sk->sk_callback_lock);
2390        lockdep_set_class_and_name(&sk->sk_callback_lock,
2391                        af_callback_keys + sk->sk_family,
2392                        af_family_clock_key_strings[sk->sk_family]);
2393
2394        sk->sk_state_change     =       sock_def_wakeup;
2395        sk->sk_data_ready       =       sock_def_readable;
2396        sk->sk_write_space      =       sock_def_write_space;
2397        sk->sk_error_report     =       sock_def_error_report;
2398        sk->sk_destruct         =       sock_def_destruct;
2399
2400        sk->sk_frag.page        =       NULL;
2401        sk->sk_frag.offset      =       0;
2402        sk->sk_peek_off         =       -1;
2403
2404        sk->sk_peer_pid         =       NULL;
2405        sk->sk_peer_cred        =       NULL;
2406        sk->sk_write_pending    =       0;
2407        sk->sk_rcvlowat         =       1;
2408        sk->sk_rcvtimeo         =       MAX_SCHEDULE_TIMEOUT;
2409        sk->sk_sndtimeo         =       MAX_SCHEDULE_TIMEOUT;
2410
2411        sk->sk_stamp = ktime_set(-1L, 0);
2412
2413#ifdef CONFIG_NET_RX_BUSY_POLL
2414        sk->sk_napi_id          =       0;
2415        sk->sk_ll_usec          =       sysctl_net_busy_read;
2416#endif
2417
2418        sk->sk_max_pacing_rate = ~0U;
2419        sk->sk_pacing_rate = ~0U;
2420        sk->sk_incoming_cpu = -1;
2421        /*
2422         * Before updating sk_refcnt, we must commit prior changes to memory
2423         * (Documentation/RCU/rculist_nulls.txt for details)
2424         */
2425        smp_wmb();
2426        atomic_set(&sk->sk_refcnt, 1);
2427        atomic_set(&sk->sk_drops, 0);
2428}
2429EXPORT_SYMBOL(sock_init_data);
2430
2431void lock_sock_nested(struct sock *sk, int subclass)
2432{
2433        might_sleep();
2434        spin_lock_bh(&sk->sk_lock.slock);
2435        if (sk->sk_lock.owned)
2436                __lock_sock(sk);
2437        sk->sk_lock.owned = 1;
2438        spin_unlock(&sk->sk_lock.slock);
2439        /*
2440         * The sk_lock has mutex_lock() semantics here:
2441         */
2442        mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
2443        local_bh_enable();
2444}
2445EXPORT_SYMBOL(lock_sock_nested);
2446
2447void release_sock(struct sock *sk)
2448{
2449        /*
2450         * The sk_lock has mutex_unlock() semantics:
2451         */
2452        mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2453
2454        spin_lock_bh(&sk->sk_lock.slock);
2455        if (sk->sk_backlog.tail)
2456                __release_sock(sk);
2457
2458        /* Warning : release_cb() might need to release sk ownership,
2459         * ie call sock_release_ownership(sk) before us.
2460         */
2461        if (sk->sk_prot->release_cb)
2462                sk->sk_prot->release_cb(sk);
2463
2464        sock_release_ownership(sk);
2465        if (waitqueue_active(&sk->sk_lock.wq))
2466                wake_up(&sk->sk_lock.wq);
2467        spin_unlock_bh(&sk->sk_lock.slock);
2468}
2469EXPORT_SYMBOL(release_sock);
2470
2471/**
2472 * lock_sock_fast - fast version of lock_sock
2473 * @sk: socket
2474 *
2475 * This version should be used for very small section, where process wont block
2476 * return false if fast path is taken
2477 *   sk_lock.slock locked, owned = 0, BH disabled
2478 * return true if slow path is taken
2479 *   sk_lock.slock unlocked, owned = 1, BH enabled
2480 */
2481bool lock_sock_fast(struct sock *sk)
2482{
2483        might_sleep();
2484        spin_lock_bh(&sk->sk_lock.slock);
2485
2486        if (!sk->sk_lock.owned)
2487                /*
2488                 * Note : We must disable BH
2489                 */
2490                return false;
2491
2492        __lock_sock(sk);
2493        sk->sk_lock.owned = 1;
2494        spin_unlock(&sk->sk_lock.slock);
2495        /*
2496         * The sk_lock has mutex_lock() semantics here:
2497         */
2498        mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2499        local_bh_enable();
2500        return true;
2501}
2502EXPORT_SYMBOL(lock_sock_fast);
2503
2504int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
2505{
2506        struct timeval tv;
2507        if (!sock_flag(sk, SOCK_TIMESTAMP))
2508                sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2509        tv = ktime_to_timeval(sk->sk_stamp);
2510        if (tv.tv_sec == -1)
2511                return -ENOENT;
2512        if (tv.tv_sec == 0) {
2513                sk->sk_stamp = ktime_get_real();
2514                tv = ktime_to_timeval(sk->sk_stamp);
2515        }
2516        return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
2517}
2518EXPORT_SYMBOL(sock_get_timestamp);
2519
2520int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2521{
2522        struct timespec ts;
2523        if (!sock_flag(sk, SOCK_TIMESTAMP))
2524                sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2525        ts = ktime_to_timespec(sk->sk_stamp);
2526        if (ts.tv_sec == -1)
2527                return -ENOENT;
2528        if (ts.tv_sec == 0) {
2529                sk->sk_stamp = ktime_get_real();
2530                ts = ktime_to_timespec(sk->sk_stamp);
2531        }
2532        return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2533}
2534EXPORT_SYMBOL(sock_get_timestampns);
2535
2536void sock_enable_timestamp(struct sock *sk, int flag)
2537{
2538        if (!sock_flag(sk, flag)) {
2539                unsigned long previous_flags = sk->sk_flags;
2540
2541                sock_set_flag(sk, flag);
2542                /*
2543                 * we just set one of the two flags which require net
2544                 * time stamping, but time stamping might have been on
2545                 * already because of the other one
2546                 */
2547                if (sock_needs_netstamp(sk) &&
2548                    !(previous_flags & SK_FLAGS_TIMESTAMP))
2549                        net_enable_timestamp();
2550        }
2551}
2552
2553int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
2554                       int level, int type)
2555{
2556        struct sock_exterr_skb *serr;
2557        struct sk_buff *skb;
2558        int copied, err;
2559
2560        err = -EAGAIN;
2561        skb = sock_dequeue_err_skb(sk);
2562        if (skb == NULL)
2563                goto out;
2564
2565        copied = skb->len;
2566        if (copied > len) {
2567                msg->msg_flags |= MSG_TRUNC;
2568                copied = len;
2569        }
2570        err = skb_copy_datagram_msg(skb, 0, msg, copied);
2571        if (err)
2572                goto out_free_skb;
2573
2574        sock_recv_timestamp(msg, sk, skb);
2575
2576        serr = SKB_EXT_ERR(skb);
2577        put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
2578
2579        msg->msg_flags |= MSG_ERRQUEUE;
2580        err = copied;
2581
2582out_free_skb:
2583        kfree_skb(skb);
2584out:
2585        return err;
2586}
2587EXPORT_SYMBOL(sock_recv_errqueue);
2588
2589/*
2590 *      Get a socket option on an socket.
2591 *
2592 *      FIX: POSIX 1003.1g is very ambiguous here. It states that
2593 *      asynchronous errors should be reported by getsockopt. We assume
2594 *      this means if you specify SO_ERROR (otherwise whats the point of it).
2595 */
2596int sock_common_getsockopt(struct socket *sock, int level, int optname,
2597                           char __user *optval, int __user *optlen)
2598{
2599        struct sock *sk = sock->sk;
2600
2601        return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2602}
2603EXPORT_SYMBOL(sock_common_getsockopt);
2604
2605#ifdef CONFIG_COMPAT
2606int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2607                                  char __user *optval, int __user *optlen)
2608{
2609        struct sock *sk = sock->sk;
2610
2611        if (sk->sk_prot->compat_getsockopt != NULL)
2612                return sk->sk_prot->compat_getsockopt(sk, level, optname,
2613                                                      optval, optlen);
2614        return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2615}
2616EXPORT_SYMBOL(compat_sock_common_getsockopt);
2617#endif
2618
2619int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2620                        int flags)
2621{
2622        struct sock *sk = sock->sk;
2623        int addr_len = 0;
2624        int err;
2625
2626        err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
2627                                   flags & ~MSG_DONTWAIT, &addr_len);
2628        if (err >= 0)
2629                msg->msg_namelen = addr_len;
2630        return err;
2631}
2632EXPORT_SYMBOL(sock_common_recvmsg);
2633
2634/*
2635 *      Set socket options on an inet socket.
2636 */
2637int sock_common_setsockopt(struct socket *sock, int level, int optname,
2638                           char __user *optval, unsigned int optlen)
2639{
2640        struct sock *sk = sock->sk;
2641
2642        return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2643}
2644EXPORT_SYMBOL(sock_common_setsockopt);
2645
2646#ifdef CONFIG_COMPAT
2647int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
2648                                  char __user *optval, unsigned int optlen)
2649{
2650        struct sock *sk = sock->sk;
2651
2652        if (sk->sk_prot->compat_setsockopt != NULL)
2653                return sk->sk_prot->compat_setsockopt(sk, level, optname,
2654                                                      optval, optlen);
2655        return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2656}
2657EXPORT_SYMBOL(compat_sock_common_setsockopt);
2658#endif
2659
2660void sk_common_release(struct sock *sk)
2661{
2662        if (sk->sk_prot->destroy)
2663                sk->sk_prot->destroy(sk);
2664
2665        /*
2666         * Observation: when sock_common_release is called, processes have
2667         * no access to socket. But net still has.
2668         * Step one, detach it from networking:
2669         *
2670         * A. Remove from hash tables.
2671         */
2672
2673        sk->sk_prot->unhash(sk);
2674
2675        /*
2676         * In this point socket cannot receive new packets, but it is possible
2677         * that some packets are in flight because some CPU runs receiver and
2678         * did hash table lookup before we unhashed socket. They will achieve
2679         * receive queue and will be purged by socket destructor.
2680         *
2681         * Also we still have packets pending on receive queue and probably,
2682         * our own packets waiting in device queues. sock_destroy will drain
2683         * receive queue, but transmitted packets will delay socket destruction
2684         * until the last reference will be released.
2685         */
2686
2687        sock_orphan(sk);
2688
2689        xfrm_sk_free_policy(sk);
2690
2691        sk_refcnt_debug_release(sk);
2692
2693        if (sk->sk_frag.page) {
2694                put_page(sk->sk_frag.page);
2695                sk->sk_frag.page = NULL;
2696        }
2697
2698        sock_put(sk);
2699}
2700EXPORT_SYMBOL(sk_common_release);
2701
2702#ifdef CONFIG_PROC_FS
2703#define PROTO_INUSE_NR  64      /* should be enough for the first time */
2704struct prot_inuse {
2705        int val[PROTO_INUSE_NR];
2706};
2707
2708static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
2709
2710#ifdef CONFIG_NET_NS
2711void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2712{
2713        __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
2714}
2715EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2716
2717int sock_prot_inuse_get(struct net *net, struct proto *prot)
2718{
2719        int cpu, idx = prot->inuse_idx;
2720        int res = 0;
2721
2722        for_each_possible_cpu(cpu)
2723                res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2724
2725        return res >= 0 ? res : 0;
2726}
2727EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2728
2729static int __net_init sock_inuse_init_net(struct net *net)
2730{
2731        net->core.inuse = alloc_percpu(struct prot_inuse);
2732        return net->core.inuse ? 0 : -ENOMEM;
2733}
2734
2735static void __net_exit sock_inuse_exit_net(struct net *net)
2736{
2737        free_percpu(net->core.inuse);
2738}
2739
2740static struct pernet_operations net_inuse_ops = {
2741        .init = sock_inuse_init_net,
2742        .exit = sock_inuse_exit_net,
2743};
2744
2745static __init int net_inuse_init(void)
2746{
2747        if (register_pernet_subsys(&net_inuse_ops))
2748                panic("Cannot initialize net inuse counters");
2749
2750        return 0;
2751}
2752
2753core_initcall(net_inuse_init);
2754#else
2755static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2756
2757void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2758{
2759        __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
2760}
2761EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2762
2763int sock_prot_inuse_get(struct net *net, struct proto *prot)
2764{
2765        int cpu, idx = prot->inuse_idx;
2766        int res = 0;
2767
2768        for_each_possible_cpu(cpu)
2769                res += per_cpu(prot_inuse, cpu).val[idx];
2770
2771        return res >= 0 ? res : 0;
2772}
2773EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2774#endif
2775
2776static void assign_proto_idx(struct proto *prot)
2777{
2778        prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2779
2780        if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2781                pr_err("PROTO_INUSE_NR exhausted\n");
2782                return;
2783        }
2784
2785        set_bit(prot->inuse_idx, proto_inuse_idx);
2786}
2787
2788static void release_proto_idx(struct proto *prot)
2789{
2790        if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2791                clear_bit(prot->inuse_idx, proto_inuse_idx);
2792}
2793#else
2794static inline void assign_proto_idx(struct proto *prot)
2795{
2796}
2797
2798static inline void release_proto_idx(struct proto *prot)
2799{
2800}
2801#endif
2802
2803static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
2804{
2805        if (!rsk_prot)
2806                return;
2807        kfree(rsk_prot->slab_name);
2808        rsk_prot->slab_name = NULL;
2809        kmem_cache_destroy(rsk_prot->slab);
2810        rsk_prot->slab = NULL;
2811}
2812
2813static int req_prot_init(const struct proto *prot)
2814{
2815        struct request_sock_ops *rsk_prot = prot->rsk_prot;
2816
2817        if (!rsk_prot)
2818                return 0;
2819
2820        rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
2821                                        prot->name);
2822        if (!rsk_prot->slab_name)
2823                return -ENOMEM;
2824
2825        rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
2826                                           rsk_prot->obj_size, 0,
2827                                           prot->slab_flags, NULL);
2828
2829        if (!rsk_prot->slab) {
2830                pr_crit("%s: Can't create request sock SLAB cache!\n",
2831                        prot->name);
2832                return -ENOMEM;
2833        }
2834        return 0;
2835}
2836
2837int proto_register(struct proto *prot, int alloc_slab)
2838{
2839        if (alloc_slab) {
2840                prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
2841                                        SLAB_HWCACHE_ALIGN | prot->slab_flags,
2842                                        NULL);
2843
2844                if (prot->slab == NULL) {
2845                        pr_crit("%s: Can't create sock SLAB cache!\n",
2846                                prot->name);
2847                        goto out;
2848                }
2849
2850                if (req_prot_init(prot))
2851                        goto out_free_request_sock_slab;
2852
2853                if (prot->twsk_prot != NULL) {
2854                        prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
2855
2856                        if (prot->twsk_prot->twsk_slab_name == NULL)
2857                                goto out_free_request_sock_slab;
2858
2859                        prot->twsk_prot->twsk_slab =
2860                                kmem_cache_create(prot->twsk_prot->twsk_slab_name,
2861                                                  prot->twsk_prot->twsk_obj_size,
2862                                                  0,
2863                                                  prot->slab_flags,
2864                                                  NULL);
2865                        if (prot->twsk_prot->twsk_slab == NULL)
2866                                goto out_free_timewait_sock_slab_name;
2867                }
2868        }
2869
2870        mutex_lock(&proto_list_mutex);
2871        list_add(&prot->node, &proto_list);
2872        assign_proto_idx(prot);
2873        mutex_unlock(&proto_list_mutex);
2874        return 0;
2875
2876out_free_timewait_sock_slab_name:
2877        kfree(prot->twsk_prot->twsk_slab_name);
2878out_free_request_sock_slab:
2879        req_prot_cleanup(prot->rsk_prot);
2880
2881        kmem_cache_destroy(prot->slab);
2882        prot->slab = NULL;
2883out:
2884        return -ENOBUFS;
2885}
2886EXPORT_SYMBOL(proto_register);
2887
2888void proto_unregister(struct proto *prot)
2889{
2890        mutex_lock(&proto_list_mutex);
2891        release_proto_idx(prot);
2892        list_del(&prot->node);
2893        mutex_unlock(&proto_list_mutex);
2894
2895        kmem_cache_destroy(prot->slab);
2896        prot->slab = NULL;
2897
2898        req_prot_cleanup(prot->rsk_prot);
2899
2900        if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
2901                kmem_cache_destroy(prot->twsk_prot->twsk_slab);
2902                kfree(prot->twsk_prot->twsk_slab_name);
2903                prot->twsk_prot->twsk_slab = NULL;
2904        }
2905}
2906EXPORT_SYMBOL(proto_unregister);
2907
2908#ifdef CONFIG_PROC_FS
2909static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
2910        __acquires(proto_list_mutex)
2911{
2912        mutex_lock(&proto_list_mutex);
2913        return seq_list_start_head(&proto_list, *pos);
2914}
2915
2916static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2917{
2918        return seq_list_next(v, &proto_list, pos);
2919}
2920
2921static void proto_seq_stop(struct seq_file *seq, void *v)
2922        __releases(proto_list_mutex)
2923{
2924        mutex_unlock(&proto_list_mutex);
2925}
2926
2927static char proto_method_implemented(const void *method)
2928{
2929        return method == NULL ? 'n' : 'y';
2930}
2931static long sock_prot_memory_allocated(struct proto *proto)
2932{
2933        return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
2934}
2935
2936static char *sock_prot_memory_pressure(struct proto *proto)
2937{
2938        return proto->memory_pressure != NULL ?
2939        proto_memory_pressure(proto) ? "yes" : "no" : "NI";
2940}
2941
2942static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2943{
2944
2945        seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
2946                        "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2947                   proto->name,
2948                   proto->obj_size,
2949                   sock_prot_inuse_get(seq_file_net(seq), proto),
2950                   sock_prot_memory_allocated(proto),
2951                   sock_prot_memory_pressure(proto),
2952                   proto->max_header,
2953                   proto->slab == NULL ? "no" : "yes",
2954                   module_name(proto->owner),
2955                   proto_method_implemented(proto->close),
2956                   proto_method_implemented(proto->connect),
2957                   proto_method_implemented(proto->disconnect),
2958                   proto_method_implemented(proto->accept),
2959                   proto_method_implemented(proto->ioctl),
2960                   proto_method_implemented(proto->init),
2961                   proto_method_implemented(proto->destroy),
2962                   proto_method_implemented(proto->shutdown),
2963                   proto_method_implemented(proto->setsockopt),
2964                   proto_method_implemented(proto->getsockopt),
2965                   proto_method_implemented(proto->sendmsg),
2966                   proto_method_implemented(proto->recvmsg),
2967                   proto_method_implemented(proto->sendpage),
2968                   proto_method_implemented(proto->bind),
2969                   proto_method_implemented(proto->backlog_rcv),
2970                   proto_method_implemented(proto->hash),
2971                   proto_method_implemented(proto->unhash),
2972                   proto_method_implemented(proto->get_port),
2973                   proto_method_implemented(proto->enter_memory_pressure));
2974}
2975
2976static int proto_seq_show(struct seq_file *seq, void *v)
2977{
2978        if (v == &proto_list)
2979                seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2980                           "protocol",
2981                           "size",
2982                           "sockets",
2983                           "memory",
2984                           "press",
2985                           "maxhdr",
2986                           "slab",
2987                           "module",
2988                           "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2989        else
2990                proto_seq_printf(seq, list_entry(v, struct proto, node));
2991        return 0;
2992}
2993
2994static const struct seq_operations proto_seq_ops = {
2995        .start  = proto_seq_start,
2996        .next   = proto_seq_next,
2997        .stop   = proto_seq_stop,
2998        .show   = proto_seq_show,
2999};
3000
3001static int proto_seq_open(struct inode *inode, struct file *file)
3002{
3003        return seq_open_net(inode, file, &proto_seq_ops,
3004                            sizeof(struct seq_net_private));
3005}
3006
3007static const struct file_operations proto_seq_fops = {
3008        .owner          = THIS_MODULE,
3009        .open           = proto_seq_open,
3010        .read           = seq_read,
3011        .llseek         = seq_lseek,
3012        .release        = seq_release_net,
3013};
3014
3015static __net_init int proto_init_net(struct net *net)
3016{
3017        if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
3018                return -ENOMEM;
3019
3020        return 0;
3021}
3022
3023static __net_exit void proto_exit_net(struct net *net)
3024{
3025        remove_proc_entry("protocols", net->proc_net);
3026}
3027
3028
3029static __net_initdata struct pernet_operations proto_net_ops = {
3030        .init = proto_init_net,
3031        .exit = proto_exit_net,
3032};
3033
3034static int __init proto_init(void)
3035{
3036        return register_pernet_subsys(&proto_net_ops);
3037}
3038
3039subsys_initcall(proto_init);
3040
3041#endif /* PROC_FS */
3042