linux/net/ipv6/ip6_flowlabel.c
/*
 *      ip6_flowlabel.c         IPv6 flowlabel manager.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      Authors:        Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

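/*
 * Userspace usage sketch (illustrative only, not part of this file):
 * a process leases a label with the IPV6_FLOWLABEL_MGR socket option,
 * then asks the stack to emit it by filling sin6_flowinfo and enabling
 * IPV6_FLOWINFO_SEND.  "fd" and "dst" (a UDP socket and the peer's
 * sockaddr_in6) are assumed; error handling is omitted.
 *
 *      struct in6_flowlabel_req freq = { 0 };
 *
 *      freq.flr_action = IPV6_FL_A_GET;
 *      freq.flr_flags  = IPV6_FL_F_CREATE;     // create if not present
 *      freq.flr_share  = IPV6_FL_S_EXCL;       // do not share the lease
 *      freq.flr_label  = 0;                    // 0: kernel picks a label
 *      freq.flr_dst    = dst.sin6_addr;
 *      setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR, &freq, sizeof(freq));
 *
 *      int on = 1;                             // freq.flr_label now set
 *      setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWINFO_SEND, &on, sizeof(on));
 *      dst.sin6_flowinfo = freq.flr_label;
 *      sendto(fd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */
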
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pid_namespace.h>

#include <net/net_namespace.h>
#include <net/sock.h>

#include <net/ipv6.h>
#include <net/rawv6.h>
#include <net/transp_v6.h>

#include <asm/uaccess.h>

#define FL_MIN_LINGER   6       /* Minimum linger. Set to the 6 seconds
                                   specified in the old IPv6 RFC; still a
                                   reasonable value.
                                 */
#define FL_MAX_LINGER   150     /* Maximum linger timeout */

/* FL hash table */

#define FL_MAX_PER_SOCK 32
#define FL_MAX_SIZE     4096
#define FL_HASH_MASK    255
#define FL_HASH(l)      (ntohl(l)&FL_HASH_MASK)
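
/*
 * Labels live in the packet in network byte order; the hash simply
 * takes the low eight bits of the host-order value, so e.g.
 * FL_HASH(htonl(0x12345)) selects bucket 0x45.
 */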

static atomic_t fl_size = ATOMIC_INIT(0);
static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];

static void ip6_fl_gc(unsigned long dummy);
static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc, 0, 0);

/* FL hash table lock: it protects only the GC */

static DEFINE_SPINLOCK(ip6_fl_lock);

/* Lock protecting the per-socket flowlabel lists (np->ipv6_fl_list) */

static DEFINE_SPINLOCK(ip6_sk_fl_lock);

#define for_each_fl_rcu(hash, fl)                               \
        for (fl = rcu_dereference_bh(fl_ht[(hash)]);            \
             fl != NULL;                                        \
             fl = rcu_dereference_bh(fl->next))
#define for_each_fl_continue_rcu(fl)                            \
        for (fl = rcu_dereference_bh(fl->next);                 \
             fl != NULL;                                        \
             fl = rcu_dereference_bh(fl->next))

#define for_each_sk_fl_rcu(np, sfl)                             \
        for (sfl = rcu_dereference_bh(np->ipv6_fl_list);        \
             sfl != NULL;                                       \
             sfl = rcu_dereference_bh(sfl->next))

static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
{
        struct ip6_flowlabel *fl;

        for_each_fl_rcu(FL_HASH(label), fl) {
                if (fl->label == label && net_eq(fl->fl_net, net))
                        return fl;
        }
        return NULL;
}

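/*
 * Like __fl_lookup(), but takes a reference on the entry.
 * atomic_inc_not_zero() refuses entries whose refcount already hit
 * zero: those are dead and may be freed by the GC as soon as the RCU
 * grace period ends, so they must not be revived here.
 */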
static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
{
        struct ip6_flowlabel *fl;

        rcu_read_lock_bh();
        fl = __fl_lookup(net, label);
        if (fl && !atomic_inc_not_zero(&fl->users))
                fl = NULL;
        rcu_read_unlock_bh();
        return fl;
}

static void fl_free(struct ip6_flowlabel *fl)
{
        if (fl) {
                if (fl->share == IPV6_FL_S_PROCESS)
                        put_pid(fl->owner.pid);
                kfree(fl->opt);
                kfree_rcu(fl, rcu);
        }
}

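/*
 * Drop one reference.  The final put does not free the label at once;
 * it pushes ->expires out by the linger interval and (re)arms the GC
 * timer, so the label stays reusable until it really expires.
 */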
static void fl_release(struct ip6_flowlabel *fl)
{
        spin_lock_bh(&ip6_fl_lock);

        fl->lastuse = jiffies;
        if (atomic_dec_and_test(&fl->users)) {
                unsigned long ttd = fl->lastuse + fl->linger;
                if (time_after(ttd, fl->expires))
                        fl->expires = ttd;
                ttd = fl->expires;
                if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
                        struct ipv6_txoptions *opt = fl->opt;
                        fl->opt = NULL;
                        kfree(opt);
                }
                if (!timer_pending(&ip6_fl_gc_timer) ||
                    time_after(ip6_fl_gc_timer.expires, ttd))
                        mod_timer(&ip6_fl_gc_timer, ttd);
        }
        spin_unlock_bh(&ip6_fl_lock);
}

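/*
 * Timer-driven garbage collector: walk every hash chain, unlink and
 * free unreferenced labels whose expiry has passed, then re-arm the
 * timer for the earliest deadline still pending.
 */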
static void ip6_fl_gc(unsigned long dummy)
{
        int i;
        unsigned long now = jiffies;
        unsigned long sched = 0;

        spin_lock(&ip6_fl_lock);

        for (i = 0; i <= FL_HASH_MASK; i++) {
                struct ip6_flowlabel *fl;
                struct ip6_flowlabel __rcu **flp;

                flp = &fl_ht[i];
                while ((fl = rcu_dereference_protected(*flp,
                                                       lockdep_is_held(&ip6_fl_lock))) != NULL) {
                        if (atomic_read(&fl->users) == 0) {
                                unsigned long ttd = fl->lastuse + fl->linger;
                                if (time_after(ttd, fl->expires))
                                        fl->expires = ttd;
                                ttd = fl->expires;
                                if (time_after_eq(now, ttd)) {
                                        *flp = fl->next;
                                        fl_free(fl);
                                        atomic_dec(&fl_size);
                                        continue;
                                }
                                if (!sched || time_before(ttd, sched))
                                        sched = ttd;
                        }
                        flp = &fl->next;
                }
        }
        if (!sched && atomic_read(&fl_size))
                sched = now + FL_MAX_LINGER;
        if (sched) {
                mod_timer(&ip6_fl_gc_timer, sched);
        }
        spin_unlock(&ip6_fl_lock);
}

static void __net_exit ip6_fl_purge(struct net *net)
{
        int i;

        spin_lock_bh(&ip6_fl_lock);
        for (i = 0; i <= FL_HASH_MASK; i++) {
                struct ip6_flowlabel *fl;
                struct ip6_flowlabel __rcu **flp;

                flp = &fl_ht[i];
                while ((fl = rcu_dereference_protected(*flp,
                                                       lockdep_is_held(&ip6_fl_lock))) != NULL) {
                        if (net_eq(fl->fl_net, net) &&
                            atomic_read(&fl->users) == 0) {
                                *flp = fl->next;
                                fl_free(fl);
                                atomic_dec(&fl_size);
                                continue;
                        }
                        flp = &fl->next;
                }
        }
        spin_unlock_bh(&ip6_fl_lock);
}

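/*
 * Publish a freshly built label in the hash table.  label == 0 means
 * "allocate": random values are tried until a free one is found.
 * Returns NULL on success; for an explicit label that raced with a
 * concurrent creator it returns the existing entry with an extra
 * reference, so the caller can redo its sharing-permission checks.
 */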
static struct ip6_flowlabel *fl_intern(struct net *net,
                                       struct ip6_flowlabel *fl, __be32 label)
{
        struct ip6_flowlabel *lfl;

        fl->label = label & IPV6_FLOWLABEL_MASK;

        spin_lock_bh(&ip6_fl_lock);
        if (label == 0) {
                for (;;) {
                        fl->label = htonl(prandom_u32())&IPV6_FLOWLABEL_MASK;
                        if (fl->label) {
                                lfl = __fl_lookup(net, fl->label);
                                if (!lfl)
                                        break;
                        }
                }
        } else {
                /*
                 * we dropped the ip6_fl_lock, so this entry could have
                 * reappeared and we need to recheck for it.
                 *
                 * OTOH no need to search the active socket first, like it is
                 * done in ipv6_flowlabel_opt - sock is locked, so a new entry
                 * with the same label can only appear on another sock
                 */
                lfl = __fl_lookup(net, fl->label);
                if (lfl) {
                        atomic_inc(&lfl->users);
                        spin_unlock_bh(&ip6_fl_lock);
                        return lfl;
                }
        }

        fl->lastuse = jiffies;
        fl->next = fl_ht[FL_HASH(fl->label)];
        rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
        atomic_inc(&fl_size);
        spin_unlock_bh(&ip6_fl_lock);
        return NULL;
}

/* Socket flowlabel lists */

struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
{
        struct ipv6_fl_socklist *sfl;
        struct ipv6_pinfo *np = inet6_sk(sk);

        label &= IPV6_FLOWLABEL_MASK;

        rcu_read_lock_bh();
        for_each_sk_fl_rcu(np, sfl) {
                struct ip6_flowlabel *fl = sfl->fl;

                /* Take the reference only if the entry is still live;
                 * a concurrent IPV6_FL_A_PUT may already have dropped
                 * the last reference.
                 */
                if (fl->label == label && atomic_inc_not_zero(&fl->users)) {
                        fl->lastuse = jiffies;
                        rcu_read_unlock_bh();
                        return fl;
                }
        }
        rcu_read_unlock_bh();
        return NULL;
}
EXPORT_SYMBOL_GPL(fl6_sock_lookup);

void fl6_free_socklist(struct sock *sk)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6_fl_socklist *sfl;

        if (!rcu_access_pointer(np->ipv6_fl_list))
                return;

        spin_lock_bh(&ip6_sk_fl_lock);
        while ((sfl = rcu_dereference_protected(np->ipv6_fl_list,
                                                lockdep_is_held(&ip6_sk_fl_lock))) != NULL) {
                np->ipv6_fl_list = sfl->next;
                spin_unlock_bh(&ip6_sk_fl_lock);

                fl_release(sfl->fl);
                kfree_rcu(sfl, rcu);

                spin_lock_bh(&ip6_sk_fl_lock);
        }
        spin_unlock_bh(&ip6_sk_fl_lock);
}

/* Service routines */


/*
   This is the only difficult place: a flow label enforces identical
   headers up to and including the routing header, but the user may
   supply further options following the routing header.
 */

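/*
 * Example: for a label created with a routing header attached
 * (opt_nflen covers hopopt/dst0opt/srcrt) and a sender passing only a
 * destination-options header in fopt (opt_flen != 0), the merged
 * result takes hopopt, dst0opt and srcrt from the label and dst1opt
 * from the caller, so everything up to and including the routing
 * header stays exactly as the label defined it.
 */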
struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
                                         struct ip6_flowlabel *fl,
                                         struct ipv6_txoptions *fopt)
{
        struct ipv6_txoptions *fl_opt = fl->opt;

        if (!fopt || fopt->opt_flen == 0)
                return fl_opt;

        if (fl_opt) {
                opt_space->hopopt = fl_opt->hopopt;
                opt_space->dst0opt = fl_opt->dst0opt;
                opt_space->srcrt = fl_opt->srcrt;
                opt_space->opt_nflen = fl_opt->opt_nflen;
        } else {
                if (fopt->opt_nflen == 0)
                        return fopt;
                opt_space->hopopt = NULL;
                opt_space->dst0opt = NULL;
                opt_space->srcrt = NULL;
                opt_space->opt_nflen = 0;
        }
        opt_space->dst1opt = fopt->dst1opt;
        opt_space->opt_flen = fopt->opt_flen;
        return opt_space;
}
EXPORT_SYMBOL_GPL(fl6_merge_options);

static unsigned long check_linger(unsigned long ttl)
{
        if (ttl < FL_MIN_LINGER)
                return FL_MIN_LINGER*HZ;
        if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN))
                return 0;
        return ttl*HZ;
}

static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires)
{
        linger = check_linger(linger);
        if (!linger)
                return -EPERM;
        expires = check_linger(expires);
        if (!expires)
                return -EPERM;

        spin_lock_bh(&ip6_fl_lock);
        fl->lastuse = jiffies;
        if (time_before(fl->linger, linger))
                fl->linger = linger;
        if (time_before(expires, fl->linger))
                expires = fl->linger;
        if (time_before(fl->expires, fl->lastuse + expires))
                fl->expires = fl->lastuse + expires;
        spin_unlock_bh(&ip6_fl_lock);

        return 0;
}

static struct ip6_flowlabel *
fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
          char __user *optval, int optlen, int *err_p)
{
        struct ip6_flowlabel *fl = NULL;
        int olen;
        int addr_type;
        int err;

        olen = optlen - CMSG_ALIGN(sizeof(*freq));
        err = -EINVAL;
        if (olen > 64 * 1024)
                goto done;

        err = -ENOMEM;
        fl = kzalloc(sizeof(*fl), GFP_KERNEL);
        if (!fl)
                goto done;

        if (olen > 0) {
                struct msghdr msg;
                struct flowi6 flowi6;
                int junk;

                err = -ENOMEM;
                fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
                if (!fl->opt)
                        goto done;

                memset(fl->opt, 0, sizeof(*fl->opt));
                fl->opt->tot_len = sizeof(*fl->opt) + olen;
                err = -EFAULT;
                if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen))
                        goto done;

                msg.msg_controllen = olen;
                msg.msg_control = (void *)(fl->opt+1);
                memset(&flowi6, 0, sizeof(flowi6));

                err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
                                            &junk, &junk, &junk);
                if (err)
                        goto done;
                err = -EINVAL;
                if (fl->opt->opt_flen)
                        goto done;
                if (fl->opt->opt_nflen == 0) {
                        kfree(fl->opt);
                        fl->opt = NULL;
                }
        }

        fl->fl_net = net;
        fl->expires = jiffies;
        err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
        if (err)
                goto done;
        fl->share = freq->flr_share;
        addr_type = ipv6_addr_type(&freq->flr_dst);
        if ((addr_type & IPV6_ADDR_MAPPED) ||
            addr_type == IPV6_ADDR_ANY) {
                err = -EINVAL;
                goto done;
        }
        fl->dst = freq->flr_dst;
        atomic_set(&fl->users, 1);
        switch (fl->share) {
        case IPV6_FL_S_EXCL:
        case IPV6_FL_S_ANY:
                break;
        case IPV6_FL_S_PROCESS:
                fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
                break;
        case IPV6_FL_S_USER:
                fl->owner.uid = current_euid();
                break;
        default:
                err = -EINVAL;
                goto done;
        }
        return fl;

done:
        fl_free(fl);
        *err_p = err;
        return NULL;
}

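/*
 * Admission control for label creation.  While fewer than
 * FL_MAX_PER_SOCK labels exist in total the check is skipped entirely.
 * A full table (room <= 0) refuses everybody; otherwise, without
 * CAP_NET_ADMIN, a socket is refused at FL_MAX_PER_SOCK labels of its
 * own, once the table is half full if it already holds any label, or
 * once the table is three quarters full in any case.
 */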
static int mem_check(struct sock *sk)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6_fl_socklist *sfl;
        int room = FL_MAX_SIZE - atomic_read(&fl_size);
        int count = 0;

        if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
                return 0;

        rcu_read_lock_bh();
        for_each_sk_fl_rcu(np, sfl)
                count++;
        rcu_read_unlock_bh();

        if (room <= 0 ||
            ((count >= FL_MAX_PER_SOCK ||
              (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
             !capable(CAP_NET_ADMIN)))
                return -ENOBUFS;

        return 0;
}

static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
                struct ip6_flowlabel *fl)
{
        spin_lock_bh(&ip6_sk_fl_lock);
        sfl->fl = fl;
        sfl->next = np->ipv6_fl_list;
        rcu_assign_pointer(np->ipv6_fl_list, sfl);
        spin_unlock_bh(&ip6_sk_fl_lock);
}

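/*
 * getsockopt(IPV6_FLOWLABEL_MGR): reports the peer's label
 * (IPV6_FL_F_REMOTE), the reflected label when repflow is set, or the
 * parameters of the lease matching this socket's current flow label.
 */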
int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
                           int flags)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6_fl_socklist *sfl;

        if (flags & IPV6_FL_F_REMOTE) {
                freq->flr_label = np->rcv_flowinfo & IPV6_FLOWLABEL_MASK;
                return 0;
        }

        if (np->repflow) {
                freq->flr_label = np->flow_label;
                return 0;
        }

        rcu_read_lock_bh();

        for_each_sk_fl_rcu(np, sfl) {
                if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) {
                        spin_lock_bh(&ip6_fl_lock);
                        freq->flr_label = sfl->fl->label;
                        freq->flr_dst = sfl->fl->dst;
                        freq->flr_share = sfl->fl->share;
                        freq->flr_expires = (sfl->fl->expires - jiffies) / HZ;
                        freq->flr_linger = sfl->fl->linger / HZ;

                        spin_unlock_bh(&ip6_fl_lock);
                        rcu_read_unlock_bh();
                        return 0;
                }
        }
        rcu_read_unlock_bh();

        return -ENOENT;
}

int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
{
        int uninitialized_var(err);
        struct net *net = sock_net(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct in6_flowlabel_req freq;
        struct ipv6_fl_socklist *sfl1 = NULL;
        struct ipv6_fl_socklist *sfl;
        struct ipv6_fl_socklist __rcu **sflp;
        struct ip6_flowlabel *fl, *fl1 = NULL;

        if (optlen < sizeof(freq))
                return -EINVAL;

        if (copy_from_user(&freq, optval, sizeof(freq)))
                return -EFAULT;

        switch (freq.flr_action) {
        case IPV6_FL_A_PUT:
                if (freq.flr_flags & IPV6_FL_F_REFLECT) {
                        if (sk->sk_protocol != IPPROTO_TCP)
                                return -ENOPROTOOPT;
                        if (!np->repflow)
                                return -ESRCH;
                        np->flow_label = 0;
                        np->repflow = 0;
                        return 0;
                }
                spin_lock_bh(&ip6_sk_fl_lock);
                for (sflp = &np->ipv6_fl_list;
                     (sfl = rcu_dereference(*sflp)) != NULL;
                     sflp = &sfl->next) {
                        if (sfl->fl->label == freq.flr_label) {
                                if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
                                        np->flow_label &= ~IPV6_FLOWLABEL_MASK;
                                *sflp = rcu_dereference(sfl->next);
                                spin_unlock_bh(&ip6_sk_fl_lock);
                                fl_release(sfl->fl);
                                kfree_rcu(sfl, rcu);
                                return 0;
                        }
                }
                spin_unlock_bh(&ip6_sk_fl_lock);
                return -ESRCH;

        case IPV6_FL_A_RENEW:
                rcu_read_lock_bh();
                for_each_sk_fl_rcu(np, sfl) {
                        if (sfl->fl->label == freq.flr_label) {
                                err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
                                rcu_read_unlock_bh();
                                return err;
                        }
                }
                rcu_read_unlock_bh();

                if (freq.flr_share == IPV6_FL_S_NONE &&
                    ns_capable(net->user_ns, CAP_NET_ADMIN)) {
                        fl = fl_lookup(net, freq.flr_label);
                        if (fl) {
                                err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
                                fl_release(fl);
                                return err;
                        }
                }
                return -ESRCH;

        case IPV6_FL_A_GET:
                if (freq.flr_flags & IPV6_FL_F_REFLECT) {
                        if (net->ipv6.sysctl.flowlabel_consistency) {
                                net_info_ratelimited("Cannot set IPV6_FL_F_REFLECT if the flowlabel_consistency sysctl is enabled\n");
                                return -EPERM;
                        }

                        if (sk->sk_protocol != IPPROTO_TCP)
                                return -ENOPROTOOPT;

                        np->repflow = 1;
                        return 0;
                }

                if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
                        return -EINVAL;

                fl = fl_create(net, sk, &freq, optval, optlen, &err);
                if (!fl)
                        return err;
                sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);

                if (freq.flr_label) {
                        err = -EEXIST;
                        rcu_read_lock_bh();
                        for_each_sk_fl_rcu(np, sfl) {
                                if (sfl->fl->label == freq.flr_label) {
                                        if (freq.flr_flags&IPV6_FL_F_EXCL) {
                                                rcu_read_unlock_bh();
                                                goto done;
                                        }
                                        fl1 = sfl->fl;
                                        atomic_inc(&fl1->users);
                                        break;
                                }
                        }
                        rcu_read_unlock_bh();

                        if (!fl1)
                                fl1 = fl_lookup(net, freq.flr_label);
                        if (fl1) {
recheck:
                                err = -EEXIST;
                                if (freq.flr_flags&IPV6_FL_F_EXCL)
                                        goto release;
                                err = -EPERM;
                                if (fl1->share == IPV6_FL_S_EXCL ||
                                    fl1->share != fl->share ||
                                    ((fl1->share == IPV6_FL_S_PROCESS) &&
                                     (fl1->owner.pid != fl->owner.pid)) ||
                                    ((fl1->share == IPV6_FL_S_USER) &&
                                     !uid_eq(fl1->owner.uid, fl->owner.uid)))
                                        goto release;

                                err = -ENOMEM;
                                if (!sfl1)
                                        goto release;
                                if (fl->linger > fl1->linger)
                                        fl1->linger = fl->linger;
                                if ((long)(fl->expires - fl1->expires) > 0)
                                        fl1->expires = fl->expires;
                                fl_link(np, sfl1, fl1);
                                fl_free(fl);
                                return 0;

release:
                                fl_release(fl1);
                                goto done;
                        }
                }
                err = -ENOENT;
                if (!(freq.flr_flags&IPV6_FL_F_CREATE))
                        goto done;

                err = -ENOMEM;
                if (!sfl1)
                        goto done;

                err = mem_check(sk);
                if (err != 0)
                        goto done;

                fl1 = fl_intern(net, fl, freq.flr_label);
                if (fl1)
                        goto recheck;

                if (!freq.flr_label) {
                        if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
                                         &fl->label, sizeof(fl->label))) {
                                /* Intentionally ignore fault. */
                        }
                }

                fl_link(np, sfl1, fl);
                return 0;

        default:
                return -EINVAL;
        }

done:
        fl_free(fl);
        kfree(sfl1);
        return err;
}

#ifdef CONFIG_PROC_FS

struct ip6fl_iter_state {
        struct seq_net_private p;
        struct pid_namespace *pid_ns;
        int bucket;
};

#define ip6fl_seq_private(seq)  ((struct ip6fl_iter_state *)(seq)->private)

static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
{
        struct ip6_flowlabel *fl = NULL;
        struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
        struct net *net = seq_file_net(seq);

        for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
                for_each_fl_rcu(state->bucket, fl) {
                        if (net_eq(fl->fl_net, net))
                                goto out;
                }
        }
        fl = NULL;
out:
        return fl;
}

static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
{
        struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
        struct net *net = seq_file_net(seq);

        for_each_fl_continue_rcu(fl) {
                if (net_eq(fl->fl_net, net))
                        goto out;
        }

try_again:
        if (++state->bucket <= FL_HASH_MASK) {
                for_each_fl_rcu(state->bucket, fl) {
                        if (net_eq(fl->fl_net, net))
                                goto out;
                }
                goto try_again;
        }
        fl = NULL;

out:
        return fl;
}

static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
{
        struct ip6_flowlabel *fl = ip6fl_get_first(seq);
        if (fl)
                while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL)
                        --pos;
        return pos ? NULL : fl;
}

static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock_bh();
        return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct ip6_flowlabel *fl;

        if (v == SEQ_START_TOKEN)
                fl = ip6fl_get_first(seq);
        else
                fl = ip6fl_get_next(seq, v);
        ++*pos;
        return fl;
}

static void ip6fl_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock_bh();
}

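/*
 * One /proc/net/ip6_flowlabel line per label, matching the header
 * below; an illustrative (made-up) example:
 *
 *      Label S Owner  Users  Linger Expires  Dst                              Opt
 *      A1043 2 1234   1      6      58       20010db8000000000000000000000001 0
 */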
static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
        struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
        if (v == SEQ_START_TOKEN) {
                seq_puts(seq, "Label S Owner  Users  Linger Expires  Dst                              Opt\n");
        } else {
                struct ip6_flowlabel *fl = v;
                seq_printf(seq,
                           "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
                           (unsigned int)ntohl(fl->label),
                           fl->share,
                           ((fl->share == IPV6_FL_S_PROCESS) ?
                            pid_nr_ns(fl->owner.pid, state->pid_ns) :
                            ((fl->share == IPV6_FL_S_USER) ?
                             from_kuid_munged(seq_user_ns(seq), fl->owner.uid) :
                             0)),
                           atomic_read(&fl->users),
                           fl->linger/HZ,
                           (long)(fl->expires - jiffies)/HZ,
                           &fl->dst,
                           fl->opt ? fl->opt->opt_nflen : 0);
        }
        return 0;
}

static const struct seq_operations ip6fl_seq_ops = {
        .start  =       ip6fl_seq_start,
        .next   =       ip6fl_seq_next,
        .stop   =       ip6fl_seq_stop,
        .show   =       ip6fl_seq_show,
};

static int ip6fl_seq_open(struct inode *inode, struct file *file)
{
        struct seq_file *seq;
        struct ip6fl_iter_state *state;
        int err;

        err = seq_open_net(inode, file, &ip6fl_seq_ops,
                           sizeof(struct ip6fl_iter_state));

        if (!err) {
                seq = file->private_data;
                state = ip6fl_seq_private(seq);
                rcu_read_lock();
                state->pid_ns = get_pid_ns(task_active_pid_ns(current));
                rcu_read_unlock();
        }
        return err;
}

static int ip6fl_seq_release(struct inode *inode, struct file *file)
{
        struct seq_file *seq = file->private_data;
        struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
        put_pid_ns(state->pid_ns);
        return seq_release_net(inode, file);
}

static const struct file_operations ip6fl_seq_fops = {
        .owner          =       THIS_MODULE,
        .open           =       ip6fl_seq_open,
        .read           =       seq_read,
        .llseek         =       seq_lseek,
        .release        =       ip6fl_seq_release,
};

static int __net_init ip6_flowlabel_proc_init(struct net *net)
{
        if (!proc_create("ip6_flowlabel", S_IRUGO, net->proc_net,
                         &ip6fl_seq_fops))
                return -ENOMEM;
        return 0;
}

static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
{
        remove_proc_entry("ip6_flowlabel", net->proc_net);
}
#else
static inline int ip6_flowlabel_proc_init(struct net *net)
{
        return 0;
}
static inline void ip6_flowlabel_proc_fini(struct net *net)
{
}
#endif

static void __net_exit ip6_flowlabel_net_exit(struct net *net)
{
        ip6_fl_purge(net);
        ip6_flowlabel_proc_fini(net);
}

static struct pernet_operations ip6_flowlabel_net_ops = {
        .init = ip6_flowlabel_proc_init,
        .exit = ip6_flowlabel_net_exit,
};

int ip6_flowlabel_init(void)
{
        return register_pernet_subsys(&ip6_flowlabel_net_ops);
}

void ip6_flowlabel_cleanup(void)
{
        del_timer(&ip6_fl_gc_timer);
        unregister_pernet_subsys(&ip6_flowlabel_net_ops);
}