linux/net/ipv4/tcp_metrics.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

int sysctl_tcp_nometrics_save __read_mostly;

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
                                                   const struct inetpeer_addr *daddr,
                                                   struct net *net, unsigned int hash);

struct tcp_fastopen_metrics {
        u16     mss;
        u16     syn_loss:10,            /* Recurring Fast Open SYN losses */
                try_exp:2;              /* Request w/ exp. option (once) */
        unsigned long   last_syn_loss;  /* Last Fast Open SYN loss */
        struct  tcp_fastopen_cookie     cookie;
};
/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility.
 * The kernel only stores RTT and RTTVAR in usec resolution.
 */
#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)

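/* One cache entry per (source address, destination address, netns)
 * triple.  Entries hang off tcp_metrics_hash; readers walk the chains
 * under RCU, and all modifications are serialized by tcp_metrics_lock.
 */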
struct tcp_metrics_block {
        struct tcp_metrics_block __rcu  *tcpm_next;
        possible_net_t                  tcpm_net;
        struct inetpeer_addr            tcpm_saddr;
        struct inetpeer_addr            tcpm_daddr;
        unsigned long                   tcpm_stamp;
        u32                             tcpm_lock;
        u32                             tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
        struct tcp_fastopen_metrics     tcpm_fastopen;

        struct rcu_head                 rcu_head;
};

static inline struct net *tm_net(struct tcp_metrics_block *tm)
{
        return read_pnet(&tm->tcpm_net);
}

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
                              enum tcp_metric_index idx)
{
        return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
                          enum tcp_metric_index idx)
{
        return tm->tcpm_vals[idx];
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
                           enum tcp_metric_index idx,
                           u32 val)
{
        tm->tcpm_vals[idx] = val;
}

static bool addr_same(const struct inetpeer_addr *a,
                      const struct inetpeer_addr *b)
{
        return inetpeer_addr_cmp(a, b) == 0;
}

struct tcpm_hash_bucket {
        struct tcp_metrics_block __rcu  *chain;
};

static struct tcpm_hash_bucket  *tcp_metrics_hash __read_mostly;
static unsigned int             tcp_metrics_hash_log __read_mostly;

static DEFINE_SPINLOCK(tcp_metrics_lock);

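/* Prime a cache entry from the route: record which RTAX_* metrics are
 * locked, copy their values (RTT and RTTVAR are converted from msec to
 * usec), and optionally clear the Fast Open state.
 */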
static void tcpm_suck_dst(struct tcp_metrics_block *tm,
                          const struct dst_entry *dst,
                          bool fastopen_clear)
{
        u32 msval;
        u32 val;

        tm->tcpm_stamp = jiffies;

        val = 0;
        if (dst_metric_locked(dst, RTAX_RTT))
                val |= 1 << TCP_METRIC_RTT;
        if (dst_metric_locked(dst, RTAX_RTTVAR))
                val |= 1 << TCP_METRIC_RTTVAR;
        if (dst_metric_locked(dst, RTAX_SSTHRESH))
                val |= 1 << TCP_METRIC_SSTHRESH;
        if (dst_metric_locked(dst, RTAX_CWND))
                val |= 1 << TCP_METRIC_CWND;
        if (dst_metric_locked(dst, RTAX_REORDERING))
                val |= 1 << TCP_METRIC_REORDERING;
        tm->tcpm_lock = val;

        msval = dst_metric_raw(dst, RTAX_RTT);
        tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;

        msval = dst_metric_raw(dst, RTAX_RTTVAR);
        tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
        tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
        tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
        tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
        if (fastopen_clear) {
                tm->tcpm_fastopen.mss = 0;
                tm->tcpm_fastopen.syn_loss = 0;
                tm->tcpm_fastopen.try_exp = 0;
                tm->tcpm_fastopen.cookie.exp = false;
                tm->tcpm_fastopen.cookie.len = 0;
        }
}

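/* Cached metrics are treated as stale after one hour; a stale entry is
 * re-primed from the current route metrics on its next lookup.
 */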
#define TCP_METRICS_TIMEOUT             (60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
        if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
                tcpm_suck_dst(tm, dst, false);
}

#define TCP_METRICS_RECLAIM_DEPTH       5
#define TCP_METRICS_RECLAIM_PTR         (struct tcp_metrics_block *) 0x1UL
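
/* Chain lookups encode three outcomes: a real pointer (hit), NULL (miss
 * on a short chain, so the caller may allocate), and
 * TCP_METRICS_RECLAIM_PTR (miss on a chain deeper than
 * TCP_METRICS_RECLAIM_DEPTH, telling tcpm_new() to recycle the oldest
 * entry in the bucket instead of growing the chain).
 */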

#define deref_locked(p) \
        rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))

static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
                                          struct inetpeer_addr *saddr,
                                          struct inetpeer_addr *daddr,
                                          unsigned int hash)
{
        struct tcp_metrics_block *tm;
        struct net *net;
        bool reclaim = false;

        spin_lock_bh(&tcp_metrics_lock);
        net = dev_net(dst->dev);

        /* While waiting for the spinlock, the cache might have been
         * populated with this entry, so we have to check again.
         */
        tm = __tcp_get_metrics(saddr, daddr, net, hash);
        if (tm == TCP_METRICS_RECLAIM_PTR) {
                reclaim = true;
                tm = NULL;
        }
        if (tm) {
                tcpm_check_stamp(tm, dst);
                goto out_unlock;
        }

        if (unlikely(reclaim)) {
                struct tcp_metrics_block *oldest;

                oldest = deref_locked(tcp_metrics_hash[hash].chain);
                for (tm = deref_locked(oldest->tcpm_next); tm;
                     tm = deref_locked(tm->tcpm_next)) {
                        if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
                                oldest = tm;
                }
                tm = oldest;
        } else {
                tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
                if (!tm)
                        goto out_unlock;
        }
        write_pnet(&tm->tcpm_net, net);
        tm->tcpm_saddr = *saddr;
        tm->tcpm_daddr = *daddr;

        tcpm_suck_dst(tm, dst, true);

        if (likely(!reclaim)) {
                tm->tcpm_next = tcp_metrics_hash[hash].chain;
                rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
        }

out_unlock:
        spin_unlock_bh(&tcp_metrics_lock);
        return tm;
}

static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
        if (tm)
                return tm;
        if (depth > TCP_METRICS_RECLAIM_DEPTH)
                return TCP_METRICS_RECLAIM_PTR;
        return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
                                                   const struct inetpeer_addr *daddr,
                                                   struct net *net, unsigned int hash)
{
        struct tcp_metrics_block *tm;
        int depth = 0;

        for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_saddr, saddr) &&
                    addr_same(&tm->tcpm_daddr, daddr) &&
                    net_eq(tm_net(tm), net))
                        break;
                depth++;
        }
        return tcp_get_encode(tm, depth);
}

static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
                                                       struct dst_entry *dst)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr saddr, daddr;
        unsigned int hash;
        struct net *net;

        saddr.family = req->rsk_ops->family;
        daddr.family = req->rsk_ops->family;
        switch (daddr.family) {
        case AF_INET:
                inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
                inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
                hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
                inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
                hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
                break;
#endif
        default:
                return NULL;
        }

        net = dev_net(dst->dev);
        hash ^= net_hash_mix(net);
        hash = hash_32(hash, tcp_metrics_hash_log);

        for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_saddr, &saddr) &&
                    addr_same(&tm->tcpm_daddr, &daddr) &&
                    net_eq(tm_net(tm), net))
                        break;
        }
        tcpm_check_stamp(tm, dst);
        return tm;
}

static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
                                                 struct dst_entry *dst,
                                                 bool create)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr saddr, daddr;
        unsigned int hash;
        struct net *net;

        if (sk->sk_family == AF_INET) {
                inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
                inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
                hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
        }
#if IS_ENABLED(CONFIG_IPV6)
        else if (sk->sk_family == AF_INET6) {
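                /* v4-mapped IPv6 destinations are cached as plain IPv4,
                 * so both socket families share a single entry.
                 */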
                if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
                        inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
                        inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
                        hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
                } else {
                        inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
                        inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
                        hash = ipv6_addr_hash(&sk->sk_v6_daddr);
                }
        }
#endif
        else
                return NULL;

        net = dev_net(dst->dev);
        hash ^= net_hash_mix(net);
        hash = hash_32(hash, tcp_metrics_hash_log);

        tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
        if (tm == TCP_METRICS_RECLAIM_PTR)
                tm = NULL;
        if (!tm && create)
                tm = tcpm_new(dst, &saddr, &daddr, hash);
        else
                tcpm_check_stamp(tm, dst);

        return tm;
}

/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
        struct tcp_metrics_block *tm;
        unsigned long rtt;
        u32 val;
        int m;

        sk_dst_confirm(sk);
        if (sysctl_tcp_nometrics_save || !dst)
                return;

        rcu_read_lock();
        if (icsk->icsk_backoff || !tp->srtt_us) {
                /* This session failed to estimate rtt. Why?
                 * Probably, no packets returned in time.  Reset our
                 * results.
                 */
                tm = tcp_get_metrics(sk, dst, false);
                if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
                        tcp_metric_set(tm, TCP_METRIC_RTT, 0);
                goto out_unlock;
        } else
                tm = tcp_get_metrics(sk, dst, true);

        if (!tm)
                goto out_unlock;

        rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
        m = rtt - tp->srtt_us;

        /* If the newly calculated rtt is larger than the stored one,
         * store the new one. Otherwise, use EWMA. Remember, rtt
         * overestimation is always better than underestimation.
         */
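        /* Worked example (values in usec): a cached rtt of 100000 and
         * tp->srtt_us of 80000 give m = 20000, so the stored value decays
         * to 100000 - (20000 >> 3) = 97500 rather than dropping straight
         * to the smaller sample.
         */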
        if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
                if (m <= 0)
                        rtt = tp->srtt_us;
                else
                        rtt -= (m >> 3);
                tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
        }

        if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
                unsigned long var;

                if (m < 0)
                        m = -m;

                /* Scale deviation to rttvar fixed point */
                m >>= 1;
                if (m < tp->mdev_us)
                        m = tp->mdev_us;

                var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
                if (m >= var)
                        var = m;
                else
                        var -= (var - m) >> 2;

                tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
        }

        if (tcp_in_initial_slowstart(tp)) {
                /* Slow start still did not finish. */
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && (tp->snd_cwnd >> 1) > val)
                                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                               tp->snd_cwnd >> 1);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        if (tp->snd_cwnd > val)
                                tcp_metric_set(tm, TCP_METRIC_CWND,
                                               tp->snd_cwnd);
                }
        } else if (!tcp_in_slow_start(tp) &&
                   icsk->icsk_ca_state == TCP_CA_Open) {
                /* Cong. avoidance phase, cwnd is reliable. */
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
                        tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
                }
        } else {
                /* Otherwise slow start did not finish: cwnd is not
                 * meaningful, and ssthresh may be invalid as well.
                 */
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        tcp_metric_set(tm, TCP_METRIC_CWND,
                                       (val + tp->snd_ssthresh) >> 1);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && tp->snd_ssthresh > val)
                                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                               tp->snd_ssthresh);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
                        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
                        if (val < tp->reordering &&
                            tp->reordering != net->ipv4.sysctl_tcp_reordering)
                                tcp_metric_set(tm, TCP_METRIC_REORDERING,
                                               tp->reordering);
                }
        }
        tm->tcpm_stamp = jiffies;
out_unlock:
        rcu_read_unlock();
}

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_metrics_block *tm;
        u32 val, crtt = 0; /* cached RTT scaled by 8 */

        sk_dst_confirm(sk);
        if (!dst)
                goto reset;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (!tm) {
                rcu_read_unlock();
                goto reset;
        }

        if (tcp_metric_locked(tm, TCP_METRIC_CWND))
                tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
        if (val) {
                tp->snd_ssthresh = val;
                if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
                        tp->snd_ssthresh = tp->snd_cwnd_clamp;
        } else {
                /* ssthresh may have been reduced unnecessarily during
                 * 3WHS. Restore it to its initial default.
                 */
                tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        }
        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
        if (val && tp->reordering != val) {
                tcp_disable_fack(tp);
                tp->reordering = val;
        }

        crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
        rcu_read_unlock();
reset:
        /* The initial RTT measurement from the SYN/SYN-ACK is not ideal
         * to seed the RTO for later data packets because SYN packets are
         * small. Use the per-dst cached values to seed the RTO but keep
         * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
         * Later the RTO will be updated immediately upon obtaining the first
         * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
         * influences the first RTO but not later RTT estimation.
         *
         * But if RTT is not available from the SYN (due to retransmits or
         * syn cookies) or the cache, force a conservative 3secs timeout.
         *
         * A bit of theory: RTT is the time that passes between sending a
         * "normal" sized packet and receiving its ACK. In normal
         * circumstances sending small packets forces the peer to delay
         * ACKs, so the calculation is still correct. The algorithm is
         * adaptive and, provided we follow the specs, it NEVER
         * underestimates RTT. BUT! If the peer plays clever tricks such
         * as "quick acks" for long enough to drive the RTT down to a low
         * value, and then abruptly stops doing so and starts delaying
         * ACKs, expect trouble.
         */
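        /* For example, assuming HZ=1000: a cached RTT of 800000 (100 msec
         * scaled by 8) gives crtt = 800000 / 8000 = 100 jiffies, so with
         * the default 200 msec rto_min the seeded RTO is 100 + 200 = 300
         * msec.
         */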
        if (crtt > tp->srtt_us) {
                /* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
                crtt /= 8 * USEC_PER_SEC / HZ;
                inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
        } else if (tp->srtt_us == 0) {
                /* RFC6298 5.7: We've failed to get a valid RTT sample from
                 * the 3WHS. This is most likely due to retransmission,
                 * including spurious ones. Reset the RTO back to 3secs
                 * from the more aggressive 1sec to avoid more spurious
                 * retransmission.
                 */
                tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
                tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;

                inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
        }
        /* Cut cwnd down to 1 per RFC5681 if the SYN or SYN-ACK has been
         * retransmitted. In light of RFC6298's more aggressive 1sec
         * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
         * retransmission has occurred.
         */
        if (tp->total_retrans > 1)
                tp->snd_cwnd = 1;
        else
                tp->snd_cwnd = tcp_init_cwnd(tp, dst);
        tp->snd_cwnd_stamp = tcp_jiffies32;
}

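/* A peer is "proven" if we hold a cached RTT sample for it, i.e. some
 * earlier connection to this address completed a full round trip.  The
 * passive-open path uses this to treat known-good peers more leniently
 * than unknown ones when the listener is under pressure.
 */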
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
{
        struct tcp_metrics_block *tm;
        bool ret;

        if (!dst)
                return false;

        rcu_read_lock();
        tm = __tcp_get_metrics_req(req, dst);
        if (tm && tcp_metric_get(tm, TCP_METRIC_RTT))
                ret = true;
        else
                ret = false;
        rcu_read_unlock();

        return ret;
}

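/* Fast Open state is read on the connect path without taking
 * tcp_metrics_lock, so a dedicated seqlock keeps the mss, cookie and
 * SYN-loss fields coherent against concurrent writers.
 */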
static DEFINE_SEQLOCK(fastopen_seqlock);

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
                            struct tcp_fastopen_cookie *cookie,
                            int *syn_loss, unsigned long *last_syn_loss)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
        if (tm) {
                struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
                unsigned int seq;

                do {
                        seq = read_seqbegin(&fastopen_seqlock);
                        if (tfom->mss)
                                *mss = tfom->mss;
                        *cookie = tfom->cookie;
                        if (cookie->len <= 0 && tfom->try_exp == 1)
                                cookie->exp = true;
                        *syn_loss = tfom->syn_loss;
                        *last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
                } while (read_seqretry(&fastopen_seqlock, seq));
        }
        rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
                            struct tcp_fastopen_cookie *cookie, bool syn_lost,
                            u16 try_exp)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_metrics_block *tm;

        if (!dst)
                return;
        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (tm) {
                struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

                write_seqlock_bh(&fastopen_seqlock);
                if (mss)
                        tfom->mss = mss;
                if (cookie && cookie->len > 0)
                        tfom->cookie = *cookie;
                else if (try_exp > tfom->try_exp &&
                         tfom->cookie.len <= 0 && !tfom->cookie.exp)
                        tfom->try_exp = try_exp;
                if (syn_lost) {
                        ++tfom->syn_loss;
                        tfom->last_syn_loss = jiffies;
                } else
                        tfom->syn_loss = 0;
                write_sequnlock_bh(&fastopen_seqlock);
        }
        rcu_read_unlock();
}

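/* Userspace queries and flushes the cache through the "tcp_metrics"
 * generic netlink family; iproute2 exposes it as "ip tcp_metrics".
 */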
static struct genl_family tcp_metrics_nl_family;

static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
        [TCP_METRICS_ATTR_ADDR_IPV4]    = { .type = NLA_U32, },
        [TCP_METRICS_ATTR_ADDR_IPV6]    = { .type = NLA_BINARY,
                                            .len = sizeof(struct in6_addr), },
        /* The following attributes are not received for GET/DEL;
         * we keep them for reference.
         */
#if 0
        [TCP_METRICS_ATTR_AGE]          = { .type = NLA_MSECS, },
        [TCP_METRICS_ATTR_TW_TSVAL]     = { .type = NLA_U32, },
        [TCP_METRICS_ATTR_TW_TS_STAMP]  = { .type = NLA_S32, },
        [TCP_METRICS_ATTR_VALS]         = { .type = NLA_NESTED, },
        [TCP_METRICS_ATTR_FOPEN_MSS]    = { .type = NLA_U16, },
        [TCP_METRICS_ATTR_FOPEN_SYN_DROPS]      = { .type = NLA_U16, },
        [TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]    = { .type = NLA_MSECS, },
        [TCP_METRICS_ATTR_FOPEN_COOKIE] = { .type = NLA_BINARY,
                                            .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
                                 struct tcp_metrics_block *tm)
{
        struct nlattr *nest;
        int i;

        switch (tm->tcpm_daddr.family) {
        case AF_INET:
                if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
                                    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
                        goto nla_put_failure;
                if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
                                    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
                        goto nla_put_failure;
                break;
        case AF_INET6:
                if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
                                     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
                        goto nla_put_failure;
                if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
                                     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
                        goto nla_put_failure;
                break;
        default:
                return -EAFNOSUPPORT;
        }

        if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
                          jiffies - tm->tcpm_stamp,
                          TCP_METRICS_ATTR_PAD) < 0)
                goto nla_put_failure;

        {
                int n = 0;

                nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
                if (!nest)
                        goto nla_put_failure;
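                /* RTT and RTTVAR are stored in usec.  Emit the usec value
                 * under the *_US attribute first, then convert to the
                 * legacy msec resolution (floored at 1) for the regular
                 * attribute emitted below.
                 */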
                for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
                        u32 val = tm->tcpm_vals[i];

                        if (!val)
                                continue;
                        if (i == TCP_METRIC_RTT) {
                                if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
                                                val) < 0)
                                        goto nla_put_failure;
                                n++;
                                val = max(val / 1000, 1U);
                        }
                        if (i == TCP_METRIC_RTTVAR) {
                                if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
                                                val) < 0)
                                        goto nla_put_failure;
                                n++;
                                val = max(val / 1000, 1U);
                        }
                        if (nla_put_u32(msg, i + 1, val) < 0)
                                goto nla_put_failure;
                        n++;
                }
                if (n)
                        nla_nest_end(msg, nest);
                else
                        nla_nest_cancel(msg, nest);
        }

        {
                struct tcp_fastopen_metrics tfom_copy[1], *tfom;
                unsigned int seq;

                do {
                        seq = read_seqbegin(&fastopen_seqlock);
                        tfom_copy[0] = tm->tcpm_fastopen;
                } while (read_seqretry(&fastopen_seqlock, seq));

                tfom = tfom_copy;
                if (tfom->mss &&
                    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
                                tfom->mss) < 0)
                        goto nla_put_failure;
                if (tfom->syn_loss &&
                    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
                                tfom->syn_loss) < 0 ||
                     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
                                jiffies - tfom->last_syn_loss,
                                TCP_METRICS_ATTR_PAD) < 0))
                        goto nla_put_failure;
                if (tfom->cookie.len > 0 &&
                    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
                            tfom->cookie.len, tfom->cookie.val) < 0)
                        goto nla_put_failure;
        }

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
                                 struct netlink_callback *cb,
                                 struct tcp_metrics_block *tm)
{
        void *hdr;

        hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &tcp_metrics_nl_family, NLM_F_MULTI,
                          TCP_METRICS_CMD_GET);
        if (!hdr)
                return -EMSGSIZE;

        if (tcp_metrics_fill_info(skb, tm) < 0)
                goto nla_put_failure;

        genlmsg_end(skb, hdr);
        return 0;

nla_put_failure:
        genlmsg_cancel(skb, hdr);
        return -EMSGSIZE;
}

static int tcp_metrics_nl_dump(struct sk_buff *skb,
                               struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        unsigned int max_rows = 1U << tcp_metrics_hash_log;
        unsigned int row, s_row = cb->args[0];
        int s_col = cb->args[1], col = s_col;

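        /* cb->args[0] and cb->args[1] carry the hash row and chain column
         * where the previous dump pass stopped, so multi-part dumps resume
         * from that position.
         */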
        for (row = s_row; row < max_rows; row++, s_col = 0) {
                struct tcp_metrics_block *tm;
                struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;

                rcu_read_lock();
                for (col = 0, tm = rcu_dereference(hb->chain); tm;
                     tm = rcu_dereference(tm->tcpm_next), col++) {
                        if (!net_eq(tm_net(tm), net))
                                continue;
                        if (col < s_col)
                                continue;
                        if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
                                rcu_read_unlock();
                                goto done;
                        }
                }
                rcu_read_unlock();
        }

done:
        cb->args[0] = row;
        cb->args[1] = col;
        return skb->len;
}

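/* Returns 0 when an address attribute was parsed, a negative errno on a
 * malformed attribute, and 1 when no attribute is present but it was
 * marked optional.
 */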
static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
                           unsigned int *hash, int optional, int v4, int v6)
{
        struct nlattr *a;

        a = info->attrs[v4];
        if (a) {
                inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
                if (hash)
                        *hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
                return 0;
        }
        a = info->attrs[v6];
        if (a) {
                struct in6_addr in6;

                if (nla_len(a) != sizeof(struct in6_addr))
                        return -EINVAL;
                in6 = nla_get_in6_addr(a);
                inetpeer_set_addr_v6(addr, &in6);
                if (hash)
                        *hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
                return 0;
        }
        return optional ? 1 : -EAFNOSUPPORT;
}

static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
                         unsigned int *hash, int optional)
{
        return __parse_nl_addr(info, addr, hash, optional,
                               TCP_METRICS_ATTR_ADDR_IPV4,
                               TCP_METRICS_ATTR_ADDR_IPV6);
}

static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
{
        return __parse_nl_addr(info, addr, NULL, 0,
                               TCP_METRICS_ATTR_SADDR_IPV4,
                               TCP_METRICS_ATTR_SADDR_IPV6);
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr saddr, daddr;
        unsigned int hash;
        struct sk_buff *msg;
        struct net *net = genl_info_net(info);
        void *reply;
        int ret;
        bool src = true;

        ret = parse_nl_addr(info, &daddr, &hash, 0);
        if (ret < 0)
                return ret;

        ret = parse_nl_saddr(info, &saddr);
        if (ret < 0)
                src = false;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
                                  info->genlhdr->cmd);
        if (!reply)
                goto nla_put_failure;

        hash ^= net_hash_mix(net);
        hash = hash_32(hash, tcp_metrics_hash_log);
        ret = -ESRCH;
        rcu_read_lock();
        for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_daddr, &daddr) &&
                    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
                    net_eq(tm_net(tm), net)) {
                        ret = tcp_metrics_fill_info(msg, tm);
                        break;
                }
        }
        rcu_read_unlock();
        if (ret < 0)
                goto out_free;

        genlmsg_end(msg, reply);
        return genlmsg_reply(msg, info);

nla_put_failure:
        ret = -EMSGSIZE;

out_free:
        nlmsg_free(msg);
        return ret;
}

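/* Remove every entry belonging to @net.  Entries are unlinked under
 * tcp_metrics_lock and freed via kfree_rcu(), so concurrent RCU readers
 * walking the chains remain safe.
 */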
static void tcp_metrics_flush_all(struct net *net)
{
        unsigned int max_rows = 1U << tcp_metrics_hash_log;
        struct tcpm_hash_bucket *hb = tcp_metrics_hash;
        struct tcp_metrics_block *tm;
        unsigned int row;

        for (row = 0; row < max_rows; row++, hb++) {
                struct tcp_metrics_block __rcu **pp;

                spin_lock_bh(&tcp_metrics_lock);
                pp = &hb->chain;
                for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
                        if (net_eq(tm_net(tm), net)) {
                                *pp = tm->tcpm_next;
                                kfree_rcu(tm, rcu_head);
                        } else {
                                pp = &tm->tcpm_next;
                        }
                }
                spin_unlock_bh(&tcp_metrics_lock);
        }
}

static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct tcpm_hash_bucket *hb;
        struct tcp_metrics_block *tm;
        struct tcp_metrics_block __rcu **pp;
        struct inetpeer_addr saddr, daddr;
        unsigned int hash;
        struct net *net = genl_info_net(info);
        int ret;
        bool src = true, found = false;

        ret = parse_nl_addr(info, &daddr, &hash, 1);
        if (ret < 0)
                return ret;
        if (ret > 0) {
                tcp_metrics_flush_all(net);
                return 0;
        }
        ret = parse_nl_saddr(info, &saddr);
        if (ret < 0)
                src = false;

        hash ^= net_hash_mix(net);
        hash = hash_32(hash, tcp_metrics_hash_log);
        hb = tcp_metrics_hash + hash;
        pp = &hb->chain;
        spin_lock_bh(&tcp_metrics_lock);
        for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
                if (addr_same(&tm->tcpm_daddr, &daddr) &&
                    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
                    net_eq(tm_net(tm), net)) {
                        *pp = tm->tcpm_next;
                        kfree_rcu(tm, rcu_head);
                        found = true;
                } else {
                        pp = &tm->tcpm_next;
                }
        }
        spin_unlock_bh(&tcp_metrics_lock);
        if (!found)
                return -ESRCH;
        return 0;
}

static const struct genl_ops tcp_metrics_nl_ops[] = {
        {
                .cmd = TCP_METRICS_CMD_GET,
                .doit = tcp_metrics_nl_cmd_get,
                .dumpit = tcp_metrics_nl_dump,
                .policy = tcp_metrics_nl_policy,
        },
        {
                .cmd = TCP_METRICS_CMD_DEL,
                .doit = tcp_metrics_nl_cmd_del,
                .policy = tcp_metrics_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
};

static struct genl_family tcp_metrics_nl_family __ro_after_init = {
        .hdrsize        = 0,
        .name           = TCP_METRICS_GENL_NAME,
        .version        = TCP_METRICS_GENL_VERSION,
        .maxattr        = TCP_METRICS_ATTR_MAX,
        .netnsok        = true,
        .module         = THIS_MODULE,
        .ops            = tcp_metrics_nl_ops,
        .n_ops          = ARRAY_SIZE(tcp_metrics_nl_ops),
};

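/* Hash table sizing: "tcpmhash_entries=N" on the kernel command line
 * overrides the RAM-based default; tcp_net_metrics_init() rounds the
 * value up to a power of two.
 */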
static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
        ssize_t ret;

        if (!str)
                return 0;

        ret = kstrtouint(str, 0, &tcpmhash_entries);
        if (ret)
                return 0;

        return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

static int __net_init tcp_net_metrics_init(struct net *net)
{
        size_t size;
        unsigned int slots;

        if (!net_eq(net, &init_net))
                return 0;

        slots = tcpmhash_entries;
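        /* Default sizing: 128 * 1024 pages is 512MB with 4KB pages;
         * boxes with at least that much RAM get 16K buckets, smaller
         * ones 8K.
         */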
        if (!slots) {
                if (totalram_pages >= 128 * 1024)
                        slots = 16 * 1024;
                else
                        slots = 8 * 1024;
        }

        tcp_metrics_hash_log = order_base_2(slots);
        size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;

        tcp_metrics_hash = kvzalloc(size, GFP_KERNEL);
        if (!tcp_metrics_hash)
                return -ENOMEM;

        return 0;
}

static void __net_exit tcp_net_metrics_exit(struct net *net)
{
        tcp_metrics_flush_all(net);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
        .init   =       tcp_net_metrics_init,
        .exit   =       tcp_net_metrics_exit,
};

void __init tcp_metrics_init(void)
{
        int ret;

        ret = register_pernet_subsys(&tcp_net_metrics_ops);
        if (ret < 0)
                panic("Could not allocate the tcp_metrics hash table\n");

        ret = genl_register_family(&tcp_metrics_nl_family);
        if (ret < 0)
                panic("Could not register tcp_metrics generic netlink\n");
}