linux/net/mptcp/pm.c
// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"

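/* Dedicated workqueue for path manager events: handlers below queue work
 * while holding the PM spinlock; the work item later runs in process
 * context, where the msk socket lock can be taken.
 */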
static struct workqueue_struct *pm_wq;

/* path manager command handlers */

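/* Record @addr as the local address to announce and raise the addr_signal
 * flag; the option-writing code consumes it via mptcp_pm_addr_signal() and
 * emits an ADD_ADDR on a later packet.
 */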
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr)
{
	pr_debug("msk=%p, local_id=%d", msk, addr->id);

	msk->pm.local = *addr;
	WRITE_ONCE(msk->pm.addr_signal, true);
	return 0;
}

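/* Address removal and subflow teardown are not implemented yet; the stubs
 * below report -ENOTSUPP in the meantime.
 */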
int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id)
{
	return -ENOTSUPP;
}

int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 remote_id)
{
	return -ENOTSUPP;
}

/* path manager event handlers */

void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);

	WRITE_ONCE(pm->server_side, server_side);
}

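/* Decide whether a new subflow may be created: check the limit lock-free
 * first, then re-check and update the count under the PM lock; once the
 * limit is hit, clear accept_subflow so later attempts can bail out
 * without taking the lock.
 */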
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool ret;

	pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
		 pm->subflows_max, READ_ONCE(pm->accept_subflow));

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->accept_subflow))
		return false;

	spin_lock_bh(&pm->lock);
	ret = pm->subflows < pm->subflows_max;
	if (ret && ++pm->subflows == pm->subflows_max)
		WRITE_ONCE(pm->accept_subflow, false);
	spin_unlock_bh(&pm->lock);

	return ret;
}

/* return true if the new status bit is currently cleared, that is, this event
 * can be served, possibly by an already scheduled work item
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
				   enum mptcp_pm_status new_status)
{
	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
		 BIT(new_status));
	if (msk->pm.status & BIT(new_status))
		return false;

	msk->pm.status |= BIT(new_status);
	if (queue_work(pm_wq, &msk->pm.work))
		sock_hold((struct sock *)msk);
	return true;
}

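/* The MPTCP handshake has fully completed: ask the worker to act on it,
 * with the usual lock-free work_pending check before taking the PM lock.
 */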
void mptcp_pm_fully_established(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p", msk);
}

void mptcp_pm_subflow_established(struct mptcp_sock *msk,
				  struct mptcp_subflow_context *subflow)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id)
{
	pr_debug("msk=%p", msk);
}

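/* An ADD_ADDR option has been received: stash the announced remote address
 * and wake the worker, unless the accept limit was already reached.
 */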
void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
		 READ_ONCE(pm->accept_addr));

	/* avoid acquiring the lock if there is no room for further addresses */
	if (!READ_ONCE(pm->accept_addr))
		return;

	spin_lock_bh(&pm->lock);

	/* be sure there is something to signal, re-checking under the PM lock */
	if (READ_ONCE(pm->accept_addr) &&
	    mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED))
		pm->remote = *addr;

	spin_unlock_bh(&pm->lock);
}

/* path manager helpers */

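/* Called from the option-writing path: if an address announcement is
 * pending and @remaining bytes of option space suffice, copy the address
 * into @saddr and clear the signal flag.
 */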
bool mptcp_pm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			  struct mptcp_addr_info *saddr)
{
	bool ret = false;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_signal(msk))
		goto out_unlock;

	if (remaining < mptcp_add_addr_len(msk->pm.local.family))
		goto out_unlock;

	*saddr = msk->pm.local;
	WRITE_ONCE(msk->pm.addr_signal, false);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	return mptcp_pm_nl_get_local_id(msk, skc);
}

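/* Work item body: with the msk socket lock and the PM lock held, clear
 * each pending status bit and dispatch the event to the in-kernel netlink
 * path manager; the final sock_put() pairs with the sock_hold() taken in
 * mptcp_pm_schedule_work() when the work was queued.
 */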
static void pm_worker(struct work_struct *work)
{
	struct mptcp_pm_data *pm = container_of(work, struct mptcp_pm_data,
						work);
	struct mptcp_sock *msk = container_of(pm, struct mptcp_sock, pm);
	struct sock *sk = (struct sock *)msk;

	lock_sock(sk);
	spin_lock_bh(&msk->pm.lock);

	pr_debug("msk=%p status=%x", msk, pm->status);
	if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
		pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
		mptcp_pm_nl_add_addr_received(msk);
	}
	if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
		pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
		mptcp_pm_nl_fully_established(msk);
	}
	if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) {
		pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);
		mptcp_pm_nl_subflow_established(msk);
	}

	spin_unlock_bh(&msk->pm.lock);
	release_sock(sk);
	sock_put(sk);
}

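/* Reset the per-connection PM state: all counters start at zero and every
 * signal/accept flag is cleared here; mptcp_pm_nl_data_init() then applies
 * the configured limits.
 */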
void mptcp_pm_data_init(struct mptcp_sock *msk)
{
	msk->pm.add_addr_signaled = 0;
	msk->pm.add_addr_accepted = 0;
	msk->pm.local_addr_used = 0;
	msk->pm.subflows = 0;
	WRITE_ONCE(msk->pm.work_pending, false);
	WRITE_ONCE(msk->pm.addr_signal, false);
	WRITE_ONCE(msk->pm.accept_addr, false);
	WRITE_ONCE(msk->pm.accept_subflow, false);
	msk->pm.status = 0;

	spin_lock_init(&msk->pm.lock);
	INIT_WORK(&msk->pm.work, pm_worker);

	mptcp_pm_nl_data_init(msk);
}

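/* If PM work was queued but has not run yet, cancel_work_sync() returns
 * true and the reference taken at queue time must be dropped here.
 */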
void mptcp_pm_close(struct mptcp_sock *msk)
{
	if (cancel_work_sync(&msk->pm.work))
		sock_put((struct sock *)msk);
}

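/* One-time initialization at boot: the PM cannot operate without its
 * workqueue, hence the panic() on allocation failure.
 */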
void mptcp_pm_init(void)
{
	pm_wq = alloc_workqueue("pm_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 8);
	if (!pm_wq)
		panic("Failed to allocate workqueue");

	mptcp_pm_nl_init();
}