qemu/slirp/if.c
/*
 * Copyright (c) 1995 Danny Gasparovski.
 *
 * Please read the file COPYRIGHT for the
 * terms and conditions of the copyright.
 */

#include "qemu/osdep.h"
#include "slirp.h"
#include "qemu/timer.h"

/* Insert ifm into a session's circular packet ring, after ifmhead. */
static void
ifs_insque(struct mbuf *ifm, struct mbuf *ifmhead)
{
        ifm->ifs_next = ifmhead->ifs_next;
        ifmhead->ifs_next = ifm;
        ifm->ifs_prev = ifmhead;
        ifm->ifs_next->ifs_prev = ifm;
}

/* Unlink ifm from its session's packet ring. */
static void
ifs_remque(struct mbuf *ifm)
{
        ifm->ifs_prev->ifs_next = ifm->ifs_next;
        ifm->ifs_next->ifs_prev = ifm->ifs_prev;
}
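
/*
 * Illustrative sketch (not part of the original source): the two helpers
 * above maintain a circular ring threaded through ifs_next/ifs_prev, one
 * ring per session.  A minimal standalone model, with a hypothetical
 * struct node standing in for struct mbuf:
 *
 *     struct node { struct node *ifs_next, *ifs_prev; };
 *
 *     static void ring_init(struct node *n)   // what ifs_init() does
 *     {
 *         n->ifs_next = n->ifs_prev = n;      // a ring of one
 *     }
 *
 * Starting from a ring containing only a, ifs_insque(b, a) produces the
 * two-element ring (a->ifs_next == b, b->ifs_next == a); ifs_remque(b)
 * then restores the singleton ring.
 */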

/* Initialize both output queues to be empty: a queue is empty when its
 * head's links point back at the head itself. */
void
if_init(Slirp *slirp)
{
    slirp->if_fastq.qh_link = slirp->if_fastq.qh_rlink = &slirp->if_fastq;
    slirp->if_batchq.qh_link = slirp->if_batchq.qh_rlink = &slirp->if_batchq;
}
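
/*
 * Illustrative sketch (hypothetical helper, not in the original source):
 * with the self-pointing empty-queue convention set up by if_init(), the
 * emptiness checks written out below reduce to a comparison like:
 *
 *     static inline bool fastq_empty(Slirp *slirp)
 *     {
 *         return slirp->if_fastq.qh_link == &slirp->if_fastq;
 *     }
 *
 * fastq_empty() is shown only to name the idiom; the code below
 * open-codes the same test.
 */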

/*
 * if_output: Queue a packet onto an output queue.
 * There are two output queues, if_fastq and if_batchq.
 * Each output queue is a doubly linked list of doubly linked lists
 * of mbufs, each inner list belonging to one "session" (socket).  This
 * way, we can output packets fairly by sending one packet from each
 * session, instead of all the packets from one session, then all packets
 * from the next session, etc.  Packets on the if_fastq get absolute
 * priority, but if one session hogs the link, it gets "downgraded"
 * to the batchq until it runs out of packets, then it returns
 * to the fastq (e.g. if the user does an "ls -alR" in a telnet session,
 * it will temporarily be downgraded to the batchq).
 */
void
if_output(struct socket *so, struct mbuf *ifm)
{
        Slirp *slirp = ifm->slirp;
        struct mbuf *ifq;
        int on_fastq = 0; /* set below iff the packet lands on the fastq */

        DEBUG_CALL("if_output");
        DEBUG_ARG("so = %p", so);
        DEBUG_ARG("ifm = %p", ifm);

        /*
         * First remove the mbuf from m_usedlist,
         * since we're going to use m_next and m_prev ourselves
         * XXX Shouldn't need this, gotta change dtom() etc.
         */
        if (ifm->m_flags & M_USEDLIST) {
                remque(ifm);
                ifm->m_flags &= ~M_USEDLIST;
        }

        /*
         * See if there's already a batchq list for this session.
         * This can include an interactive session that should be on the
         * fastq but got too greedy and was downgraded to the batchq.
         * We must not put this packet back on the fastq (or we'd send it
         * out of order).
         * XXX add cache here?
         */
        if (so) {
                for (ifq = (struct mbuf *) slirp->if_batchq.qh_rlink;
                     (struct quehead *) ifq != &slirp->if_batchq;
                     ifq = ifq->ifq_prev) {
                        if (so == ifq->ifq_so) {
                                /* A match! */
                                ifm->ifq_so = so;
                                ifs_insque(ifm, ifq->ifs_prev);
                                goto diddit;
                        }
                }
        }

        /* No match, check which queue to put it on */
        if (so && (so->so_iptos & IPTOS_LOWDELAY)) {
                ifq = (struct mbuf *) slirp->if_fastq.qh_rlink;
                on_fastq = 1;
                /*
                 * Check if this packet is part of the same session as
                 * the last packet on the fastq
                 */
                if (ifq->ifq_so == so) {
                        ifm->ifq_so = so;
                        ifs_insque(ifm, ifq->ifs_prev);
                        goto diddit;
                }
        } else {
                ifq = (struct mbuf *) slirp->if_batchq.qh_rlink;
        }

        /* Create a new doubly linked list for this session */
        ifm->ifq_so = so;
        ifs_init(ifm);
        insque(ifm, ifq);

diddit:
        if (so) {
                /* Update *_queued */
                so->so_queued++;
                so->so_nqueued++;
                /*
                 * Check if the interactive session should be downgraded to
                 * the batchq.  A session is downgraded if it has queued 6
                 * packets without pausing, and at least 3 of those packets
                 * have been sent over the link
                 * (XXX These are arbitrary numbers, probably not optimal..)
                 */
                if (on_fastq && ((so->so_nqueued >= 6) &&
                                 (so->so_nqueued - so->so_queued) >= 3)) {

                        /* Remove the session's packet list from the fastq... */
                        remque(ifm->ifs_next);

                        /* ...and insert it in the batchq.  That'll teach ya! */
                        insque(ifm->ifs_next, &slirp->if_batchq);
                }
        }

#ifndef FULL_BOLT
        /*
         * This prevents us from malloc()ing too many mbufs
         */
        if_start(ifm->slirp);
#endif
}
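
/*
 * Illustrative sketch of the downgrade test in if_output(), restating the
 * source's own definitions: so_nqueued counts packets queued since the
 * session last drained, so_queued counts packets still waiting, so their
 * difference is the number already sent over the link:
 *
 *     int sent = so->so_nqueued - so->so_queued;
 *     bool downgrade = on_fastq && so->so_nqueued >= 6 && sent >= 3;
 *
 * e.g. a fastq session that has queued six packets of which three have
 * already gone out (so_nqueued == 6, so_queued == 3) is moved to the
 * batchq.
 */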

/*
 * Send one packet from each session.
 * If there are packets on the fastq, they are sent FIFO, before
 * everything else.  Then we choose the first packet from each
 * batchq session (socket) and send it.
 * For example, if there are 3 ftp sessions fighting for bandwidth,
 * one packet will be sent from the first session, then one packet
 * from the second session, then one packet from the third.
 */
void if_start(Slirp *slirp)
{
    uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    bool from_batchq = false;
    struct mbuf *ifm, *ifm_next, *ifqt;

    DEBUG_CALL("if_start");

    /* if_start_busy guards against re-entering this function */
    if (slirp->if_start_busy) {
        return;
    }
    slirp->if_start_busy = true;

    struct mbuf *batch_head = NULL;
    if (slirp->if_batchq.qh_link != &slirp->if_batchq) {
        batch_head = (struct mbuf *) slirp->if_batchq.qh_link;
    }

    if (slirp->if_fastq.qh_link != &slirp->if_fastq) {
        ifm_next = (struct mbuf *) slirp->if_fastq.qh_link;
    } else if (batch_head) {
        /* Nothing on fastq, pick up from batchq */
        ifm_next = batch_head;
        from_batchq = true;
    } else {
        ifm_next = NULL;
    }

    while (ifm_next) {
        ifm = ifm_next;

        ifm_next = ifm->ifq_next;
        if ((struct quehead *) ifm_next == &slirp->if_fastq) {
            /* No more packets in fastq, switch to batchq */
            ifm_next = batch_head;
            from_batchq = true;
        }
        if ((struct quehead *) ifm_next == &slirp->if_batchq) {
            /* end of batchq */
            ifm_next = NULL;
        }

        /* Try to send the packet unless it has already expired */
        if (ifm->expiration_date >= now && !if_encap(slirp, ifm)) {
            /* Packet is delayed due to pending ARP or NDP resolution */
            continue;
        }

        /* Remove it from the queue, remembering where it sat */
        ifqt = ifm->ifq_prev;
        remque(ifm);

        /* If there are more packets for this session, re-queue them
         * in the slot the sent packet occupied */
        if (ifm->ifs_next != ifm) {
            struct mbuf *next = ifm->ifs_next;

            insque(next, ifqt);
            ifs_remque(ifm);
            if (!from_batchq) {
                /* fastq packets are drained FIFO, session by session */
                ifm_next = next;
            }
        }

        /* Update so_queued */
        if (ifm->ifq_so && --ifm->ifq_so->so_queued == 0) {
            /* If there's no more queued, reset nqueued */
            ifm->ifq_so->so_nqueued = 0;
        }

        m_free(ifm);
    }

    slirp->if_start_busy = false;
}
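
/*
 * Illustrative sketch of the resulting round-robin order, as a standalone
 * program (hypothetical, not part of slirp): with three batchq sessions
 * holding 3, 1 and 2 queued packets, one packet goes out per session per
 * pass, giving the order A B C A C A rather than A A A B C C.
 *
 *     #include <stdio.h>
 *
 *     int main(void)
 *     {
 *         int queued[3] = { 3, 1, 2 };
 *         const char *name[3] = { "A", "B", "C" };
 *         int remaining = 3 + 1 + 2;
 *
 *         while (remaining) {
 *             // one pass of if_start(): one packet per batchq session
 *             for (int s = 0; s < 3; s++) {
 *                 if (queued[s]) {
 *                     printf("send one packet from session %s\n", name[s]);
 *                     queued[s]--;
 *                     remaining--;
 *                 }
 *             }
 *         }
 *         return 0;
 *     }
 */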