1
2
3
4
5
6#include "slirp.h"
7
8static void
9ifs_insque(struct mbuf *ifm, struct mbuf *ifmhead)
10{
11 ifm->ifs_next = ifmhead->ifs_next;
12 ifmhead->ifs_next = ifm;
13 ifm->ifs_prev = ifmhead;
14 ifm->ifs_next->ifs_prev = ifm;
15}
16
17static void
18ifs_remque(struct mbuf *ifm)
19{
20 ifm->ifs_prev->ifs_next = ifm->ifs_next;
21 ifm->ifs_next->ifs_prev = ifm->ifs_prev;
22}
23
24void
25if_init(Slirp *slirp)
26{
27 slirp->if_fastq.qh_link = slirp->if_fastq.qh_rlink = &slirp->if_fastq;
28 slirp->if_batchq.qh_link = slirp->if_batchq.qh_rlink = &slirp->if_batchq;
29}
30
31
32
33
34
35
36
37
38
39
40
41
42
43
/*
 * Queue a packet for output.
 *
 * Packets go on one of two queues: if_fastq for sockets whose IP TOS
 * requests low delay (interactive traffic), if_batchq for everything
 * else.  Packets that belong to the same socket ("session") are not
 * queued independently; instead they are chained onto the session's
 * already-queued packet via the ifs_next/ifs_prev ring, so that
 * if_start() can rotate through sessions rather than draining one
 * session at a time.
 *
 * so:  the socket this packet belongs to, or NULL if it has none.
 * ifm: the packet; ifm->slirp supplies the Slirp instance.
 */
void
if_output(struct socket *so, struct mbuf *ifm)
{
    Slirp *slirp = ifm->slirp;
    struct mbuf *ifq;
    int on_fastq = 1;

    DEBUG_CALL("if_output");
    DEBUG_ARG("so = %p", so);
    DEBUG_ARG("ifm = %p", ifm);

    /* If the mbuf is still on the "used" list, take it off first. */
    if (ifm->m_flags & M_USEDLIST) {
        remque(ifm);
        ifm->m_flags &= ~M_USEDLIST;
    }

    /*
     * See if this socket already has packets on the batch queue; if so,
     * chain this packet onto that session's ring instead of queueing it
     * separately.  The batch queue is scanned backwards (qh_rlink /
     * ifq_prev), i.e. from the most recently queued packet.
     */
    if (so) {
        for (ifq = (struct mbuf *) slirp->if_batchq.qh_rlink;
             (struct quehead *) ifq != &slirp->if_batchq;
             ifq = ifq->ifq_prev) {
            if (so == ifq->ifq_so) {
                /* A match: append to this session's ring. */
                ifm->ifq_so = so;
                ifs_insque(ifm, ifq->ifs_prev);
                goto diddit;
            }
        }
    }

    /* No session on the batch queue: pick a queue by TOS. */
    if (so && (so->so_iptos & IPTOS_LOWDELAY)) {
        ifq = (struct mbuf *) slirp->if_fastq.qh_rlink;
        on_fastq = 1;
        /*
         * If the last packet on the fast queue is from this same
         * socket, extend that session's ring instead.
         */
        if (ifq->ifq_so == so) {
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    } else {
        ifq = (struct mbuf *) slirp->if_batchq.qh_rlink;
    }

    /* Start a new session: fresh ifs ring, linked at the queue tail. */
    ifm->ifq_so = so;
    ifs_init(ifm);
    insque(ifm, ifq);

diddit:
    if (so) {
        /* Bump current and cumulative queued-packet counters. */
        so->so_queued++;
        so->so_nqueued++;

        /*
         * Penalise a "fast" session that is hogging the link: once it
         * has accumulated at least 6 packets with at least 3 already
         * sent since the counter was last reset, demote the session's
         * next packet to the batch queue.
         *
         * NOTE(review): on_fastq is initialised to 1 and never cleared,
         * so this branch can also fire when the packet was matched into
         * a batchq session above -- confirm that is intentional.
         */
        if (on_fastq && ((so->so_nqueued >= 6) &&
            (so->so_nqueued - so->so_queued) >= 3)) {

            /* Remove the session's next packet from its queue... */
            remque(ifm->ifs_next);

            /* ...and re-insert it on the batch queue. */
            insque(ifm->ifs_next, &slirp->if_batchq);
        }
    }

    /* Kick the transmitter to try sending what we just queued. */
    if_start(ifm->slirp);
}
135
136
137
138
139
140
141
142
143
144
/*
 * Walk the output queues and hand each sendable packet to if_encap().
 *
 * The fast queue is drained first, then the batch queue.  For each
 * packet:
 *   - if it has not expired and if_encap() fails, it stays queued;
 *   - otherwise (sent, or expired and therefore dropped) it is removed
 *     from its queue and session ring and freed.
 * When a sent packet has more packets in its session ring, the next
 * one is promoted into the freed queue slot so sessions share the
 * link round-robin.
 *
 * The if_start_busy flag guards against recursive entry (presumably
 * if_encap() can call back into this path -- TODO confirm).
 */
void if_start(Slirp *slirp)
{
    uint64_t now = slirp->cb->clock_get_ns(slirp->opaque);
    bool from_batchq = false;
    struct mbuf *ifm, *ifm_next, *ifqt;

    DEBUG_CALL("if_start");

    /* Already running further up the call stack: do nothing. */
    if (slirp->if_start_busy) {
        return;
    }
    slirp->if_start_busy = true;

    /* Remember where the batch queue starts (NULL if it is empty). */
    struct mbuf *batch_head = NULL;
    if (slirp->if_batchq.qh_link != &slirp->if_batchq) {
        batch_head = (struct mbuf *) slirp->if_batchq.qh_link;
    }

    /* Start with the fast queue; fall back to the batch queue. */
    if (slirp->if_fastq.qh_link != &slirp->if_fastq) {
        ifm_next = (struct mbuf *) slirp->if_fastq.qh_link;
    } else if (batch_head) {
        /* Fast queue empty: service the batch queue. */
        ifm_next = batch_head;
        from_batchq = true;
    } else {
        ifm_next = NULL;
    }

    while (ifm_next) {
        ifm = ifm_next;

        /* Advance, crossing from fastq to batchq at the fastq head. */
        ifm_next = ifm->ifq_next;
        if ((struct quehead *) ifm_next == &slirp->if_fastq) {
            /* Wrapped around the fast queue: switch to the batch queue. */
            ifm_next = batch_head;
            from_batchq = true;
        }
        if ((struct quehead *) ifm_next == &slirp->if_batchq) {
            /* Wrapped around the batch queue: we are done after ifm. */
            ifm_next = NULL;
        }

        /* Try to send this packet. */
        if (ifm->expiration_date >= now && !if_encap(slirp, ifm)) {
            /* Not expired but the send failed: leave it queued. */
            continue;
        }

        /* Sent (or expired): unlink from its output queue. */
        ifqt = ifm->ifq_prev;
        remque(ifm);

        /* If the session ring holds more packets, promote the next one. */
        if (ifm->ifs_next != ifm) {
            struct mbuf *next = ifm->ifs_next;

            /* Insert the successor where ifm sat in the queue. */
            insque(next, ifqt);
            ifs_remque(ifm);
            if (!from_batchq) {
                /* On the fast queue, send the successor right away. */
                ifm_next = next;
            }
        }

        /* Last packet of this socket drained: reset its hog counter. */
        if (ifm->ifq_so && --ifm->ifq_so->so_queued == 0) {
            /* If there are no more queued packets, clear the counter. */
            ifm->ifq_so->so_nqueued = 0;
        }

        m_free(ifm);
    }

    slirp->if_start_busy = false;
}
219