#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <net/if.h>
#define NETMAP_WITH_LIBS
#include <net/netmap.h>
#include <net/netmap_user.h>

#include "net/net.h"
#include "net/tap.h"
#include "clients.h"
#include "sysemu/sysemu.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qemu/iov.h"
#include "qemu/cutils.h"

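/*
 * Private per-client state for the netmap network backend: the descriptor
 * returned by nm_open(), the TX/RX rings in use, and the read/write poll
 * flags that track which event-loop handlers are currently registered.
 */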
typedef struct NetmapState {
    NetClientState nc;
    struct nm_desc *nmd;
    char ifname[IFNAMSIZ];
    struct netmap_ring *tx;
    struct netmap_ring *rx;
    bool read_poll;             /* waiting to be notified about new packets */
    bool write_poll;            /* waiting for free space in the TX ring */
    struct iovec iov[IOV_MAX];
    int vnet_hdr_len;           /* current virtio-net header length */
} NetmapState;

#ifndef __FreeBSD__
#define pkt_copy bcopy
#else

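/* A fast copy routine only for multiples of 64 bytes, non overlapped. */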
static inline void
pkt_copy(const void *_src, void *_dst, int l)
{
    const uint64_t *src = _src;
    uint64_t *dst = _dst;
    if (unlikely(l >= 1024)) {
        bcopy(src, dst, l);
        return;
    }
    for (; l > 0; l -= 64) {
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
    }
}
#endif

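/*
 * Open the netmap port named in the backend options and return its
 * descriptor, or NULL on failure (the error is reported through errp).
 */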
static struct nm_desc *netmap_open(const NetdevNetmapOptions *nm_opts,
                                   Error **errp)
{
    struct nm_desc *nmd;
    struct nmreq req;

    memset(&req, 0, sizeof(req));

    nmd = nm_open(nm_opts->ifname, &req, NETMAP_NO_TX_POLL,
                  NULL);
    if (nmd == NULL) {
        error_setg_errno(errp, errno, "Failed to nm_open() %s",
                         nm_opts->ifname);
        return NULL;
    }

    return nmd;
}

static void netmap_send(void *opaque);
static void netmap_writable(void *opaque);

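/*
 * (Re)register the handlers on the netmap file descriptor according to
 * the current read_poll/write_poll state.
 */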
static void netmap_update_fd_handler(NetmapState *s)
{
    qemu_set_fd_handler(s->nmd->fd,
                        s->read_poll ? netmap_send : NULL,
                        s->write_poll ? netmap_writable : NULL,
                        s);
}

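/* Enable or disable polling for incoming packets on the netmap fd. */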
static void netmap_read_poll(NetmapState *s, bool enable)
{
    if (s->read_poll != enable) {
        s->read_poll = enable;
        netmap_update_fd_handler(s);
    }
}

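/* Enable or disable polling for a writable (non-full) netmap TX ring. */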
static void netmap_write_poll(NetmapState *s, bool enable)
{
    if (s->write_poll != enable) {
        s->write_poll = enable;
        netmap_update_fd_handler(s);
    }
}

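/* NetClientInfo .poll callback: enable or disable both directions at once. */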
static void netmap_poll(NetClientState *nc, bool enable)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);

    if (s->read_poll != enable || s->write_poll != enable) {
        s->write_poll = enable;
        s->read_poll = enable;
        netmap_update_fd_handler(s);
    }
}

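/*
 * Callback invoked once the netmap fd becomes writable again: stop polling
 * for writes and flush the packets queued by the peer.
 */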
static void netmap_writable(void *opaque)
{
    NetmapState *s = opaque;

    netmap_write_poll(s, false);
    qemu_flush_queued_packets(&s->nc);
}

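/*
 * Transmit a single linear buffer coming from the peer by copying it
 * into one slot of the netmap TX ring.
 */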
static ssize_t netmap_receive(NetClientState *nc,
                              const uint8_t *buf, size_t size)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);
    struct netmap_ring *ring = s->tx;
    uint32_t i;
    uint32_t idx;
    uint8_t *dst;

    if (unlikely(!ring)) {
        /* Drop the packet. */
        return size;
    }

    if (unlikely(size > ring->nr_buf_size)) {
        RD(5, "[netmap_receive] drop packet of size %d > %d\n",
              (int)size, ring->nr_buf_size);
        return size;
    }

    if (nm_ring_empty(ring)) {
        /* No available slots in the netmap TX ring. */
        netmap_write_poll(s, true);
        return 0;
    }

    i = ring->cur;
    idx = ring->slot[i].buf_idx;
    dst = (uint8_t *)NETMAP_BUF(ring, idx);

    ring->slot[i].len = size;
    ring->slot[i].flags = 0;
    pkt_copy(buf, dst, size);
    ring->cur = ring->head = nm_ring_next(ring, i);
    ioctl(s->nmd->fd, NIOCTXSYNC, NULL);

    return size;
}

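/*
 * Transmit a scatter-gather packet: each iovec fragment is split over as
 * many netmap slots as needed, chained together with NS_MOREFRAG.
 */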
static ssize_t netmap_receive_iov(NetClientState *nc,
                                  const struct iovec *iov, int iovcnt)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);
    struct netmap_ring *ring = s->tx;
    uint32_t last;
    uint32_t idx;
    uint8_t *dst;
    int j;
    uint32_t i;

    if (unlikely(!ring)) {
        /* Drop the packet. */
        return iov_size(iov, iovcnt);
    }

    last = i = ring->cur;

    if (nm_ring_space(ring) < iovcnt) {
        /* Not enough netmap slots. */
        netmap_write_poll(s, true);
        return 0;
    }

    for (j = 0; j < iovcnt; j++) {
        int iov_frag_size = iov[j].iov_len;
        int offset = 0;
        int nm_frag_size;

        /* Split each iovec fragment over more netmap slots, if
           necessary. */
        while (iov_frag_size) {
            nm_frag_size = MIN(iov_frag_size, ring->nr_buf_size);

            if (unlikely(nm_ring_empty(ring))) {
                /* We ran out of netmap slots while splitting the
                   iovec fragments. */
                netmap_write_poll(s, true);
                return 0;
            }

            idx = ring->slot[i].buf_idx;
            dst = (uint8_t *)NETMAP_BUF(ring, idx);

            ring->slot[i].len = nm_frag_size;
            ring->slot[i].flags = NS_MOREFRAG;
            pkt_copy(iov[j].iov_base + offset, dst, nm_frag_size);

            last = i;
            i = nm_ring_next(ring, i);

            offset += nm_frag_size;
            iov_frag_size -= nm_frag_size;
        }
    }

    /* The last slot must not have NS_MOREFRAG set. */
    ring->slot[last].flags &= ~NS_MOREFRAG;

    /* Now update ring->cur and ring->head. */
    ring->cur = ring->head = i;

    ioctl(s->nmd->fd, NIOCTXSYNC, NULL);

    return iov_size(iov, iovcnt);
}

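/*
 * Callback invoked once the peer has consumed a packet passed to it with
 * qemu_sendv_packet_async(): resume reading from the netmap RX ring.
 */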
static void netmap_send_completed(NetClientState *nc, ssize_t len)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);

    netmap_read_poll(s, true);
}

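/*
 * Read as many packets as possible from the netmap RX ring and forward
 * them to the peer, reassembling NS_MOREFRAG chains into a single iovec.
 */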
static void netmap_send(void *opaque)
{
    NetmapState *s = opaque;
    struct netmap_ring *ring = s->rx;

    /* Keep sending while there are complete packets in the netmap
       RX ring and the forwarding path towards the peer is open. */
    while (!nm_ring_empty(ring)) {
        uint32_t i;
        uint32_t idx;
        bool morefrag;
        int iovcnt = 0;
        int iovsize;

        do {
            i = ring->cur;
            idx = ring->slot[i].buf_idx;
            morefrag = (ring->slot[i].flags & NS_MOREFRAG);
            s->iov[iovcnt].iov_base = (u_char *)NETMAP_BUF(ring, idx);
            s->iov[iovcnt].iov_len = ring->slot[i].len;
            iovcnt++;

            ring->cur = ring->head = nm_ring_next(ring, i);
        } while (!nm_ring_empty(ring) && morefrag);

        if (unlikely(nm_ring_empty(ring) && morefrag)) {
            RD(5, "[netmap_send] ran out of slots, with a pending "
                  "incomplete packet\n");
        }

        iovsize = qemu_sendv_packet_async(&s->nc, s->iov, iovcnt,
                                          netmap_send_completed);

        if (iovsize == 0) {
            /* The peer does not receive any more. The packet is queued;
             * stop reading from the backend until netmap_send_completed()
             * is called. */
            netmap_read_poll(s, false);
            break;
        }
    }
}

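/* Flush any queued packets and close the netmap descriptor. */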
static void netmap_cleanup(NetClientState *nc)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);

    qemu_purge_queued_packets(nc);

    netmap_poll(nc, false);
    nm_close(s->nmd);
    s->nmd = NULL;
}

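/* Offloading manipulation support callbacks. */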
static int netmap_fd_set_vnet_hdr_len(NetmapState *s, int len)
{
    struct nmreq req;

    /* Issue a NETMAP_BDG_VNET_HDR command to change the virtio-net header
     * length for the netmap adapter associated with 's->ifname'.
     */
    memset(&req, 0, sizeof(req));
    pstrcpy(req.nr_name, sizeof(req.nr_name), s->ifname);
    req.nr_version = NETMAP_API;
    req.nr_cmd = NETMAP_BDG_VNET_HDR;
    req.nr_arg1 = len;

    return ioctl(s->nmd->fd, NIOCREGIF, &req);
}

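/*
 * Check whether the vnet-hdr length 'len' is supported, by trying to set
 * it and then restoring the previous value.
 */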
static bool netmap_has_vnet_hdr_len(NetClientState *nc, int len)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);
    int prev_len = s->vnet_hdr_len;

    /* Check that we can set the new length. */
    if (netmap_fd_set_vnet_hdr_len(s, len)) {
        return false;
    }

    /* Restore the previous length. */
    if (netmap_fd_set_vnet_hdr_len(s, prev_len)) {
        error_report("Failed to restore vnet-hdr length %d on %s: %s",
                     prev_len, s->ifname, strerror(errno));
        abort();
    }

    return true;
}

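/* A netmap interface that supports virtio-net headers also supports UFO,
 * so this callback is used for both the has_vnet_hdr and has_ufo hooks. */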
static bool netmap_has_vnet_hdr(NetClientState *nc)
{
    return netmap_has_vnet_hdr_len(nc, sizeof(struct virtio_net_hdr));
}

static void netmap_using_vnet_hdr(NetClientState *nc, bool enable)
{
}

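/* Change the virtio-net header length used by the netmap port and keep
 * track of it in the backend state. */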
static void netmap_set_vnet_hdr_len(NetClientState *nc, int len)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);
    int err;

    err = netmap_fd_set_vnet_hdr_len(s, len);
    if (err) {
        error_report("Unable to set vnet-hdr length %d on %s: %s",
                     len, s->ifname, strerror(errno));
    } else {
        /* Keep track of the current length. */
        s->vnet_hdr_len = len;
    }
}

static void netmap_set_offload(NetClientState *nc, int csum, int tso4, int tso6,
                               int ecn, int ufo)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);

    /* Setting a virtio-net header length greater than zero automatically
     * enables the offloadings. */
    if (!s->vnet_hdr_len) {
        netmap_set_vnet_hdr_len(nc, sizeof(struct virtio_net_hdr));
    }
}

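/* NetClientInfo methods. */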
static NetClientInfo net_netmap_info = {
    .type = NET_CLIENT_DRIVER_NETMAP,
    .size = sizeof(NetmapState),
    .receive = netmap_receive,
    .receive_iov = netmap_receive_iov,
    .poll = netmap_poll,
    .cleanup = netmap_cleanup,
    .has_ufo = netmap_has_vnet_hdr,
    .has_vnet_hdr = netmap_has_vnet_hdr,
    .has_vnet_hdr_len = netmap_has_vnet_hdr_len,
    .using_vnet_hdr = netmap_using_vnet_hdr,
    .set_offload = netmap_set_offload,
    .set_vnet_hdr_len = netmap_set_vnet_hdr_len,
};

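/*
 * The exported init function: open the netmap port named in the backend
 * options and set up the backend state and its event-loop handlers.
 */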
int net_init_netmap(const Netdev *netdev,
                    const char *name, NetClientState *peer, Error **errp)
{
    const NetdevNetmapOptions *netmap_opts = &netdev->u.netmap;
    struct nm_desc *nmd;
    NetClientState *nc;
    Error *err = NULL;
    NetmapState *s;

    nmd = netmap_open(netmap_opts, &err);
    if (err) {
        error_propagate(errp, err);
        return -1;
    }

    /* Create the object. */
    nc = qemu_new_net_client(&net_netmap_info, peer, "netmap", name);
    s = DO_UPCAST(NetmapState, nc, nc);
    s->nmd = nmd;
    s->tx = NETMAP_TXRING(nmd->nifp, 0);
    s->rx = NETMAP_RXRING(nmd->nifp, 0);
    s->vnet_hdr_len = 0;
    pstrcpy(s->ifname, sizeof(s->ifname), netmap_opts->ifname);
    netmap_read_poll(s, true); /* Initially only poll for reads. */

    return 0;
}