/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef __XEN_NETBACK__COMMON_H__
#define __XEN_NETBACK__COMMON_H__

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/grant_table.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include <linux/debugfs.h>

typedef unsigned int pending_ring_idx_t;
#define INVALID_PENDING_RING_IDX (~0U)

struct pending_tx_info {
	struct xen_netif_tx_request req; /* tx request */
	/* Callback data for released SKBs. The callback is always
	 * xenvif_zerocopy_callback, desc contains the pending_idx, which is
	 * also an index into the pending_tx_info array. It is initialized in
	 * xenvif_alloc and it never changes.
	 * skb_shinfo(skb)->destructor_arg points to the first mapped slot's
	 * pending_tx_info of packets, where mapping_size >= 1, so referencing
	 * the callback directly is possible.
	 */
	struct ubuf_info callback_struct;
};

#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
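/* Worked example (a sketch, assuming 4 KiB Xen pages): __CONST_RING_SIZE
 * rounds the number of slots that fit in one page down to a power of two.
 * A tx slot is a 12-byte request/response union and the shared ring header
 * takes 64 bytes, so (4096 - 64) / 12 = 336 rounds down to 256 slots; the
 * 8-byte rx slots give (4096 - 64) / 8 = 504, which also rounds to 256.
 */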

struct xenvif_rx_meta {
	int id;
	int size;
	int gso_type;
	int gso_size;
};

#define GSO_BIT(type) \
	(1 << XEN_NETIF_GSO_TYPE_ ## type)

/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF

#define MAX_BUFFER_OFFSET XEN_PAGE_SIZE

#define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE

/* The maximum number of frags is derived from the size of a grant (same
 * as a Xen page size for now).
 */
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
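/* E.g. with 4 KiB pages a maximal 64 KiB GSO packet covers
 * 65536 / 4096 = 16 grants, plus one more because the data may start at
 * an arbitrary offset inside the first page: 17 frags in total.
 */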

/* It's possible for an skb to have a maximal number of frags
 * but still be less than MAX_BUFFER_OFFSET in size. Thus the
 * worst-case number of copy operations is MAX_XEN_SKB_FRAGS per
 * ring slot.
 */
#define MAX_GRANT_COPY_OPS (MAX_XEN_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
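/* Illustrative sizing (using the 4 KiB-page numbers above): 17 frags
 * times a 256-slot rx ring reserves room for 4352 grant-copy operations.
 */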

#define NETBACK_INVALID_HANDLE -1

/* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX to indicate
 * the maximum number of slots a valid packet can use. It is currently
 * defined to XEN_NETIF_NR_SLOTS_MIN, which every frontend is supposed
 * to support.
 */
#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN

/* Queue name is interface name with "-qNNN" appended */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 5)

/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
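/* For example (names are illustrative): queue 2 of interface "vif1.0"
 * would be named "vif1.0-q2", and with split event channels its IRQs
 * would be "vif1.0-q2-tx" and "vif1.0-q2-rx"; "-qNNN" accounts for the
 * +5 and "-tx"/"-rx" for the +3 above.
 */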

struct xenvif;

struct xenvif_stats {
	/* Stats fields to be updated per-queue.
	 * A subset of struct net_device_stats that contains only the
	 * fields that are updated in netback.c for each queue.
	 */
	unsigned int rx_bytes;
	unsigned int rx_packets;
	unsigned int tx_bytes;
	unsigned int tx_packets;

	/* Additional stats used by xenvif */
	unsigned long rx_gso_checksum_fixup;
	unsigned long tx_zerocopy_sent;
	unsigned long tx_zerocopy_success;
	unsigned long tx_zerocopy_fail;
	unsigned long tx_frag_overflow;
};

struct xenvif_queue { /* Per-queue data for xenvif */
	unsigned int id; /* Queue ID, 0-based */
	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
	struct xenvif *vif; /* Parent VIF */

	/* Use NAPI for guest TX */
	struct napi_struct napi;
	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
	unsigned int tx_irq;
	/* Only used when feature-split-event-channels = 1 */
	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
	struct xen_netif_tx_back_ring tx;
	struct sk_buff_head tx_queue;
	struct page *mmap_pages[MAX_PENDING_REQS];
	pending_ring_idx_t pending_prod;
	pending_ring_idx_t pending_cons;
	u16 pending_ring[MAX_PENDING_REQS];
	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
	grant_handle_t grant_tx_handle[MAX_PENDING_REQS];

	struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
	struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
	struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
	/* passed to gnttab_[un]map_refs with pages under (un)mapping */
	struct page *pages_to_map[MAX_PENDING_REQS];
	struct page *pages_to_unmap[MAX_PENDING_REQS];

	/* Prevents zerocopy callbacks from racing over dealloc_ring */
	spinlock_t callback_lock;
	/* Prevents the dealloc thread and the NAPI instance from racing over
	 * response creation and pending_ring in xenvif_idx_release. In
	 * xenvif_tx_err it only protects response creation.
	 */
	spinlock_t response_lock;
	pending_ring_idx_t dealloc_prod;
	pending_ring_idx_t dealloc_cons;
	u16 dealloc_ring[MAX_PENDING_REQS];
	struct task_struct *dealloc_task;
	wait_queue_head_t dealloc_wq;
	atomic_t inflight_packets;

	/* Use kthread for guest RX */
	struct task_struct *task;
	wait_queue_head_t wq;
	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
	unsigned int rx_irq;
	/* Only used when feature-split-event-channels = 1 */
	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
	struct xen_netif_rx_back_ring rx;
	struct sk_buff_head rx_queue;

	unsigned int rx_queue_max;
	unsigned int rx_queue_len;
	unsigned long last_rx_time;
	bool stalled;

	struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS];

	/* We create one meta structure per ring request we consume, so
	 * the maximum number is the same as the ring size.
	 */
	struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];

	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
	unsigned long credit_bytes;
	unsigned long credit_usec;
	unsigned long remaining_credit;
	struct timer_list credit_timeout;
	u64 credit_window_start;

	/* Statistics */
	struct xenvif_stats stats;
};
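
/* Shaping example (values are illustrative, not defaults): with
 * credit_bytes = 1000000 and credit_usec = 1000000 a queue may transmit
 * roughly 1 MB per second; remaining_credit is consumed as packets are
 * sent and replenished once the credit window elapses (see
 * xenvif_tx_credit_callback below).
 */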

enum state_bit_shift {
	/* This bit marks that the vif is connected */
	VIF_STATUS_CONNECTED,
};

struct xenvif_mcast_addr {
	struct list_head entry;
	struct rcu_head rcu;
	u8 addr[6];
};

#define XEN_NETBK_MCAST_MAX 64

struct xenvif {
	/* Unique identifier for this interface. */
	domid_t          domid;
	unsigned int     handle;

	u8               fe_dev_addr[6];
	struct list_head fe_mcast_addr;
	unsigned int     fe_mcast_count;

	/* Frontend feature information. */
	int gso_mask;
	int gso_prefix_mask;

	u8 can_sg:1;
	u8 ip_csum:1;
	u8 ipv6_csum:1;
	u8 multicast_control:1;

	/* Is this interface disabled? True when the backend discovers the
	 * frontend is rogue.
	 */
	bool disabled;
	unsigned long status;
	unsigned long drain_timeout;
	unsigned long stall_timeout;

	/* Queues */
	struct xenvif_queue *queues;
	unsigned int num_queues; /* active queues, resource allocated */
	unsigned int stalled_queues;

	struct xenbus_watch credit_watch;

	spinlock_t lock;

#ifdef CONFIG_DEBUG_FS
	struct dentry *xenvif_dbg_root;
#endif

	/* Miscellaneous private stuff. */
	struct net_device *dev;
};

struct xenvif_rx_cb {
	unsigned long expires;
	int meta_slots_used;
};

#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
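
/* Usage sketch (field values are illustrative): the control block lives in
 * skb->cb, so the rx path can stamp per-skb state without any extra
 * allocation, e.g.:
 *
 *	XENVIF_RX_CB(skb)->expires = jiffies + queue->vif->drain_timeout;
 *	XENVIF_RX_CB(skb)->meta_slots_used = 1;
 */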

static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
{
	return to_xenbus_device(vif->dev->dev.parent);
}

void xenvif_tx_credit_callback(unsigned long data);

struct xenvif *xenvif_alloc(struct device *parent,
			    domid_t domid,
			    unsigned int handle);

int xenvif_init_queue(struct xenvif_queue *queue);
void xenvif_deinit_queue(struct xenvif_queue *queue);

int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
		   unsigned long rx_ring_ref, unsigned int tx_evtchn,
		   unsigned int rx_evtchn);
void xenvif_disconnect(struct xenvif *vif);
void xenvif_free(struct xenvif *vif);

int xenvif_xenbus_init(void);
void xenvif_xenbus_fini(void);

int xenvif_schedulable(struct xenvif *vif);

int xenvif_queue_stopped(struct xenvif_queue *queue);
void xenvif_wake_queue(struct xenvif_queue *queue);

/* (Un)Map communication rings. */
void xenvif_unmap_frontend_rings(struct xenvif_queue *queue);
int xenvif_map_frontend_rings(struct xenvif_queue *queue,
			      grant_ref_t tx_ring_ref,
			      grant_ref_t rx_ring_ref);

/* Check for SKBs from frontend and schedule backend processing */
void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);

/* Prevent the device from generating any further traffic. */
void xenvif_carrier_off(struct xenvif *vif);

int xenvif_tx_action(struct xenvif_queue *queue, int budget);

int xenvif_kthread_guest_rx(void *data);
void xenvif_kick_thread(struct xenvif_queue *queue);

int xenvif_dealloc_kthread(void *data);

void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);

void xenvif_carrier_on(struct xenvif *vif);

/* Callback from stack when TX packet can be released */
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);

/* Unmap a pending page and release it back to the guest */
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);

static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
{
	return MAX_PENDING_REQS -
		queue->pending_prod + queue->pending_cons;
}
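
/* Worked example (numbers are illustrative): the pending indices are
 * free-running unsigned counters, so with MAX_PENDING_REQS == 256,
 * pending_prod == 260 and pending_cons == 10 the pending ring holds
 * 250 free entries and nr_pending_reqs() == 256 - 260 + 10 == 6
 * requests still in flight; unsigned wrap-around keeps this correct.
 */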

irqreturn_t xenvif_interrupt(int irq, void *dev_id);

extern bool separate_tx_rx_irq;

extern unsigned int rx_drain_timeout_msecs;
extern unsigned int rx_stall_timeout_msecs;
extern unsigned int xenvif_max_queues;

#ifdef CONFIG_DEBUG_FS
extern struct dentry *xen_netback_dbg_root;
#endif

void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb);
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue);

/* Multicast control */
bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr);
void xenvif_mcast_addr_list_free(struct xenvif *vif);

#endif /* __XEN_NETBACK__COMMON_H__ */