#ifndef __XEN_NETBACK__COMMON_H__
#define __XEN_NETBACK__COMMON_H__

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/grant_table.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include <linux/debugfs.h>

typedef unsigned int pending_ring_idx_t;
#define INVALID_PENDING_RING_IDX (~0U)

struct pending_tx_info {
	struct xen_netif_tx_request req; /* tx request */
	unsigned int extra_count;
	/* Callback data for released SKBs. The callback is always
	 * xenvif_zerocopy_callback(); it recovers the pending_idx of this
	 * slot (also the index into the pending_tx_info array) so that the
	 * corresponding grant can be unmapped and the slot returned to the
	 * pending ring once the stack has finished with the frag.
	 */
	struct ubuf_info callback_struct;
};

#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
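/* Note: with the usual 4 KiB Xen page size, __CONST_RING_SIZE() works out
 * to 256 entries for both the TX and RX rings; the exact value depends on
 * XEN_PAGE_SIZE and the netif request/response sizes.
 */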

struct xenvif_rx_meta {
	int id;
	int size;
	int gso_type;
	int gso_size;
};

#define GSO_BIT(type) \
	(1 << XEN_NETIF_GSO_TYPE_ ## type)

/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF

#define MAX_BUFFER_OFFSET XEN_PAGE_SIZE

#define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE
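/* Each in-flight TX request occupies exactly one pending slot, so sizing
 * the pending arrays to the TX ring size guarantees a free slot for every
 * request the frontend can have outstanding on the shared ring.
 */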

/* The maximum number of frags is derived from the size of a grant (same
 * as a Xen page size for now).
 */
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)

#define NETBACK_INVALID_HANDLE -1

/* Limit on the number of slots a single packet may span: set to
 * XEN_NETIF_NR_SLOTS_MIN, the minimum every implementation is required
 * to support.
 */
#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN

/* Queue name is interface name with "-qNNN" appended */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 5)

/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)

struct xenvif;

struct xenvif_stats {
	/* Stats fields to be updated per-queue.
	 * A different structure is used to aggregate the stats from
	 * all queues for the interface as a whole.
	 */
	u64 rx_bytes;
	u64 rx_packets;
	u64 tx_bytes;
	u64 tx_packets;

	/* Additional stats used by xenvif */
	unsigned long rx_gso_checksum_fixup;
	unsigned long tx_zerocopy_sent;
	unsigned long tx_zerocopy_success;
	unsigned long tx_zerocopy_fail;
	unsigned long tx_frag_overflow;
};

#define COPY_BATCH_SIZE 64

struct xenvif_copy_state {
	struct gnttab_copy op[COPY_BATCH_SIZE];
	RING_IDX idx[COPY_BATCH_SIZE];
	unsigned int num;
	struct sk_buff_head *completed;
};
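/* The guest RX path accumulates grant copy operations in op[] as responses
 * are built and hands the whole batch to the hypervisor in one go (via
 * gnttab_batch_copy()) once COPY_BATCH_SIZE entries are queued or the
 * queue has been drained, amortising the hypercall cost.
 */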

struct xenvif_queue { /* Per-queue data for xenvif */
	unsigned int id; /* Queue ID, 0-based */
	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
	struct xenvif *vif; /* Parent VIF */

	/* Use NAPI for guest TX */
	struct napi_struct napi;
	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
	unsigned int tx_irq;
	/* Only used when feature-split-event-channels = 1 */
	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
	struct xen_netif_tx_back_ring tx;
	struct sk_buff_head tx_queue;
	struct page *mmap_pages[MAX_PENDING_REQS];
	pending_ring_idx_t pending_prod;
	pending_ring_idx_t pending_cons;
	u16 pending_ring[MAX_PENDING_REQS];
	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
	grant_handle_t grant_tx_handle[MAX_PENDING_REQS];

	struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
	struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
	struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
	/* passed to gnttab_[un]map_refs with pages under (un)mapping */
	struct page *pages_to_map[MAX_PENDING_REQS];
	struct page *pages_to_unmap[MAX_PENDING_REQS];

	/* Prevents zerocopy callbacks from racing over dealloc_ring */
	spinlock_t callback_lock;
	/* Prevents the dealloc thread and the NAPI instance from racing
	 * over response creation and pending_ring.
	 */
	spinlock_t response_lock;
	pending_ring_idx_t dealloc_prod;
	pending_ring_idx_t dealloc_cons;
	u16 dealloc_ring[MAX_PENDING_REQS];
	struct task_struct *dealloc_task;
	wait_queue_head_t dealloc_wq;
	atomic_t inflight_packets;

	/* Use kthread for guest RX */
	struct task_struct *task;
	wait_queue_head_t wq;
	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
	unsigned int rx_irq;
	/* Only used when feature-split-event-channels = 1 */
	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
	struct xen_netif_rx_back_ring rx;
	struct sk_buff_head rx_queue;

	unsigned int rx_queue_max;
	unsigned int rx_queue_len;
	unsigned long last_rx_time;
	bool stalled;

	struct xenvif_copy_state rx_copy;

	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
	unsigned long credit_bytes;
	unsigned long credit_usec;
	unsigned long remaining_credit;
	struct timer_list credit_timeout;
	u64 credit_window_start;
	bool rate_limited;

	/* Statistics */
	struct xenvif_stats stats;
};
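/* Per-queue processing is split: guest TX (frontend to backend) is driven
 * from NAPI via tx_irq, completed grants are returned by the dealloc
 * kthread, and guest RX (backend to frontend) is handled by the queue's
 * dedicated kernel thread woken through wq.
 */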

enum state_bit_shift {
	/* This bit marks that the vif is connected */
	VIF_STATUS_CONNECTED,
};

struct xenvif_mcast_addr {
	struct list_head entry;
	struct rcu_head rcu;
	u8 addr[6];
};

#define XEN_NETBK_MCAST_MAX 64

#define XEN_NETBK_MAX_HASH_KEY_SIZE 40
#define XEN_NETBK_MAX_HASH_MAPPING_SIZE 128
#define XEN_NETBK_HASH_TAG_SIZE 40

struct xenvif_hash_cache_entry {
	struct list_head link;
	struct rcu_head rcu;
	u8 tag[XEN_NETBK_HASH_TAG_SIZE];
	unsigned int len;
	u32 val;
	int seq;
};

struct xenvif_hash_cache {
	spinlock_t lock;
	struct list_head list;
	unsigned int count;
	atomic_t seq;
};

struct xenvif_hash {
	unsigned int alg;
	u32 flags;
	u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE];
	u32 mapping[XEN_NETBK_MAX_HASH_MAPPING_SIZE];
	unsigned int size;
	struct xenvif_hash_cache cache;
};

struct xenvif {
	/* Unique identifier for this interface. */
	domid_t domid;
	unsigned int handle;

	u8 fe_dev_addr[6];
	struct list_head fe_mcast_addr;
	unsigned int fe_mcast_count;

	/* Frontend feature information. */
	int gso_mask;

	u8 can_sg:1;
	u8 ip_csum:1;
	u8 ipv6_csum:1;
	u8 multicast_control:1;

	/* Is this interface disabled? True when the backend discovers
	 * the frontend is rogue.
	 */
	bool disabled;
	unsigned long status;
	unsigned long drain_timeout;
	unsigned long stall_timeout;

	/* Queues */
	struct xenvif_queue *queues;
	unsigned int num_queues; /* active queues, resource allocated */
	unsigned int stalled_queues;

	struct xenvif_hash hash;

	struct xenbus_watch credit_watch;
	struct xenbus_watch mcast_ctrl_watch;

	spinlock_t lock;

#ifdef CONFIG_DEBUG_FS
	struct dentry *xenvif_dbg_root;
#endif

	struct xen_netif_ctrl_back_ring ctrl;
	unsigned int ctrl_irq;

	/* Miscellaneous private stuff. */
	struct net_device *dev;
};

struct xenvif_rx_cb {
	unsigned long expires;
	int meta_slots_used;
};

#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
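/* Illustrative use of the control-buffer wrapper when queueing a packet
 * for guest RX (field names as declared above):
 *
 *	XENVIF_RX_CB(skb)->expires = jiffies + vif->drain_timeout;
 *	__skb_queue_tail(&queue->rx_queue, skb);
 */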

static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
{
	return to_xenbus_device(vif->dev->dev.parent);
}

void xenvif_tx_credit_callback(struct timer_list *t);

struct xenvif *xenvif_alloc(struct device *parent,
			    domid_t domid,
			    unsigned int handle);

int xenvif_init_queue(struct xenvif_queue *queue);
void xenvif_deinit_queue(struct xenvif_queue *queue);

int xenvif_connect_data(struct xenvif_queue *queue,
			unsigned long tx_ring_ref,
			unsigned long rx_ring_ref,
			unsigned int tx_evtchn,
			unsigned int rx_evtchn);
void xenvif_disconnect_data(struct xenvif *vif);
int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
			unsigned int evtchn);
void xenvif_disconnect_ctrl(struct xenvif *vif);
void xenvif_free(struct xenvif *vif);

int xenvif_xenbus_init(void);
void xenvif_xenbus_fini(void);

int xenvif_schedulable(struct xenvif *vif);

int xenvif_queue_stopped(struct xenvif_queue *queue);
void xenvif_wake_queue(struct xenvif_queue *queue);

/* (Un)Map communication rings. */
void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue);
int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
				   grant_ref_t tx_ring_ref,
				   grant_ref_t rx_ring_ref);

/* Check for SKBs from frontend and schedule backend processing */
void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);

/* Prevent the device from generating any further traffic. */
void xenvif_carrier_off(struct xenvif *vif);

int xenvif_tx_action(struct xenvif_queue *queue, int budget);

int xenvif_kthread_guest_rx(void *data);
void xenvif_kick_thread(struct xenvif_queue *queue);

int xenvif_dealloc_kthread(void *data);

irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);

void xenvif_rx_action(struct xenvif_queue *queue);
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);

void xenvif_carrier_on(struct xenvif *vif);

/* Callback from stack when TX packet can be released */
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);

/* Unmap a pending page and release it back to the guest */
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);

static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
{
	return MAX_PENDING_REQS -
		queue->pending_prod + queue->pending_cons;
}
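/* nr_pending_reqs() counts the slots currently in use: pending_prod and
 * pending_cons delimit the free entries in pending_ring (the indices are
 * free-running and wrap naturally in unsigned arithmetic). For example,
 * with MAX_PENDING_REQS == 256, pending_prod == 300 and pending_cons == 290,
 * 10 slots are free and 256 - 300 + 290 = 246 requests are still in flight.
 */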

irqreturn_t xenvif_interrupt(int irq, void *dev_id);

extern bool separate_tx_rx_irq;

extern unsigned int rx_drain_timeout_msecs;
extern unsigned int rx_stall_timeout_msecs;
extern unsigned int xenvif_max_queues;
extern unsigned int xenvif_hash_cache_size;

#ifdef CONFIG_DEBUG_FS
extern struct dentry *xen_netback_dbg_root;
#endif

void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb);
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue);

/* Multicast control */
bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr);
void xenvif_mcast_addr_list_free(struct xenvif *vif);

/* Hash */
void xenvif_init_hash(struct xenvif *vif);
void xenvif_deinit_hash(struct xenvif *vif);

u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg);
u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags);
u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags);
u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len);
u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size);
u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
			    u32 off);

void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb);

#ifdef CONFIG_DEBUG_FS
void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m);
#endif

#endif /* __XEN_NETBACK__COMMON_H__ */