1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57#include "i1480u-wlp.h"
58
/*
 * Maximum payload a non-first fragment can carry: a maximum-size UNTD
 * fragment minus the "rest" fragment header that precedes the payload.
 */
enum {
	i1480u_MAX_PL_SIZE = i1480u_MAX_FRG_SIZE
		- sizeof(struct untd_hdr_rst),
};
64
65
66static
67void i1480u_tx_free(struct i1480u_tx *wtx)
68{
69 kfree(wtx->buf);
70 if (wtx->skb)
71 dev_kfree_skb_irq(wtx->skb);
72 usb_free_urb(wtx->urb);
73 kfree(wtx);
74}
75
76static
77void i1480u_tx_destroy(struct i1480u *i1480u, struct i1480u_tx *wtx)
78{
79 unsigned long flags;
80 spin_lock_irqsave(&i1480u->tx_list_lock, flags);
81 list_del(&wtx->list_node);
82 i1480u_tx_free(wtx);
83 spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
84}
85
86static
87void i1480u_tx_unlink_urbs(struct i1480u *i1480u)
88{
89 unsigned long flags;
90 struct i1480u_tx *wtx, *next;
91
92 spin_lock_irqsave(&i1480u->tx_list_lock, flags);
93 list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
94 usb_unlink_urb(wtx->urb);
95 }
96 spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
97}
98
99
100
101
102
103
104
105
106
107
108static
109void i1480u_tx_cb(struct urb *urb)
110{
111 struct i1480u_tx *wtx = urb->context;
112 struct i1480u *i1480u = wtx->i1480u;
113 struct net_device *net_dev = i1480u->net_dev;
114 struct device *dev = &i1480u->usb_iface->dev;
115 unsigned long flags;
116
117 switch (urb->status) {
118 case 0:
119 spin_lock_irqsave(&i1480u->lock, flags);
120 net_dev->stats.tx_packets++;
121 net_dev->stats.tx_bytes += urb->actual_length;
122 spin_unlock_irqrestore(&i1480u->lock, flags);
123 break;
124 case -ECONNRESET:
125 case -ENOENT:
126 dev_dbg(dev, "notif endp: reset/noent %d\n", urb->status);
127 netif_stop_queue(net_dev);
128 break;
129 case -ESHUTDOWN:
130 dev_dbg(dev, "notif endp: down %d\n", urb->status);
131 netif_stop_queue(net_dev);
132 break;
133 default:
134 dev_err(dev, "TX: unknown URB status %d\n", urb->status);
135 if (edc_inc(&i1480u->tx_errors, EDC_MAX_ERRORS,
136 EDC_ERROR_TIMEFRAME)) {
137 dev_err(dev, "TX: max acceptable errors exceeded."
138 "Reset device.\n");
139 netif_stop_queue(net_dev);
140 i1480u_tx_unlink_urbs(i1480u);
141 wlp_reset_all(&i1480u->wlp);
142 }
143 break;
144 }
145 i1480u_tx_destroy(i1480u, wtx);
146 if (atomic_dec_return(&i1480u->tx_inflight.count)
147 <= i1480u->tx_inflight.threshold
148 && netif_queue_stopped(net_dev)
149 && i1480u->tx_inflight.threshold != 0) {
150 netif_start_queue(net_dev);
151 atomic_inc(&i1480u->tx_inflight.restart_count);
152 }
153 return;
154}
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
/*
 * Build a multi-fragment TX buffer from an skb too big for one frame.
 *
 * Resulting buffer layout:
 *
 *   [untd_hdr_1st][wlp_tx_hdr][1st payload chunk]
 *   [untd_hdr_rst][payload chunk] ... [untd_hdr_rst][last chunk]
 *
 * On success the skb is consumed (freed) and wtx->buf/wtx->buf_size
 * describe the frame; wtx->wlp_tx_hdr points inside the buffer so the
 * caller can fill the WLP header in later.  On failure the skb is NOT
 * freed -- the caller is expected to dispose of it.
 *
 * Returns: 0 on success, -ENOMEM if the buffer allocation fails,
 * -EINVAL if the layout would overflow the buffer (internal bug).
 */
static
int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb,
		       gfp_t gfp_mask)
{
	int result;
	void *pl;
	size_t pl_size;

	void *pl_itr, *buf_itr;
	size_t pl_size_left, frgs, pl_size_1st, frg_pl_size = 0;
	struct untd_hdr_1st *untd_hdr_1st;
	struct wlp_tx_hdr *wlp_tx_hdr;
	struct untd_hdr_rst *untd_hdr_rst;

	wtx->skb = NULL;	/* skb data is copied; URB uses wtx->buf */
	pl = skb->data;
	pl_itr = pl;
	pl_size = skb->len;
	pl_size_left = pl_size;

	/* The first fragment carries both the UNTD 1st-fragment header
	 * and the WLP TX header, so it has less room for payload. */
	pl_size_1st = i1480u_MAX_FRG_SIZE
		- sizeof(struct untd_hdr_1st) - sizeof(struct wlp_tx_hdr);
	BUG_ON(pl_size_1st > pl_size);	/* caller guarantees multi-fragment */
	pl_size_left -= pl_size_1st;

	/* Number of follow-up fragments (each prefixed with an
	 * untd_hdr_rst), rounded up. */
	frgs = (pl_size_left + i1480u_MAX_PL_SIZE - 1) / i1480u_MAX_PL_SIZE;

	/* One contiguous buffer for all headers plus the payload. */
	result = -ENOMEM;
	wtx->buf_size = sizeof(*untd_hdr_1st)
		+ sizeof(*wlp_tx_hdr)
		+ frgs * sizeof(*untd_hdr_rst)
		+ pl_size;
	wtx->buf = kmalloc(wtx->buf_size, gfp_mask);
	if (wtx->buf == NULL)
		goto error_buf_alloc;

	buf_itr = wtx->buf;
	/* First fragment header */
	untd_hdr_1st = buf_itr;
	buf_itr += sizeof(*untd_hdr_1st);
	untd_hdr_set_type(&untd_hdr_1st->hdr, i1480u_PKT_FRAG_1ST);
	untd_hdr_set_rx_tx(&untd_hdr_1st->hdr, 0);
	untd_hdr_1st->hdr.len = cpu_to_le16(pl_size + sizeof(*wlp_tx_hdr));
	untd_hdr_1st->fragment_len =
		cpu_to_le16(pl_size_1st + sizeof(*wlp_tx_hdr));
	memset(untd_hdr_1st->padding, 0, sizeof(untd_hdr_1st->padding));
	/* WLP header contents are filled in later by the caller. */
	wlp_tx_hdr = wtx->wlp_tx_hdr = buf_itr;
	buf_itr += sizeof(*wlp_tx_hdr);
	/* First payload chunk */
	memcpy(buf_itr, pl_itr, pl_size_1st);
	pl_itr += pl_size_1st;
	buf_itr += pl_size_1st;

	/* Remaining fragments: rest-header + up to i1480u_MAX_PL_SIZE
	 * bytes each.  The bound checks should never trigger -- the
	 * buffer was sized above -- they guard against layout bugs. */
	result = -EINVAL;
	while (pl_size_left > 0) {
		if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf
		    > wtx->buf_size) {
			printk(KERN_ERR "BUG: no space for header\n");
			goto error_bug;
		}
		untd_hdr_rst = buf_itr;
		buf_itr += sizeof(*untd_hdr_rst);
		if (pl_size_left > i1480u_MAX_PL_SIZE) {
			frg_pl_size = i1480u_MAX_PL_SIZE;
			untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_NXT);
		} else {
			frg_pl_size = pl_size_left;
			untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST);
		}
		untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0);
		untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size);
		untd_hdr_rst->padding = 0;
		if (buf_itr + frg_pl_size - wtx->buf
		    > wtx->buf_size) {
			printk(KERN_ERR "BUG: no space for payload\n");
			goto error_bug;
		}
		memcpy(buf_itr, pl_itr, frg_pl_size);
		buf_itr += frg_pl_size;
		pl_itr += frg_pl_size;
		pl_size_left -= frg_pl_size;
	}
	dev_kfree_skb_irq(skb);	/* payload fully copied into wtx->buf */
	return 0;

error_bug:
	printk(KERN_ERR
	       "BUG: skb %u bytes\n"
	       "BUG: frg_pl_size %zd i1480u_MAX_FRG_SIZE %u\n"
	       "BUG: buf_itr %zu buf_size %zu pl_size_left %zu\n",
	       skb->len,
	       frg_pl_size, i1480u_MAX_FRG_SIZE,
	       buf_itr - wtx->buf, wtx->buf_size, pl_size_left);
	/* NOTE(review): wtx->buf is left dangling after this kfree();
	 * safe today because the caller frees the whole wtx on error,
	 * but setting it to NULL would be more defensive -- confirm. */
	kfree(wtx->buf);
error_buf_alloc:
	return result;
}
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312static
313int i1480u_tx_create_1(struct i1480u_tx *wtx, struct sk_buff *skb,
314 gfp_t gfp_mask)
315{
316 struct untd_hdr_cmp *untd_hdr_cmp;
317 struct wlp_tx_hdr *wlp_tx_hdr;
318
319 wtx->buf = NULL;
320 wtx->skb = skb;
321 BUG_ON(skb_headroom(skb) < sizeof(*wlp_tx_hdr));
322 wlp_tx_hdr = (void *) __skb_push(skb, sizeof(*wlp_tx_hdr));
323 wtx->wlp_tx_hdr = wlp_tx_hdr;
324 BUG_ON(skb_headroom(skb) < sizeof(*untd_hdr_cmp));
325 untd_hdr_cmp = (void *) __skb_push(skb, sizeof(*untd_hdr_cmp));
326
327 untd_hdr_set_type(&untd_hdr_cmp->hdr, i1480u_PKT_FRAG_CMP);
328 untd_hdr_set_rx_tx(&untd_hdr_cmp->hdr, 0);
329 untd_hdr_cmp->hdr.len = cpu_to_le16(skb->len - sizeof(*untd_hdr_cmp));
330 untd_hdr_cmp->padding = 0;
331 return 0;
332}
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365static
366struct i1480u_tx *i1480u_tx_create(struct i1480u *i1480u,
367 struct sk_buff *skb, gfp_t gfp_mask)
368{
369 int result;
370 struct usb_endpoint_descriptor *epd;
371 int usb_pipe;
372 unsigned long flags;
373
374 struct i1480u_tx *wtx;
375 const size_t pl_max_size =
376 i1480u_MAX_FRG_SIZE - sizeof(struct untd_hdr_cmp)
377 - sizeof(struct wlp_tx_hdr);
378
379 wtx = kmalloc(sizeof(*wtx), gfp_mask);
380 if (wtx == NULL)
381 goto error_wtx_alloc;
382 wtx->urb = usb_alloc_urb(0, gfp_mask);
383 if (wtx->urb == NULL)
384 goto error_urb_alloc;
385 epd = &i1480u->usb_iface->cur_altsetting->endpoint[2].desc;
386 usb_pipe = usb_sndbulkpipe(i1480u->usb_dev, epd->bEndpointAddress);
387
388 if (skb->len > pl_max_size) {
389 result = i1480u_tx_create_n(wtx, skb, gfp_mask);
390 if (result < 0)
391 goto error_create;
392 usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
393 wtx->buf, wtx->buf_size, i1480u_tx_cb, wtx);
394 } else {
395 result = i1480u_tx_create_1(wtx, skb, gfp_mask);
396 if (result < 0)
397 goto error_create;
398 usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
399 skb->data, skb->len, i1480u_tx_cb, wtx);
400 }
401 spin_lock_irqsave(&i1480u->tx_list_lock, flags);
402 list_add(&wtx->list_node, &i1480u->tx_list);
403 spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
404 return wtx;
405
406error_create:
407 kfree(wtx->urb);
408error_urb_alloc:
409 kfree(wtx);
410error_wtx_alloc:
411 return NULL;
412}
413
414
415
416
417
418
419
420
421
422
423
424
425
426int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb,
427 struct uwb_dev_addr *dst)
428{
429 int result = -ENXIO;
430 struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
431 struct device *dev = &i1480u->usb_iface->dev;
432 struct net_device *net_dev = i1480u->net_dev;
433 struct i1480u_tx *wtx;
434 struct wlp_tx_hdr *wlp_tx_hdr;
435 static unsigned char dev_bcast[2] = { 0xff, 0xff };
436
437 BUG_ON(i1480u->wlp.rc == NULL);
438 if ((net_dev->flags & IFF_UP) == 0)
439 goto out;
440 result = -EBUSY;
441 if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) {
442 netif_stop_queue(net_dev);
443 goto error_max_inflight;
444 }
445 result = -ENOMEM;
446 wtx = i1480u_tx_create(i1480u, skb, GFP_ATOMIC);
447 if (unlikely(wtx == NULL)) {
448 if (printk_ratelimit())
449 dev_err(dev, "TX: no memory for WLP TX URB,"
450 "dropping packet (in flight %d)\n",
451 atomic_read(&i1480u->tx_inflight.count));
452 netif_stop_queue(net_dev);
453 goto error_wtx_alloc;
454 }
455 wtx->i1480u = i1480u;
456
457
458
459
460 wlp_tx_hdr = wtx->wlp_tx_hdr;
461 *wlp_tx_hdr = i1480u->options.def_tx_hdr;
462 wlp_tx_hdr->dstaddr = *dst;
463 if (!memcmp(&wlp_tx_hdr->dstaddr, dev_bcast, sizeof(dev_bcast))
464 && (wlp_tx_hdr_delivery_id_type(wlp_tx_hdr) & WLP_DRP)) {
465
466
467 wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr, i1480u->options.pca_base_priority);
468 }
469
470 result = usb_submit_urb(wtx->urb, GFP_ATOMIC);
471 if (result < 0) {
472 dev_err(dev, "TX: cannot submit URB: %d\n", result);
473
474 wtx->skb = NULL;
475 goto error_tx_urb_submit;
476 }
477 atomic_inc(&i1480u->tx_inflight.count);
478 net_dev->trans_start = jiffies;
479 return result;
480
481error_tx_urb_submit:
482 i1480u_tx_destroy(i1480u, wtx);
483error_wtx_alloc:
484error_max_inflight:
485out:
486 return result;
487}
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506netdev_tx_t i1480u_hard_start_xmit(struct sk_buff *skb,
507 struct net_device *net_dev)
508{
509 int result;
510 struct i1480u *i1480u = netdev_priv(net_dev);
511 struct device *dev = &i1480u->usb_iface->dev;
512 struct uwb_dev_addr dst;
513
514 if ((net_dev->flags & IFF_UP) == 0)
515 goto error;
516 result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst);
517 if (result < 0) {
518 dev_err(dev, "WLP verification of TX frame failed (%d). "
519 "Dropping packet.\n", result);
520 goto error;
521 } else if (result == 1) {
522
523
524 goto out;
525 }
526 result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst);
527 if (result < 0) {
528 dev_err(dev, "Frame TX failed (%d).\n", result);
529 goto error;
530 }
531 return NETDEV_TX_OK;
532error:
533 dev_kfree_skb_any(skb);
534 net_dev->stats.tx_dropped++;
535out:
536 return NETDEV_TX_OK;
537}
538
539
540
541
542
543
/*
 * netdev watchdog hook: the TX path appears stuck, so reset the whole
 * WLP device to recover.
 */
void i1480u_tx_timeout(struct net_device *net_dev)
{
	struct i1480u *i1480u = netdev_priv(net_dev);

	wlp_reset_all(&i1480u->wlp);
}
550
551
552void i1480u_tx_release(struct i1480u *i1480u)
553{
554 unsigned long flags;
555 struct i1480u_tx *wtx, *next;
556 int count = 0, empty;
557
558 spin_lock_irqsave(&i1480u->tx_list_lock, flags);
559 list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
560 count++;
561 usb_unlink_urb(wtx->urb);
562 }
563 spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
564 count = count*10;
565
566
567
568
569
570
571
572
573 while (1) {
574 spin_lock_irqsave(&i1480u->tx_list_lock, flags);
575 empty = list_empty(&i1480u->tx_list);
576 spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
577 if (empty)
578 break;
579 count--;
580 BUG_ON(count == 0);
581 msleep(20);
582 }
583}
584