1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85#include <linux/workqueue.h>
86#include <linux/slab.h>
87#include <linux/usb.h>
88#include "i2400m-usb.h"
89
90
91#define D_SUBMODULE rx
92#include "usb-debug-levels.h"
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117static
118size_t i2400mu_rx_size_grow(struct i2400mu *i2400mu)
119{
120 struct device *dev = &i2400mu->usb_iface->dev;
121 size_t rx_size;
122 const size_t max_pkt_size = 512;
123
124 rx_size = 2 * i2400mu->rx_size;
125 if (rx_size % max_pkt_size == 0) {
126 rx_size -= 8;
127 d_printf(1, dev,
128 "RX: expected size grew to %zu [adjusted -8] "
129 "from %zu\n",
130 rx_size, i2400mu->rx_size);
131 } else
132 d_printf(1, dev,
133 "RX: expected size grew to %zu from %zu\n",
134 rx_size, i2400mu->rx_size);
135 return rx_size;
136}
137
138
139static
140void i2400mu_rx_size_maybe_shrink(struct i2400mu *i2400mu)
141{
142 const size_t max_pkt_size = 512;
143 struct device *dev = &i2400mu->usb_iface->dev;
144
145 if (unlikely(i2400mu->rx_size_cnt >= 100
146 && i2400mu->rx_size_auto_shrink)) {
147 size_t avg_rx_size =
148 i2400mu->rx_size_acc / i2400mu->rx_size_cnt;
149 size_t new_rx_size = i2400mu->rx_size / 2;
150 if (avg_rx_size < new_rx_size) {
151 if (new_rx_size % max_pkt_size == 0) {
152 new_rx_size -= 8;
153 d_printf(1, dev,
154 "RX: expected size shrank to %zu "
155 "[adjusted -8] from %zu\n",
156 new_rx_size, i2400mu->rx_size);
157 } else
158 d_printf(1, dev,
159 "RX: expected size shrank to %zu "
160 "from %zu\n",
161 new_rx_size, i2400mu->rx_size);
162 i2400mu->rx_size = new_rx_size;
163 i2400mu->rx_size_cnt = 0;
164 i2400mu->rx_size_acc = i2400mu->rx_size;
165 }
166 }
167}
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191static
192struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb)
193{
194 int result = 0;
195 struct device *dev = &i2400mu->usb_iface->dev;
196 int usb_pipe, read_size, rx_size, do_autopm;
197 struct usb_endpoint_descriptor *epd;
198 const size_t max_pkt_size = 512;
199
200 d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
201 do_autopm = atomic_read(&i2400mu->do_autopm);
202 result = do_autopm ?
203 usb_autopm_get_interface(i2400mu->usb_iface) : 0;
204 if (result < 0) {
205 dev_err(dev, "RX: can't get autopm: %d\n", result);
206 do_autopm = 0;
207 }
208 epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_in);
209 usb_pipe = usb_rcvbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
210retry:
211 rx_size = skb_end_pointer(rx_skb) - rx_skb->data - rx_skb->len;
212 if (unlikely(rx_size % max_pkt_size == 0)) {
213 rx_size -= 8;
214 d_printf(1, dev, "RX: rx_size adapted to %d [-8]\n", rx_size);
215 }
216 result = usb_bulk_msg(
217 i2400mu->usb_dev, usb_pipe, rx_skb->data + rx_skb->len,
218 rx_size, &read_size, 200);
219 usb_mark_last_busy(i2400mu->usb_dev);
220 switch (result) {
221 case 0:
222 if (read_size == 0)
223 goto retry;
224 skb_put(rx_skb, read_size);
225 break;
226 case -EPIPE:
227
228
229
230
231
232
233
234
235
236
237 if (edc_inc(&i2400mu->urb_edc,
238 10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
239 dev_err(dev, "BM-CMD: too many stalls in "
240 "URB; resetting device\n");
241 goto do_reset;
242 }
243 usb_clear_halt(i2400mu->usb_dev, usb_pipe);
244 msleep(10);
245 goto retry;
246 case -EINVAL:
247 case -ENODEV:
248 case -ENOENT:
249 case -ESHUTDOWN:
250 case -ECONNRESET:
251 break;
252 case -EOVERFLOW: {
253 struct sk_buff *new_skb;
254 rx_size = i2400mu_rx_size_grow(i2400mu);
255 if (rx_size <= (1 << 16))
256 i2400mu->rx_size = rx_size;
257 else if (printk_ratelimit()) {
258 dev_err(dev, "BUG? rx_size up to %d\n", rx_size);
259 result = -EINVAL;
260 goto out;
261 }
262 skb_put(rx_skb, read_size);
263 new_skb = skb_copy_expand(rx_skb, 0, rx_size - rx_skb->len,
264 GFP_KERNEL);
265 if (new_skb == NULL) {
266 kfree_skb(rx_skb);
267 rx_skb = NULL;
268 goto out;
269 }
270 kfree_skb(rx_skb);
271 rx_skb = new_skb;
272 i2400mu->rx_size_cnt = 0;
273 i2400mu->rx_size_acc = i2400mu->rx_size;
274 d_printf(1, dev, "RX: size changed to %d, received %d, "
275 "copied %d, capacity %ld\n",
276 rx_size, read_size, rx_skb->len,
277 (long) skb_end_offset(new_skb));
278 goto retry;
279 }
280
281
282
283
284 case -ETIMEDOUT:
285 dev_err(dev, "RX: timeout: %d\n", result);
286 result = 0;
287 break;
288 default:
289 if (edc_inc(&i2400mu->urb_edc,
290 EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
291 goto error_reset;
292 dev_err(dev, "RX: error receiving URB: %d, retrying\n", result);
293 goto retry;
294 }
295out:
296 if (do_autopm)
297 usb_autopm_put_interface(i2400mu->usb_iface);
298 d_fnend(4, dev, "(i2400mu %p) = %p\n", i2400mu, rx_skb);
299 return rx_skb;
300
301error_reset:
302 dev_err(dev, "RX: maximum errors in URB exceeded; "
303 "resetting device\n");
304do_reset:
305 usb_queue_reset_device(i2400mu->usb_iface);
306 rx_skb = ERR_PTR(result);
307 goto out;
308}
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
/*
 * Kernel thread that services USB reception
 *
 * @_i2400mu: the struct i2400mu instance, as void * (kthread API).
 *
 * Registers itself in i2400mu->rx_kthread (under rx_lock, so
 * i2400mu_rx_release() can stop it), then loops: sleep on rx_wq until
 * i2400mu_rx_kick() raises rx_pending_count or a stop is requested;
 * for each pending read, allocate an skb of the current expected size,
 * read into it with i2400mu_rx() and pass the result to i2400m_rx().
 * Returns 0 on a clean stop, or the last error when bailing out after
 * a device reset.
 */
static
int i2400mu_rxd(void *_i2400mu)
{
	int result = 0;
	struct i2400mu *i2400mu = _i2400mu;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = &i2400mu->usb_iface->dev;
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	size_t pending;
	int rx_size;
	struct sk_buff *rx_skb;
	unsigned long flags;

	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
	/* publish ourselves so _rx_release() can find and stop us */
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	BUG_ON(i2400mu->rx_kthread != NULL);
	i2400mu->rx_kthread = current;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	while (1) {
		d_printf(2, dev, "RX: waiting for messages\n");
		pending = 0;
		wait_event_interruptible(
			i2400mu->rx_wq,
			(kthread_should_stop()	/* stop has priority */
			 || (pending = atomic_read(&i2400mu->rx_pending_count)))
			);
		if (kthread_should_stop())
			break;
		if (pending == 0)
			continue;	/* interrupted / spurious wakeup */
		rx_size = i2400mu->rx_size;
		d_printf(2, dev, "RX: reading up to %d bytes\n", rx_size);
		rx_skb = __netdev_alloc_skb(net_dev, rx_size, GFP_KERNEL);
		if (rx_skb == NULL) {
			dev_err(dev, "RX: can't allocate skb [%d bytes]\n",
				rx_size);
			msleep(50);	/* let memory pressure ease, retry */
			continue;
		}

		/* Read the message; i2400mu_rx() may reallocate the skb,
		 * return NULL (dropped) or ERR_PTR (device was reset) */
		rx_skb = i2400mu_rx(i2400mu, rx_skb);
		result = PTR_ERR(rx_skb);
		if (IS_ERR(rx_skb))
			goto out;
		atomic_dec(&i2400mu->rx_pending_count);
		if (rx_skb == NULL || rx_skb->len == 0) {
			/* nothing received or dropped; kfree_skb(NULL) is ok */
			kfree_skb(rx_skb);
			continue;
		}

		/* account the sample for the auto-shrink heuristic, then
		 * hand the message off to the generic i2400m RX handler */
		i2400mu->rx_size_cnt++;
		i2400mu->rx_size_acc += rx_skb->len;
		result = i2400m_rx(i2400m, rx_skb);
		if (result == -EIO
		    && edc_inc(&i2400mu->urb_edc,
			       EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
			goto error_reset;	/* too many bad buffers */
		}

		/* maybe shrink the expected RX size for the next reads */
		i2400mu_rx_size_maybe_shrink(i2400mu);
	}
	result = 0;
out:
	/* deregister; rx_lock keeps this coherent with _rx_release() */
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	i2400mu->rx_kthread = NULL;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
	return result;

error_reset:
	dev_err(dev, "RX: maximum errors in received buffer exceeded; "
		"resetting device\n");
	usb_queue_reset_device(i2400mu->usb_iface);
	goto out;
}
406
407
408
409
410
411
412
413
414
415void i2400mu_rx_kick(struct i2400mu *i2400mu)
416{
417 struct i2400m *i2400m = &i2400mu->i2400m;
418 struct device *dev = &i2400mu->usb_iface->dev;
419
420 d_fnstart(3, dev, "(i2400mu %p)\n", i2400m);
421 atomic_inc(&i2400mu->rx_pending_count);
422 wake_up_all(&i2400mu->rx_wq);
423 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
424}
425
426
427int i2400mu_rx_setup(struct i2400mu *i2400mu)
428{
429 int result = 0;
430 struct i2400m *i2400m = &i2400mu->i2400m;
431 struct device *dev = &i2400mu->usb_iface->dev;
432 struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
433 struct task_struct *kthread;
434
435 kthread = kthread_run(i2400mu_rxd, i2400mu, "%s-rx",
436 wimax_dev->name);
437
438 if (IS_ERR(kthread)) {
439 result = PTR_ERR(kthread);
440 dev_err(dev, "RX: cannot start thread: %d\n", result);
441 }
442 return result;
443}
444
445
446void i2400mu_rx_release(struct i2400mu *i2400mu)
447{
448 unsigned long flags;
449 struct i2400m *i2400m = &i2400mu->i2400m;
450 struct device *dev = i2400m_dev(i2400m);
451 struct task_struct *kthread;
452
453 spin_lock_irqsave(&i2400m->rx_lock, flags);
454 kthread = i2400mu->rx_kthread;
455 i2400mu->rx_kthread = NULL;
456 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
457 if (kthread)
458 kthread_stop(kthread);
459 else
460 d_printf(1, dev, "RX: kthread had already exited\n");
461}
462
463