/*
 * UWB radio controller: notification/event handling (neh)
 *
 * Commands sent to the radio controller are answered asynchronously.
 * Before submitting a command, the caller registers a handler
 * (struct uwb_rc_neh) that names the expected reply (event type and
 * event code) and carries a callback; the command is stamped with a
 * context ID so the reply can be matched back to its handler.
 *
 * Context 0 is reserved for unsolicited notifications, which are not
 * matched against handlers but copied and queued for processing by
 * the uwbd thread.  A per-handler timer completes commands the
 * hardware never answers with -ETIMEDOUT, and uwb_rc_neh_error()
 * fails all pending handlers when the device goes away.
 */
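
/*
 * Typical life cycle of a handler, as an illustrative sketch only:
 * the actual command submission helpers live outside this file, and
 * my_submit_cmd(), my_cmd_done() and my_priv are made-up placeholders.
 *
 *	neh = uwb_rc_neh_add(rc, cmd, expected_type, expected_event,
 *	                     my_cmd_done, my_priv);
 *	if (IS_ERR(neh))
 *		return PTR_ERR(neh);
 *	result = my_submit_cmd(rc, cmd, cmd_size);
 *	if (result < 0)
 *		uwb_rc_neh_rm(rc, neh);
 *	else
 *		uwb_rc_neh_arm(rc, neh);
 *
 * Once armed, exactly one of three things completes the handler: the
 * matching reply event (via uwb_rc_neh_grok()), the command timeout
 * (callback runs with -ETIMEDOUT) or a device error reported through
 * uwb_rc_neh_error().
 */
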
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>

#include "uwb-internal.h"

/**
 * struct uwb_rc_neh - registered handler for a pending RC command reply
 * @kref:      reference count (the handler may outlive the list entry)
 * @rc:        radio controller this handler belongs to
 * @evt_type:  expected bEventType of the reply
 * @evt:       expected wEvent of the reply (little endian)
 * @context:   context ID stamped on the command and echoed by the reply
 * @completed: set once the reply arrived, so a late timer does nothing
 * @cb:        callback run with the reply (or NULL plus an error code)
 * @arg:       opaque argument passed to @cb
 * @timer:     per-command timeout
 * @list_node: entry in rc->neh_list while the handler is pending
 */
struct uwb_rc_neh {
	struct kref kref;

	struct uwb_rc *rc;
	u8 evt_type;
	__le16 evt;
	u8 context;
	u8 completed;
	uwb_rc_cmd_cb_f cb;
	void *arg;

	struct timer_list timer;
	struct list_head list_node;
};
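
/*
 * The reply callback contract, as a hypothetical minimal example
 * (my_cmd_done, struct my_req and its fields are made up): the
 * callback receives the raw reply and its size, or NULL and a
 * negative error such as -ETIMEDOUT or whatever was passed to
 * uwb_rc_neh_error().
 *
 *	static void my_cmd_done(struct uwb_rc *rc, void *arg,
 *	                        struct uwb_rceb *reply, ssize_t reply_size)
 *	{
 *		struct my_req *req = arg;
 *
 *		req->result = reply_size < 0 ? reply_size : 0;
 *		complete(&req->done);
 *	}
 *
 * Note the reply buffer is only valid for the duration of the call;
 * anything needed later has to be copied out here.
 */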

static void uwb_rc_neh_timer(struct timer_list *t);

static void uwb_rc_neh_release(struct kref *kref)
{
	struct uwb_rc_neh *neh = container_of(kref, struct uwb_rc_neh, kref);

	kfree(neh);
}

static void uwb_rc_neh_get(struct uwb_rc_neh *neh)
{
	kref_get(&neh->kref);
}

/*
 * Drop a reference on a handler; the handler is freed when the last
 * reference goes away.
 */
void uwb_rc_neh_put(struct uwb_rc_neh *neh)
{
	kref_put(&neh->kref, uwb_rc_neh_release);
}

/*
 * Get an unused context ID for a command.
 *
 * Search the context bitmap starting at the rolling cursor so IDs are
 * not immediately reused; wrap around to the beginning if the top of
 * the map is exhausted.  Returns -ENFILE if every context is busy.
 *
 * 0 and 0xff are never handed out; both are reserved in
 * uwb_rc_neh_create() (0 tags unsolicited notifications).
 *
 * rc->neh_lock must be held by the caller.
 */
static
int __uwb_rc_ctx_get(struct uwb_rc *rc, struct uwb_rc_neh *neh)
{
	int result;
	result = find_next_zero_bit(rc->ctx_bm, UWB_RC_CTX_MAX,
				    rc->ctx_roll++);
	if (result < UWB_RC_CTX_MAX)
		goto found;
	result = find_first_zero_bit(rc->ctx_bm, UWB_RC_CTX_MAX);
	if (result < UWB_RC_CTX_MAX)
		goto found;
	return -ENFILE;
found:
	set_bit(result, rc->ctx_bm);
	neh->context = result;
	return 0;
}

/*
 * Release a context ID back to the bitmap.  Nothing to do if the
 * handler never got one (context 0).  rc->neh_lock must be held.
 */
static
void __uwb_rc_ctx_put(struct uwb_rc *rc, struct uwb_rc_neh *neh)
{
	struct device *dev = &rc->uwb_dev.dev;
	if (neh->context == 0)
		return;
	if (test_bit(neh->context, rc->ctx_bm) == 0) {
		dev_err(dev, "context %u not set in bitmap\n",
			neh->context);
		WARN_ON(1);
	}
	clear_bit(neh->context, rc->ctx_bm);
	neh->context = 0;
}

/**
 * uwb_rc_neh_add - register a handler for the reply to a command
 * @rc:             the radio controller
 * @cmd:            the command about to be sent; its bCommandContext
 *                  field is filled in with the allocated context ID
 * @expected_type:  bEventType the reply is expected to carry
 * @expected_event: wEvent the reply is expected to carry
 * @cb:             callback to run when the reply (or an error) arrives
 * @arg:            opaque argument for @cb
 *
 * Allocates a context ID, stamps it on @cmd and queues the handler on
 * rc->neh_list.  Returns the new handler, or an ERR_PTR() on failure.
 * The caller arms the handler with uwb_rc_neh_arm() once the command
 * has actually been submitted, and removes it with uwb_rc_neh_rm() if
 * submission fails.
 */
struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd,
				  u8 expected_type, u16 expected_event,
				  uwb_rc_cmd_cb_f cb, void *arg)
{
	int result;
	unsigned long flags;
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rc_neh *neh;

	neh = kzalloc(sizeof(*neh), GFP_KERNEL);
	if (neh == NULL) {
		result = -ENOMEM;
		goto error_kzalloc;
	}

	kref_init(&neh->kref);
	INIT_LIST_HEAD(&neh->list_node);
	timer_setup(&neh->timer, uwb_rc_neh_timer, 0);

	neh->rc = rc;
	neh->evt_type = expected_type;
	neh->evt = cpu_to_le16(expected_event);
	neh->cb = cb;
	neh->arg = arg;

	spin_lock_irqsave(&rc->neh_lock, flags);
	result = __uwb_rc_ctx_get(rc, neh);
	if (result >= 0) {
		cmd->bCommandContext = neh->context;
		list_add_tail(&neh->list_node, &rc->neh_list);
		uwb_rc_neh_get(neh);
	}
	spin_unlock_irqrestore(&rc->neh_lock, flags);
	if (result < 0)
		goto error_ctx_get;

	return neh;

error_ctx_get:
	kfree(neh);
error_kzalloc:
	dev_err(dev, "cannot open handle to radio controller: %d\n", result);
	return ERR_PTR(result);
}

static void __uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
{
	__uwb_rc_ctx_put(rc, neh);
	list_del(&neh->list_node);
}

/**
 * uwb_rc_neh_rm - de-register a handler that is no longer wanted
 * @rc:  the radio controller
 * @neh: the handler returned by uwb_rc_neh_add()
 *
 * Releases the context ID, unlinks the handler and drops the list's
 * reference.  Any pending timeout is cancelled; the callback is not
 * run.
 */
void uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
{
	unsigned long flags;

	spin_lock_irqsave(&rc->neh_lock, flags);
	__uwb_rc_neh_rm(rc, neh);
	spin_unlock_irqrestore(&rc->neh_lock, flags);

	del_timer_sync(&neh->timer);
	uwb_rc_neh_put(neh);
}

/**
 * uwb_rc_neh_arm - start the timeout for a submitted command
 * @rc:  the radio controller
 * @neh: the handler registered for the command's reply
 *
 * Called once the command has gone out to the hardware; if the reply
 * does not arrive within UWB_RC_CMD_TIMEOUT_MS, the handler's timer
 * fires and the callback is run with -ETIMEDOUT.
 */
void uwb_rc_neh_arm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
{
	unsigned long flags;

	spin_lock_irqsave(&rc->neh_lock, flags);
	if (neh->context)
		mod_timer(&neh->timer,
			  jiffies + msecs_to_jiffies(UWB_RC_CMD_TIMEOUT_MS));
	spin_unlock_irqrestore(&rc->neh_lock, flags);
}

static void uwb_rc_neh_cb(struct uwb_rc_neh *neh, struct uwb_rceb *rceb, size_t size)
{
	(*neh->cb)(neh->rc, neh->arg, rceb, size);
	uwb_rc_neh_put(neh);
}

static bool uwb_rc_neh_match(struct uwb_rc_neh *neh, const struct uwb_rceb *rceb)
{
	return neh->evt_type == rceb->bEventType
		&& neh->evt == rceb->wEvent
		&& neh->context == rceb->bEventContext;
}

/*
 * Find the handler waiting for this event.
 *
 * If a match is found, the handler is removed from the list (and its
 * context released) before the lock is dropped, so only one event can
 * ever claim it.  The list's reference is handed over to the caller,
 * who must eventually drop it; uwb_rc_neh_cb() does so after running
 * the callback.
 */
static
struct uwb_rc_neh *uwb_rc_neh_lookup(struct uwb_rc *rc,
				     const struct uwb_rceb *rceb)
{
	struct uwb_rc_neh *neh = NULL, *h;
	unsigned long flags;

	spin_lock_irqsave(&rc->neh_lock, flags);

	list_for_each_entry(h, &rc->neh_list, list_node) {
		if (uwb_rc_neh_match(h, rceb)) {
			neh = h;
			break;
		}
	}

	if (neh)
		__uwb_rc_neh_rm(rc, neh);

	spin_unlock_irqrestore(&rc->neh_lock, flags);

	return neh;
}

/*
 * Queue an unsolicited notification (context 0) for processing by the
 * uwbd thread.
 *
 * @rceb is a copy handed over to the queued event; @size may carry a
 * negative error code from the transport, in which case nothing is
 * queued (silently for -ESHUTDOWN, with a complaint otherwise).
 *
 * Runs in atomic context, hence GFP_ATOMIC.
 */
static
void uwb_rc_notif(struct uwb_rc *rc, struct uwb_rceb *rceb, ssize_t size)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_event *uwb_evt;

	if (size == -ESHUTDOWN)
		return;
	if (size < 0) {
		dev_err(dev, "ignoring event with error code %zd\n",
			size);
		return;
	}

	uwb_evt = kzalloc(sizeof(*uwb_evt), GFP_ATOMIC);
	if (unlikely(uwb_evt == NULL)) {
		dev_err(dev, "no memory to queue event 0x%02x/%04x/%02x\n",
			rceb->bEventType, le16_to_cpu(rceb->wEvent),
			rceb->bEventContext);
		return;
	}
	uwb_evt->rc = __uwb_rc_get(rc);
	uwb_evt->ts_jiffies = jiffies;
	uwb_evt->type = UWB_EVT_TYPE_NOTIF;
	uwb_evt->notif.size = size;
	uwb_evt->notif.rceb = rceb;

	uwbd_event_queue(uwb_evt);
}

/*
 * Dispatch one complete event: context 0 means an unsolicited
 * notification (copied and queued for uwbd), anything else is matched
 * against the pending handlers and completes the corresponding
 * command.
 */
static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size_t size)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rc_neh *neh;
	struct uwb_rceb *notif;
	unsigned long flags;

	if (rceb->bEventContext == 0) {
		notif = kmalloc(size, GFP_ATOMIC);
		if (notif) {
			memcpy(notif, rceb, size);
			uwb_rc_notif(rc, notif, size);
		} else {
			dev_err(dev, "event 0x%02x/%04x/%02x (%zu bytes): no memory\n",
				rceb->bEventType, le16_to_cpu(rceb->wEvent),
				rceb->bEventContext, size);
		}
	} else {
		neh = uwb_rc_neh_lookup(rc, rceb);
		if (neh) {
			spin_lock_irqsave(&rc->neh_lock, flags);
			/* to guard against a late timeout */
			neh->completed = 1;
			del_timer(&neh->timer);
			spin_unlock_irqrestore(&rc->neh_lock, flags);
			uwb_rc_neh_cb(neh, rceb, size);
		} else {
			dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n",
				 rceb->bEventType, le16_to_cpu(rceb->wEvent),
				 rceb->bEventContext, size);
		}
	}
}

/**
 * uwb_rc_neh_grok - process a buffer of events from the radio controller
 * @rc:       the radio controller
 * @buf:      buffer of concatenated events, each starting with a
 *            struct uwb_rceb header
 * @buf_size: number of valid bytes in @buf
 *
 * Walk the buffer and dispatch each event in it.  If the driver
 * provides a filter_event() hook, it gets first crack at the raw data;
 * otherwise (or when the hook declines with -ENOANO) the event size
 * table (EST) is consulted to find out how long the event is.
 * Incomplete or unrecognizable events abort processing of the rest of
 * the buffer.
 *
 * Typically called from the transport driver's data-receive
 * completion path.
 */
void uwb_rc_neh_grok(struct uwb_rc *rc, void *buf, size_t buf_size)
{
	struct device *dev = &rc->uwb_dev.dev;
	void *itr;
	struct uwb_rceb *rceb;
	size_t size, real_size, event_size;
	int needtofree;

	itr = buf;
	size = buf_size;
	while (size > 0) {
		if (size < sizeof(*rceb)) {
			dev_err(dev, "not enough data in event buffer to "
				"process incoming events (%zu left, minimum is "
				"%zu)\n", size, sizeof(*rceb));
			break;
		}

		rceb = itr;
		if (rc->filter_event) {
			needtofree = rc->filter_event(rc, &rceb, size,
						      &real_size, &event_size);
			if (needtofree < 0 && needtofree != -ENOANO) {
				dev_err(dev, "BUG: Unable to filter event "
					"(0x%02x/%04x/%02x) from "
					"device\n", rceb->bEventType,
					le16_to_cpu(rceb->wEvent),
					rceb->bEventContext);
				break;
			}
		} else {
			needtofree = -ENOANO;
		}

		/*
		 * -ENOANO (or no filter at all) means the event was not
		 * filtered and has to be sized with the event size table;
		 * otherwise filter_event() already filled in real_size and
		 * event_size, and a return of 1 means it replaced rceb with
		 * a buffer we must free below.
		 */
		if (needtofree == -ENOANO) {
			ssize_t ret = uwb_est_find_size(rc, rceb, size);
			if (ret < 0)
				break;
			if (ret > size) {
				dev_err(dev, "BUG: hw sent incomplete event "
					"0x%02x/%04x/%02x (%zd bytes), only got "
					"%zu bytes. We don't handle that.\n",
					rceb->bEventType, le16_to_cpu(rceb->wEvent),
					rceb->bEventContext, ret, size);
				break;
			}
			real_size = event_size = ret;
		}
		uwb_rc_neh_grok_event(rc, rceb, event_size);

		if (needtofree == 1)
			kfree(rceb);

		itr += real_size;
		size -= real_size;
	}
}
EXPORT_SYMBOL_GPL(uwb_rc_neh_grok);
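
/*
 * Illustrative sketch only: a transport driver feeds the function
 * above from its receive completion path, and reports a dead device
 * with uwb_rc_neh_error() so pending commands fail immediately
 * instead of waiting for their timeouts.  my_rc_event_ready(), struct
 * my_rc and its fields are made up:
 *
 *	static void my_rc_event_ready(struct my_rc *hw, void *buf, size_t len)
 *	{
 *		if (hw->gone)
 *			uwb_rc_neh_error(hw->uwb_rc, -ENODEV);
 *		else
 *			uwb_rc_neh_grok(hw->uwb_rc, buf, len);
 *	}
 */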

/**
 * uwb_rc_neh_error - fail every pending handler with an error
 * @rc:    the radio controller
 * @error: negative errno reported to each handler's callback
 *
 * Used by the transport driver when the device goes away or the event
 * channel breaks: each pending handler is removed, its timer stopped
 * and its callback run with a NULL reply and @error as the size.
 */
void uwb_rc_neh_error(struct uwb_rc *rc, int error)
{
	struct uwb_rc_neh *neh;
	unsigned long flags;

	for (;;) {
		spin_lock_irqsave(&rc->neh_lock, flags);
		if (list_empty(&rc->neh_list)) {
			spin_unlock_irqrestore(&rc->neh_lock, flags);
			break;
		}
		neh = list_first_entry(&rc->neh_list, struct uwb_rc_neh, list_node);
		__uwb_rc_neh_rm(rc, neh);
		spin_unlock_irqrestore(&rc->neh_lock, flags);

		del_timer_sync(&neh->timer);
		uwb_rc_neh_cb(neh, NULL, error);
	}
}
EXPORT_SYMBOL_GPL(uwb_rc_neh_error);

/* Per-command timeout: complete the handler with -ETIMEDOUT. */
static void uwb_rc_neh_timer(struct timer_list *t)
{
	struct uwb_rc_neh *neh = from_timer(neh, t, timer);
	struct uwb_rc *rc = neh->rc;
	unsigned long flags;

	spin_lock_irqsave(&rc->neh_lock, flags);
	if (neh->completed) {
		spin_unlock_irqrestore(&rc->neh_lock, flags);
		return;
	}
	if (neh->context)
		__uwb_rc_neh_rm(rc, neh);
	else
		neh = NULL;	/* already removed by somebody else */
	spin_unlock_irqrestore(&rc->neh_lock, flags);

	if (neh)
		uwb_rc_neh_cb(neh, NULL, -ETIMEDOUT);
}

/* Initialize the neh machinery of a radio controller. */
void uwb_rc_neh_create(struct uwb_rc *rc)
{
	spin_lock_init(&rc->neh_lock);
	INIT_LIST_HEAD(&rc->neh_list);
	set_bit(0, rc->ctx_bm);		/* context 0 tags notifications */
	set_bit(0xff, rc->ctx_bm);	/* 0xff is never handed out either */
	rc->ctx_roll = 1;
}

/* Release all pending handlers when the radio controller goes away. */
void uwb_rc_neh_destroy(struct uwb_rc *rc)
{
	unsigned long flags;
	struct uwb_rc_neh *neh;

	for (;;) {
		spin_lock_irqsave(&rc->neh_lock, flags);
		if (list_empty(&rc->neh_list)) {
			spin_unlock_irqrestore(&rc->neh_lock, flags);
			break;
		}
		neh = list_first_entry(&rc->neh_list, struct uwb_rc_neh, list_node);
		__uwb_rc_neh_rm(rc, neh);
		spin_unlock_irqrestore(&rc->neh_lock, flags);

		del_timer_sync(&neh->timer);
		uwb_rc_neh_put(neh);
	}
}