1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84#include <linux/kernel.h>
85#include <linux/timer.h>
86#include <linux/slab.h>
87#include <linux/err.h>
88#include <linux/export.h>
89
90#include "uwb-internal.h"
91
92
93
94
95
96
97
98
99
100
101
102
/*
 * Notification/Event Handle: tracks one outstanding command sent to the
 * radio controller and the reply (event) expected back for it.
 *
 * Lifetime is refcounted: one reference is held by the submitter, one by
 * rc->neh_list while the handle is registered. Matching of an incoming
 * event is done on (evt_type, evt, context); see uwb_rc_neh_match().
 */
struct uwb_rc_neh {
	struct kref kref;		/* refcount; freed in uwb_rc_neh_release() */

	struct uwb_rc *rc;		/* owning radio controller */
	u8 evt_type;			/* expected bEventType of the reply */
	__le16 evt;			/* expected wEvent (little-endian) */
	u8 context;			/* context id from rc->ctx_bm; 0 = none */
	u8 completed;			/* set once the reply arrived; stops the timer path */
	uwb_rc_cmd_cb_f cb;		/* completion callback */
	void *arg;			/* opaque argument for @cb */

	struct timer_list timer;	/* command timeout (UWB_RC_CMD_TIMEOUT_MS) */
	struct list_head list_node;	/* link in rc->neh_list, under rc->neh_lock */
};
117
118static void uwb_rc_neh_timer(unsigned long arg);
119
120static void uwb_rc_neh_release(struct kref *kref)
121{
122 struct uwb_rc_neh *neh = container_of(kref, struct uwb_rc_neh, kref);
123
124 kfree(neh);
125}
126
/* Take a reference on a notification/event handle. */
static void uwb_rc_neh_get(struct uwb_rc_neh *neh)
{
	kref_get(&neh->kref);
}
131
132
133
134
135
/* Drop a reference; frees the handle when the count reaches zero. */
void uwb_rc_neh_put(struct uwb_rc_neh *neh)
{
	kref_put(&neh->kref, uwb_rc_neh_release);
}
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162static
163int __uwb_rc_ctx_get(struct uwb_rc *rc, struct uwb_rc_neh *neh)
164{
165 int result;
166 result = find_next_zero_bit(rc->ctx_bm, UWB_RC_CTX_MAX,
167 rc->ctx_roll++);
168 if (result < UWB_RC_CTX_MAX)
169 goto found;
170 result = find_first_zero_bit(rc->ctx_bm, UWB_RC_CTX_MAX);
171 if (result < UWB_RC_CTX_MAX)
172 goto found;
173 return -ENFILE;
174found:
175 set_bit(result, rc->ctx_bm);
176 neh->context = result;
177 return 0;
178}
179
180
181
182static
183void __uwb_rc_ctx_put(struct uwb_rc *rc, struct uwb_rc_neh *neh)
184{
185 struct device *dev = &rc->uwb_dev.dev;
186 if (neh->context == 0)
187 return;
188 if (test_bit(neh->context, rc->ctx_bm) == 0) {
189 dev_err(dev, "context %u not set in bitmap\n",
190 neh->context);
191 WARN_ON(1);
192 }
193 clear_bit(neh->context, rc->ctx_bm);
194 neh->context = 0;
195}
196
197
198
199
200
201
202
203
204
205
206
207
208
209struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd,
210 u8 expected_type, u16 expected_event,
211 uwb_rc_cmd_cb_f cb, void *arg)
212{
213 int result;
214 unsigned long flags;
215 struct device *dev = &rc->uwb_dev.dev;
216 struct uwb_rc_neh *neh;
217
218 neh = kzalloc(sizeof(*neh), GFP_KERNEL);
219 if (neh == NULL) {
220 result = -ENOMEM;
221 goto error_kzalloc;
222 }
223
224 kref_init(&neh->kref);
225 INIT_LIST_HEAD(&neh->list_node);
226 init_timer(&neh->timer);
227 neh->timer.function = uwb_rc_neh_timer;
228 neh->timer.data = (unsigned long)neh;
229
230 neh->rc = rc;
231 neh->evt_type = expected_type;
232 neh->evt = cpu_to_le16(expected_event);
233 neh->cb = cb;
234 neh->arg = arg;
235
236 spin_lock_irqsave(&rc->neh_lock, flags);
237 result = __uwb_rc_ctx_get(rc, neh);
238 if (result >= 0) {
239 cmd->bCommandContext = neh->context;
240 list_add_tail(&neh->list_node, &rc->neh_list);
241 uwb_rc_neh_get(neh);
242 }
243 spin_unlock_irqrestore(&rc->neh_lock, flags);
244 if (result < 0)
245 goto error_ctx_get;
246
247 return neh;
248
249error_ctx_get:
250 kfree(neh);
251error_kzalloc:
252 dev_err(dev, "cannot open handle to radio controller: %d\n", result);
253 return ERR_PTR(result);
254}
255
/*
 * Unregister @neh: release its context id and unlink it from rc->neh_list.
 * Caller must hold rc->neh_lock and still owes the list's reference a
 * uwb_rc_neh_put() (or a callback that drops it) after unlocking.
 */
static void __uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
{
	__uwb_rc_ctx_put(rc, neh);
	list_del(&neh->list_node);
}
261
262
263
264
265
266
267
268
269
/*
 * Remove a handle that will no longer get its reply (e.g. the command
 * submission itself failed). Unregisters under the lock, then waits for
 * any running timeout handler before dropping the list's reference.
 * The caller still holds (and must put) its own reference.
 */
void uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
{
	unsigned long flags;

	spin_lock_irqsave(&rc->neh_lock, flags);
	__uwb_rc_neh_rm(rc, neh);
	spin_unlock_irqrestore(&rc->neh_lock, flags);

	/* Must be outside the lock: the timer handler takes neh_lock too. */
	del_timer_sync(&neh->timer);
	uwb_rc_neh_put(neh);
}
281
282
283
284
285
286
287
288
289
/*
 * Start the command timeout for @neh, once the command has actually been
 * sent to the hardware. Only armed while the handle still owns a context
 * (i.e. has not already been removed/completed under the same lock).
 */
void uwb_rc_neh_arm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
{
	unsigned long flags;

	spin_lock_irqsave(&rc->neh_lock, flags);
	if (neh->context)
		mod_timer(&neh->timer,
			  jiffies + msecs_to_jiffies(UWB_RC_CMD_TIMEOUT_MS));
	spin_unlock_irqrestore(&rc->neh_lock, flags);
}
300
/*
 * Deliver the (possibly NULL, on error/timeout) reply to the submitter's
 * callback, then drop the reference that rc->neh_list used to own.
 */
static void uwb_rc_neh_cb(struct uwb_rc_neh *neh, struct uwb_rceb *rceb, size_t size)
{
	(*neh->cb)(neh->rc, neh->arg, rceb, size);
	uwb_rc_neh_put(neh);
}
306
307static bool uwb_rc_neh_match(struct uwb_rc_neh *neh, const struct uwb_rceb *rceb)
308{
309 return neh->evt_type == rceb->bEventType
310 && neh->evt == rceb->wEvent
311 && neh->context == rceb->bEventContext;
312}
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328static
329struct uwb_rc_neh *uwb_rc_neh_lookup(struct uwb_rc *rc,
330 const struct uwb_rceb *rceb)
331{
332 struct uwb_rc_neh *neh = NULL, *h;
333 unsigned long flags;
334
335 spin_lock_irqsave(&rc->neh_lock, flags);
336
337 list_for_each_entry(h, &rc->neh_list, list_node) {
338 if (uwb_rc_neh_match(h, rceb)) {
339 neh = h;
340 break;
341 }
342 }
343
344 if (neh)
345 __uwb_rc_neh_rm(rc, neh);
346
347 spin_unlock_irqrestore(&rc->neh_lock, flags);
348
349 return neh;
350}
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378static
379void uwb_rc_notif(struct uwb_rc *rc, struct uwb_rceb *rceb, ssize_t size)
380{
381 struct device *dev = &rc->uwb_dev.dev;
382 struct uwb_event *uwb_evt;
383
384 if (size == -ESHUTDOWN)
385 return;
386 if (size < 0) {
387 dev_err(dev, "ignoring event with error code %zu\n",
388 size);
389 return;
390 }
391
392 uwb_evt = kzalloc(sizeof(*uwb_evt), GFP_ATOMIC);
393 if (unlikely(uwb_evt == NULL)) {
394 dev_err(dev, "no memory to queue event 0x%02x/%04x/%02x\n",
395 rceb->bEventType, le16_to_cpu(rceb->wEvent),
396 rceb->bEventContext);
397 return;
398 }
399 uwb_evt->rc = __uwb_rc_get(rc);
400 uwb_evt->ts_jiffies = jiffies;
401 uwb_evt->type = UWB_EVT_TYPE_NOTIF;
402 uwb_evt->notif.size = size;
403 uwb_evt->notif.rceb = rceb;
404
405 uwbd_event_queue(uwb_evt);
406}
407
/*
 * Dispatch one parsed event: context 0 means an unsolicited notification
 * (copy it and queue for uwbd, since @rceb belongs to the caller's buffer);
 * a non-zero context means a command reply, delivered to the waiting
 * handle's callback. Events nobody is waiting for are logged and dropped.
 */
static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size_t size)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rc_neh *neh;
	struct uwb_rceb *notif;
	unsigned long flags;

	if (rceb->bEventContext == 0) {
		/* Private copy: uwb_rc_notif() takes ownership of @notif. */
		notif = kmalloc(size, GFP_ATOMIC);
		if (notif) {
			memcpy(notif, rceb, size);
			uwb_rc_notif(rc, notif, size);
		} else
			dev_err(dev, "event 0x%02x/%04x/%02x (%zu bytes): no memory\n",
				rceb->bEventType, le16_to_cpu(rceb->wEvent),
				rceb->bEventContext, size);
	} else {
		/* lookup unregisters the handle; its reference is dropped
		 * by uwb_rc_neh_cb() after the callback runs. */
		neh = uwb_rc_neh_lookup(rc, rceb);
		if (neh) {
			/* Mark completed under the lock so a concurrent
			 * timer handler bails out instead of double-firing
			 * the callback; plain del_timer() is then enough. */
			spin_lock_irqsave(&rc->neh_lock, flags);

			neh->completed = 1;
			del_timer(&neh->timer);
			spin_unlock_irqrestore(&rc->neh_lock, flags);
			uwb_rc_neh_cb(neh, rceb, size);
		} else
			dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n",
				 rceb->bEventType, le16_to_cpu(rceb->wEvent),
				 rceb->bEventContext, size);
	}
}
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
/*
 * Parse a raw buffer of one or more concatenated events from the radio
 * controller and dispatch each via uwb_rc_neh_grok_event().
 *
 * Per iteration: the optional rc->filter_event() hook may rewrite the
 * event in place or into a fresh allocation (needtofree == 1 -> we must
 * kfree the rewritten rceb after dispatch) and reports both the size
 * consumed from the buffer (real_size) and the size of the event to
 * dispatch (event_size). -ENOANO from the filter (or no filter at all)
 * means "not filtered": the size is then derived from the event-size
 * tables via uwb_est_find_size(). Truncated or unparseable data aborts
 * the loop; already-dispatched events stay dispatched.
 */
void uwb_rc_neh_grok(struct uwb_rc *rc, void *buf, size_t buf_size)
{
	struct device *dev = &rc->uwb_dev.dev;
	void *itr;
	struct uwb_rceb *rceb;
	size_t size, real_size, event_size;
	int needtofree;

	itr = buf;
	size = buf_size;
	while (size > 0) {
		if (size < sizeof(*rceb)) {
			dev_err(dev, "not enough data in event buffer to "
				"process incoming events (%zu left, minimum is "
				"%zu)\n", size, sizeof(*rceb));
			break;
		}

		rceb = itr;
		if (rc->filter_event) {
			/* NOTE(review): filter may replace rceb; original
			 * itr/size still drive buffer advance below. */
			needtofree = rc->filter_event(rc, &rceb, size,
						      &real_size, &event_size);
			if (needtofree < 0 && needtofree != -ENOANO) {
				dev_err(dev, "BUG: Unable to filter event "
					"(0x%02x/%04x/%02x) from "
					"device. \n", rceb->bEventType,
					le16_to_cpu(rceb->wEvent),
					rceb->bEventContext);
				break;
			}
		} else
			needtofree = -ENOANO;

		/* -ENOANO: not filtered; size the event from the EST tables. */
		if (needtofree == -ENOANO) {
			ssize_t ret = uwb_est_find_size(rc, rceb, size);
			if (ret < 0)
				break;
			if (ret > size) {
				dev_err(dev, "BUG: hw sent incomplete event "
					"0x%02x/%04x/%02x (%zd bytes), only got "
					"%zu bytes. We don't handle that.\n",
					rceb->bEventType, le16_to_cpu(rceb->wEvent),
					rceb->bEventContext, ret, size);
				break;
			}
			real_size = event_size = ret;
		}
		uwb_rc_neh_grok_event(rc, rceb, event_size);

		/* Filter allocated a private copy for us; release it. */
		if (needtofree == 1)
			kfree(rceb);

		itr += real_size;
		size -= real_size;
	}
}
EXPORT_SYMBOL_GPL(uwb_rc_neh_grok);
537EXPORT_SYMBOL_GPL(uwb_rc_neh_grok);
538
539
540
541
542
543
544
545
546
547
/*
 * Fail every outstanding handle with @error (e.g. the transport died).
 * Each handle is unregistered under the lock, then — with the lock
 * dropped, since the timer handler also takes it — its timer is synced
 * off and its callback invoked with a NULL reply and @error as size.
 */
void uwb_rc_neh_error(struct uwb_rc *rc, int error)
{
	struct uwb_rc_neh *neh;
	unsigned long flags;

	for (;;) {
		spin_lock_irqsave(&rc->neh_lock, flags);
		if (list_empty(&rc->neh_list)) {
			spin_unlock_irqrestore(&rc->neh_lock, flags);
			break;
		}
		neh = list_first_entry(&rc->neh_list, struct uwb_rc_neh, list_node);
		__uwb_rc_neh_rm(rc, neh);
		spin_unlock_irqrestore(&rc->neh_lock, flags);

		del_timer_sync(&neh->timer);
		/* Drops the list's reference taken at uwb_rc_neh_add() time. */
		uwb_rc_neh_cb(neh, NULL, error);
	}
}
EXPORT_SYMBOL_GPL(uwb_rc_neh_error);
567EXPORT_SYMBOL_GPL(uwb_rc_neh_error);
568
569
/*
 * Command timeout handler. Under rc->neh_lock, three cases:
 *  - the reply arrived concurrently (completed set): do nothing;
 *  - the handle still owns a context: unregister it and, after unlocking,
 *    fail the command with -ETIMEDOUT;
 *  - no context (someone else already unregistered it): do nothing.
 */
static void uwb_rc_neh_timer(unsigned long arg)
{
	struct uwb_rc_neh *neh = (struct uwb_rc_neh *)arg;
	struct uwb_rc *rc = neh->rc;
	unsigned long flags;

	spin_lock_irqsave(&rc->neh_lock, flags);
	if (neh->completed) {
		spin_unlock_irqrestore(&rc->neh_lock, flags);
		return;
	}
	if (neh->context)
		__uwb_rc_neh_rm(rc, neh);
	else
		neh = NULL;	/* already removed; not ours to fail */
	spin_unlock_irqrestore(&rc->neh_lock, flags);

	if (neh)
		uwb_rc_neh_cb(neh, NULL, -ETIMEDOUT);
}
590
591
592
/* Initialize the radio controller's neh machinery. */
void uwb_rc_neh_create(struct uwb_rc *rc)
{
	spin_lock_init(&rc->neh_lock);
	INIT_LIST_HEAD(&rc->neh_list);
	set_bit(0, rc->ctx_bm);		/* 0 reserved: "no context" / notifications */
	set_bit(0xff, rc->ctx_bm);	/* 0xff reserved — TODO confirm: likely a device-reserved value */
	rc->ctx_roll = 1;		/* allocation cursor starts past the reserved 0 */
}
601
602
603
/*
 * Tear down the neh machinery: drain rc->neh_list, syncing each handle's
 * timer off with the lock dropped (the timer handler takes neh_lock) and
 * releasing the list's reference. Unlike uwb_rc_neh_error(), callbacks
 * are NOT invoked here.
 */
void uwb_rc_neh_destroy(struct uwb_rc *rc)
{
	unsigned long flags;
	struct uwb_rc_neh *neh;

	for (;;) {
		spin_lock_irqsave(&rc->neh_lock, flags);
		if (list_empty(&rc->neh_list)) {
			spin_unlock_irqrestore(&rc->neh_lock, flags);
			break;
		}
		neh = list_first_entry(&rc->neh_list, struct uwb_rc_neh, list_node);
		__uwb_rc_neh_rm(rc, neh);
		spin_unlock_irqrestore(&rc->neh_lock, flags);

		del_timer_sync(&neh->timer);
		uwb_rc_neh_put(neh);
	}
}
623