1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21#include <linux/kthread.h>
22#include <linux/freezer.h>
23#include <linux/slab.h>
24#include <linux/delay.h>
25#include "uwb-internal.h"
26
27
28
/*
 * Actions taken to resolve a DRP conflict between one of our
 * reservations and an external DRP IE.
 *
 * NOTE(review): the numbering appears to follow the DRP conflict
 * resolution procedure of the ECMA-368/WUSB specification — confirm
 * the exact clause before relying on these descriptions.
 */
enum uwb_drp_conflict_action {
	/* Keep the reservation unchanged; no corrective action needed. */
	UWB_DRP_CONFLICT_MANTAIN = 0,

	/*
	 * Yield to an external alien-BP reservation: returned by
	 * evaluate_conflict_action() when the external IE is of type
	 * UWB_DRP_TYPE_ALIEN_BP (and ours is not).
	 */
	UWB_DRP_CONFLICT_ACT1,

	/*
	 * We lose the conflict while our reservation is not yet
	 * established (status 0): owners move the reservation
	 * (O_TO_BE_MOVED), targets enter the conflict state — see
	 * handle_conflict_normal()/handle_conflict_expanding().
	 */
	UWB_DRP_CONFLICT_ACT2,

	/*
	 * We lose the conflict with an already-established reservation
	 * (status 1): owners shrink the reservation (O_MODIFIED) and
	 * back off, targets enter the conflict state.
	 */
	UWB_DRP_CONFLICT_ACT3,
};
56
57
58static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
59 struct uwb_rceb *reply, ssize_t reply_size)
60{
61 struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply;
62
63 if (r != NULL) {
64 if (r->bResultCode != UWB_RC_RES_SUCCESS)
65 dev_err(&rc->uwb_dev.dev, "SET-DRP-IE failed: %s (%d)\n",
66 uwb_rc_strerror(r->bResultCode), r->bResultCode);
67 } else
68 dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n");
69
70 spin_lock_bh(&rc->rsvs_lock);
71 if (rc->set_drp_ie_pending > 1) {
72 rc->set_drp_ie_pending = 0;
73 uwb_rsv_queue_update(rc);
74 } else {
75 rc->set_drp_ie_pending = 0;
76 }
77 spin_unlock_bh(&rc->rsvs_lock);
78}
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
/**
 * uwb_rc_send_all_drp_ie - construct and send the SET-DRP-IE command
 * @rc: the radio controller
 *
 * Builds a single SET-DRP-IE command containing the local DRP
 * Availability IE followed by the DRP IE of every reservation on
 * @rc->reservations (plus its companion DRP IE when the reservation
 * is being moved/expanded), and submits it asynchronously; the reply
 * is handled by uwb_rc_set_drp_cmd_done().
 *
 * Returns the result of uwb_rc_cmd_async(), or -ENOMEM if the command
 * buffer could not be allocated.
 *
 * NOTE(review): set_drp_ie_pending is set to 1 even when
 * uwb_rc_cmd_async() fails, in which case the completion callback
 * that clears it may never run — confirm the flag cannot get stuck.
 */
int uwb_rc_send_all_drp_ie(struct uwb_rc *rc)
{
	int result;
	struct uwb_rc_cmd_set_drp_ie *cmd;
	struct uwb_rsv *rsv;
	struct uwb_rsv_move *mv;
	int num_bytes = 0;
	u8 *IEDataptr;

	result = -ENOMEM;
	/* First pass: size the IE payload.  Each IE occupies its
	 * payload length plus the 2-byte IE header. */
	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->drp_ie != NULL) {
			num_bytes += rsv->drp_ie->hdr.length + 2;
			if (uwb_rsv_has_two_drp_ies(rsv) &&
			    (rsv->mv.companion_drp_ie != NULL)) {
				mv = &rsv->mv;
				num_bytes += mv->companion_drp_ie->hdr.length + 2;
			}
		}
	}
	num_bytes += sizeof(rc->drp_avail.ie);
	cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL);
	if (cmd == NULL)
		goto error;
	cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
	cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_DRP_IE);
	/* NOTE(review): no cpu_to_le16() here although wCommand above
	 * gets one — confirm wIELength endianness on big-endian hosts. */
	cmd->wIELength = num_bytes;
	IEDataptr = (u8 *)&cmd->IEData[0];

	/* The DRP Availability IE goes first... */
	memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie));
	IEDataptr += sizeof(struct uwb_ie_drp_avail);

	/* ...followed by the DRP IE(s) of each reservation. */
	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->drp_ie != NULL) {
			memcpy(IEDataptr, rsv->drp_ie,
			       rsv->drp_ie->hdr.length + 2);
			IEDataptr += rsv->drp_ie->hdr.length + 2;

			if (uwb_rsv_has_two_drp_ies(rsv) &&
			    (rsv->mv.companion_drp_ie != NULL)) {
				mv = &rsv->mv;
				memcpy(IEDataptr, mv->companion_drp_ie,
				       mv->companion_drp_ie->hdr.length + 2);
				IEDataptr += mv->companion_drp_ie->hdr.length + 2;
			}
		}
	}

	result = uwb_rc_cmd_async(rc, "SET-DRP-IE", &cmd->rccb, sizeof(*cmd) + num_bytes,
				  UWB_RC_CET_GENERAL, UWB_RC_CMD_SET_DRP_IE,
				  uwb_rc_set_drp_cmd_done, NULL);

	rc->set_drp_ie_pending = 1;

	/* The command payload was copied for transmission; the local
	 * buffer can be freed. */
	kfree(cmd);
error:
	return result;
}
162
163
164
165
166
167
168static int evaluate_conflict_action(struct uwb_ie_drp *ext_drp_ie, int ext_beacon_slot,
169 struct uwb_rsv *rsv, int our_status)
170{
171 int our_tie_breaker = rsv->tiebreaker;
172 int our_type = rsv->type;
173 int our_beacon_slot = rsv->rc->uwb_dev.beacon_slot;
174
175 int ext_tie_breaker = uwb_ie_drp_tiebreaker(ext_drp_ie);
176 int ext_status = uwb_ie_drp_status(ext_drp_ie);
177 int ext_type = uwb_ie_drp_type(ext_drp_ie);
178
179
180
181 if (ext_type == UWB_DRP_TYPE_PCA && our_type == UWB_DRP_TYPE_PCA) {
182 return UWB_DRP_CONFLICT_MANTAIN;
183 }
184
185
186 if (our_type == UWB_DRP_TYPE_ALIEN_BP) {
187 return UWB_DRP_CONFLICT_MANTAIN;
188 }
189
190
191 if (ext_type == UWB_DRP_TYPE_ALIEN_BP) {
192
193 return UWB_DRP_CONFLICT_ACT1;
194 }
195
196
197 if (our_status == 0 && ext_status == 1) {
198 return UWB_DRP_CONFLICT_ACT2;
199 }
200
201
202 if (our_status == 1 && ext_status == 0) {
203 return UWB_DRP_CONFLICT_MANTAIN;
204 }
205
206
207 if (our_tie_breaker == ext_tie_breaker &&
208 our_beacon_slot < ext_beacon_slot) {
209 return UWB_DRP_CONFLICT_MANTAIN;
210 }
211
212
213 if (our_tie_breaker != ext_tie_breaker &&
214 our_beacon_slot > ext_beacon_slot) {
215 return UWB_DRP_CONFLICT_MANTAIN;
216 }
217
218 if (our_status == 0) {
219 if (our_tie_breaker == ext_tie_breaker) {
220
221 if (our_beacon_slot > ext_beacon_slot) {
222 return UWB_DRP_CONFLICT_ACT2;
223 }
224 } else {
225
226 if (our_beacon_slot < ext_beacon_slot) {
227 return UWB_DRP_CONFLICT_ACT2;
228 }
229 }
230 } else {
231 if (our_tie_breaker == ext_tie_breaker) {
232
233 if (our_beacon_slot > ext_beacon_slot) {
234 return UWB_DRP_CONFLICT_ACT3;
235 }
236 } else {
237
238 if (our_beacon_slot < ext_beacon_slot) {
239 return UWB_DRP_CONFLICT_ACT3;
240 }
241 }
242 }
243 return UWB_DRP_CONFLICT_MANTAIN;
244}
245
/*
 * Resolve a conflict hitting the main MAS set of a (non-expanding)
 * reservation, applying the action chosen by
 * evaluate_conflict_action().
 */
static void handle_conflict_normal(struct uwb_ie_drp *drp_ie,
				   int ext_beacon_slot,
				   struct uwb_rsv *rsv,
				   struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_rsv_move *mv = &rsv->mv;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	int action;

	action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, uwb_rsv_status(rsv));

	if (uwb_rsv_is_owner(rsv)) {
		switch(action) {
		case UWB_DRP_CONFLICT_ACT2:
			/* try move */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_TO_BE_MOVED);
			if (bow->can_reserve_extra_mases == false)
				uwb_rsv_backoff_win_increment(rc);

			break;
		case UWB_DRP_CONFLICT_ACT3:
			uwb_rsv_backoff_win_increment(rc);
			/* drop some mases with reason modified: put the
			 * mases to be dropped in the companion bitmap */
			bitmap_and(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
			/* fall through */
		default:
			break;
		}
	} else {
		switch(action) {
		case UWB_DRP_CONFLICT_ACT2:
		case UWB_DRP_CONFLICT_ACT3:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
			/* fall through */
		default:
			break;
		}

	}

}
288
/*
 * Resolve a conflict involving a reservation that is being expanded
 * (it has a companion DRP IE).  @companion_only is true when only the
 * companion (expansion) MASs conflict; otherwise the base part of the
 * reservation conflicts too.
 */
static void handle_conflict_expanding(struct uwb_ie_drp *drp_ie, int ext_beacon_slot,
				      struct uwb_rsv *rsv, bool companion_only,
				      struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct uwb_rsv_move *mv = &rsv->mv;
	int action;

	if (companion_only) {
		/* status of the companion is 0 at this point */
		action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, 0);
		if (uwb_rsv_is_owner(rsv)) {
			switch(action) {
			case UWB_DRP_CONFLICT_ACT2:
			case UWB_DRP_CONFLICT_ACT3:
				/* give up the expansion: back to the
				 * established base reservation and release
				 * the companion MASs */
				uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
				rsv->needs_release_companion_mas = false;
				if (bow->can_reserve_extra_mases == false)
					uwb_rsv_backoff_win_increment(rc);
				uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
			}
		} else { /* rsv is a target */
			switch(action) {
			case UWB_DRP_CONFLICT_ACT2:
			case UWB_DRP_CONFLICT_ACT3:
				uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_CONFLICT);
				/* no break needed: last case of the switch */
			}
		}
	} else { /* the base part of the reservation conflicts too */
		if (uwb_rsv_is_owner(rsv)) {
			uwb_rsv_backoff_win_increment(rc);
			/* remove the companion part */
			uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);

			/* NOTE(review): the companion bitmap here receives the
			 * base MASs that do NOT conflict (andnot), whereas
			 * handle_conflict_normal() stores the conflicting ones
			 * (and) — confirm this asymmetry is intentional. */
			bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
		} else { /* it is a target rsv */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);

		}
	}
}
336
337static void uwb_drp_handle_conflict_rsv(struct uwb_rc *rc, struct uwb_rsv *rsv,
338 struct uwb_rc_evt_drp *drp_evt,
339 struct uwb_ie_drp *drp_ie,
340 struct uwb_mas_bm *conflicting_mas)
341{
342 struct uwb_rsv_move *mv;
343
344
345 if (uwb_rsv_has_two_drp_ies(rsv)) {
346 mv = &rsv->mv;
347 if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) {
348 handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number,
349 rsv, false, conflicting_mas);
350 } else {
351 if (bitmap_intersects(mv->companion_mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) {
352 handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number,
353 rsv, true, conflicting_mas);
354 }
355 }
356 } else if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) {
357 handle_conflict_normal(drp_ie, drp_evt->beacon_slot_number, rsv, conflicting_mas);
358 }
359}
360
361static void uwb_drp_handle_all_conflict_rsv(struct uwb_rc *rc,
362 struct uwb_rc_evt_drp *drp_evt,
363 struct uwb_ie_drp *drp_ie,
364 struct uwb_mas_bm *conflicting_mas)
365{
366 struct uwb_rsv *rsv;
367
368 list_for_each_entry(rsv, &rc->reservations, rc_node) {
369 uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, conflicting_mas);
370 }
371}
372
373
374
375
376
/*
 * Based on the DRP IE sent by the owner, transition a target
 * reservation to a new state.
 */
static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv,
				   struct uwb_ie_drp *drp_ie, struct uwb_rc_evt_drp *drp_evt)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rsv_move *mv = &rsv->mv;
	int status;
	enum uwb_drp_reason reason_code;
	struct uwb_mas_bm mas;

	status = uwb_ie_drp_status(drp_ie);
	reason_code = uwb_ie_drp_reason_code(drp_ie);
	uwb_drp_ie_to_bm(&mas, drp_ie);

	switch (reason_code) {
	case UWB_DRP_REASON_ACCEPTED:
		/* NOTE(review): re-entering the same state presumably
		 * refreshes the reservation's housekeeping (timer) —
		 * confirm in uwb_rsv_set_state(). */
		if (rsv->state == UWB_RSV_STATE_T_CONFLICT) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
			break;
		}

		if (rsv->state == UWB_RSV_STATE_T_EXPANDING_ACCEPTED) {
			/* drp_ie is the companion IE */
			if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS))
				/* stroke the companion */
				uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
		} else {
			if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) {
				if (uwb_drp_avail_reserve_pending(rc, &mas) == -EBUSY) {
					/* FIXME: there is a conflict, find
					 * the conflicting reservations and
					 * take a sensible action. Consider
					 * that in drp_ie there is the
					 * "neighbour" */
					uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas);
				} else {
					/* accept the extra reservation */
					bitmap_copy(mv->companion_mas.bm, mas.bm, UWB_NUM_MAS);
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
				}
			} else {
				if (status) {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
				}
			}

		}
		break;

	case UWB_DRP_REASON_MODIFIED:
		/* check to see if we have already modified the reservation */
		if (bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
			break;
		}

		/* find out if the owner wants to expand or reduce */
		if (bitmap_subset(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
			/* owner is reducing: release the dropped MASs */
			bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mas.bm, UWB_NUM_MAS);
			uwb_drp_avail_release(rsv->rc, &mv->companion_mas);
		}

		bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS);
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_RESIZED);
		break;
	default:
		dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
			 reason_code, status);
	}
}
448
449
450
451
452
/*
 * Based on the DRP IE sent by the target, transition an owner
 * reservation to a new state.
 */
static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv,
				  struct uwb_dev *src, struct uwb_ie_drp *drp_ie,
				  struct uwb_rc_evt_drp *drp_evt)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rsv_move *mv = &rsv->mv;
	int status;
	enum uwb_drp_reason reason_code;
	struct uwb_mas_bm mas;

	status = uwb_ie_drp_status(drp_ie);
	reason_code = uwb_ie_drp_reason_code(drp_ie);
	uwb_drp_ie_to_bm(&mas, drp_ie);

	if (status) {
		switch (reason_code) {
		case UWB_DRP_REASON_ACCEPTED:
			switch (rsv->state) {
			case UWB_RSV_STATE_O_PENDING:
			case UWB_RSV_STATE_O_INITIATED:
			case UWB_RSV_STATE_O_ESTABLISHED:
				uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
				break;
			case UWB_RSV_STATE_O_MODIFIED:
				/* established once the target's MAS set
				 * matches our modified one */
				if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
				} else {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
				}
				break;

			case UWB_RSV_STATE_O_MOVE_REDUCING:
				/* reduction is done once the target matches
				 * our reduced MAS set */
				if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
				} else {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
				}
				break;
			case UWB_RSV_STATE_O_MOVE_EXPANDING:
				if (bitmap_equal(mas.bm, mv->companion_mas.bm, UWB_NUM_MAS)) {
					/* companion reservation accepted */
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
				} else {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
				}
				break;
			case UWB_RSV_STATE_O_MOVE_COMBINING:
				/* combination is done once the target matches
				 * the combined MAS set */
				if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS))
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
				else
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
				break;
			default:
				break;
			}
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	} else {
		switch (reason_code) {
		case UWB_DRP_REASON_PENDING:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_PENDING);
			break;
		case UWB_DRP_REASON_DENIED:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
			break;
		case UWB_DRP_REASON_CONFLICT:
			/* resolve the conflict: the complement of the
			 * target's last announced availability is the set of
			 * MASs it considers busy */
			bitmap_complement(mas.bm, src->last_availability_bm,
					  UWB_NUM_MAS);
			uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, &mas);
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	}
}
533
534static void uwb_cnflt_alien_stroke_timer(struct uwb_cnflt_alien *cnflt)
535{
536 unsigned timeout_us = UWB_MAX_LOST_BEACONS * UWB_SUPERFRAME_LENGTH_US;
537 mod_timer(&cnflt->timer, jiffies + usecs_to_jiffies(timeout_us));
538}
539
/*
 * Workqueue callback run when an alien BP conflict entry expires (see
 * uwb_cnflt_timer()): remove the entry, recompute the global
 * conflicting-MAS bitmap from the remaining entries and schedule the
 * alien-BP reservation update work.
 */
static void uwb_cnflt_update_work(struct work_struct *work)
{
	struct uwb_cnflt_alien *cnflt = container_of(work,
						     struct uwb_cnflt_alien,
						     cnflt_update_work);
	struct uwb_cnflt_alien *c;
	struct uwb_rc *rc = cnflt->rc;

	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;

	mutex_lock(&rc->rsvs_mutex);

	list_del(&cnflt->rc_node);

	/* update the rc global conflicting alien bitmap */
	bitmap_zero(rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS);

	list_for_each_entry(c, &rc->cnflt_alien_list, rc_node) {
		bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, c->mas.bm, UWB_NUM_MAS);
	}

	queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us));

	/* the entry is off the list and its timer has fired: free it */
	kfree(cnflt);
	mutex_unlock(&rc->rsvs_mutex);
}
566
567static void uwb_cnflt_timer(unsigned long arg)
568{
569 struct uwb_cnflt_alien *cnflt = (struct uwb_cnflt_alien *)arg;
570
571 queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work);
572}
573
574
575
576
577
578static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
579{
580 struct device *dev = &rc->uwb_dev.dev;
581 struct uwb_mas_bm mas;
582 struct uwb_cnflt_alien *cnflt;
583 char buf[72];
584 unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;
585
586 uwb_drp_ie_to_bm(&mas, drp_ie);
587 bitmap_scnprintf(buf, sizeof(buf), mas.bm, UWB_NUM_MAS);
588
589 list_for_each_entry(cnflt, &rc->cnflt_alien_list, rc_node) {
590 if (bitmap_equal(cnflt->mas.bm, mas.bm, UWB_NUM_MAS)) {
591
592
593 uwb_cnflt_alien_stroke_timer(cnflt);
594 return;
595 }
596 }
597
598
599
600
601 cnflt = kzalloc(sizeof(struct uwb_cnflt_alien), GFP_KERNEL);
602 if (!cnflt)
603 dev_err(dev, "failed to alloc uwb_cnflt_alien struct\n");
604 INIT_LIST_HEAD(&cnflt->rc_node);
605 init_timer(&cnflt->timer);
606 cnflt->timer.function = uwb_cnflt_timer;
607 cnflt->timer.data = (unsigned long)cnflt;
608
609 cnflt->rc = rc;
610 INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work);
611
612 bitmap_copy(cnflt->mas.bm, mas.bm, UWB_NUM_MAS);
613
614 list_add_tail(&cnflt->rc_node, &rc->cnflt_alien_list);
615
616
617 bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, mas.bm, UWB_NUM_MAS);
618
619 queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us));
620
621
622 uwb_cnflt_alien_stroke_timer(cnflt);
623}
624
625static void uwb_drp_process_not_involved(struct uwb_rc *rc,
626 struct uwb_rc_evt_drp *drp_evt,
627 struct uwb_ie_drp *drp_ie)
628{
629 struct uwb_mas_bm mas;
630
631 uwb_drp_ie_to_bm(&mas, drp_ie);
632 uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas);
633}
634
635static void uwb_drp_process_involved(struct uwb_rc *rc, struct uwb_dev *src,
636 struct uwb_rc_evt_drp *drp_evt,
637 struct uwb_ie_drp *drp_ie)
638{
639 struct uwb_rsv *rsv;
640
641 rsv = uwb_rsv_find(rc, src, drp_ie);
642 if (!rsv) {
643
644
645
646
647
648 return;
649 }
650
651
652
653
654
655 if (rsv->state == UWB_RSV_STATE_NONE) {
656 uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
657 return;
658 }
659
660 if (uwb_ie_drp_owner(drp_ie))
661 uwb_drp_process_target(rc, rsv, drp_ie, drp_evt);
662 else
663 uwb_drp_process_owner(rc, rsv, src, drp_ie, drp_evt);
664
665}
666
667
668static bool uwb_drp_involves_us(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
669{
670 return uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, &drp_ie->dev_addr) == 0;
671}
672
673
674
675
676static void uwb_drp_process(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
677 struct uwb_dev *src, struct uwb_ie_drp *drp_ie)
678{
679 if (uwb_ie_drp_type(drp_ie) == UWB_DRP_TYPE_ALIEN_BP)
680 uwb_drp_handle_alien_drp(rc, drp_ie);
681 else if (uwb_drp_involves_us(rc, drp_ie))
682 uwb_drp_process_involved(rc, src, drp_evt, drp_ie);
683 else
684 uwb_drp_process_not_involved(rc, drp_evt, drp_ie);
685}
686
687
688
689
/*
 * Record the DRP Availability bitmap announced by a neighbour device.
 * It is consulted later during conflict resolution (see the
 * UWB_DRP_REASON_CONFLICT case in uwb_drp_process_owner()).
 */
static void uwb_drp_availability_process(struct uwb_rc *rc, struct uwb_dev *src,
					 struct uwb_ie_drp_avail *drp_availability_ie)
{
	bitmap_copy(src->last_availability_bm,
		    drp_availability_ie->bmp, UWB_NUM_MAS);
}
696
697
698
699
700
701static
702void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
703 size_t ielen, struct uwb_dev *src_dev)
704{
705 struct device *dev = &rc->uwb_dev.dev;
706 struct uwb_ie_hdr *ie_hdr;
707 void *ptr;
708
709 ptr = drp_evt->ie_data;
710 for (;;) {
711 ie_hdr = uwb_ie_next(&ptr, &ielen);
712 if (!ie_hdr)
713 break;
714
715 switch (ie_hdr->element_id) {
716 case UWB_IE_DRP_AVAILABILITY:
717 uwb_drp_availability_process(rc, src_dev, (struct uwb_ie_drp_avail *)ie_hdr);
718 break;
719 case UWB_IE_DRP:
720 uwb_drp_process(rc, drp_evt, src_dev, (struct uwb_ie_drp *)ie_hdr);
721 break;
722 default:
723 dev_warn(dev, "unexpected IE in DRP notification\n");
724 break;
725 }
726 }
727
728 if (ielen > 0)
729 dev_warn(dev, "%d octets remaining in DRP notification\n",
730 (int)ielen);
731}
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765int uwbd_evt_handle_rc_drp(struct uwb_event *evt)
766{
767 struct device *dev = &evt->rc->uwb_dev.dev;
768 struct uwb_rc *rc = evt->rc;
769 struct uwb_rc_evt_drp *drp_evt;
770 size_t ielength, bytes_left;
771 struct uwb_dev_addr src_addr;
772 struct uwb_dev *src_dev;
773
774
775
776 if (evt->notif.size < sizeof(*drp_evt)) {
777 dev_err(dev, "DRP event: Not enough data to decode event "
778 "[%zu bytes left, %zu needed]\n",
779 evt->notif.size, sizeof(*drp_evt));
780 return 0;
781 }
782 bytes_left = evt->notif.size - sizeof(*drp_evt);
783 drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp, rceb);
784 ielength = le16_to_cpu(drp_evt->ie_length);
785 if (bytes_left != ielength) {
786 dev_err(dev, "DRP event: Not enough data in payload [%zu"
787 "bytes left, %zu declared in the event]\n",
788 bytes_left, ielength);
789 return 0;
790 }
791
792 memcpy(src_addr.data, &drp_evt->src_addr, sizeof(src_addr));
793 src_dev = uwb_dev_get_by_devaddr(rc, &src_addr);
794 if (!src_dev) {
795
796
797
798
799
800
801
802
803 return 0;
804 }
805
806 mutex_lock(&rc->rsvs_mutex);
807
808
809 uwb_drp_process_all(rc, drp_evt, ielength, src_dev);
810
811 mutex_unlock(&rc->rsvs_mutex);
812
813 uwb_dev_put(src_dev);
814 return 0;
815}
816