1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21#include <linux/kthread.h>
22#include <linux/freezer.h>
23#include <linux/slab.h>
24#include <linux/delay.h>
25#include "uwb-internal.h"
26
27
28
/*
 * Actions to take when a DRP IE received in a beacon conflicts with one
 * of our reservations (see evaluate_conflict_action() and the
 * handle_conflict_*() helpers below for how each action is applied).
 */
enum uwb_drp_conflict_action {
	/* Reservation is maintained, no action needed */
	UWB_DRP_CONFLICT_MANTAIN = 0,

	/*
	 * Conflict with an alien BP reservation: we must yield.
	 * NOTE(review): in this file ACT1 is only ever returned for
	 * ext_type == UWB_DRP_TYPE_ALIEN_BP; the concrete handling is
	 * done by the callers' switch statements.
	 */
	UWB_DRP_CONFLICT_ACT1,

	/*
	 * Our reservation is not yet established (status 0) and loses
	 * the tie-break: owners move/back off the reservation, targets
	 * enter the conflict state.
	 */
	UWB_DRP_CONFLICT_ACT2,

	/*
	 * Our established (status 1) reservation loses the tie-break:
	 * owners shrink the reservation to the non-conflicting MASs and
	 * increment the backoff window, targets enter the conflict
	 * state.
	 */
	UWB_DRP_CONFLICT_ACT3,
};
56
57
58static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
59 struct uwb_rceb *reply, ssize_t reply_size)
60{
61 struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply;
62 unsigned long flags;
63
64 if (r != NULL) {
65 if (r->bResultCode != UWB_RC_RES_SUCCESS)
66 dev_err(&rc->uwb_dev.dev, "SET-DRP-IE failed: %s (%d)\n",
67 uwb_rc_strerror(r->bResultCode), r->bResultCode);
68 } else
69 dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n");
70
71 spin_lock_irqsave(&rc->rsvs_lock, flags);
72 if (rc->set_drp_ie_pending > 1) {
73 rc->set_drp_ie_pending = 0;
74 uwb_rsv_queue_update(rc);
75 } else {
76 rc->set_drp_ie_pending = 0;
77 }
78 spin_unlock_irqrestore(&rc->rsvs_lock, flags);
79}
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101int uwb_rc_send_all_drp_ie(struct uwb_rc *rc)
102{
103 int result;
104 struct uwb_rc_cmd_set_drp_ie *cmd;
105 struct uwb_rsv *rsv;
106 struct uwb_rsv_move *mv;
107 int num_bytes = 0;
108 u8 *IEDataptr;
109
110 result = -ENOMEM;
111
112 list_for_each_entry(rsv, &rc->reservations, rc_node) {
113 if (rsv->drp_ie != NULL) {
114 num_bytes += rsv->drp_ie->hdr.length + 2;
115 if (uwb_rsv_has_two_drp_ies(rsv) &&
116 (rsv->mv.companion_drp_ie != NULL)) {
117 mv = &rsv->mv;
118 num_bytes +=
119 mv->companion_drp_ie->hdr.length + 2;
120 }
121 }
122 }
123 num_bytes += sizeof(rc->drp_avail.ie);
124 cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL);
125 if (cmd == NULL)
126 goto error;
127 cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
128 cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_DRP_IE);
129 cmd->wIELength = num_bytes;
130 IEDataptr = (u8 *)&cmd->IEData[0];
131
132
133
134 memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie));
135 IEDataptr += sizeof(struct uwb_ie_drp_avail);
136
137
138 list_for_each_entry(rsv, &rc->reservations, rc_node) {
139 if (rsv->drp_ie != NULL) {
140 memcpy(IEDataptr, rsv->drp_ie,
141 rsv->drp_ie->hdr.length + 2);
142 IEDataptr += rsv->drp_ie->hdr.length + 2;
143
144 if (uwb_rsv_has_two_drp_ies(rsv) &&
145 (rsv->mv.companion_drp_ie != NULL)) {
146 mv = &rsv->mv;
147 memcpy(IEDataptr, mv->companion_drp_ie,
148 mv->companion_drp_ie->hdr.length + 2);
149 IEDataptr +=
150 mv->companion_drp_ie->hdr.length + 2;
151 }
152 }
153 }
154
155 result = uwb_rc_cmd_async(rc, "SET-DRP-IE",
156 &cmd->rccb, sizeof(*cmd) + num_bytes,
157 UWB_RC_CET_GENERAL, UWB_RC_CMD_SET_DRP_IE,
158 uwb_rc_set_drp_cmd_done, NULL);
159
160 rc->set_drp_ie_pending = 1;
161
162 kfree(cmd);
163error:
164 return result;
165}
166
167
168
169
170
171
/*
 * Decide which conflict action to take for our reservation @rsv against
 * an external DRP IE (@ext_drp_ie) heard in beacon slot
 * @ext_beacon_slot.  @our_status is the reservation status bit we are
 * currently advertising (passed explicitly because, when only the move
 * companion conflicts, the caller evaluates with status 0).
 *
 * The rule ordering below implements the WiMedia/ECMA-368 DRP
 * conflict-resolution procedure -- NOTE(review): mapping of each branch
 * to the spec's numbered rules not verified here.
 */
static int evaluate_conflict_action(struct uwb_ie_drp *ext_drp_ie, int ext_beacon_slot,
				    struct uwb_rsv *rsv, int our_status)
{
	int our_tie_breaker = rsv->tiebreaker;
	int our_type = rsv->type;
	int our_beacon_slot = rsv->rc->uwb_dev.beacon_slot;

	int ext_tie_breaker = uwb_ie_drp_tiebreaker(ext_drp_ie);
	int ext_status = uwb_ie_drp_status(ext_drp_ie);
	int ext_type = uwb_ie_drp_type(ext_drp_ie);

	/* Two PCA reservations may share the same MASs. */
	if (ext_type == UWB_DRP_TYPE_PCA && our_type == UWB_DRP_TYPE_PCA) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* Our alien-BP reservation is never moved. */
	if (our_type == UWB_DRP_TYPE_ALIEN_BP) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* An external alien-BP reservation always wins. */
	if (ext_type == UWB_DRP_TYPE_ALIEN_BP) {
		/* here we know our_type != UWB_DRP_TYPE_ALIEN_BP */
		return UWB_DRP_CONFLICT_ACT1;
	}

	/* An established external reservation beats our proposed one. */
	if (our_status == 0 && ext_status == 1) {
		return UWB_DRP_CONFLICT_ACT2;
	}

	/* Our established reservation beats an external proposed one. */
	if (our_status == 1 && ext_status == 0) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* Equal status: tie-break on the tie-breaker bit and beacon
	 * slot order.  Same tie-breaker: earlier beacon slot wins. */
	if (our_tie_breaker == ext_tie_breaker &&
	    our_beacon_slot < ext_beacon_slot) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* Different tie-breaker: later beacon slot wins. */
	if (our_tie_breaker != ext_tie_breaker &&
	    our_beacon_slot > ext_beacon_slot) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	if (our_status == 0) {
		if (our_tie_breaker == ext_tie_breaker) {
			/* Lost the tie-break with a proposed reservation. */
			if (our_beacon_slot > ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT2;
			}
		} else  {
			if (our_beacon_slot < ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT2;
			}
		}
	} else {
		if (our_tie_breaker == ext_tie_breaker) {
			/* Lost the tie-break with an established reservation. */
			if (our_beacon_slot > ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT3;
			}
		} else {
			if (our_beacon_slot < ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT3;
			}
		}
	}
	return UWB_DRP_CONFLICT_MANTAIN;
}
249
/*
 * Resolve a conflict between a non-expanding reservation @rsv and an
 * external DRP IE whose MASs (@conflicting_mas) intersect ours.
 * Owners move or shrink the reservation; targets simply enter the
 * conflict state and let the owner sort it out.
 */
static void handle_conflict_normal(struct uwb_ie_drp *drp_ie,
				   int ext_beacon_slot,
				   struct uwb_rsv *rsv,
				   struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_rsv_move *mv = &rsv->mv;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	int action;

	action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, uwb_rsv_status(rsv));

	if (uwb_rsv_is_owner(rsv)) {
		switch(action) {
		case UWB_DRP_CONFLICT_ACT2:
			/* try to move the whole reservation elsewhere */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_TO_BE_MOVED);
			if (bow->can_reserve_extra_mases == false)
				uwb_rsv_backoff_win_increment(rc);

			break;
		case UWB_DRP_CONFLICT_ACT3:
			uwb_rsv_backoff_win_increment(rc);
			/* drop the conflicting MASs: the companion
			 * bitmap records what must be released and the
			 * reservation is re-announced as modified */
			bitmap_and(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
			/* fall through */
		default:
			break;
		}
	} else {
		switch(action) {
		case UWB_DRP_CONFLICT_ACT2:
		case UWB_DRP_CONFLICT_ACT3:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
			/* fall through */
		default:
			break;
		}

	}

}
292
/*
 * Resolve a conflict for a reservation that is being moved/expanded.
 * @companion_only: true if only the companion (expansion) MASs
 * conflict, in which case the expansion can simply be abandoned; false
 * if the reservation's own MASs conflict too.
 */
static void handle_conflict_expanding(struct uwb_ie_drp *drp_ie, int ext_beacon_slot,
				      struct uwb_rsv *rsv, bool companion_only,
				      struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct uwb_rsv_move *mv = &rsv->mv;
	int action;

	if (companion_only) {
		/* Status 0: only the (not yet established) companion
		 * MASs are in conflict. */
		action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, 0);
		if (uwb_rsv_is_owner(rsv)) {
			switch(action) {
			case UWB_DRP_CONFLICT_ACT2:
			case UWB_DRP_CONFLICT_ACT3:
				/* Abandon the expansion: fall back to
				 * the established reservation and
				 * release the companion MASs. */
				uwb_rsv_set_state(rsv,
						UWB_RSV_STATE_O_ESTABLISHED);
				rsv->needs_release_companion_mas = false;
				if (bow->can_reserve_extra_mases == false)
					uwb_rsv_backoff_win_increment(rc);
				uwb_drp_avail_release(rsv->rc,
						&rsv->mv.companion_mas);
			}
		} else { /* rsv is target */
			switch(action) {
			case UWB_DRP_CONFLICT_ACT2:
			case UWB_DRP_CONFLICT_ACT3:
				uwb_rsv_set_state(rsv,
					UWB_RSV_STATE_T_EXPANDING_CONFLICT);
				/* NOTE(review): no further action here;
				 * the owner resolves the conflict. */
			}
		}
	} else { /* also our main reservation MASs are conflicting */
		if (uwb_rsv_is_owner(rsv)) {
			uwb_rsv_backoff_win_increment(rc);
			/* release the companion reservation */
			uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
			/* Keep only the non-conflicting part of the
			 * reservation: companion_mas records what to
			 * drop and the reservation is re-announced as
			 * modified. */
			bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm,
				      conflicting_mas->bm, UWB_NUM_MAS);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
		} else { /* it is a target rsv */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
		}
	}
}
344
345static void uwb_drp_handle_conflict_rsv(struct uwb_rc *rc, struct uwb_rsv *rsv,
346 struct uwb_rc_evt_drp *drp_evt,
347 struct uwb_ie_drp *drp_ie,
348 struct uwb_mas_bm *conflicting_mas)
349{
350 struct uwb_rsv_move *mv;
351
352
353 if (uwb_rsv_has_two_drp_ies(rsv)) {
354 mv = &rsv->mv;
355 if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm,
356 UWB_NUM_MAS)) {
357 handle_conflict_expanding(drp_ie,
358 drp_evt->beacon_slot_number,
359 rsv, false, conflicting_mas);
360 } else {
361 if (bitmap_intersects(mv->companion_mas.bm,
362 conflicting_mas->bm, UWB_NUM_MAS)) {
363 handle_conflict_expanding(
364 drp_ie, drp_evt->beacon_slot_number,
365 rsv, true, conflicting_mas);
366 }
367 }
368 } else if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm,
369 UWB_NUM_MAS)) {
370 handle_conflict_normal(drp_ie, drp_evt->beacon_slot_number,
371 rsv, conflicting_mas);
372 }
373}
374
375static void uwb_drp_handle_all_conflict_rsv(struct uwb_rc *rc,
376 struct uwb_rc_evt_drp *drp_evt,
377 struct uwb_ie_drp *drp_ie,
378 struct uwb_mas_bm *conflicting_mas)
379{
380 struct uwb_rsv *rsv;
381
382 list_for_each_entry(rsv, &rc->reservations, rc_node) {
383 uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie,
384 conflicting_mas);
385 }
386}
387
/*
 * Process an ACCEPTED reason code in the owner's DRP IE for a
 * reservation we are the target of: track state changes and, if the
 * owner is asking for more MASs (a move/expansion), try to reserve
 * them or flag the conflict.
 */
static void uwb_drp_process_target_accepted(struct uwb_rc *rc,
	struct uwb_rsv *rsv, struct uwb_rc_evt_drp *drp_evt,
	struct uwb_ie_drp *drp_ie, struct uwb_mas_bm *mas)
{
	struct uwb_rsv_move *mv = &rsv->mv;
	int status;

	status = uwb_ie_drp_status(drp_ie);

	if (rsv->state == UWB_RSV_STATE_T_CONFLICT) {
		/* Re-assert the conflict state (refreshes our DRP IE /
		 * timers via uwb_rsv_set_state). */
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
		return;
	}

	if (rsv->state == UWB_RSV_STATE_T_EXPANDING_ACCEPTED) {
		/* drp_ie is companion */
		if (!bitmap_equal(rsv->mas.bm, mas->bm, UWB_NUM_MAS)) {
			/* stroke companion: re-assert the expanding
			 * accepted state while the move completes */
			uwb_rsv_set_state(rsv,
					UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
		}
	} else {
		if (!bitmap_equal(rsv->mas.bm, mas->bm, UWB_NUM_MAS)) {
			if (uwb_drp_avail_reserve_pending(rc, mas) == -EBUSY) {
				/* The owner wants MASs that are not
				 * available in our view: treat the
				 * requested bitmap as a conflict
				 * against all our reservations. */
				uwb_drp_handle_all_conflict_rsv(rc, drp_evt,
								drp_ie, mas);
			} else {
				/* accept the extra reservation */
				bitmap_copy(mv->companion_mas.bm, mas->bm,
					    UWB_NUM_MAS);
				uwb_rsv_set_state(rsv,
					UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
			}
		} else {
			if (status) {
				uwb_rsv_set_state(rsv,
						UWB_RSV_STATE_T_ACCEPTED);
			}
		}

	}
}
435
436
437
438
439
/*
 * Based on the DRP IE, transition a reservation we are the target of
 * to a new state.  Only ACCEPTED and MODIFIED reason codes are valid
 * from an owner; anything else is logged and ignored.
 */
static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv,
				   struct uwb_ie_drp *drp_ie, struct uwb_rc_evt_drp *drp_evt)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rsv_move *mv = &rsv->mv;
	int status;
	enum uwb_drp_reason reason_code;
	struct uwb_mas_bm mas;

	status = uwb_ie_drp_status(drp_ie);
	reason_code = uwb_ie_drp_reason_code(drp_ie);
	uwb_drp_ie_to_bm(&mas, drp_ie);

	switch (reason_code) {
	case UWB_DRP_REASON_ACCEPTED:
		uwb_drp_process_target_accepted(rc, rsv, drp_evt, drp_ie, &mas);
		break;

	case UWB_DRP_REASON_MODIFIED:
		/* Owner announces the same MASs: nothing changed. */
		if (bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
			break;
		}

		/* Owner is shrinking the reservation: release the MASs
		 * it dropped (companion_mas holds the difference). */
		if (bitmap_subset(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
			/* Owner is reducing the reservation. */
			bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mas.bm,
				      UWB_NUM_MAS);
			uwb_drp_avail_release(rsv->rc, &mv->companion_mas);
		}

		bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS);
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_RESIZED);
		break;
	default:
		dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
			 reason_code, status);
	}
}
481
482static void uwb_drp_process_owner_accepted(struct uwb_rsv *rsv,
483 struct uwb_mas_bm *mas)
484{
485 struct uwb_rsv_move *mv = &rsv->mv;
486
487 switch (rsv->state) {
488 case UWB_RSV_STATE_O_PENDING:
489 case UWB_RSV_STATE_O_INITIATED:
490 case UWB_RSV_STATE_O_ESTABLISHED:
491 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
492 break;
493 case UWB_RSV_STATE_O_MODIFIED:
494 if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS))
495 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
496 else
497 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
498 break;
499
500 case UWB_RSV_STATE_O_MOVE_REDUCING:
501 if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS))
502 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
503 else
504 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
505 break;
506 case UWB_RSV_STATE_O_MOVE_EXPANDING:
507 if (bitmap_equal(mas->bm, mv->companion_mas.bm, UWB_NUM_MAS)) {
508
509 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
510 } else {
511 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
512 }
513 break;
514 case UWB_RSV_STATE_O_MOVE_COMBINING:
515 if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS))
516 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
517 else
518 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
519 break;
520 default:
521 break;
522 }
523}
524
525
526
527
/*
 * Based on the target's DRP IE, transition a reservation we own to a
 * new state.  @src is the device the beacon came from (its last
 * announced availability is used when the target reports a conflict).
 */
static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv,
				  struct uwb_dev *src, struct uwb_ie_drp *drp_ie,
				  struct uwb_rc_evt_drp *drp_evt)
{
	struct device *dev = &rc->uwb_dev.dev;
	int status;
	enum uwb_drp_reason reason_code;
	struct uwb_mas_bm mas;

	status = uwb_ie_drp_status(drp_ie);
	reason_code = uwb_ie_drp_reason_code(drp_ie);
	uwb_drp_ie_to_bm(&mas, drp_ie);

	if (status) {
		switch (reason_code) {
		case UWB_DRP_REASON_ACCEPTED:
			uwb_drp_process_owner_accepted(rsv, &mas);
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	} else {
		switch (reason_code) {
		case UWB_DRP_REASON_PENDING:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_PENDING);
			break;
		case UWB_DRP_REASON_DENIED:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
			break;
		case UWB_DRP_REASON_CONFLICT:
			/* The MASs the target cannot grant are the
			 * complement of its last announced
			 * availability; resolve against that. */
			bitmap_complement(mas.bm, src->last_availability_bm,
					  UWB_NUM_MAS);
			uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, &mas);
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	}
}
570
571static void uwb_cnflt_alien_stroke_timer(struct uwb_cnflt_alien *cnflt)
572{
573 unsigned timeout_us = UWB_MAX_LOST_BEACONS * UWB_SUPERFRAME_LENGTH_US;
574 mod_timer(&cnflt->timer, jiffies + usecs_to_jiffies(timeout_us));
575}
576
/*
 * Work item run when an alien-BP conflict entry expires (its timer
 * fired because no beacon refreshed it): unlink and free the entry,
 * rebuild the radio controller's aggregate alien-conflict bitmap from
 * the remaining entries, and schedule the alien-BP reservation rework.
 */
static void uwb_cnflt_update_work(struct work_struct *work)
{
	struct uwb_cnflt_alien *cnflt = container_of(work,
						     struct uwb_cnflt_alien,
						     cnflt_update_work);
	struct uwb_cnflt_alien *c;
	struct uwb_rc *rc = cnflt->rc;

	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;

	mutex_lock(&rc->rsvs_mutex);

	list_del(&cnflt->rc_node);

	/* update the rc global conflicting alien bitmap */
	bitmap_zero(rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS);

	list_for_each_entry(c, &rc->cnflt_alien_list, rc_node) {
		bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm,
			  c->mas.bm, UWB_NUM_MAS);
	}

	queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work,
			   usecs_to_jiffies(delay_us));

	/* safe to free: entry is unlinked and its timer has expired
	 * (this work item is queued from the timer handler) */
	kfree(cnflt);
	mutex_unlock(&rc->rsvs_mutex);
}
605
606static void uwb_cnflt_timer(unsigned long arg)
607{
608 struct uwb_cnflt_alien *cnflt = (struct uwb_cnflt_alien *)arg;
609
610 queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work);
611}
612
613
614
615
616
/*
 * We have received a DRP IE of type Alien BP: track the conflicting
 * MASs.  If an entry with the same bitmap already exists just restart
 * its expiry timer; otherwise create a new entry, merge its MASs into
 * the global alien-conflict bitmap and kick the alien-BP rework.
 */
static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_mas_bm mas;
	struct uwb_cnflt_alien *cnflt;
	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;

	uwb_drp_ie_to_bm(&mas, drp_ie);

	list_for_each_entry(cnflt, &rc->cnflt_alien_list, rc_node) {
		if (bitmap_equal(cnflt->mas.bm, mas.bm, UWB_NUM_MAS)) {
			/* Existing entry for the same MASs: just
			 * refresh its expiry timer. */
			uwb_cnflt_alien_stroke_timer(cnflt);
			return;
		}
	}

	/* New alien-BP conflict: allocate and initialize an entry.
	 * GFP_KERNEL is fine: this runs in uwbd process context, not
	 * in the beacon interrupt path. */
	cnflt = kzalloc(sizeof(struct uwb_cnflt_alien), GFP_KERNEL);
	if (!cnflt) {
		dev_err(dev, "failed to alloc uwb_cnflt_alien struct\n");
		return;
	}

	INIT_LIST_HEAD(&cnflt->rc_node);
	setup_timer(&cnflt->timer, uwb_cnflt_timer, (unsigned long)cnflt);

	cnflt->rc = rc;
	INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work);

	bitmap_copy(cnflt->mas.bm, mas.bm, UWB_NUM_MAS);

	list_add_tail(&cnflt->rc_node, &rc->cnflt_alien_list);

	/* update the rc global conflicting alien bitmap */
	bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, mas.bm, UWB_NUM_MAS);

	queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us));

	/* start the timer that will expire this entry when the alien
	 * BP disappears */
	uwb_cnflt_alien_stroke_timer(cnflt);
}
662
663static void uwb_drp_process_not_involved(struct uwb_rc *rc,
664 struct uwb_rc_evt_drp *drp_evt,
665 struct uwb_ie_drp *drp_ie)
666{
667 struct uwb_mas_bm mas;
668
669 uwb_drp_ie_to_bm(&mas, drp_ie);
670 uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas);
671}
672
/*
 * Handle a DRP IE that addresses us: find the matching reservation and
 * process the IE from the appropriate side (owner or target).
 */
static void uwb_drp_process_involved(struct uwb_rc *rc, struct uwb_dev *src,
				     struct uwb_rc_evt_drp *drp_evt,
				     struct uwb_ie_drp *drp_ie)
{
	struct uwb_rsv *rsv;

	rsv = uwb_rsv_find(rc, src, drp_ie);
	if (!rsv) {
		/* No matching reservation found (and uwb_rsv_find()
		 * did not create one) -- nothing to do.
		 * NOTE(review): whether uwb_rsv_find() can allocate a
		 * new target-side reservation is not visible here. */
		return;
	}

	/* A reservation in state NONE is being terminated: re-assert
	 * the state (refreshing timers/IEs) and ignore the DRP IE. */
	if (rsv->state == UWB_RSV_STATE_NONE) {
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
		return;
	}

	/* The IE's Owner bit set means the sender owns it, so we are
	 * the target; otherwise we are the owner. */
	if (uwb_ie_drp_owner(drp_ie))
		uwb_drp_process_target(rc, rsv, drp_ie, drp_evt);
	else
		uwb_drp_process_owner(rc, rsv, src, drp_ie, drp_evt);

}
704
705
706static bool uwb_drp_involves_us(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
707{
708 return uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, &drp_ie->dev_addr) == 0;
709}
710
711
712
713
714static void uwb_drp_process(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
715 struct uwb_dev *src, struct uwb_ie_drp *drp_ie)
716{
717 if (uwb_ie_drp_type(drp_ie) == UWB_DRP_TYPE_ALIEN_BP)
718 uwb_drp_handle_alien_drp(rc, drp_ie);
719 else if (uwb_drp_involves_us(rc, drp_ie))
720 uwb_drp_process_involved(rc, src, drp_evt, drp_ie);
721 else
722 uwb_drp_process_not_involved(rc, drp_evt, drp_ie);
723}
724
725
726
727
/*
 * Record the DRP availability bitmap @src last announced; it is used
 * later to compute conflicting MASs when @src reports a conflict.
 */
static void uwb_drp_availability_process(struct uwb_rc *rc, struct uwb_dev *src,
					 struct uwb_ie_drp_avail *drp_availability_ie)
{
	bitmap_copy(src->last_availability_bm,
		    drp_availability_ie->bmp, UWB_NUM_MAS);
}
734
735
736
737
738
/*
 * Walk all the IEs in a DRP notification (@ielen octets of IE data
 * from @src_dev) and process the DRP and DRP-availability ones;
 * anything else in a DRP notification is unexpected and logged.
 */
static
void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
			 size_t ielen, struct uwb_dev *src_dev)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_ie_hdr *ie_hdr;
	void *ptr;

	ptr = drp_evt->ie_data;
	for (;;) {
		/* uwb_ie_next() advances ptr and decrements ielen;
		 * returns NULL when no complete IE remains. */
		ie_hdr = uwb_ie_next(&ptr, &ielen);
		if (!ie_hdr)
			break;

		switch (ie_hdr->element_id) {
		case UWB_IE_DRP_AVAILABILITY:
			uwb_drp_availability_process(rc, src_dev, (struct uwb_ie_drp_avail *)ie_hdr);
			break;
		case UWB_IE_DRP:
			uwb_drp_process(rc, drp_evt, src_dev, (struct uwb_ie_drp *)ie_hdr);
			break;
		default:
			dev_warn(dev, "unexpected IE in DRP notification\n");
			break;
		}
	}

	/* Trailing octets that did not form a complete IE. */
	if (ielen > 0)
		dev_warn(dev, "%d octets remaining in DRP notification\n",
			 (int)ielen);
}
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803int uwbd_evt_handle_rc_drp(struct uwb_event *evt)
804{
805 struct device *dev = &evt->rc->uwb_dev.dev;
806 struct uwb_rc *rc = evt->rc;
807 struct uwb_rc_evt_drp *drp_evt;
808 size_t ielength, bytes_left;
809 struct uwb_dev_addr src_addr;
810 struct uwb_dev *src_dev;
811
812
813
814 if (evt->notif.size < sizeof(*drp_evt)) {
815 dev_err(dev, "DRP event: Not enough data to decode event "
816 "[%zu bytes left, %zu needed]\n",
817 evt->notif.size, sizeof(*drp_evt));
818 return 0;
819 }
820 bytes_left = evt->notif.size - sizeof(*drp_evt);
821 drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp, rceb);
822 ielength = le16_to_cpu(drp_evt->ie_length);
823 if (bytes_left != ielength) {
824 dev_err(dev, "DRP event: Not enough data in payload [%zu"
825 "bytes left, %zu declared in the event]\n",
826 bytes_left, ielength);
827 return 0;
828 }
829
830 memcpy(src_addr.data, &drp_evt->src_addr, sizeof(src_addr));
831 src_dev = uwb_dev_get_by_devaddr(rc, &src_addr);
832 if (!src_dev) {
833
834
835
836
837
838
839
840
841 return 0;
842 }
843
844 mutex_lock(&rc->rsvs_mutex);
845
846
847 uwb_drp_process_all(rc, drp_evt, ielength, src_dev);
848
849 mutex_unlock(&rc->rsvs_mutex);
850
851 uwb_dev_put(src_dev);
852 return 0;
853}
854