1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include <linux/kernel.h>
19#include <linux/uwb.h>
20#include <linux/slab.h>
21#include <linux/random.h>
22#include <linux/export.h>
23
24#include "uwb-internal.h"
25
/* Forward declaration: per-reservation timer callback (defined below). */
static void uwb_rsv_timer(unsigned long arg);

/*
 * Human-readable names for each reservation state, indexed by
 * enum uwb_rsv_state.  Used by uwb_rsv_state_str() for debug output.
 */
static const char *rsv_states[] = {
	[UWB_RSV_STATE_NONE] = "none ",
	[UWB_RSV_STATE_O_INITIATED] = "o initiated ",
	[UWB_RSV_STATE_O_PENDING] = "o pending ",
	[UWB_RSV_STATE_O_MODIFIED] = "o modified ",
	[UWB_RSV_STATE_O_ESTABLISHED] = "o established ",
	[UWB_RSV_STATE_O_TO_BE_MOVED] = "o to be moved ",
	[UWB_RSV_STATE_O_MOVE_EXPANDING] = "o move expanding",
	[UWB_RSV_STATE_O_MOVE_COMBINING] = "o move combining",
	[UWB_RSV_STATE_O_MOVE_REDUCING] = "o move reducing ",
	[UWB_RSV_STATE_T_ACCEPTED] = "t accepted ",
	[UWB_RSV_STATE_T_CONFLICT] = "t conflict ",
	[UWB_RSV_STATE_T_PENDING] = "t pending ",
	[UWB_RSV_STATE_T_DENIED] = "t denied ",
	[UWB_RSV_STATE_T_RESIZED] = "t resized ",
	[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = "t expanding acc ",
	[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = "t expanding conf",
	[UWB_RSV_STATE_T_EXPANDING_PENDING] = "t expanding pend",
	[UWB_RSV_STATE_T_EXPANDING_DENIED] = "t expanding den ",
};

/*
 * Human-readable names for each DRP reservation type, indexed by
 * enum uwb_drp_type.  Used by uwb_rsv_type_str().
 */
static const char *rsv_types[] = {
	[UWB_DRP_TYPE_ALIEN_BP] = "alien-bp",
	[UWB_DRP_TYPE_HARD] = "hard",
	[UWB_DRP_TYPE_SOFT] = "soft",
	[UWB_DRP_TYPE_PRIVATE] = "private",
	[UWB_DRP_TYPE_PCA] = "pca",
};
56
57bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv)
58{
59 static const bool has_two_drp_ies[] = {
60 [UWB_RSV_STATE_O_INITIATED] = false,
61 [UWB_RSV_STATE_O_PENDING] = false,
62 [UWB_RSV_STATE_O_MODIFIED] = false,
63 [UWB_RSV_STATE_O_ESTABLISHED] = false,
64 [UWB_RSV_STATE_O_TO_BE_MOVED] = false,
65 [UWB_RSV_STATE_O_MOVE_COMBINING] = false,
66 [UWB_RSV_STATE_O_MOVE_REDUCING] = false,
67 [UWB_RSV_STATE_O_MOVE_EXPANDING] = true,
68 [UWB_RSV_STATE_T_ACCEPTED] = false,
69 [UWB_RSV_STATE_T_CONFLICT] = false,
70 [UWB_RSV_STATE_T_PENDING] = false,
71 [UWB_RSV_STATE_T_DENIED] = false,
72 [UWB_RSV_STATE_T_RESIZED] = false,
73 [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = true,
74 [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = true,
75 [UWB_RSV_STATE_T_EXPANDING_PENDING] = true,
76 [UWB_RSV_STATE_T_EXPANDING_DENIED] = true,
77 };
78
79 return has_two_drp_ies[rsv->state];
80}
81
82
83
84
85
86const char *uwb_rsv_state_str(enum uwb_rsv_state state)
87{
88 if (state < UWB_RSV_STATE_NONE || state >= UWB_RSV_STATE_LAST)
89 return "unknown";
90 return rsv_states[state];
91}
92EXPORT_SYMBOL_GPL(uwb_rsv_state_str);
93
94
95
96
97
98const char *uwb_rsv_type_str(enum uwb_drp_type type)
99{
100 if (type < UWB_DRP_TYPE_ALIEN_BP || type > UWB_DRP_TYPE_PCA)
101 return "invalid";
102 return rsv_types[type];
103}
104EXPORT_SYMBOL_GPL(uwb_rsv_type_str);
105
106void uwb_rsv_dump(char *text, struct uwb_rsv *rsv)
107{
108 struct device *dev = &rsv->rc->uwb_dev.dev;
109 struct uwb_dev_addr devaddr;
110 char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE];
111
112 uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr);
113 if (rsv->target.type == UWB_RSV_TARGET_DEV)
114 devaddr = rsv->target.dev->dev_addr;
115 else
116 devaddr = rsv->target.devaddr;
117 uwb_dev_addr_print(target, sizeof(target), &devaddr);
118
119 dev_dbg(dev, "rsv %s %s -> %s: %s\n",
120 text, owner, target, uwb_rsv_state_str(rsv->state));
121}
122
123static void uwb_rsv_release(struct kref *kref)
124{
125 struct uwb_rsv *rsv = container_of(kref, struct uwb_rsv, kref);
126
127 kfree(rsv);
128}
129
/* Take a reference on a reservation (paired with uwb_rsv_put()). */
void uwb_rsv_get(struct uwb_rsv *rsv)
{
	kref_get(&rsv->kref);
}
134
/* Drop a reference; frees the reservation when the count hits zero. */
void uwb_rsv_put(struct uwb_rsv *rsv)
{
	kref_put(&rsv->kref, uwb_rsv_release);
}
139
140
141
142
143
144
145
146
147static int uwb_rsv_get_stream(struct uwb_rsv *rsv)
148{
149 struct uwb_rc *rc = rsv->rc;
150 struct device *dev = &rc->uwb_dev.dev;
151 unsigned long *streams_bm;
152 int stream;
153
154 switch (rsv->target.type) {
155 case UWB_RSV_TARGET_DEV:
156 streams_bm = rsv->target.dev->streams;
157 break;
158 case UWB_RSV_TARGET_DEVADDR:
159 streams_bm = rc->uwb_dev.streams;
160 break;
161 default:
162 return -EINVAL;
163 }
164
165 stream = find_first_zero_bit(streams_bm, UWB_NUM_STREAMS);
166 if (stream >= UWB_NUM_STREAMS) {
167 dev_err(dev, "%s: no available stream found\n", __func__);
168 return -EBUSY;
169 }
170
171 rsv->stream = stream;
172 set_bit(stream, streams_bm);
173
174 dev_dbg(dev, "get stream %d\n", rsv->stream);
175
176 return 0;
177}
178
179static void uwb_rsv_put_stream(struct uwb_rsv *rsv)
180{
181 struct uwb_rc *rc = rsv->rc;
182 struct device *dev = &rc->uwb_dev.dev;
183 unsigned long *streams_bm;
184
185 switch (rsv->target.type) {
186 case UWB_RSV_TARGET_DEV:
187 streams_bm = rsv->target.dev->streams;
188 break;
189 case UWB_RSV_TARGET_DEVADDR:
190 streams_bm = rc->uwb_dev.streams;
191 break;
192 default:
193 return;
194 }
195
196 clear_bit(rsv->stream, streams_bm);
197
198 dev_dbg(dev, "put stream %d\n", rsv->stream);
199}
200
201void uwb_rsv_backoff_win_timer(unsigned long arg)
202{
203 struct uwb_drp_backoff_win *bow = (struct uwb_drp_backoff_win *)arg;
204 struct uwb_rc *rc = container_of(bow, struct uwb_rc, bow);
205 struct device *dev = &rc->uwb_dev.dev;
206
207 bow->can_reserve_extra_mases = true;
208 if (bow->total_expired <= 4) {
209 bow->total_expired++;
210 } else {
211
212
213 bow->total_expired = 0;
214 bow->window = UWB_DRP_BACKOFF_WIN_MIN >> 1;
215 }
216 dev_dbg(dev, "backoff_win_timer total_expired=%d, n=%d\n", bow->total_expired, bow->n);
217
218
219 uwb_rsv_handle_drp_avail_change(rc);
220}
221
/*
 * uwb_rsv_backoff_win_increment - double the DRP backoff window.
 * @rc: the radio controller
 *
 * Disables extra-MAS reservation, doubles the window (capped so it
 * never reaches UWB_DRP_BACKOFF_WIN_MAX), picks a random slot count n
 * within the new window, and re-arms the backoff timer for n
 * superframes.
 */
void uwb_rsv_backoff_win_increment(struct uwb_rc *rc)
{
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct device *dev = &rc->uwb_dev.dev;
	unsigned timeout_us;

	dev_dbg(dev, "backoff_win_increment: window=%d\n", bow->window);

	bow->can_reserve_extra_mases = false;

	/* Already at the maximum window; leave it (and the timer) alone. */
	if((bow->window << 1) == UWB_DRP_BACKOFF_WIN_MAX)
		return;

	bow->window <<= 1;
	/* window is a power of two, so this masks n into [0, window). */
	bow->n = prandom_u32() & (bow->window - 1);
	dev_dbg(dev, "new_window=%d, n=%d\n", bow->window, bow->n);

	/* Re-arm for n superframes from now. */
	timeout_us = bow->n * UWB_SUPERFRAME_LENGTH_US;
	bow->total_expired = 0;
	mod_timer(&bow->timer, jiffies + usecs_to_jiffies(timeout_us));
}
244
/*
 * uwb_rsv_stroke_timer - re-arm (or stop) a reservation's watchdog timer.
 * @rsv: the reservation
 *
 * Default timeout is UWB_MAX_LOST_BEACONS superframes.  Exceptions:
 * a NONE-state reservation needs no timer; an established multicast
 * reservation never times out; a multicast reservation in an initiated
 * or move state gets a short 1-superframe timeout (multicast targets
 * never reply, so these states advance on timeout rather than on a
 * response -- see uwb_rsv_handle_timeout_work()).
 */
static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv)
{
	int sframes = UWB_MAX_LOST_BEACONS;

	if (rsv->state == UWB_RSV_STATE_NONE) {
		sframes = 0;
	} else if (rsv->is_multicast) {
		if (rsv->state == UWB_RSV_STATE_O_INITIATED
		    || rsv->state == UWB_RSV_STATE_O_MOVE_EXPANDING
		    || rsv->state == UWB_RSV_STATE_O_MOVE_COMBINING
		    || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING)
			sframes = 1;
		if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED)
			sframes = 0;

	}

	if (sframes > 0) {
		/*
		 * +2 superframes of slack, presumably to cover beacon /
		 * IE propagation latency -- TODO confirm against the
		 * DRP timing rules.
		 */
		unsigned timeout_us = (sframes + 2) * UWB_SUPERFRAME_LENGTH_US;
		mod_timer(&rsv->timer, jiffies + usecs_to_jiffies(timeout_us));
	} else
		del_timer(&rsv->timer);
}
277
278
279
280
281
/*
 * uwb_rsv_state_update - record a new state and schedule the fallout.
 * @rsv:       the reservation
 * @new_state: state to record
 *
 * Marks the DRP IE stale (so uwb_rsv_update_all() regenerates it),
 * restrokes the watchdog timer for the new state, and queues the
 * deferred beacon-group update on the radio controller.
 */
static void uwb_rsv_state_update(struct uwb_rsv *rsv,
				 enum uwb_rsv_state new_state)
{
	rsv->state = new_state;
	rsv->ie_valid = false;

	uwb_rsv_dump("SU", rsv);

	uwb_rsv_stroke_timer(rsv);
	uwb_rsv_sched_update(rsv->rc);
}
293
/* Invoke the owner's notification callback, if one is registered. */
static void uwb_rsv_callback(struct uwb_rsv *rsv)
{
	if (rsv->callback)
		rsv->callback(rsv);
}
299
/*
 * uwb_rsv_set_state - drive a reservation through a state transition.
 * @rsv:       the reservation
 * @new_state: target state
 *
 * Performs the MAS bookkeeping each transition requires (reserving or
 * releasing companion MAS, merging or splitting bitmaps) before
 * recording the state via uwb_rsv_state_update().  A same-state
 * "transition" only restrokes the watchdog timer for the states where
 * that matters.  Statement order within each case is significant:
 * bitmaps must be adjusted before the state (and hence the DRP IE) is
 * updated.
 */
void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state)
{
	struct uwb_rsv_move *mv = &rsv->mv;

	if (rsv->state == new_state) {
		switch (rsv->state) {
		case UWB_RSV_STATE_O_ESTABLISHED:
		case UWB_RSV_STATE_O_MOVE_EXPANDING:
		case UWB_RSV_STATE_O_MOVE_COMBINING:
		case UWB_RSV_STATE_O_MOVE_REDUCING:
		case UWB_RSV_STATE_T_ACCEPTED:
		case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
		case UWB_RSV_STATE_T_RESIZED:
		case UWB_RSV_STATE_NONE:
			/* Keep-alive: re-arm the timer, nothing else. */
			uwb_rsv_stroke_timer(rsv);
			break;
		default:
			/* Same-state in any other state is a no-op. */
			break;
		}
		return;
	}

	uwb_rsv_dump("SC", rsv);

	switch (new_state) {
	case UWB_RSV_STATE_NONE:
		/* Full teardown; the callback tells the owner it's gone. */
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE);
		uwb_rsv_remove(rsv);
		uwb_rsv_callback(rsv);
		break;
	case UWB_RSV_STATE_O_INITIATED:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_INITIATED);
		break;
	case UWB_RSV_STATE_O_PENDING:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING);
		break;
	case UWB_RSV_STATE_O_MODIFIED:
		/* Drop the companion MAS from the reservation's bitmap. */
		bitmap_andnot(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MODIFIED);
		break;
	case UWB_RSV_STATE_O_ESTABLISHED:
		/*
		 * Coming out of a modify/reduce, the companion MAS are no
		 * longer needed and go back to the available pool.
		 */
		if (rsv->state == UWB_RSV_STATE_O_MODIFIED
		    || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) {
			uwb_drp_avail_release(rsv->rc, &mv->companion_mas);
			rsv->needs_release_companion_mas = false;
		}
		uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED);
		uwb_rsv_callback(rsv);
		break;
	case UWB_RSV_STATE_O_MOVE_EXPANDING:
		rsv->needs_release_companion_mas = true;
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
		break;
	case UWB_RSV_STATE_O_MOVE_COMBINING:
		/* Fold the accepted companion MAS into the reservation. */
		rsv->needs_release_companion_mas = false;
		uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas);
		bitmap_or(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS);
		rsv->mas.safe += mv->companion_mas.safe;
		rsv->mas.unsafe += mv->companion_mas.unsafe;
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
		break;
	case UWB_RSV_STATE_O_MOVE_REDUCING:
		/*
		 * companion = current - final, i.e. the MAS to give up;
		 * the reservation then shrinks to its final allocation.
		 */
		bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS);
		rsv->needs_release_companion_mas = true;
		rsv->mas.safe = mv->final_mas.safe;
		rsv->mas.unsafe = mv->final_mas.unsafe;
		bitmap_copy(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS);
		bitmap_copy(rsv->mas.unsafe_bm, mv->final_mas.unsafe_bm, UWB_NUM_MAS);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
		break;
	case UWB_RSV_STATE_T_ACCEPTED:
	case UWB_RSV_STATE_T_RESIZED:
		/* Note: T_RESIZED is deliberately recorded as T_ACCEPTED. */
		rsv->needs_release_companion_mas = false;
		uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED);
		uwb_rsv_callback(rsv);
		break;
	case UWB_RSV_STATE_T_DENIED:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED);
		break;
	case UWB_RSV_STATE_T_CONFLICT:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_CONFLICT);
		break;
	case UWB_RSV_STATE_T_PENDING:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_PENDING);
		break;
	case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
		rsv->needs_release_companion_mas = true;
		uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
		break;
	default:
		dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n",
			uwb_rsv_state_str(new_state), new_state);
	}
}
400
/*
 * uwb_rsv_handle_timeout_work - process a reservation watchdog expiry.
 * @work: embedded in the timed-out struct uwb_rsv
 *
 * Runs on the rsv workqueue (queued by uwb_rsv_timer()) with
 * rc->rsvs_mutex held.  Multicast owner reservations get no responses
 * from their targets, so for them a timeout is the normal way to
 * advance through the initiated/move states; for everything else a
 * timeout means the peer went away and the reservation is torn down.
 */
static void uwb_rsv_handle_timeout_work(struct work_struct *work)
{
	struct uwb_rsv *rsv = container_of(work, struct uwb_rsv,
					   handle_timeout_work);
	struct uwb_rc *rc = rsv->rc;

	mutex_lock(&rc->rsvs_mutex);

	uwb_rsv_dump("TO", rsv);

	switch (rsv->state) {
	case UWB_RSV_STATE_O_INITIATED:
		/* Multicast: no reply expected, consider it established. */
		if (rsv->is_multicast) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
			goto unlock;
		}
		break;
	case UWB_RSV_STATE_O_MOVE_EXPANDING:
		if (rsv->is_multicast) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
			goto unlock;
		}
		break;
	case UWB_RSV_STATE_O_MOVE_COMBINING:
		if (rsv->is_multicast) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
			goto unlock;
		}
		break;
	case UWB_RSV_STATE_O_MOVE_REDUCING:
		if (rsv->is_multicast) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
			goto unlock;
		}
		break;
	case UWB_RSV_STATE_O_ESTABLISHED:
		/* Established multicast reservations never expire. */
		if (rsv->is_multicast)
			goto unlock;
		break;
	case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
		/*
		 * The expansion was never completed by the owner: fall
		 * back to plain accepted and give the companion MAS back.
		 */
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
		uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
		goto unlock;
	case UWB_RSV_STATE_NONE:
		goto unlock;
	default:
		break;
	}

	/* Anything else that times out is destroyed. */
	uwb_rsv_remove(rsv);

unlock:
	mutex_unlock(&rc->rsvs_mutex);
}
461
462static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc)
463{
464 struct uwb_rsv *rsv;
465
466 rsv = kzalloc(sizeof(struct uwb_rsv), GFP_KERNEL);
467 if (!rsv)
468 return NULL;
469
470 INIT_LIST_HEAD(&rsv->rc_node);
471 INIT_LIST_HEAD(&rsv->pal_node);
472 kref_init(&rsv->kref);
473 init_timer(&rsv->timer);
474 rsv->timer.function = uwb_rsv_timer;
475 rsv->timer.data = (unsigned long)rsv;
476
477 rsv->rc = rc;
478 INIT_WORK(&rsv->handle_timeout_work, uwb_rsv_handle_timeout_work);
479
480 return rsv;
481}
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb, void *pal_priv)
497{
498 struct uwb_rsv *rsv;
499
500 rsv = uwb_rsv_alloc(rc);
501 if (!rsv)
502 return NULL;
503
504 rsv->callback = cb;
505 rsv->pal_priv = pal_priv;
506
507 return rsv;
508}
509EXPORT_SYMBOL_GPL(uwb_rsv_create);
510
/*
 * uwb_rsv_remove - tear a reservation down and drop its list reference.
 * @rsv: the reservation
 *
 * Forces the state to NONE if needed, releases any companion and main
 * MAS back to the availability pool, frees the stream (owner side
 * only), drops the device references taken when the reservation was
 * set up, unlinks it from rc->reservations and puts the list's kref.
 * Teardown order matters: MAS release before stream/dev release,
 * unlink before the final put.
 */
void uwb_rsv_remove(struct uwb_rsv *rsv)
{
	uwb_rsv_dump("RM", rsv);

	if (rsv->state != UWB_RSV_STATE_NONE)
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);

	if (rsv->needs_release_companion_mas)
		uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
	uwb_drp_avail_release(rsv->rc, &rsv->mas);

	/* Only the owner side allocated a stream in uwb_rsv_establish(). */
	if (uwb_rsv_is_owner(rsv))
		uwb_rsv_put_stream(rsv);

	uwb_dev_put(rsv->owner);
	if (rsv->target.type == UWB_RSV_TARGET_DEV)
		uwb_dev_put(rsv->target.dev);

	list_del_init(&rsv->rc_node);
	uwb_rsv_put(rsv);
}
532
533
534
535
536
537
538
/*
 * uwb_rsv_destroy - drop the caller's reference to a reservation.
 * @rsv: the reservation
 *
 * The memory is only freed once all other references (list, accept)
 * have also been dropped.
 */
void uwb_rsv_destroy(struct uwb_rsv *rsv)
{
	uwb_rsv_put(rsv);
}
EXPORT_SYMBOL_GPL(uwb_rsv_destroy);
544
545
546
547
548
549
550
551
552
553
554
555
/*
 * uwb_rsv_establish - start negotiating a reservation as owner.
 * @rsv: reservation created with uwb_rsv_create()
 *
 * Allocates a stream, finds the best MAS allocation within the current
 * availability, marks those MAS pending, links the reservation into
 * rc->reservations (taking a list reference and an owner device
 * reference) and kicks the state machine with O_INITIATED.
 *
 * Returns 0 on success or a negative errno; on any failure the stream
 * is released again and the reservation is left untouched.  Called
 * with rc->rsvs_mutex taken here.
 */
int uwb_rsv_establish(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_mas_bm available;
	struct device *dev = &rc->uwb_dev.dev;
	int ret;

	mutex_lock(&rc->rsvs_mutex);
	ret = uwb_rsv_get_stream(rsv);
	if (ret) {
		dev_err(dev, "%s: uwb_rsv_get_stream failed: %d\n",
			__func__, ret);
		goto out;
	}

	/* Random tiebreaker bit used when competing DRP requests collide. */
	rsv->tiebreaker = prandom_u32() & 1;

	uwb_drp_available(rc, &available);

	ret = uwb_rsv_find_best_allocation(rsv, &available, &rsv->mas);
	if (ret == UWB_RSV_ALLOC_NOT_FOUND) {
		ret = -EBUSY;
		uwb_rsv_put_stream(rsv);
		dev_err(dev, "%s: uwb_rsv_find_best_allocation failed: %d\n",
			__func__, ret);
		goto out;
	}

	ret = uwb_drp_avail_reserve_pending(rc, &rsv->mas);
	if (ret != 0) {
		uwb_rsv_put_stream(rsv);
		dev_err(dev, "%s: uwb_drp_avail_reserve_pending failed: %d\n",
			__func__, ret);
		goto out;
	}

	/* List reference; dropped by uwb_rsv_remove(). */
	uwb_rsv_get(rsv);
	list_add_tail(&rsv->rc_node, &rc->reservations);
	rsv->owner = &rc->uwb_dev;
	uwb_dev_get(rsv->owner);
	uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_INITIATED);
out:
	mutex_unlock(&rc->rsvs_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(uwb_rsv_establish);
602
603
604
605
606
607
608
609
610
611
/*
 * uwb_rsv_modify - attempt to modify an already established reservation.
 * @rsv:          the reservation
 * @max_mas:      maximum number of MAS wanted
 * @min_mas:      minimum number of MAS acceptable
 * @max_interval: maximum interval between allocations
 *
 * Not implemented: always returns -ENOSYS.
 */
int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int max_interval)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(uwb_rsv_modify);
617
618
619
620
621
/*
 * uwb_rsv_try_move - try to move a reservation to a better allocation.
 * @rsv:       the reservation
 * @available: MAS to consider available for the move
 *
 * If a better allocation exists and differs from the current one, the
 * extra MAS (final minus current) are marked pending and the move
 * state machine is started via O_MOVE_EXPANDING.
 *
 * Returns -EBUSY while the backoff window forbids extra MAS, otherwise
 * 0 -- including when no better allocation was found.
 * NOTE(review): the return value of uwb_drp_avail_reserve_pending() is
 * ignored here; confirm whether a failure should abort the move.
 */
int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rsv_move *mv;
	int ret = 0;

	if (bow->can_reserve_extra_mases == false)
		return -EBUSY;

	mv = &rsv->mv;

	if (uwb_rsv_find_best_allocation(rsv, available, &mv->final_mas) == UWB_RSV_ALLOC_FOUND) {

		/* Only move when the new allocation actually differs. */
		if (!bitmap_equal(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS)) {
			/* companion = the MAS to add on top of the current set */
			bitmap_andnot(mv->companion_mas.bm, mv->final_mas.bm, rsv->mas.bm, UWB_NUM_MAS);
			uwb_drp_avail_reserve_pending(rc, &mv->companion_mas);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
		}
	} else {
		dev_dbg(dev, "new allocation not found\n");
	}

	return ret;
}
649
650
651
652
653void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc)
654{
655 struct uwb_drp_backoff_win *bow = &rc->bow;
656 struct uwb_rsv *rsv;
657 struct uwb_mas_bm mas;
658
659 if (bow->can_reserve_extra_mases == false)
660 return;
661
662 list_for_each_entry(rsv, &rc->reservations, rc_node) {
663 if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED ||
664 rsv->state == UWB_RSV_STATE_O_TO_BE_MOVED) {
665 uwb_drp_available(rc, &mas);
666 bitmap_or(mas.bm, mas.bm, rsv->mas.bm, UWB_NUM_MAS);
667 uwb_rsv_try_move(rsv, &mas);
668 }
669 }
670
671}
672
673
674
675
676
677
678
679
680
681
682
/*
 * uwb_rsv_terminate - terminate an established reservation.
 * @rsv: the reservation to terminate
 *
 * Forces the state to NONE under rc->rsvs_mutex, which tears the
 * reservation down via uwb_rsv_set_state()/uwb_rsv_remove().
 */
void uwb_rsv_terminate(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;

	mutex_lock(&rc->rsvs_mutex);

	if (rsv->state != UWB_RSV_STATE_NONE)
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);

	mutex_unlock(&rc->rsvs_mutex);
}
EXPORT_SYMBOL_GPL(uwb_rsv_terminate);
695
696
697
698
699
700
701
702
703
704
705
706
707
/*
 * uwb_rsv_accept - accept a reservation offered to a PAL (target side).
 * @rsv:      the reservation
 * @cb:       callback for future state changes
 * @pal_priv: opaque PAL data stored in the reservation
 *
 * Takes a reference on behalf of the accepting PAL (released via
 * uwb_rsv_destroy()).  The state is assigned directly rather than via
 * uwb_rsv_set_state(); the caller (uwb_rsv_new_target()) handles the
 * bookkeeping side of the accept itself.
 */
void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv)
{
	uwb_rsv_get(rsv);

	rsv->callback = cb;
	rsv->pal_priv = pal_priv;
	rsv->state = UWB_RSV_STATE_T_ACCEPTED;
}
EXPORT_SYMBOL_GPL(uwb_rsv_accept);
717
718
719
720
721static bool uwb_rsv_match(struct uwb_rsv *rsv, struct uwb_dev *src,
722 struct uwb_ie_drp *drp_ie)
723{
724 struct uwb_dev_addr *rsv_src;
725 int stream;
726
727 stream = uwb_ie_drp_stream_index(drp_ie);
728
729 if (rsv->stream != stream)
730 return false;
731
732 switch (rsv->target.type) {
733 case UWB_RSV_TARGET_DEVADDR:
734 return rsv->stream == stream;
735 case UWB_RSV_TARGET_DEV:
736 if (uwb_ie_drp_owner(drp_ie))
737 rsv_src = &rsv->owner->dev_addr;
738 else
739 rsv_src = &rsv->target.dev->dev_addr;
740 return uwb_dev_addr_cmp(&src->dev_addr, rsv_src) == 0;
741 }
742 return false;
743}
744
/*
 * uwb_rsv_new_target - create a target-side reservation for a new DRP IE.
 * @rc:     the local radio controller (the reservation's target)
 * @src:    the remote owner device
 * @drp_ie: the DRP IE announcing the reservation
 *
 * Allocates a reservation, fills it from the DRP IE, and offers it to
 * each registered PAL via its new_rsv hook until one accepts (sets
 * the state to T_ACCEPTED, typically through uwb_rsv_accept()).  If
 * no PAL accepts, the default outcome is T_DENIED.
 *
 * Returns the new reservation, or NULL on allocation failure.
 */
static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc,
					  struct uwb_dev *src,
					  struct uwb_ie_drp *drp_ie)
{
	struct uwb_rsv *rsv;
	struct uwb_pal *pal;
	enum uwb_rsv_state state;

	rsv = uwb_rsv_alloc(rc);
	if (!rsv)
		return NULL;

	/* NOTE(review): rsv->rc is already set by uwb_rsv_alloc(). */
	rsv->rc = rc;
	rsv->owner = src;
	uwb_dev_get(rsv->owner);
	rsv->target.type = UWB_RSV_TARGET_DEV;
	rsv->target.dev = &rc->uwb_dev;
	uwb_dev_get(&rc->uwb_dev);
	rsv->type = uwb_ie_drp_type(drp_ie);
	rsv->stream = uwb_ie_drp_stream_index(drp_ie);
	uwb_drp_ie_to_bm(&rsv->mas, drp_ie);

	/* Default to denied; an accepting PAL overrides this. */
	rsv->state = UWB_RSV_STATE_T_DENIED;
	mutex_lock(&rc->uwb_dev.mutex);
	list_for_each_entry(pal, &rc->pals, node) {
		if (pal->new_rsv)
			pal->new_rsv(pal, rsv);
		if (rsv->state == UWB_RSV_STATE_T_ACCEPTED)
			break;
	}
	mutex_unlock(&rc->uwb_dev.mutex);

	list_add_tail(&rsv->rc_node, &rc->reservations);
	/* Reset to NONE so uwb_rsv_set_state() sees a real transition. */
	state = rsv->state;
	rsv->state = UWB_RSV_STATE_NONE;

	if (state == UWB_RSV_STATE_T_ACCEPTED
	    && uwb_drp_avail_reserve_pending(rc, &rsv->mas) == -EBUSY) {
		/* FIXME: accepted but MAS are busy -- no deny is sent. */
	} else {
		uwb_rsv_set_state(rsv, state);
	}

	return rsv;
}
795
796
797
798
799
800
801
802
803
804void uwb_rsv_get_usable_mas(struct uwb_rsv *rsv, struct uwb_mas_bm *mas)
805{
806 bitmap_zero(mas->bm, UWB_NUM_MAS);
807 bitmap_andnot(mas->bm, rsv->mas.bm, rsv->rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS);
808}
809EXPORT_SYMBOL_GPL(uwb_rsv_get_usable_mas);
810
811
812
813
814
815
816
817
818
819
820
821struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src,
822 struct uwb_ie_drp *drp_ie)
823{
824 struct uwb_rsv *rsv;
825
826 list_for_each_entry(rsv, &rc->reservations, rc_node) {
827 if (uwb_rsv_match(rsv, src, drp_ie))
828 return rsv;
829 }
830
831 if (uwb_ie_drp_owner(drp_ie))
832 return uwb_rsv_new_target(rc, src, drp_ie);
833
834 return NULL;
835}
836
837
838
839
840
841
842
843
844static bool uwb_rsv_update_all(struct uwb_rc *rc)
845{
846 struct uwb_rsv *rsv, *t;
847 bool ie_updated = false;
848
849 list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
850 if (!rsv->ie_valid) {
851 uwb_drp_ie_update(rsv);
852 ie_updated = true;
853 }
854 }
855
856 return ie_updated;
857}
858
/*
 * uwb_rsv_queue_update - queue the delayed beacon-group update work.
 * @rc: the radio controller
 *
 * The delay is one zone's worth of MAS, batching multiple state
 * changes into a single uwb_rsv_update_work() run.
 */
void uwb_rsv_queue_update(struct uwb_rc *rc)
{
	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;

	queue_delayed_work(rc->rsv_workq, &rc->rsv_update_work, usecs_to_jiffies(delay_us));
}
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
/*
 * uwb_rsv_sched_update - schedule an update of the DRP IEs.
 * @rc: the radio controller
 *
 * If the update work is already pending, nothing is done.  If a SET
 * DRP IE command is currently in flight (set_drp_ie_pending > 0), the
 * counter is bumped instead of queueing -- presumably the command's
 * completion path consumes it and requeues; confirm against the DRP
 * event handling code.  Runs under rc->rsvs_lock.
 */
void uwb_rsv_sched_update(struct uwb_rc *rc)
{
	spin_lock_irq(&rc->rsvs_lock);
	if (!delayed_work_pending(&rc->rsv_update_work)) {
		if (rc->set_drp_ie_pending > 0) {
			rc->set_drp_ie_pending++;
			goto unlock;
		}
		uwb_rsv_queue_update(rc);
	}
unlock:
	spin_unlock_irq(&rc->rsvs_lock);
}
901
902
903
904
905
/*
 * uwb_rsv_update_work - deferred update of reservation state in the
 * beacon group.
 * @work: rc->rsv_update_work
 *
 * Refreshes all stale DRP IEs and the DRP availability IE, then pushes
 * them to the hardware with uwb_rc_send_all_drp_ie() -- unless a SET
 * DRP IE command is already in flight (set_drp_ie_pending != 0).
 */
static void uwb_rsv_update_work(struct work_struct *work)
{
	struct uwb_rc *rc = container_of(work, struct uwb_rc,
					 rsv_update_work.work);
	bool ie_updated;

	mutex_lock(&rc->rsvs_mutex);

	ie_updated = uwb_rsv_update_all(rc);

	if (!rc->drp_avail.ie_valid) {
		uwb_drp_avail_ie_update(rc);
		ie_updated = true;
	}

	if (ie_updated && (rc->set_drp_ie_pending == 0))
		uwb_rc_send_all_drp_ie(rc);

	mutex_unlock(&rc->rsvs_mutex);
}
926
927static void uwb_rsv_alien_bp_work(struct work_struct *work)
928{
929 struct uwb_rc *rc = container_of(work, struct uwb_rc,
930 rsv_alien_bp_work.work);
931 struct uwb_rsv *rsv;
932
933 mutex_lock(&rc->rsvs_mutex);
934
935 list_for_each_entry(rsv, &rc->reservations, rc_node) {
936 if (rsv->type != UWB_DRP_TYPE_ALIEN_BP) {
937 uwb_rsv_callback(rsv);
938 }
939 }
940
941 mutex_unlock(&rc->rsvs_mutex);
942}
943
/*
 * Watchdog timer callback: defer the real work to process context,
 * since timeout handling takes rc->rsvs_mutex.
 */
static void uwb_rsv_timer(unsigned long arg)
{
	struct uwb_rsv *rsv = (struct uwb_rsv *)arg;

	queue_work(rsv->rc->rsv_workq, &rsv->handle_timeout_work);
}
950
951
952
953
954
955
956
/*
 * uwb_rsv_remove_all - remove all reservations (e.g. radio going down).
 * @rc: the radio controller
 *
 * Two-phase shutdown: first force every reservation to NONE and stop
 * its timer under the mutex, then -- with the mutex dropped so the
 * work items can take it -- cancel the update work and drain the
 * workqueue, and finally remove whatever reservations remain.  The
 * mutex must not be held across the flush or the timeout work would
 * deadlock.
 */
void uwb_rsv_remove_all(struct uwb_rc *rc)
{
	struct uwb_rsv *rsv, *t;

	mutex_lock(&rc->rsvs_mutex);
	list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
		if (rsv->state != UWB_RSV_STATE_NONE)
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
		del_timer_sync(&rsv->timer);
	}
	/* Stop any SET DRP IE completion from requeueing updates. */
	rc->set_drp_ie_pending = 0;
	mutex_unlock(&rc->rsvs_mutex);

	cancel_delayed_work_sync(&rc->rsv_update_work);
	flush_workqueue(rc->rsv_workq);

	mutex_lock(&rc->rsvs_mutex);
	list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
		uwb_rsv_remove(rsv);
	}
	mutex_unlock(&rc->rsvs_mutex);
}
980
/*
 * uwb_rsv_init - initialize per-controller reservation state.
 * @rc: the radio controller
 *
 * Sets up the reservation list, locks, work items, the DRP backoff
 * window (starting at half the minimum so the first doubling yields
 * the minimum) and its timer.
 */
void uwb_rsv_init(struct uwb_rc *rc)
{
	INIT_LIST_HEAD(&rc->reservations);
	INIT_LIST_HEAD(&rc->cnflt_alien_list);
	mutex_init(&rc->rsvs_mutex);
	spin_lock_init(&rc->rsvs_lock);
	INIT_DELAYED_WORK(&rc->rsv_update_work, uwb_rsv_update_work);
	INIT_DELAYED_WORK(&rc->rsv_alien_bp_work, uwb_rsv_alien_bp_work);
	rc->bow.can_reserve_extra_mases = true;
	rc->bow.total_expired = 0;
	rc->bow.window = UWB_DRP_BACKOFF_WIN_MIN >> 1;
	init_timer(&rc->bow.timer);
	rc->bow.timer.function = uwb_rsv_backoff_win_timer;
	rc->bow.timer.data = (unsigned long)&rc->bow;

	/*
	 * Inverts the local device's stream bitmap -- presumably seeding
	 * which stream indices may be handed out by uwb_rsv_get_stream();
	 * TODO confirm the initial value of rc->uwb_dev.streams here.
	 */
	bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS);
}
998
999int uwb_rsv_setup(struct uwb_rc *rc)
1000{
1001 char name[16];
1002
1003 snprintf(name, sizeof(name), "%s_rsvd", dev_name(&rc->uwb_dev.dev));
1004 rc->rsv_workq = create_singlethread_workqueue(name);
1005 if (rc->rsv_workq == NULL)
1006 return -ENOMEM;
1007
1008 return 0;
1009}
1010
/*
 * uwb_rsv_cleanup - tear down all reservations and the workqueue.
 * @rc: the radio controller
 */
void uwb_rsv_cleanup(struct uwb_rc *rc)
{
	uwb_rsv_remove_all(rc);
	destroy_workqueue(rc->rsv_workq);
}
1016