/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}
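
/* Illustrative sketch (not taken from this file) of how callers typically
 * use the request API: build up a batch of commands on a request and run
 * it with an optional completion callback. The callback name below is
 * hypothetical:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	err = hci_req_run(&req, my_complete);
 *
 * All queued commands are spliced onto hdev->cmd_q in one go, and the
 * completion callback is attached to the last command of the request.
 */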

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
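
/* Example (illustrative only, not from this file): reading the
 * controller's version information synchronously and consuming the
 * resulting event skb:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	// parse skb->data as struct hci_rp_read_local_version
 *	kfree_skb(skb);
 *
 * These helpers share hdev->req_status and hdev->req_skb, so callers are
 * expected to serialize through hci_req_sync_lock().
 */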

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals
		 * doesn't trigger any commands to be sent. This is
		 * normal behavior and should not trigger an error
		 * return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID	0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		/* If the device is neither in pend_le_conns nor
		 * pend_le_reports then remove it from the white list.
		 */
		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
					       &b->bdaddr, b->bdaddr_type) &&
		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       &b->bdaddr, b->bdaddr_type)) {
			struct hci_cp_le_del_from_white_list cp;

			cp.bdaddr_type = b->bdaddr_type;
			bacpy(&cp.bdaddr, &b->bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
				    sizeof(cp), &cp);
			continue;
		}

		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter polices 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u32 flags;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);

	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
	if (complete_len) {
		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
				       sizeof(name));
	}

	return ad_len;
}
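
/* Worked example (illustrative; the name "Kernel" is hypothetical): a
 * 6-byte complete name that fits within HCI_MAX_SHORT_NAME_LENGTH is
 * emitted as the AD structure
 *
 *	08 09 4b 65 72 6e 65 6c 00
 *
 * where 0x08 is the length field (type byte plus seven data bytes),
 * 0x09 is EIR_NAME_COMPLETE, and the data includes the NUL terminator
 * because complete_len + 1 is passed as the data length above.
 */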

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 scan_rsp_len = 0;

	if (hdev->appearance)
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);

	return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);

	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	if (instance)
		len = create_instance_scan_rsp_data(hdev, instance, cp.data);
	else
		len = create_default_scan_rsp_data(hdev, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		__hci_req_update_adv_data(&req, 0x00);
		__hci_req_update_scan_rsp_data(&req, 0x00);
		__hci_req_enable_advertising(&req);
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a
	 * timeout in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the
	 * duration then the timeout corresponds to the duration, otherwise
	 * it will be reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	hdev->adv_instance_timeout = timeout;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->adv_instance_expire,
			   msecs_to_jiffies(timeout * 1000));

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	__hci_req_update_adv_data(req, instance);
	__hci_req_update_scan_rsp_data(req, instance);
	__hci_req_enable_advertising(req);

	return 0;
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
				struct hci_request *req, u8 instance,
				bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      bool use_rpa, u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	__hci_req_update_scan(req);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		__hci_req_update_adv_data(req, hdev->cur_adv_instance);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances))
		__hci_req_enable_advertising(req);

	__hci_update_background_scan(req);

	hci_dev_unlock(hdev);

	return 0;
}

static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

static void write_iac(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
		    (cp.num_iac * 3) + 1, &cp);
}

static int discoverable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		write_iac(req);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		__hci_req_update_adv_data(req, 0x00);

		/* Discoverable mode affects the local advertising
		 * address in limited privacy mode.
		 */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			__hci_req_enable_advertising(req);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discoverable_update);
	u8 status;

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_discoverable_complete(hdev, status);
}

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		bt_dev_err(conn->hdev, "failed to run HCI request: err %d",
			   err);
		return err;
	}

	return 0;
}
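
/* Illustrative caller (not from this file): tearing down a connection
 * with the standard "remote user terminated connection" reason:
 *
 *	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 *
 * The helper picks the right HCI command (Disconnect, Create Connection
 * Cancel or Reject Connection Request) based on the connection state.
 */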

static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_update_background_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req);
	return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", req->hdev->name);

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));

	if (req->hdev->discovery.limited)
		memcpy(&cp.lap, liac, sizeof(cp.lap));
	else
		memcpy(&cp.lap, giac, sizeof(cp.lap));

	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
			   status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running LE only scan, change discovery state. If
	 * we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we will resolve remote device name, do not change
	 * discovery state.
	 */
	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}

static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_enable cp;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	hci_req_add_le_scan_disable(req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	BT_DBG("%s", hdev->name);

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to restart LE scan: status %d",
			   status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}

static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	u8 own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		hci_dev_lock(hdev);

		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_lookup_le_connect(hdev)) {
			hci_dev_unlock(hdev);
			return -EBUSY;
		}

		cancel_adv_timeout(hdev);
		hci_dev_unlock(hdev);

		disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
					&own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(interval);
	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
	param_cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);

	return 0;
}

static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * is best effort in the scope of LE scanning.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}

bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}

static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 link_sec;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    list_empty(&hdev->adv_instances)) {
			__hci_req_update_adv_data(req, 0x00);
			__hci_req_update_scan_rsp_data(req, 0x00);

			if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
				__hci_req_enable_advertising(req);
		} else if (!list_empty(&hdev->adv_instances)) {
			struct adv_info *adv_instance;

			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			__hci_req_schedule_adv_instance(req,
							adv_instance->instance,
							true);
		}
	}

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);
	return 0;
}

int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}

void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}