1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <net/bluetooth/bluetooth.h>
25#include <net/bluetooth/hci_core.h>
26#include <net/bluetooth/mgmt.h>
27
28#include "smp.h"
29#include "hci_request.h"
30
31#define HCI_REQ_DONE 0
32#define HCI_REQ_PEND 1
33#define HCI_REQ_CANCELED 2
34
35void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
36{
37 skb_queue_head_init(&req->cmd_q);
38 req->hdev = hdev;
39 req->err = 0;
40}
41
/* Hand a fully-built request over to the controller's command queue.
 *
 * The completion callback (plain or skb-returning, at most one of the
 * two) is attached to the *last* command in the request, marking the
 * end of the request for the event handling code.
 *
 * Returns 0 on success, req->err if building the request failed, or
 * -ENODATA for an empty request.
 */
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* Tag the last command with the completion callback. */
	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	/* Splice the whole request atomically onto the device queue so
	 * its commands stay contiguous.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
79
80int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
81{
82 return req_run(req, complete, NULL);
83}
84
85int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
86{
87 return req_run(req, NULL, complete);
88}
89
/* Completion callback used by the synchronous request helpers.
 *
 * Records the result (and a reference to the response skb, if any) in
 * the device and wakes the waiter sleeping on req_wait_q.  Only acts
 * while a synchronous request is actually pending.
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb); /* waiter owns this ref */
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
103
104void hci_req_sync_cancel(struct hci_dev *hdev, int err)
105{
106 BT_DBG("%s err 0x%2.2x", hdev->name, err);
107
108 if (hdev->req_status == HCI_REQ_PEND) {
109 hdev->req_result = err;
110 hdev->req_status = HCI_REQ_CANCELED;
111 wake_up_interruptible(&hdev->req_wait_q);
112 }
113}
114
/* Send a single HCI command and sleep until its completion (or the
 * specific @event, if non-zero) arrives or @timeout jiffies elapse.
 *
 * Returns the response skb (caller must kfree_skb() it), or ERR_PTR:
 *   -EINTR     a signal interrupted the wait,
 *   -ETIMEDOUT no completion within @timeout,
 *   -ENODATA   request completed without a response skb,
 *   other      negative error from queueing or from the controller
 *              status translated via bt_to_errno().
 *
 * Sleeps; must be called from process context.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue *before* submitting so a fast
	 * completion cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on a pending signal this returns with
	 * req_status still HCI_REQ_PEND, so a late completion may stash
	 * a req_skb that nobody consumes here — presumably cleaned up
	 * by the next synchronous request; confirm.
	 */
	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a negative errno here. */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	/* Reset the sync-request state and take ownership of the skb. */
	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
179
180struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
181 const void *param, u32 timeout)
182{
183 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
184}
185EXPORT_SYMBOL(__hci_cmd_sync);
186
187
/* Execute a request built by @func synchronously, waiting up to
 * @timeout jiffies for completion.  Caller must hold the request
 * synchronization lock (see hci_req_sync()).
 *
 * On return, *@hci_status (if non-NULL) holds the raw controller
 * status for HCI_REQ_DONE, or HCI_ERROR_UNSPECIFIED for every other
 * failure.  An empty request (-ENODATA from run) is treated as
 * success with status 0.
 */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	/* Let the caller populate the request. */
	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	/* Register on the wait queue before submitting so an immediate
	 * completion cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals does
		 * not need to send any commands.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): this early return leaves req_status pending and
	 * skips the req_skb cleanup below — presumably tolerated by the
	 * next request's setup; confirm.
	 */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a negative errno here. */
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	/* Drop any response skb; synchronous requests via this path do
	 * not hand it to the caller.
	 */
	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
271
272int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
273 unsigned long opt),
274 unsigned long opt, u32 timeout, u8 *hci_status)
275{
276 int ret;
277
278 if (!test_bit(HCI_UP, &hdev->flags))
279 return -ENETDOWN;
280
281
282 hci_req_sync_lock(hdev);
283 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
284 hci_req_sync_unlock(hdev);
285
286 return ret;
287}
288
289struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
290 const void *param)
291{
292 int len = HCI_COMMAND_HDR_SIZE + plen;
293 struct hci_command_hdr *hdr;
294 struct sk_buff *skb;
295
296 skb = bt_skb_alloc(len, GFP_ATOMIC);
297 if (!skb)
298 return NULL;
299
300 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
301 hdr->opcode = cpu_to_le16(opcode);
302 hdr->plen = plen;
303
304 if (plen)
305 memcpy(skb_put(skb, plen), param, plen);
306
307 BT_DBG("skb len %d", skb->len);
308
309 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
310 hci_skb_opcode(skb) = opcode;
311
312 return skb;
313}
314
315
/* Queue an HCI command onto @req, optionally tagging it with the
 * @event expected to complete it.  Errors are latched in req->err and
 * reported later by req_run(); once set, further additions become
 * no-ops.
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no
	 * point in queueing the HCI command; just bail out.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first command in a request marks the request boundary. */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
345
346void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
347 const void *param)
348{
349 hci_req_add_ev(req, opcode, plen, param, 0);
350}
351
/* Adjust BR/EDR page scan parameters for fast (interlaced, short
 * interval) or normal connectable mode.  Commands are only queued for
 * values that differ from the controller's current settings.
 */
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	/* Page scan type/activity commands require at least 1.2. */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	/* 11.25 msec page scan window in both modes. */
	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
386
387
388
389
390
391
392
/* Start or stop LE background (passive) scanning depending on whether
 * there are devices to connect to or to report, queueing the needed
 * commands on @req.  Bails out while the device is not ready, LE is
 * disabled, or discovery is in progress.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Background scanning never uses discovery filters, so make
	 * sure they are cleared before (re)configuring the scan.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* Nothing to connect to or report: stop the background
		 * scan if one is running.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we
		 * should leave the initiating of connections to the
		 * connect event handler; don't restart scanning here.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* Restart scanning so new parameters (e.g. an updated
		 * white list) take effect.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}
459
460void __hci_req_update_name(struct hci_request *req)
461{
462 struct hci_dev *hdev = req->hdev;
463 struct hci_cp_write_local_name cp;
464
465 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
466
467 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
468}
469
470#define PNP_INFO_SVCLASS_ID 0x1200
471
/* Append an EIR field listing the registered 16-bit service UUIDs to
 * @data (with @len bytes of space).  The field header is emitted
 * lazily on the first matching UUID; if the list does not fit, the
 * field type is downgraded to "incomplete" (EIR_UUID16_SOME).
 * Returns a pointer just past the written bytes (== @data if nothing
 * was written).
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte header plus at least one UUID. */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* The 16-bit value lives at offset 12 of the stored
		 * 128-bit UUID; skip values below 0x1100 and the PnP
		 * Information service class.
		 */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			/* Length byte starts at 1 to count the type. */
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next uuid */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
513
/* Append an EIR field listing the registered 32-bit service UUIDs to
 * @data (with @len bytes of space).  Same lazy-header / truncation
 * scheme as create_uuid16_list(); the 32-bit value is taken from
 * offset 12 of the stored 128-bit UUID.  Returns a pointer just past
 * the written bytes.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte header plus one 32-bit UUID. */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			/* Length byte starts at 1 to count the type. */
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next uuid */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
546
/* Append an EIR field listing the registered 128-bit service UUIDs to
 * @data (with @len bytes of space).  Same lazy-header / truncation
 * scheme as the 16/32-bit variants.  Returns a pointer just past the
 * written bytes.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte header plus one 16-byte UUID. */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			/* Length byte starts at 1 to count the type. */
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next uuid */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
579
/* Build the extended inquiry response payload into @data: local name,
 * TX power, device ID and the three UUID lists.  @data is expected to
 * be a zeroed buffer of HCI_MAX_EIR_LENGTH bytes (cp.data in the
 * caller).
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length: name plus the type byte. */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* Each helper returns the new write position and caps itself to
	 * the space remaining in the EIR buffer.
	 */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
627
628void __hci_req_update_eir(struct hci_request *req)
629{
630 struct hci_dev *hdev = req->hdev;
631 struct hci_cp_write_eir cp;
632
633 if (!hdev_is_powered(hdev))
634 return;
635
636 if (!lmp_ext_inq_capable(hdev))
637 return;
638
639 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
640 return;
641
642 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
643 return;
644
645 memset(&cp, 0, sizeof(cp));
646
647 create_eir(hdev, cp.data);
648
649 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
650 return;
651
652 memcpy(hdev->eir, cp.data, sizeof(cp.data));
653
654 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
655}
656
657void hci_req_add_le_scan_disable(struct hci_request *req)
658{
659 struct hci_cp_le_set_scan_enable cp;
660
661 memset(&cp, 0, sizeof(cp));
662 cp.enable = LE_SCAN_DISABLE;
663 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
664}
665
666static void add_to_white_list(struct hci_request *req,
667 struct hci_conn_params *params)
668{
669 struct hci_cp_le_add_to_white_list cp;
670
671 cp.bdaddr_type = params->addr_type;
672 bacpy(&cp.bdaddr, ¶ms->addr);
673
674 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
675}
676
/* Reconcile the controller's LE white list with the pending connection
 * and pending report lists, queueing add/remove commands on @req.
 *
 * Returns the scan filter policy to use: 0x01 when the white list can
 * be used for filtering, 0x00 when the host must accept all
 * advertisements instead (RPA in use, or too many devices).
 */
static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
					       &b->bdaddr, b->bdaddr_type) &&
		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       &b->bdaddr, b->bdaddr_type)) {
			struct hci_cp_le_del_from_white_list cp;

			cp.bdaddr_type = b->bdaddr_type;
			bacpy(&cp.bdaddr, &b->bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
				    sizeof(cp), &cp);
			continue;
		}

		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}
773
774static bool scan_use_rpa(struct hci_dev *hdev)
775{
776 return hci_dev_test_flag(hdev, HCI_PRIVACY);
777}
778
/* Queue the commands to configure and enable LE passive scanning:
 * optional random-address update, white list reconciliation, scan
 * parameters and scan enable.  Bails out silently if the random
 * address cannot be set up (e.g. RPA update deferred).
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller supports the Extended Scanner Filter
	 * Policies feature and privacy is enabled, also accept
	 * directed advertisements addressed to an RPA (policy 0x02/0x03).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}
831
832static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
833{
834 u8 instance = hdev->cur_adv_instance;
835 struct adv_info *adv_instance;
836
837
838 if (instance == 0x00)
839 return 0;
840
841 adv_instance = hci_find_adv_instance(hdev, instance);
842 if (!adv_instance)
843 return 0;
844
845
846
847
848 return adv_instance->scan_rsp_len;
849}
850
851void __hci_req_disable_advertising(struct hci_request *req)
852{
853 u8 enable = 0x00;
854
855 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
856}
857
/* Return the advertising flags for @instance.  Instance 0x00 (the
 * "default" instance) has no stored adv_info; its flags are derived
 * from the device flags instead.  Unknown instances yield 0.
 */
static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* Derive connectable/discoverable bits from the device
		 * flags set via the management interface.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}
891
/* Decide whether advertising should use a resolvable private address
 * given the instance @flags.
 */
static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* In limited privacy mode, don't use an RPA while discoverable
	 * and bondable (the identity address must be visible then).
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* Otherwise limited privacy mode also allows an RPA. */
	return true;
}
914
/* Queue the commands to (re)enable LE advertising with parameters
 * derived from the current advertising instance: address setup,
 * Set Advertising Parameters, Set Advertising Enable.
 */
void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u32 flags;

	/* Don't advertise while there is an LE connection. */
	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* Connectable if the instance asks for it or the global
	 * connectable setting is on.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	/* ADV_IND when connectable, ADV_SCAN_IND when only scan
	 * response data exists, ADV_NONCONN_IND otherwise.
	 */
	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
971
/* Append the local device name as an AD field at @ptr (which already
 * holds @ad_len bytes), preferring the complete name, then the
 * configured short name, then a truncated complete name.  Returns the
 * new total AD length (unchanged if nothing fits or no name is set).
 */
u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);

	/* use shortened full name if present, we already know that name
	 * is longer then HCI_MAX_SHORT_NAME_LENGTH
	 */
	if (complete_len) {
		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
				       sizeof(name));
	}

	return ad_len;
}
1008
1009static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1010{
1011 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1012}
1013
1014static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1015{
1016 u8 scan_rsp_len = 0;
1017
1018 if (hdev->appearance) {
1019 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1020 }
1021
1022 return append_local_name(hdev, ptr, scan_rsp_len);
1023}
1024
/* Build the scan response data for advertising @instance into @ptr:
 * optional appearance, the instance's own scan_rsp_data, and an
 * optional local name.  Returns the total length, or 0 for an
 * unknown instance.
 *
 * NOTE(review): no explicit bound check before the memcpy here —
 * presumably scan_rsp_len was validated when the instance was added;
 * confirm against hci_add_adv_instance().
 */
static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}
1052
/* Queue a Set Scan Response Data command for @instance (0 = default
 * data) if the data differs from what the controller currently has.
 */
void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	if (instance)
		len = create_instance_scan_rsp_data(hdev, instance, cp.data);
	else
		len = create_default_scan_rsp_data(hdev, cp.data);

	/* Skip the command when the cached copy already matches. */
	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
1080
/* Build the advertising data for @instance into @ptr: optional Flags
 * field, the instance's stored adv_data (for instance != 0), and an
 * optional TX power field.  Returns the total length, or 0 for an
 * unknown non-zero instance.
 */
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the
	 * general and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field".
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
1148
/* Queue a Set Advertising Data command for @instance if the data
 * differs from what the controller currently has.
 */
void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
1174
1175int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1176{
1177 struct hci_request req;
1178
1179 hci_req_init(&req, hdev);
1180 __hci_req_update_adv_data(&req, instance);
1181
1182 return hci_req_run(&req, NULL);
1183}
1184
1185static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1186{
1187 BT_DBG("%s status %u", hdev->name, status);
1188}
1189
/* Re-enable advertising after it was implicitly stopped (e.g. by a
 * connection): reschedule the current instance, or fall back to the
 * default instance 0x00.  No-op when neither the advertising flag is
 * set nor any instances exist.
 */
void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		__hci_req_update_adv_data(&req, 0x00);
		__hci_req_update_scan_rsp_data(&req, 0x00);
		__hci_req_enable_advertising(&req);
	}

	hci_req_run(&req, adv_enable_complete);
}
1211
/* Delayed work run when the current advertising instance's timeout
 * expires: remove/rotate the instance and disable advertising if no
 * instances remain.
 */
static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	/* Instance 0x00 has no timeout; nothing to expire. */
	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}
1242
/* Make @instance the current advertising instance and arm its expiry
 * timer.  With @force false, an already-active matching instance is
 * left untouched.
 *
 * Returns 0 on success, -EPERM while legacy advertising is enabled or
 * no instances exist, -EBUSY while another instance timeout is armed,
 * or -ENOENT for an unknown instance.
 */
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	hdev->adv_instance_timeout = timeout;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->adv_instance_expire,
			   msecs_to_jiffies(timeout * 1000));

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	__hci_req_update_adv_data(req, instance);
	__hci_req_update_scan_rsp_data(req, instance);
	__hci_req_enable_advertising(req);

	return 0;
}
1302
1303static void cancel_adv_timeout(struct hci_dev *hdev)
1304{
1305 if (hdev->adv_instance_timeout) {
1306 hdev->adv_instance_timeout = 0;
1307 cancel_delayed_work(&hdev->adv_instance_expire);
1308 }
1309}
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
/* Remove advertising instance(s) and notify via mgmt.
 *
 * @instance == 0x00 removes all instances; with @force false, only
 * instances whose timeout has run out are removed.  When the current
 * instance is removed and @req is given (and the device is powered,
 * without legacy advertising enabled), the next instance is
 * scheduled on @req.  @sk may be NULL (e.g. from the expiry worker).
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
				struct hci_request *req, u8 instance,
				bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			/* Keep instances that still have time left,
			 * unless removal is forced.
			 */
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}
1376
/* Queue a Set Random Address command for @rpa, unless the controller
 * is currently advertising or initiating an LE connection — the
 * random address cannot be changed then, so the update is deferred by
 * setting HCI_RPA_EXPIRED (the next advertising/scan/connect setup
 * will retry).
 */
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}
1400
/* Select (and, if needed, queue commands to program) the own address
 * type for advertising/scanning/connecting.
 *
 * Priority: RPA when @use_rpa; a fresh non-resolvable private address
 * when @require_privacy; the static random address when configured or
 * forced; otherwise the public address.  *@own_addr_type is set
 * accordingly.  Returns 0 on success or a negative error if RPA
 * generation fails.
 */
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      bool use_rpa, u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		/* Reuse the current RPA if it has not expired and is
		 * already programmed as the random address.
		 */
		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		/* Arm the RPA rotation timer. */
		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use an non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
1488
1489static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1490{
1491 struct bdaddr_list *b;
1492
1493 list_for_each_entry(b, &hdev->whitelist, list) {
1494 struct hci_conn *conn;
1495
1496 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1497 if (!conn)
1498 return true;
1499
1500 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1501 return true;
1502 }
1503
1504 return false;
1505}
1506
1507void __hci_req_update_scan(struct hci_request *req)
1508{
1509 struct hci_dev *hdev = req->hdev;
1510 u8 scan;
1511
1512 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1513 return;
1514
1515 if (!hdev_is_powered(hdev))
1516 return;
1517
1518 if (mgmt_powering_down(hdev))
1519 return;
1520
1521 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1522 disconnected_whitelist_entries(hdev))
1523 scan = SCAN_PAGE;
1524 else
1525 scan = SCAN_DISABLED;
1526
1527 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1528 scan |= SCAN_INQUIRY;
1529
1530 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1531 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1532 return;
1533
1534 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1535}
1536
1537static int update_scan(struct hci_request *req, unsigned long opt)
1538{
1539 hci_dev_lock(req->hdev);
1540 __hci_req_update_scan(req);
1541 hci_dev_unlock(req->hdev);
1542 return 0;
1543}
1544
/* Workqueue entry point: synchronously run the scan-state update */
static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}
1551
/* hci_req_sync callback: re-apply everything affected by a change of the
 * connectable setting (scan state, advertising data, background scan).
 */
static int connectable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	__hci_req_update_scan(req);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		__hci_req_update_adv_data(req, hdev->cur_adv_instance);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances))
		__hci_req_enable_advertising(req);

	__hci_update_background_scan(req);

	hci_dev_unlock(hdev);

	return 0;
}
1578
/* Workqueue entry point: run the connectable update and report the
 * resulting status to the management interface.
 */
static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}
1588
1589static u8 get_service_classes(struct hci_dev *hdev)
1590{
1591 struct bt_uuid *uuid;
1592 u8 val = 0;
1593
1594 list_for_each_entry(uuid, &hdev->uuids, list)
1595 val |= uuid->svc_hint;
1596
1597 return val;
1598}
1599
1600void __hci_req_update_class(struct hci_request *req)
1601{
1602 struct hci_dev *hdev = req->hdev;
1603 u8 cod[3];
1604
1605 BT_DBG("%s", hdev->name);
1606
1607 if (!hdev_is_powered(hdev))
1608 return;
1609
1610 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1611 return;
1612
1613 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1614 return;
1615
1616 cod[0] = hdev->minor_class;
1617 cod[1] = hdev->major_class;
1618 cod[2] = get_service_classes(hdev);
1619
1620 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1621 cod[1] |= 0x20;
1622
1623 if (memcmp(cod, hdev->dev_class, 3) == 0)
1624 return;
1625
1626 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1627}
1628
/* Queue a Write Current IAC LAP command matching the discoverable mode:
 * LIAC + GIAC when limited discoverable, GIAC only otherwise.
 */
static void write_iac(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode: LIAC (0x9e8b00) first, then
		 * GIAC (0x9e8b33); capped at two IACs or whatever the
		 * controller supports.
		 */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC, little-endian */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC, little-endian */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode: GIAC (0x9e8b33) only */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	/* Command length: num_iac byte plus 3 bytes per LAP */
	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
		    (cp.num_iac * 3) + 1, &cp);
}
1657
/* hci_req_sync callback: re-apply everything affected by a change of the
 * discoverable setting (IAC, scan state, CoD, advertising data).
 */
static int discoverable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		write_iac(req);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
	}

	/* Advertising instances don't use the global discoverable setting,
	 * so only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		__hci_req_update_adv_data(req, 0x00);

		/* Discoverable mode affects the local advertising
		 * address in limited privacy mode, so re-enable
		 * advertising to refresh it.
		 */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			__hci_req_enable_advertising(req);
	}

	hci_dev_unlock(hdev);

	return 0;
}
1687
/* Workqueue entry point: run the discoverable update and report the
 * resulting status to the management interface.
 */
static void discoverable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discoverable_update);
	u8 status;

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_discoverable_complete(hdev, status);
}
1697
/* Queue the HCI command(s) that abort @conn with @reason, chosen by how
 * far the connection has progressed. Connections in states that have no
 * controller-side representation are simply marked closed.
 */
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		/* Established link: issue the proper disconnect command
		 * (physical-link variant for AMP links).
		 */
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		/* Outgoing attempt in progress: cancel it */
		if (conn->type == LE_LINK) {
			/* Still only scanning for the peer — nothing has
			 * been sent to the controller yet.
			 */
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			/* Create Connection Cancel only exists since
			 * Bluetooth 1.2.
			 */
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		/* Incoming request pending: reject it */
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}
1766
1767static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1768{
1769 if (status)
1770 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
1771}
1772
1773int hci_abort_conn(struct hci_conn *conn, u8 reason)
1774{
1775 struct hci_request req;
1776 int err;
1777
1778 hci_req_init(&req, conn->hdev);
1779
1780 __hci_abort_conn(&req, conn, reason);
1781
1782 err = hci_req_run(&req, abort_conn_complete);
1783 if (err && err != -ENODATA) {
1784 BT_ERR("Failed to run HCI request: err %d", err);
1785 return err;
1786 }
1787
1788 return 0;
1789}
1790
/* hci_req_sync callback: update the background scan under the device lock */
static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_update_background_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}
1798
/* Workqueue entry point: run the background-scan update; on failure,
 * fail any pending LE connection attempt so its state gets cleaned up.
 */
static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}
1819
/* hci_req_sync callback: queue the command disabling LE scanning */
static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req);
	return 0;
}
1825
1826static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1827{
1828 u8 length = opt;
1829 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
1830 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
1831 struct hci_cp_inquiry cp;
1832
1833 BT_DBG("%s", req->hdev->name);
1834
1835 hci_dev_lock(req->hdev);
1836 hci_inquiry_cache_flush(req->hdev);
1837 hci_dev_unlock(req->hdev);
1838
1839 memset(&cp, 0, sizeof(cp));
1840
1841 if (req->hdev->discovery.limited)
1842 memcpy(&cp.lap, liac, sizeof(cp.lap));
1843 else
1844 memcpy(&cp.lap, giac, sizeof(cp.lap));
1845
1846 cp.length = length;
1847
1848 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1849
1850 return 0;
1851}
1852
/* Delayed-work entry point: stop LE scanning when the discovery timeout
 * fires, then either finish discovery or interleave into BR/EDR inquiry.
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* A pending scan restart would re-enable what we're disabling */
	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Failed to disable LE scan: status 0x%02x", status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running LE only scan, change discovery state. If
	 * we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we will resolve remote device name, do not change
	 * discovery state.
	 */
	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	/* No simultaneous-discovery support: interleave by starting the
	 * BR/EDR inquiry now that LE scanning is off.
	 */
	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}
1910
/* hci_req_sync callback: restart LE scanning by disabling and immediately
 * re-enabling it (used to flush a controller's strict duplicate filter).
 */
static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_enable cp;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	hci_req_add_le_scan_disable(req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	return 0;
}
1929
1930static void le_scan_restart_work(struct work_struct *work)
1931{
1932 struct hci_dev *hdev = container_of(work, struct hci_dev,
1933 le_scan_restart.work);
1934 unsigned long timeout, duration, scan_start, now;
1935 u8 status;
1936
1937 BT_DBG("%s", hdev->name);
1938
1939 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
1940 if (status) {
1941 BT_ERR("Failed to restart LE scan: status %d", status);
1942 return;
1943 }
1944
1945 hci_dev_lock(hdev);
1946
1947 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
1948 !hdev->discovery.scan_start)
1949 goto unlock;
1950
1951
1952
1953
1954
1955
1956 duration = hdev->discovery.scan_duration;
1957 scan_start = hdev->discovery.scan_start;
1958 now = jiffies;
1959 if (now - scan_start <= duration) {
1960 int elapsed;
1961
1962 if (now >= scan_start)
1963 elapsed = now - scan_start;
1964 else
1965 elapsed = ULONG_MAX - scan_start + now;
1966
1967 timeout = duration - elapsed;
1968 } else {
1969 timeout = 0;
1970 }
1971
1972 queue_delayed_work(hdev->req_workqueue,
1973 &hdev->le_scan_disable, timeout);
1974
1975unlock:
1976 hci_dev_unlock(hdev);
1977}
1978
1979static void disable_advertising(struct hci_request *req)
1980{
1981 u8 enable = 0x00;
1982
1983 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1984}
1985
1986static int active_scan(struct hci_request *req, unsigned long opt)
1987{
1988 uint16_t interval = opt;
1989 struct hci_dev *hdev = req->hdev;
1990 struct hci_cp_le_set_scan_param param_cp;
1991 struct hci_cp_le_set_scan_enable enable_cp;
1992 u8 own_addr_type;
1993 int err;
1994
1995 BT_DBG("%s", hdev->name);
1996
1997 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
1998 hci_dev_lock(hdev);
1999
2000
2001
2002
2003 if (hci_lookup_le_connect(hdev)) {
2004 hci_dev_unlock(hdev);
2005 return -EBUSY;
2006 }
2007
2008 cancel_adv_timeout(hdev);
2009 hci_dev_unlock(hdev);
2010
2011 disable_advertising(req);
2012 }
2013
2014
2015
2016
2017
2018 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2019 hci_req_add_le_scan_disable(req);
2020
2021
2022
2023
2024
2025 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2026 &own_addr_type);
2027 if (err < 0)
2028 own_addr_type = ADDR_LE_DEV_PUBLIC;
2029
2030 memset(¶m_cp, 0, sizeof(param_cp));
2031 param_cp.type = LE_SCAN_ACTIVE;
2032 param_cp.interval = cpu_to_le16(interval);
2033 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
2034 param_cp.own_address_type = own_addr_type;
2035
2036 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
2037 ¶m_cp);
2038
2039 memset(&enable_cp, 0, sizeof(enable_cp));
2040 enable_cp.enable = LE_SCAN_ENABLE;
2041 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2042
2043 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
2044 &enable_cp);
2045
2046 return 0;
2047}
2048
2049static int interleaved_discov(struct hci_request *req, unsigned long opt)
2050{
2051 int err;
2052
2053 BT_DBG("%s", req->hdev->name);
2054
2055 err = active_scan(req, opt);
2056 if (err)
2057 return err;
2058
2059 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2060}
2061
/* Kick off device discovery of the configured type, writing the HCI
 * status to *@status and scheduling the LE-scan timeout where relevant.
 */
static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double LE scan
			 * interval to leave the controller time to do
			 * BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}
2130
/* Queue the commands needed to stop the current discovery activity
 * (inquiry, LE scan, pending name resolution).
 *
 * Returns true if at least one command was queued.
 */
bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			/* Stop the pending timeout; we're disabling now */
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		/* Cancel an in-flight remote name request, if any */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}
2177
/* hci_req_sync callback: stop discovery under the device lock */
static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}
2186
/* Workqueue entry point: drive discovery state transitions requested by
 * the management interface (start or stop).
 */
static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}
2213
/* Delayed-work entry point: the discoverable timeout fired — clear the
 * discoverable flags and push the new state to the controller.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}
2237
/* hci_req_sync callback run on power-on: bring the controller's host
 * configuration (SSP, SC, LE host support, advertising, link security,
 * scan/class/name/EIR) in line with the stack's current settings.
 */
static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 link_sec;

	hci_dev_lock(hdev);

	/* Enable SSP (and, if applicable, Secure Connections) on the
	 * controller when the host has it enabled but the controller's
	 * host feature bits don't reflect that yet.
	 */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set).
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		/* Make sure the controller has good default advertising
		 * and scan response data, or schedule the first
		 * advertising instance if any are registered.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    list_empty(&hdev->adv_instances)) {
			__hci_req_update_adv_data(req, 0x00);
			__hci_req_update_scan_rsp_data(req, 0x00);

			if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
				__hci_req_enable_advertising(req);
		} else if (!list_empty(&hdev->adv_instances)) {
			struct adv_info *adv_instance;

			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			__hci_req_schedule_adv_instance(req,
							adv_instance->instance,
							true);
		}
	}

	/* Sync the authentication-enable setting with the controller */
	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);
	return 0;
}
2317
/* Synchronously apply the power-on configuration to the controller.
 *
 * Returns the result of __hci_req_sync().
 */
int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only
	 * when successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly
	 * decide if the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}
2330
/* Initialize all the request-related work structs of @hdev. Called once
 * during controller setup; counterpart of hci_request_cancel_all().
 */
void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}
2343
/* Cancel any pending synchronous request and flush all request-related
 * work items; called when the controller goes away or powers down.
 */
void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	/* Only cancel the expire work if an instance timeout is actually
	 * armed, and clear the timeout so it won't be acted upon later.
	 */
	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}
2362