/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}
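
/* Example (sketch): queue two commands and run them as a single request,
 * assuming a caller-supplied completion callback my_complete_cb with the
 * hci_req_complete_t signature:
 *
 *        struct hci_request req;
 *
 *        hci_req_init(&req, hdev);
 *        hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *        hci_req_add(&req, HCI_OP_READ_BD_ADDR, 0, NULL);
 *        err = hci_req_run(&req, my_complete_cb);
 *
 * The callback runs once, after the last queued command has completed.
 */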

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
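
/* Example (sketch): send a single command synchronously and consume the
 * Command Complete parameters. This is typically done from a driver's
 * setup callback where sleeping is allowed:
 *
 *        struct sk_buff *skb;
 *
 *        skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *                             HCI_CMD_TIMEOUT);
 *        if (IS_ERR(skb))
 *                return PTR_ERR(skb);
 *        ...
 *        kfree_skb(skb);
 */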

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error response.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        hci_req_sync_unlock(hdev);

        return ret;
}
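
/* Example (sketch): a request-builder callback as passed to hci_req_sync()
 * above; update_class_req is a hypothetical name, modeled on the helpers
 * later in this file:
 *
 *        static int update_class_req(struct hci_request *req,
 *                                    unsigned long opt)
 *        {
 *                hci_dev_lock(req->hdev);
 *                __hci_req_update_class(req);
 *                hci_dev_unlock(req->hdev);
 *                return 0;
 *        }
 *
 *        err = hci_req_sync(hdev, update_class_req, 0, HCI_CMD_TIMEOUT,
 *                           &status);
 */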

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}
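
/* The skb returned above carries a standard HCI command packet: a 3 byte
 * header consisting of the opcode (little-endian u16) and the parameter
 * length, followed by plen bytes of parameters.
 */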

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                BT_ERR("%s no memory for command (opcode 0x%4.4x)",
                       hdev->name, opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = PAGE_SCAN_TYPE_STANDARD;

                /* default 1.28 sec page scan */
                acp.interval = cpu_to_le16(0x0800);
        }

        acp.window = cpu_to_le16(0x0012);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure to set proper values for RSSI threshold and UUID
         * filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there is no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req);

                hci_req_add_le_passive_scan(req);

                BT_DBG("%s starting background scanning", hdev->name);
        }
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID 0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 4)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                u16 uuid16;

                if (uuid->size != 16)
                        continue;

                uuid16 = get_unaligned_le16(&uuid->uuid[12]);
                if (uuid16 < 0x1100)
                        continue;

                if (uuid16 == PNP_INFO_SVCLASS_ID)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID16_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u16) > len) {
                        uuids_start[1] = EIR_UUID16_SOME;
                        break;
                }

                *ptr++ = (uuid16 & 0x00ff);
                *ptr++ = (uuid16 & 0xff00) >> 8;
                uuids_start[0] += sizeof(uuid16);
        }

        return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 6)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 32)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID32_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u32) > len) {
                        uuids_start[1] = EIR_UUID32_SOME;
                        break;
                }

                memcpy(ptr, &uuid->uuid[12], sizeof(u32));
                ptr += sizeof(u32);
                uuids_start[0] += sizeof(u32);
        }

        return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 18)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 128)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID128_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + 16 > len) {
                        uuids_start[1] = EIR_UUID128_SOME;
                        break;
                }

                memcpy(ptr, uuid->uuid, 16);
                ptr += 16;
                uuids_start[0] += 16;
        }

        return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
        u8 *ptr = data;
        size_t name_len;

        name_len = strlen(hdev->dev_name);

        if (name_len > 0) {
                /* EIR Data type */
                if (name_len > 48) {
                        name_len = 48;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* EIR Data length */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ptr += (name_len + 2);
        }

        if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;

                ptr += 3;
        }

        if (hdev->devid_source > 0) {
                ptr[0] = 9;
                ptr[1] = EIR_DEVICE_ID;

                put_unaligned_le16(hdev->devid_source, ptr + 2);
                put_unaligned_le16(hdev->devid_vendor, ptr + 4);
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);

                ptr += 10;
        }

        ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        create_eir(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
                              struct hci_conn_params *params)
{
        struct hci_cp_le_add_to_white_list cp;

        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        uint8_t white_list_entries = 0;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                /* If the device is neither in pend_le_conns nor
                 * pend_le_reports then remove it from the whitelist.
                 */
                if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                               &b->bdaddr, b->bdaddr_type) &&
                    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                               &b->bdaddr, b->bdaddr_type)) {
                        struct hci_cp_le_del_from_white_list cp;

                        cp.bdaddr_type = b->bdaddr_type;
                        bacpy(&cp.bdaddr, &b->bdaddr);

                        hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
                                    sizeof(cp), &cp);
                        continue;
                }

                if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
        }

        /* Since all no longer valid white list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of the devices is larger than the list of
         * available white list entries in the controller, then
         * just abort and return filter policy value to not use the
         * white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* Select filter policy to use white list */
        return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;

        /* Set require_privacy to false since no SCAN_REQ are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        /* When the controller is using random resolvable addresses and
         * with that having LE privacy enabled, then controllers with
         * Extended Scanner Filter Policies support can now enable support
         * for handling directed advertising.
         *
         * So instead of using filter policies 0x00 (no whitelist)
         * and 0x01 (whitelist enabled) use the new filter policies
         * 0x02 (no whitelist) and 0x03 (whitelist enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_PASSIVE;
        param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
        param_cp.window = cpu_to_le16(hdev->le_scan_window);
        param_cp.own_address_type = own_addr_type;
        param_cp.filter_policy = filter_policy;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);
}
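
/* Note: hdev->le_scan_interval and hdev->le_scan_window used above are
 * expressed in units of 0.625 ms, as defined for the HCI LE Set Scan
 * Parameters command.
 */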

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
        u8 instance = hdev->cur_adv_instance;
        struct adv_info *adv_instance;

        /* Ignore instance 0 */
        if (instance == 0x00)
                return 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Take into account the "appearance" and "local-name" flags
         * here. These are currently being ignored as they are not supported.
         */
        return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
        u8 enable = 0x00;

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
        u32 flags;
        struct adv_info *adv_instance;

        if (instance == 0x00) {
                /* Instance 0 always manages the "Tx Power" and "Flags"
                 * fields
                 */
                flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

                /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
                 * corresponds to the "connectable" instance flag.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
                        flags |= MGMT_ADV_FLAG_CONNECTABLE;

                if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
                else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_DISCOV;

                return flags;
        }

        adv_instance = hci_find_adv_instance(hdev, instance);

        /* Return 0 when we got an invalid instance identifier. */
        if (!adv_instance)
                return 0;

        return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
        /* If privacy is not enabled don't use RPA */
        if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
                return false;

        /* If basic privacy mode is enabled use RPA */
        if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                return true;

        /* If limited privacy mode is enabled don't use RPA if we're
         * both discoverable and bondable.
         */
        if ((flags & MGMT_ADV_FLAG_DISCOV) &&
            hci_dev_test_flag(hdev, HCI_BONDABLE))
                return false;

        /* We're neither bondable nor discoverable in the limited
         * privacy mode, therefore use RPA.
         */
        return true;
}

void __hci_req_enable_advertising(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_param cp;
        u8 own_addr_type, enable = 0x01;
        bool connectable;
        u32 flags;

        if (hci_conn_num(hdev, LE_LINK) > 0)
                return;

        if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                __hci_req_disable_advertising(req);

        /* Clear the HCI_LE_ADV bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);

        flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

        /* If the "connectable" instance flag was not set, then choose between
         * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        if (hci_update_random_address(req, !connectable,
                                      adv_use_rpa(hdev, flags),
                                      &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));
        cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
        cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

        if (connectable)
                cp.type = LE_ADV_IND;
        else if (get_cur_adv_instance_scan_rsp_len(hdev))
                cp.type = LE_ADV_SCAN_IND;
        else
                cp.type = LE_ADV_NONCONN_IND;

        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;

        hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0;
        size_t name_len;

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
                                        u8 *ptr)
{
        struct adv_info *adv_instance;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Set the appropriate entries based on advertising instance
         * flags here once flags other than 0 are supported.
         */
        memcpy(ptr, adv_instance->scan_rsp_data,
               adv_instance->scan_rsp_len);

        return adv_instance->scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_rsp_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        if (instance)
                len = create_instance_scan_rsp_data(hdev, instance, cp.data);
        else
                len = create_default_scan_rsp_data(hdev, cp.data);

        if (hdev->scan_rsp_data_len == len &&
            !memcmp(cp.data, hdev->scan_rsp_data, len))
                return;

        memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
        hdev->scan_rsp_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
        struct adv_info *adv_instance = NULL;
        u8 ad_len = 0, flags = 0;
        u32 instance_flags;

        /* Return 0 when the current instance identifier is invalid. */
        if (instance) {
                adv_instance = hci_find_adv_instance(hdev, instance);
                if (!adv_instance)
                        return 0;
        }

        instance_flags = get_adv_instance_flags(hdev, instance);

        /* The Add Advertising command allows userspace to set both the general
         * and limited discoverable flags.
         */
        if (instance_flags & MGMT_ADV_FLAG_DISCOV)
                flags |= LE_AD_GENERAL;

        if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
                flags |= LE_AD_LIMITED;

        if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
                /* If a discovery flag wasn't provided, simply use the global
                 * settings.
                 */
                if (!flags)
                        flags |= mgmt_get_adv_discov_flags(hdev);

                if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                        flags |= LE_AD_NO_BREDR;

                /* If flags would still be empty, then there is no need to
                 * include the "Flags" AD field.
                 */
                if (flags) {
                        ptr[0] = 0x02;
                        ptr[1] = EIR_FLAGS;
                        ptr[2] = flags;

                        ad_len += 3;
                        ptr += 3;
                }
        }

        if (adv_instance) {
                memcpy(ptr, adv_instance->adv_data,
                       adv_instance->adv_data_len);
                ad_len += adv_instance->adv_data_len;
                ptr += adv_instance->adv_data_len;
        }

        /* Provide Tx Power only if we can provide a valid value for it */
        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
            (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
                ptr[0] = 0x02;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8)hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_instance_adv_data(hdev, instance, cp.data);

        /* There's nothing to do if the data hasn't changed */
        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_req_update_adv_data(&req, instance);

        return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
        struct hci_request req;

        if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
            list_empty(&hdev->adv_instances))
                return;

        hci_req_init(&req, hdev);

        if (hdev->cur_adv_instance) {
                __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
                                                true);
        } else {
                __hci_req_update_adv_data(&req, 0x00);
                __hci_req_update_scan_rsp_data(&req, 0x00);
                __hci_req_enable_advertising(&req);
        }

        hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            adv_instance_expire.work);

        struct hci_request req;
        u8 instance;

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hdev->adv_instance_timeout = 0;

        instance = hdev->cur_adv_instance;
        if (instance == 0x00)
                goto unlock;

        hci_req_init(&req, hdev);

        hci_req_clear_adv_instance(hdev, &req, instance, false);

        if (list_empty(&hdev->adv_instances))
                __hci_req_disable_advertising(&req);

        hci_req_run(&req, NULL);

unlock:
        hci_dev_unlock(hdev);
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
                                    bool force)
{
        struct hci_dev *hdev = req->hdev;
        struct adv_info *adv_instance = NULL;
        u16 timeout;

        if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
            list_empty(&hdev->adv_instances))
                return -EPERM;

        if (hdev->adv_instance_timeout)
                return -EBUSY;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return -ENOENT;

        /* A zero timeout means unlimited advertising. As long as there is
         * only one instance, duration should be ignored. We still set a
         * timeout in case further instances are being added later on.
         *
         * If the remaining lifetime of the instance is more than the
         * duration then the timeout corresponds to the duration, otherwise
         * it will be reduced to the remaining instance lifetime.
         */
        if (adv_instance->timeout == 0 ||
            adv_instance->duration <= adv_instance->remaining_time)
                timeout = adv_instance->duration;
        else
                timeout = adv_instance->remaining_time;

        /* The remaining time is being reduced unless the instance is being
         * advertised without time limit.
         */
        if (adv_instance->timeout)
                adv_instance->remaining_time =
                                adv_instance->remaining_time - timeout;

        hdev->adv_instance_timeout = timeout;
        queue_delayed_work(hdev->req_workqueue,
                           &hdev->adv_instance_expire,
                           msecs_to_jiffies(timeout * 1000));

        /* If we're just re-scheduling the same instance again then do not
         * execute any HCI commands. This happens when a single instance is
         * being advertised.
         */
        if (!force && hdev->cur_adv_instance == instance &&
            hci_dev_test_flag(hdev, HCI_LE_ADV))
                return 0;

        hdev->cur_adv_instance = instance;
        __hci_req_update_adv_data(req, instance);
        __hci_req_update_scan_rsp_data(req, instance);
        __hci_req_enable_advertising(req);

        return 0;
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
                                u8 instance, bool force)
{
        struct adv_info *adv_instance, *n, *next_instance = NULL;
        int err;
        u8 rem_inst;

        /* Cancel any timeout concerning the removed instance(s). */
        if (!instance || hdev->cur_adv_instance == instance)
                cancel_adv_timeout(hdev);

        /* Get the next instance to advertise BEFORE we remove
         * the current one. This can be the same instance again
         * if there is only one instance.
         */
        if (instance && hdev->cur_adv_instance == instance)
                next_instance = hci_get_next_instance(hdev, instance);

        if (instance == 0x00) {
                list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
                                         list) {
                        if (!(force || adv_instance->timeout))
                                continue;

                        rem_inst = adv_instance->instance;
                        err = hci_remove_adv_instance(hdev, rem_inst);
                        if (!err)
                                mgmt_advertising_removed(NULL, hdev, rem_inst);
                }
        } else {
                adv_instance = hci_find_adv_instance(hdev, instance);

                if (force || (adv_instance && adv_instance->timeout &&
                              !adv_instance->remaining_time)) {
                        /* Don't advertise a removed instance. */
                        if (next_instance &&
                            next_instance->instance == instance)
                                next_instance = NULL;

                        err = hci_remove_adv_instance(hdev, instance);
                        if (!err)
                                mgmt_advertising_removed(NULL, hdev, instance);
                }
        }

        if (!req || !hdev_is_powered(hdev) ||
            hci_dev_test_flag(hdev, HCI_ADVERTISING))
                return;

        if (next_instance)
                __hci_req_schedule_adv_instance(req, next_instance->instance,
                                                false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
        struct hci_dev *hdev = req->hdev;

        /* If we're advertising or initiating an LE connection we can't
         * go ahead and change the random address at this time. This is
         * because the eventual initiator address used for the
         * subsequently created connection will be undefined (some
         * controllers use the new address and others the one we had
         * when the operation started).
         *
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
            hci_lookup_le_connect(hdev)) {
                BT_DBG("Deferring random address update");
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return;
        }

        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
                              bool use_rpa, u8 *own_addr_type)
{
        struct hci_dev *hdev = req->hdev;
        int err;

        /* If privacy is enabled use a resolvable private address. If
         * current RPA has expired or there is something else than
         * the current RPA in use, then generate a new one.
         */
        if (use_rpa) {
                int to;

                *own_addr_type = ADDR_LE_DEV_RANDOM;

                if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
                    !bacmp(&hdev->random_addr, &hdev->rpa))
                        return 0;

                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        BT_ERR("%s failed to generate new RPA", hdev->name);
                        return err;
                }

                set_random_addr(req, &hdev->rpa);

                to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
                queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

                return 0;
        }

        /* In case of required privacy without resolvable private address,
         * use a non-resolvable private address. This is useful for active
         * scanning and non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t nrpa;

                while (true) {
                        /* The non-resolvable private address is generated
                         * from random six bytes with the two most significant
                         * bits cleared.
                         */
                        get_random_bytes(&nrpa, 6);
                        nrpa.b[5] &= 0x3f;

                        /* The non-resolvable private address shall not be
                         * equal to the public address.
                         */
                        if (bacmp(&hdev->bdaddr, &nrpa))
                                break;
                }

                *own_addr_type = ADDR_LE_DEV_RANDOM;
                set_random_addr(req, &nrpa);
                return 0;
        }

        /* If forcing static address is in use or there is no public
         * address use the static address as random address (but skip
         * the HCI command if the current random address is already the
         * static one).
         *
         * In case BR/EDR has been disabled on a dual-mode controller
         * and a static address has been configured, then use that
         * address instead of the public BR/EDR address.
         */
        if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
            (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
             bacmp(&hdev->static_addr, BDADDR_ANY))) {
                *own_addr_type = ADDR_LE_DEV_RANDOM;
                if (bacmp(&hdev->static_addr, &hdev->random_addr))
                        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
                                    &hdev->static_addr);
                return 0;
        }

        /* Neither privacy nor static address is being used so use a
         * public address.
         */
        *own_addr_type = ADDR_LE_DEV_PUBLIC;

        return 0;
}
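
/* To summarize, the address selection above falls back in the following
 * order: resolvable private address (use_rpa), non-resolvable private
 * address (require_privacy), configured static random address, and
 * finally the public address.
 */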

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->whitelist, list) {
                struct hci_conn *conn;

                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
                if (!conn)
                        return true;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        return true;
        }

        return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 scan;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (!hdev_is_powered(hdev))
                return;

        if (mgmt_powering_down(hdev))
                return;

        if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
            disconnected_whitelist_entries(hdev))
                scan = SCAN_PAGE;
        else
                scan = SCAN_DISABLED;

        if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                scan |= SCAN_INQUIRY;

        if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
            test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
                return;

        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int update_scan(struct hci_request *req, unsigned long opt)
{
        hci_dev_lock(req->hdev);
        __hci_req_update_scan(req);
        hci_dev_unlock(req->hdev);
        return 0;
}

static void scan_update_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

        hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        hci_dev_lock(hdev);

        __hci_req_update_scan(req);

        /* If BR/EDR is not enabled and we disable advertising as a
         * by-product of disabling connectable, we need to update the
         * advertising flags.
         */
        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                __hci_req_update_adv_data(req, hdev->cur_adv_instance);

        /* Update the advertising parameters if necessary */
        if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
            !list_empty(&hdev->adv_instances))
                __hci_req_enable_advertising(req);

        __hci_update_background_scan(req);

        hci_dev_unlock(hdev);

        return 0;
}

static void connectable_update_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            connectable_update);
        u8 status;

        hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
        mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
        struct bt_uuid *uuid;
        u8 val = 0;

        list_for_each_entry(uuid, &hdev->uuids, list)
                val |= uuid->svc_hint;

        return val;
}

void __hci_req_update_class(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 cod[3];

        BT_DBG("%s", hdev->name);

        if (!hdev_is_powered(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        cod[0] = hdev->minor_class;
        cod[1] = hdev->major_class;
        cod[2] = get_service_classes(hdev);

        if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                cod[1] |= 0x20; /* Limited discoverable bit */

        if (memcmp(cod, hdev->dev_class, 3) == 0)
                return;

        hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

static void write_iac(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_current_iac_lap cp;

        if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                return;

        if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
                /* Limited discoverable mode: advertise both LIAC and GIAC */
                cp.num_iac = min_t(u8, hdev->num_iac, 2);
                cp.iac_lap[0] = 0x00;   /* LIAC */
                cp.iac_lap[1] = 0x8b;
                cp.iac_lap[2] = 0x9e;
                cp.iac_lap[3] = 0x33;   /* GIAC */
                cp.iac_lap[4] = 0x8b;
                cp.iac_lap[5] = 0x9e;
        } else {
                /* General discoverable mode: GIAC only */
                cp.num_iac = 1;
                cp.iac_lap[0] = 0x33;   /* GIAC */
                cp.iac_lap[1] = 0x8b;
                cp.iac_lap[2] = 0x9e;
        }

        hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
                    (cp.num_iac * 3) + 1, &cp);
}

static int discoverable_update(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        hci_dev_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                write_iac(req);
                __hci_req_update_scan(req);
                __hci_req_update_class(req);
        }

        /* Advertising instances don't use the global discoverable setting, so
         * only update AD if advertising was enabled using Set Advertising.
         */
        if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
                __hci_req_update_adv_data(req, 0x00);

                /* In limited privacy mode the discoverable setting decides
                 * whether advertising uses the RPA or the identity address,
                 * so advertising has to be re-programmed to pick up the
                 * correct address.
                 */
                if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                        __hci_req_enable_advertising(req);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            discoverable_update);
        u8 status;

        hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
        mgmt_set_discoverable_complete(hdev, status);
}

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
                      u8 reason)
{
        switch (conn->state) {
        case BT_CONNECTED:
        case BT_CONFIG:
                if (conn->type == AMP_LINK) {
                        struct hci_cp_disconn_phy_link cp;

                        cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
                        cp.reason = reason;
                        hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
                                    &cp);
                } else {
                        struct hci_cp_disconnect dc;

                        dc.handle = cpu_to_le16(conn->handle);
                        dc.reason = reason;
                        hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
                }

                conn->state = BT_DISCONN;

                break;
        case BT_CONNECT:
                if (conn->type == LE_LINK) {
                        if (test_bit(HCI_CONN_SCANNING, &conn->flags))
                                break;
                        hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
                                    0, NULL);
                } else if (conn->type == ACL_LINK) {
                        if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
                                break;
                        hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
                                    6, &conn->dst);
                }
                break;
        case BT_CONNECT2:
                if (conn->type == ACL_LINK) {
                        struct hci_cp_reject_conn_req rej;

                        bacpy(&rej.bdaddr, &conn->dst);
                        rej.reason = reason;

                        hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
                                    sizeof(rej), &rej);
                } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
                        struct hci_cp_reject_sync_conn_req rej;

                        bacpy(&rej.bdaddr, &conn->dst);

                        /* SCO rejection has its own limited set of
                         * allowed error values (0x0D-0x0F) which isn't
                         * compatible with most values passed to this
                         * function. To be safe hard-code one of the
                         * values that's suitable for SCO.
                         */
                        rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;

                        hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
                                    sizeof(rej), &rej);
                }
                break;
        default:
                conn->state = BT_CLOSED;
                break;
        }
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        if (status)
                BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
        struct hci_request req;
        int err;

        hci_req_init(&req, conn->hdev);

        __hci_abort_conn(&req, conn, reason);

        err = hci_req_run(&req, abort_conn_complete);
        if (err && err != -ENODATA) {
                BT_ERR("Failed to run HCI request: err %d", err);
                return err;
        }

        return 0;
}
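
/* Example (sketch): tear down an established connection with a standard
 * reason code:
 *
 *        hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 */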

static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
        hci_dev_lock(req->hdev);
        __hci_update_background_scan(req);
        hci_dev_unlock(req->hdev);
        return 0;
}

static void bg_scan_update(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            bg_scan_update);
        struct hci_conn *conn;
        u8 status;
        int err;

        err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
        if (!err)
                return;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
        if (conn)
                hci_le_conn_failed(conn, status);

        hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
        hci_req_add_le_scan_disable(req);
        return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
        u8 length = opt;
        const u8 giac[3] = { 0x33, 0x8b, 0x9e };
        const u8 liac[3] = { 0x00, 0x8b, 0x9e };
        struct hci_cp_inquiry cp;

        BT_DBG("%s", req->hdev->name);

        hci_dev_lock(req->hdev);
        hci_inquiry_cache_flush(req->hdev);
        hci_dev_unlock(req->hdev);

        memset(&cp, 0, sizeof(cp));

        if (req->hdev->discovery.limited)
                memcpy(&cp.lap, liac, sizeof(cp.lap));
        else
                memcpy(&cp.lap, giac, sizeof(cp.lap));

        cp.length = length;

        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_disable.work);
        u8 status;

        BT_DBG("%s", hdev->name);

        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                return;

        cancel_delayed_work(&hdev->le_scan_restart);

        hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
        if (status) {
                BT_ERR("Failed to disable LE scan: status 0x%02x", status);
                return;
        }

        hdev->discovery.scan_start = 0;

        /* If we were running LE only scan, change discovery state. If
         * we were running both LE and BR/EDR inquiry simultaneously,
         * and BR/EDR inquiry is already finished, stop discovery,
         * otherwise BR/EDR inquiry will stop discovery when finished.
         * If we will resolve remote device name, do not change
         * discovery state.
         */
        if (hdev->discovery.type == DISCOV_TYPE_LE)
                goto discov_stopped;

        if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
                return;

        if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
                if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
                    hdev->discovery.state != DISCOVERY_RESOLVING)
                        goto discov_stopped;

                return;
        }

        hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
                     HCI_CMD_TIMEOUT, &status);
        if (status) {
                BT_ERR("Inquiry failed: status 0x%02x", status);
                goto discov_stopped;
        }

        return;

discov_stopped:
        hci_dev_lock(hdev);
        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
        hci_dev_unlock(hdev);
}

static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_enable cp;

        /* If controller is not scanning we are done. */
        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                return 0;

        hci_req_add_le_scan_disable(req);

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_ENABLE;
        cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

        return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_restart.work);
        unsigned long timeout, duration, scan_start, now;
        u8 status;

        BT_DBG("%s", hdev->name);

        hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
        if (status) {
                BT_ERR("Failed to restart LE scan: status %d", status);
                return;
        }

        hci_dev_lock(hdev);

        if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
            !hdev->discovery.scan_start)
                goto unlock;

        /* When the scan was started, hdev->le_scan_disable has been queued
         * after duration from scan_start. During scan restart this job
         * has been canceled, and we need to queue it again after proper
         * timeout, to make sure that scan does not run indefinitely.
         */
        duration = hdev->discovery.scan_duration;
        scan_start = hdev->discovery.scan_start;
        now = jiffies;
        if (now - scan_start <= duration) {
                int elapsed;

                if (now >= scan_start)
                        elapsed = now - scan_start;
                else
                        elapsed = ULONG_MAX - scan_start + now;

                timeout = duration - elapsed;
        } else {
                timeout = 0;
        }

        queue_delayed_work(hdev->req_workqueue,
                           &hdev->le_scan_disable, timeout);

unlock:
        hci_dev_unlock(hdev);
}

static void disable_advertising(struct hci_request *req)
{
        u8 enable = 0x00;

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static int active_scan(struct hci_request *req, unsigned long opt)
{
        uint16_t interval = opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        u8 own_addr_type;
        int err;

        BT_DBG("%s", hdev->name);

        if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
                hci_dev_lock(hdev);

                /* Don't let discovery abort an outgoing connection attempt
                 * that's using directed advertising.
                 */
                if (hci_lookup_le_connect(hdev)) {
                        hci_dev_unlock(hdev);
                        return -EBUSY;
                }

                cancel_adv_timeout(hdev);
                hci_dev_unlock(hdev);

                disable_advertising(req);
        }

        /* If controller is scanning, it means the background scanning is
         * running. Thus, we should temporarily stop it in order to set the
         * discovery scanning parameters.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                hci_req_add_le_scan_disable(req);

        /* All active scans will be done with either a resolvable private
         * address (when privacy feature has been enabled) or non-resolvable
         * private address.
         */
        err = hci_update_random_address(req, true, scan_use_rpa(hdev),
                                        &own_addr_type);
        if (err < 0)
                own_addr_type = ADDR_LE_DEV_PUBLIC;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_ACTIVE;
        param_cp.interval = cpu_to_le16(interval);
        param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
        param_cp.own_address_type = own_addr_type;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);

        return 0;
}

static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
        int err;

        BT_DBG("%s", req->hdev->name);

        err = active_scan(req, opt);
        if (err)
                return err;

        return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

static void start_discovery(struct hci_dev *hdev, u8 *status)
{
        unsigned long timeout;

        BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

        switch (hdev->discovery.type) {
        case DISCOV_TYPE_BREDR:
                if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
                        hci_req_sync(hdev, bredr_inquiry,
                                     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
                                     status);
                return;
        case DISCOV_TYPE_INTERLEAVED:
                /* When running simultaneous discovery, the LE scanning time
                 * should occupy the whole discovery time since BR/EDR inquiry
                 * and LE scanning are scheduled by the controller.
                 *
                 * For interleaving discovery in comparison, BR/EDR inquiry
                 * and LE scanning are done sequentially with separate
                 * timeouts.
                 */
                if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
                             &hdev->quirks)) {
                        timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
                        /* During simultaneous discovery, we double LE scan
                         * interval. We must leave some time for the controller
                         * to do BR/EDR inquiry.
                         */
                        hci_req_sync(hdev, interleaved_discov,
                                     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
                                     status);
                        break;
                }

                timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
                hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
                             HCI_CMD_TIMEOUT, status);
                break;
        case DISCOV_TYPE_LE:
                timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
                hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
                             HCI_CMD_TIMEOUT, status);
                break;
        default:
                *status = HCI_ERROR_UNSPECIFIED;
                return;
        }

        if (*status)
                return;

        BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

        /* When service discovery is used and the controller has a
         * strict duplicate filter, it is important to remember the
         * start and duration of the scan. This is required for
         * restarting scanning during the discovery phase.
         */
        if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
            hdev->discovery.result_filtering) {
                hdev->discovery.scan_start = jiffies;
                hdev->discovery.scan_duration = timeout;
        }

        queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
                           timeout);
}

bool hci_req_stop_discovery(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct discovery_state *d = &hdev->discovery;
        struct hci_cp_remote_name_req_cancel cp;
        struct inquiry_entry *e;
        bool ret = false;

        BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

        if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
                if (test_bit(HCI_INQUIRY, &hdev->flags))
                        hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                        cancel_delayed_work(&hdev->le_scan_disable);
                        hci_req_add_le_scan_disable(req);
                }

                ret = true;
        } else {
                /* Passive scanning */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                        hci_req_add_le_scan_disable(req);
                        ret = true;
                }
        }

        /* No further actions needed for LE-only discovery */
        if (d->type == DISCOV_TYPE_LE)
                return ret;

        if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
                e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
                                                     NAME_PENDING);
                if (!e)
                        return ret;

                bacpy(&cp.bdaddr, &e->data.bdaddr);
                hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
                            &cp);
                ret = true;
        }

        return ret;
}

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
        hci_dev_lock(req->hdev);
        hci_req_stop_discovery(req);
        hci_dev_unlock(req->hdev);

        return 0;
}

static void discov_update(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            discov_update);
        u8 status = 0;

        switch (hdev->discovery.state) {
        case DISCOVERY_STARTING:
                start_discovery(hdev, &status);
                mgmt_start_discovery_complete(hdev, status);
                if (status)
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                else
                        hci_discovery_set_state(hdev, DISCOVERY_FINDING);
                break;
        case DISCOVERY_STOPPING:
                hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
                mgmt_stop_discovery_complete(hdev, status);
                if (!status)
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                break;
        case DISCOVERY_STOPPED:
        default:
                return;
        }
}

static void discov_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        /* When discoverable timeout triggers, then just make sure
         * the limited discoverable flag is cleared. Even in the case
         * of a timeout triggered from general discoverable, it is
         * safe to unconditionally clear the flag.
         */
        hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
        hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);

        hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
        mgmt_new_settings(hdev);
}

static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 link_sec;

        hci_dev_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            !lmp_host_ssp_capable(hdev)) {
                u8 mode = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

                if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
                        u8 support = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                                    sizeof(support), &support);
                }
        }

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
            lmp_bredr_capable(hdev)) {
                struct hci_cp_write_le_host_supported cp;

                cp.le = 0x01;
                cp.simul = 0x00;

                /* Check first if we already have the right
                 * host state (host features set)
                 */
                if (cp.le != lmp_host_le_capable(hdev) ||
                    cp.simul != lmp_host_le_br_capable(hdev))
                        hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
                                    sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                /* Make sure the controller has a good default for
                 * advertising data. This also applies to the case
                 * where BR/EDR was toggled during the AUTO_OFF phase.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
                    list_empty(&hdev->adv_instances)) {
                        __hci_req_update_adv_data(req, 0x00);
                        __hci_req_update_scan_rsp_data(req, 0x00);

                        if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
                                __hci_req_enable_advertising(req);
                } else if (!list_empty(&hdev->adv_instances)) {
                        struct adv_info *adv_instance;

                        adv_instance = list_first_entry(&hdev->adv_instances,
                                                        struct adv_info, list);
                        __hci_req_schedule_adv_instance(req,
                                                        adv_instance->instance,
                                                        true);
                }
        }

        link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
        if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
                            sizeof(link_sec), &link_sec);

        if (lmp_bredr_capable(hdev)) {
                if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
                        __hci_req_write_fast_connectable(req, true);
                else
                        __hci_req_write_fast_connectable(req, false);
                __hci_req_update_scan(req);
                __hci_req_update_class(req);
                __hci_req_update_name(req);
                __hci_req_update_eir(req);
        }

        hci_dev_unlock(hdev);
        return 0;
}

int __hci_req_hci_power_on(struct hci_dev *hdev)
{
        /* Register the available SMP channels (BR/EDR and LE) only when
         * successfully powering on the controller. This late
         * registration is required so that LE SMP can clearly decide if
         * the public address or static address is used.
         */
        smp_register(hdev);

        return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
                              NULL);
}

void hci_request_setup(struct hci_dev *hdev)
{
        INIT_WORK(&hdev->discov_update, discov_update);
        INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
        INIT_WORK(&hdev->scan_update, scan_update_work);
        INIT_WORK(&hdev->connectable_update, connectable_update_work);
        INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
        INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
        INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
        INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
        hci_req_sync_cancel(hdev, ENODEV);

        cancel_work_sync(&hdev->discov_update);
        cancel_work_sync(&hdev->bg_scan_update);
        cancel_work_sync(&hdev->scan_update);
        cancel_work_sync(&hdev->connectable_update);
        cancel_work_sync(&hdev->discoverable_update);
        cancel_delayed_work_sync(&hdev->discov_off);
        cancel_delayed_work_sync(&hdev->le_scan_disable);
        cancel_delayed_work_sync(&hdev->le_scan_restart);

        if (hdev->adv_instance_timeout) {
                cancel_delayed_work_sync(&hdev->adv_instance_expire);
                hdev->adv_instance_timeout = 0;
        }
}