#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}
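
/* Typical asynchronous use of the request machinery above (sketch for
 * illustration only; my_complete_cb is a hypothetical callback, not part
 * of this file):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	err = hci_req_run(&req, my_complete_cb);
 *
 * All queued commands are spliced onto hdev->cmd_q in one go and the
 * completion callback is attached to the last command of the request.
 */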

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
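
/* Example round trip using the synchronous helper above (sketch; the
 * double-underscore prefix means the caller is expected to hold the
 * req_sync lock already):
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	// ...parse the command complete parameters in skb->data...
 *	kfree_skb(skb);
 */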

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error response.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}
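
/* hci_req_sync() drives a caller-supplied request builder, e.g. (sketch;
 * set_scan is a hypothetical helper, not part of this file):
 *
 *	static int set_scan(struct hci_request *req, unsigned long opt)
 *	{
 *		u8 scan = opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *		return 0;
 *	}
 *
 *	err = hci_req_sync(hdev, set_scan, SCAN_PAGE, HCI_CMD_TIMEOUT,
 *			   &status);
 */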

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connection we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there is no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID	0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
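
/* Each helper above emits one EIR structure: a length octet (covering the
 * type octet plus data), a type octet and the payload. For example, a
 * complete 16-bit UUID list holding only the Audio Sink UUID 0x110b is
 * encoded as { 0x03, EIR_UUID16_ALL, 0x0b, 0x11 } (UUIDs are little
 * endian). The initial length of 1 written to uuids_start[0] accounts for
 * the type octet and grows as UUIDs are appended.
 */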

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		/* If the device is neither in pend_le_conns nor
		 * pend_le_reports then remove it from the white list.
		 */
		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
					       &b->bdaddr, b->bdaddr_type) &&
		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       &b->bdaddr, b->bdaddr_type)) {
			struct hci_cp_le_del_from_white_list cp;

			cp.bdaddr_type = b->bdaddr_type;
			bacpy(&cp.bdaddr, &b->bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
				    sizeof(cp), &cp);
			continue;
		}

		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy)
{
	struct hci_dev *hdev = req->hdev;

	/* Use ext scanning if set ext scan param and ext scan enable is
	 * supported
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are send
	 * during passive scanning. Not using an non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter polices 0x00 (no white list)
	 * and 0x01 (white list enabled) use the new filter policies
	 * 0x02 (no white list) and 0x03 (white list enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
			   hdev->le_scan_window, own_addr_type, filter_policy);
}

static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		struct hci_cp_le_set_ext_adv_enable cp;

		cp.enable = 0x00;
		/* Disable all sets since we only support one set at the
		 * moment.
		 */
		cp.num_of_sets = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in slave role. */
	if (hdev->conn_hash.le_num_slave > 0) {
		/* Slave connection state and non connectable mode bit 20. */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Slave connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in master role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
		/* Master connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Master connection state and connectable mode bit 35 and
		 * scannable bit 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}

void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u16 adv_min_interval, adv_max_interval;
	u32 flags;

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));

	if (connectable) {
		cp.type = LE_ADV_IND;

		adv_min_interval = hdev->le_adv_min_interval;
		adv_max_interval = hdev->le_adv_max_interval;
	} else {
		if (get_cur_adv_instance_scan_rsp_len(hdev))
			cp.type = LE_ADV_SCAN_IND;
		else
			cp.type = LE_ADV_NONCONN_IND;

		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
		} else {
			adv_min_interval = hdev->le_adv_min_interval;
			adv_max_interval = hdev->le_adv_max_interval;
		}
	}

	cp.min_interval = cpu_to_le16(adv_min_interval);
	cp.max_interval = cpu_to_le16(adv_max_interval);
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
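
/* Note on units: LE advertising intervals are expressed in units of
 * 0.625 ms, so e.g. an interval value of 0x0800 corresponds to
 * 0x0800 * 0.625 ms = 1.28 s. The DISCOV_LE_FAST_ADV_INT_* values select
 * a faster cycle for non-connectable advertising while the device is not
 * (or only limited) discoverable.
 */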

u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);

	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
	if (complete_len) {
		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
				       sizeof(name));
	}

	return ad_len;
}

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 scan_rsp_len = 0;

	if (hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.handle = 0;
		cp.length = len;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
	}
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
		s8 adv_tx_power;

		if (ext_adv_capable(hdev)) {
			if (adv_instance)
				adv_tx_power = adv_instance->tx_power;
			else
				adv_tx_power = hdev->adv_tx_power;
		} else {
			adv_tx_power = hdev->adv_tx_power;
		}

		/* Provide Tx Power only if we can provide a valid value for it */
		if (adv_tx_power != HCI_TX_POWER_INVALID) {
			ptr[0] = 0x02;
			ptr[1] = EIR_TX_POWER;
			ptr[2] = (u8)adv_tx_power;

			ad_len += 3;
			ptr += 3;
		}
	}

	return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;
		cp.handle = 0;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
	} else {
		struct hci_cp_le_set_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
	}
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (!adv_instance->rpa_expired &&
			    !bacmp(&adv_instance->random_addr, &hdev->rpa))
				return 0;

			adv_instance->rpa_expired = false;
		} else {
			if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
			    !bacmp(&hdev->random_addr, &hdev->rpa))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		if (adv_instance)
			queue_delayed_work(hdev->workqueue,
					   &adv_instance->rpa_expired_cb, to);
		else
			queue_delayed_work(hdev->workqueue,
					   &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use an non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy so use a public address. */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
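
/* Summary of the own-address choices made above (per the Core Spec):
 * - RPA: resolvable private address, derived from the local IRK and
 *   rotated every rpa_timeout seconds.
 * - NRPA: non-resolvable private address, six random bytes with the two
 *   most significant bits cleared to 0b00.
 * - Otherwise the public address is used (ADDR_LE_DEV_PUBLIC).
 */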

void __hci_req_clear_ext_adv_sets(struct hci_request *req)
{
	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
}

int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
{
	struct hci_cp_le_set_ext_adv_params cp;
	struct hci_dev *hdev = req->hdev;
	bool connectable;
	u32 flags;
	bdaddr_t random_addr;
	u8 own_addr_type;
	int err;
	struct adv_info *adv_instance;
	bool secondary_adv;
	/* In ext adv set param interval is 3 octets represented by
	 * units of 0.625ms, so 0x000800 corresponds to 1.28 s.
	 */
	const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };

	if (instance > 0) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return -EINVAL;
	} else {
		adv_instance = NULL;
	}

	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EPERM;

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	err = hci_get_random_address(hdev, !connectable,
				     adv_use_rpa(hdev, flags), adv_instance,
				     &own_addr_type, &random_addr);
	if (err < 0)
		return err;

	memset(&cp, 0, sizeof(cp));

	memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
	memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));

	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);

	if (connectable) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
	} else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
	} else {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
	}

	cp.own_addr_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;
	cp.tx_power = 127;
	cp.handle = instance;

	if (flags & MGMT_ADV_FLAG_SEC_2M) {
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_2M;
	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
		cp.primary_phy = HCI_ADV_PHY_CODED;
		cp.secondary_phy = HCI_ADV_PHY_CODED;
	} else {
		/* In all other cases use 1M */
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_1M;
	}

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);

	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
	    bacmp(&random_addr, BDADDR_ANY)) {
		struct hci_cp_le_set_adv_set_rand_addr cp;

		/* Check if random address need to be updated */
		if (adv_instance) {
			if (!bacmp(&random_addr, &adv_instance->random_addr))
				return 0;
		} else {
			if (!bacmp(&random_addr, &hdev->random_addr))
				return 0;
		}

		memset(&cp, 0, sizeof(cp));

		cp.handle = 0;
		bacpy(&cp.bdaddr, &random_addr);

		hci_req_add(req,
			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
			    sizeof(cp), &cp);
	}

	return 0;
}

int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *adv_set;
	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
	struct adv_info *adv_instance;

	if (instance > 0) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return -EINVAL;
	} else {
		adv_instance = NULL;
	}

	cp = (void *) data;
	adv_set = (void *) cp->data;

	memset(cp, 0, sizeof(*cp));

	cp->enable = 0x01;
	cp->num_of_sets = 0x01;

	memset(adv_set, 0, sizeof(*adv_set));

	adv_set->handle = instance;

	/* Set duration per instance since controller is responsible for
	 * scheduling it.
	 */
	if (adv_instance && adv_instance->duration) {
		u16 duration = adv_instance->duration * MSEC_PER_SEC;

		/* Time = N * 10 ms */
		adv_set->duration = cpu_to_le16(duration / 10);
	}

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
		    data);

	return 0;
}
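
/* Worked example for the duration conversion above: an instance duration
 * of 2 seconds becomes 2 * MSEC_PER_SEC = 2000 ms, and 2000 / 10 = 200 is
 * the value handed to the controller, since the field is in units of
 * 10 ms.
 */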

int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	err = __hci_req_setup_ext_adv_instance(req, instance);
	if (err < 0)
		return err;

	__hci_req_update_scan_rsp_data(req, instance);
	__hci_req_enable_ext_advertising(req, instance);

	return 0;
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a
	 * timeout in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the
	 * duration then the timeout corresponds to the duration, otherwise
	 * it will be reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	/* Only use work for scheduling instances with legacy advertising */
	if (!ext_adv_capable(hdev)) {
		hdev->adv_instance_timeout = timeout;
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->adv_instance_expire,
				   msecs_to_jiffies(timeout * 1000));
	}

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	if (ext_adv_capable(hdev)) {
		__hci_req_start_ext_adv(req, instance);
	} else {
		__hci_req_update_adv_data(req, instance);
		__hci_req_update_scan_rsp_data(req, instance);
		__hci_req_enable_advertising(req);
	}

	return 0;
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
				struct hci_request *req, u8 instance,
				bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      bool use_rpa, u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use an non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
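
/* The scan value written above is the Write Scan Enable bitmask from the
 * BR/EDR spec: SCAN_DISABLED (0x00), SCAN_INQUIRY (0x01) and SCAN_PAGE
 * (0x02) may be OR'ed together, so 0x03 enables both inquiry and page
 * scan.
 */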

static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	__hci_req_update_scan(req);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		__hci_req_update_adv_data(req, hdev->cur_adv_instance);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances)) {
		if (ext_adv_capable(hdev))
			__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
		else
			__hci_req_enable_advertising(req);
	}

	__hci_update_background_scan(req);

	hci_dev_unlock(hdev);

	return 0;
}

static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

static void write_iac(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
		    (cp.num_iac * 3) + 1, &cp);
}
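
/* The LAPs above are the standard inquiry access codes in little-endian
 * byte order: GIAC = 0x9e8b33 (general inquiry) and LIAC = 0x9e8b00
 * (limited inquiry).
 */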

static int discoverable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		write_iac(req);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		__hci_req_update_adv_data(req, 0x00);

		/* Discoverable mode affects the local advertising
		 * address in limited privacy mode.
		 */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
			if (ext_adv_capable(hdev))
				__hci_req_start_ext_adv(req, 0x00);
			else
				__hci_req_enable_advertising(req);
		}
	}

	hci_dev_unlock(hdev);

	return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discoverable_update);
	u8 status;

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_discoverable_complete(hdev, status);
}

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		bt_dev_err(conn->hdev, "failed to run HCI request: err %d",
			   err);
		return err;
	}

	return 0;
}

static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_update_background_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req);
	return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", req->hdev->name);

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));

	if (req->hdev->discovery.limited)
		memcpy(&cp.lap, liac, sizeof(cp.lap));
	else
		memcpy(&cp.lap, giac, sizeof(cp.lap));

	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}
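
/* The inquiry length passed above is in units of 1.28 s, so e.g. a length
 * of 0x08 keeps the inquiry running for up to 10.24 seconds.
 */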

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
			   status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running LE only scan, change discovery state. If
	 * we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we will resolve remote device name, do not change
	 * discovery state.
	 */
	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}

static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	hci_req_add_le_scan_disable(req);

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_ENABLE;
		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	BT_DBG("%s", hdev->name);

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to restart LE scan: status %d",
			   status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}

static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		hci_dev_lock(hdev);

		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_lookup_le_connect(hdev)) {
			hci_dev_unlock(hdev);
			return -EBUSY;
		}

		cancel_adv_timeout(hdev);
		hci_dev_unlock(hdev);

		__hci_req_disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
					&own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
			   own_addr_type, 0);
	return 0;
}
2535
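/* Request builder for interleaved discovery on controllers that can
 * scan and inquire at the same time: an active LE scan is combined
 * with a BR/EDR inquiry of DISCOV_BREDR_INQUIRY_LEN.
 */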
static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

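/* Kick off discovery according to hdev->discovery.type and schedule
 * the le_scan_disable work to stop LE scanning when the discovery
 * timeout expires. Failures are reported back through status.
 */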
static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time (DISCOV_LE_TIMEOUT)
		 * while BR/EDR inquiry runs in parallel. Controllers without
		 * the quirk cannot inquire and scan at the same time, so in
		 * that case only the active scan is started here and the
		 * inquiry phase is run by le_scan_disable_work afterwards.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}

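/* Append the commands needed to stop the current discovery procedure
 * to the request. Returns true if at least one command was queued and
 * the caller should wait for the request to complete.
 */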
bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}

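/* Request builder wrapper around hci_req_stop_discovery() for use
 * with the synchronous request machinery.
 */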
static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

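/* Work callback that drives discovery state transitions: it starts or
 * stops discovery depending on the current state and reports the
 * result back to the management interface.
 */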
static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}

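/* Work callback for the discov_off delayed work, run when the
 * discoverable timeout expires: the discoverable flags are cleared
 * and the controller state updated accordingly.
 */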
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}

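/* Request builder that brings the controller settings in sync with
 * the host flags after power on: SSP, Secure Connections, LE host
 * support, advertising, link security and the BR/EDR scan, class,
 * name and EIR state.
 */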
static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 link_sec;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    list_empty(&hdev->adv_instances)) {
			int err;

			if (ext_adv_capable(hdev)) {
				err = __hci_req_setup_ext_adv_instance(req,
								       0x00);
				if (!err)
					__hci_req_update_scan_rsp_data(req,
								       0x00);
			} else {
				err = 0;
				__hci_req_update_adv_data(req, 0x00);
				__hci_req_update_scan_rsp_data(req, 0x00);
			}

			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
				if (!ext_adv_capable(hdev))
					__hci_req_enable_advertising(req);
				else if (!err)
					__hci_req_enable_ext_advertising(req,
									 0x00);
			}
		} else if (!list_empty(&hdev->adv_instances)) {
			struct adv_info *adv_instance;

			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			__hci_req_schedule_adv_instance(req,
							adv_instance->instance,
							true);
		}
	}

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);
	return 0;
}

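/* Run the powered_update_hci request synchronously as part of
 * powering on the controller.
 */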
int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only
	 * when successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly
	 * decide if the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}

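/* Initialize the work items used by the request machinery of an
 * hci_dev.
 */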
void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

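/* Cancel any pending synchronous request with ENODEV and flush all
 * request related work, making sure none of it is still running when
 * this function returns.
 */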
void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}