/* Bluetooth HCI core. */
#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

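/* The debugfs attributes below follow the usual single-flag pattern:
 * reads report the current state as "Y" or "N", writes parse a boolean
 * with kstrtobool_from_user() and program the controller to match.
 */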
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_sync_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_sync_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

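/* Vendor diagnostic mode is only exposed when the driver provides a
 * set_diag() callback; see hci_debugfs_create_basic() below.
 */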
static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	bool enable;
	int err;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active or in user channel operation, then there is no
	 * need for the vendor callback. Instead just store the desired
	 * value and the setting will be programmed when the controller
	 * gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    (!test_bit(HCI_RUNNING, &hdev->flags) ||
	     hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
		goto done;

	hci_req_sync_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_sync_unlock(hdev);

	if (err < 0)
		return err;

done:
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}

static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}

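/* The hci_*_req() builders below queue HCI commands on a struct
 * hci_request; the request is then run to completion by __hci_req_sync()
 * or hci_req_sync() from the callers further down.
 */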
static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
	return 0;
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it is placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	return 0;
}

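/* Controller bring-up is staged: hci_init1_req() resets the controller
 * and reads the basic feature/version information, and the later init
 * stages build on what stage one discovered.
 */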
static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		bredr_init(req);
		break;
	case HCI_AMP:
		amp_init1(req);
		break;
	default:
		bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
		break;
	}

	return 0;
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

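/* The event mask tells the controller which HCI events the host wants to
 * receive; anything not unmasked here is silently dropped by the
 * controller, so each bit is only set when the corresponding feature or
 * command is actually supported.
 */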
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version Information
					    * Complete
					    */

		/* If the controller supports encryption, enable the
		 * encryption related events.
		 */
		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh
					    * Complete
					    */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}

	return 0;
}

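/* The default link policy advertises every link-policy feature the
 * controller claims to support (role switch, hold, sniff, park).
 */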
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

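/* Page 2 of the event mask covers the Connectionless Slave Broadcast and
 * Authenticated Payload Timeout Expired events; it is only written when
 * at least one of those events is actually needed.
 */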
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	bool changed = false;

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
		changed = true;
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
		changed = true;
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
		events[2] |= 0x80;
		changed = true;
	}

	/* Some Broadcom based controllers indicate support for Set Event
	 * Mask Page 2 command, but then actually do not support it. Since
	 * the default value is all bits set to zero, the command is only
	 * required if the event mask has to be changed. In case no change
	 * to the event mask is needed, skip this command.
	 */
	if (changed)
		hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
			    sizeof(events), events);
}

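/* Third init stage: everything here is gated on the supported-commands
 * bitmap (hdev->commands[]) and the feature pages read during stage two,
 * so only commands the controller actually implements get issued.
 */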
static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	if (hdev->commands[18] & 0x04 &&
	    !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports LL Privacy feature, enable
		 * the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
			events[1] |= 0x02;	/* LE Enhanced Connection
						 * Complete
						 */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports Channel Selection Algorithm #2
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
			events[2] |= 0x08;	/* LE Channel Selection
						 * Algorithm
						 */

		/* If the controller supports the LE Set Scan Enable command,
		 * enable the corresponding advertising report event.
		 */
		if (hdev->commands[26] & 0x08)
			events[0] |= 0x02;	/* LE Advertising Report */

		/* If the controller supports the LE Create Connection
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[26] & 0x10)
			events[0] |= 0x01;	/* LE Connection Complete */

		/* If the controller supports the LE Connection Update
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x04)
			events[0] |= 0x04;	/* LE Connection Update
						 * Complete
						 */

		/* If the controller supports the LE Read Remote Used Features
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x20)
			events[0] |= 0x08;	/* LE Read Remote Used
						 * Features Complete
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		/* If the controller supports the LE Set Default PHY or
		 * LE Set PHY commands, enable the corresponding event.
		 */
		if (hdev->commands[35] & (0x20 | 0x40))
			events[1] |= 0x08;	/* LE PHY Update Complete */

		/* If the controller supports LE Set Extended Scan Parameters
		 * and LE Set Extended Scan Enable commands, enable the
		 * corresponding event.
		 */
		if (use_ext_scan(hdev))
			events[1] |= 0x10;	/* LE Extended Advertising
						 * Report
						 */

		/* If the controller supports the LE Extended Advertising
		 * command, enable the corresponding event.
		 */
		if (ext_adv_capable(hdev))
			events[2] |= 0x02;	/* LE Advertising Set
						 * Terminated
						 */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		/* Read LE Advertising Channel TX Power */
		if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
			/* HCI TS spec forbids mixing of legacy and extended
			 * advertising commands wherein READ_ADV_TX_POWER is
			 * also included. So do not call it if extended adv
			 * is supported otherwise controller will return
			 * COMMAND_DISALLOWED for extended commands.
			 */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[38] & 0x80) {
			/* Read LE Min/Max TX Power */
			hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

		if (hdev->commands[34] & 0x40) {
			/* Read LE Resolving List Size */
			hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[34] & 0x20) {
			/* Clear LE Resolving List */
			hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
		}

		if (hdev->commands[35] & 0x04) {
			__le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);

			/* Set RPA timeout */
			hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
				    &rpa_timeout);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		if (ext_adv_capable(hdev)) {
			/* Read LE Number of Supported Advertising Sets */
			hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
				    0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	return 0;
}

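/* Fourth and final init stage: clean out stored link keys, apply the
 * second event mask page and the remaining optional defaults now that
 * all feature and command pages are known.
 */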
static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Read local pairing options if the HCI command is supported */
	if (hdev->commands[41] & 0x08)
		hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}

	/* Set erroneous data reporting if supported to the wideband speech
	 * setting value.
	 */
	if (hdev->commands[18] & 0x08 &&
	    !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
		bool enabled = hci_dev_test_flag(hdev,
						 HCI_WIDEBAND_SPEECH_ENABLED);

		if (enabled !=
		    (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
			struct hci_cp_write_def_err_data_reporting cp;

			cp.err_data_reporting = enabled ?
						ERR_DATA_REPORTING_ENABLED :
						ERR_DATA_REPORTING_DISABLED;

			hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
				    sizeof(cp), &cp);
		}
	}

	/* Set Suggested Default Data Length to maximum if supported */
	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
		struct hci_cp_le_write_def_data_len cp;

		cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
		cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
		hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
	}

	/* Set Default PHY parameters if command is supported */
	if (hdev->commands[35] & 0x20) {
		struct hci_cp_le_set_default_phy cp;

		cp.all_phys = 0x00;
		cp.tx_phys = hdev->le_tx_def_phys;
		cp.rx_phys = hdev->le_rx_def_phys;

		hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
	}

	return 0;
}

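/* __hci_init() drives the four stages above in order. Stages three and
 * four only apply to primary controllers, and the debugfs entries are
 * created once the information they expose has actually been read.
 */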
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_PRIMARY)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

	return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}

/* Get HCI device by index.
 * Device is held on return.
 */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 * Go through "done" so the hci_dev_get() above stays balanced
		 * with hci_dev_put(); a bare return here would leak the
		 * device reference.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/**
 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
 *				       (BD_ADDR) for a HCI device from
 *				       a firmware node property.
 * @hdev:	The HCI device
 *
 * Search the firmware node for 'local-bd-address'.
 *
 * All-zero BD addresses are rejected, because those could be properties
 * that exist in the firmware tables, but were not updated by the
 * firmware.
 */
static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
{
	struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
	bdaddr_t ba;
	int ret;

	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
					    (u8 *)&ba, sizeof(ba));
	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
		return;

	bacpy(&hdev->public_addr, &ba);
}

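/* Bring the device up: power on the transport, run the vendor setup and
 * the init stages, then announce HCI_UP. The whole sequence runs under
 * the request sync lock so it cannot race with hci_dev_do_close().
 */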
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to configure said address.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_PRIMARY &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
		bool invalid_bdaddr;

		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set the quirk to mark the
		 * BD_ADDR invalid before creating the HCI device or in
		 * its setup callback.
		 */
		invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
					  &hdev->quirks);

		if (ret)
			goto setup_failed;

		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
			if (!bacmp(&hdev->public_addr, BDADDR_ANY))
				hci_dev_get_bd_addr_from_property(hdev);

			if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
			    hdev->set_bdaddr) {
				ret = hdev->set_bdaddr(hdev,
						       &hdev->public_addr);

				/* If setting of the BD_ADDR from the device
				 * property succeeds, then treat the address
				 * as valid even if the invalid BD_ADDR
				 * quirk indicates otherwise.
				 */
				if (!ret)
					invalid_bdaddr = false;
			}
		}

setup_failed:
		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * For the invalid BD_ADDR quirk it is possible that
		 * it becomes a valid address if the bootloader does
		 * provide a way to set the public address for the
		 * controller.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    invalid_bdaddr)
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	msft_do_open(hdev);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		hci_leds_update_powered(hdev, true);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hci_dev_test_flag(hdev, HCI_MGMT) &&
		    hdev->dev_type == HCI_PRIMARY) {
			ret = __hci_req_hci_power_on(hdev);
			mgmt_power_on(hdev, ret);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_sync_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions when
	 * phase 2 of the setup procedure are not retried.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

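/* Tear-down counterpart of hci_dev_do_open(): run the vendor shutdown
 * hook, flush pending work and open connections, and return the
 * controller to a powered-down state.
 */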
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_request_cancel_all(hdev);
	hci_req_sync_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_sync_unlock(hdev);
		return 0;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		struct adv_info *adv_instance;

		cancel_delayed_work_sync(&hdev->rpa_expired);

		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	msft_do_close(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
		wake_up(&hdev->suspend_wait_q);

	/* After this point our queues are empty
	 * and no tasks are scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_sync_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);

	hci_req_sync_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

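/* Keep the mgmt CONNECTABLE/DISCOVERABLE settings in sync when the scan
 * mode is changed through the legacy HCISETSCAN ioctl, so that mgmt
 * clients observe consistent new settings.
 */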
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as specified in the Management
		 * interface documentation.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

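/* Deferred power handling: hci_power_on() and hci_power_off() run from
 * hdev->req_workqueue so that mgmt and rfkill callers never block on the
 * transport directly.
 */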
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		hci_req_sync_lock(hdev);
		err = __hci_req_hci_power_on(hdev);
		hci_req_sync_unlock(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be send.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

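/* The list-clearing helpers below empty the UUID and key lists. Entries
 * on RCU-protected lists are unlinked with list_del_rcu() and freed via
 * kfree_rcu() so that concurrent lockless readers stay safe.
 */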
void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_blocked_keys_clear(struct hci_dev *hdev)
{
	struct blocked_key *b;

	list_for_each_entry(b, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);
		kfree_rcu(b, rcu);
	}
}

bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
	bool blocked = false;
	struct blocked_key *b;

	rcu_read_lock();
	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
			blocked = true;
			break;
		}
	}

	rcu_read_unlock();
	return blocked;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently.
	 */
	return false;
}

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key.
	 */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

2662int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2663{
2664 struct link_key *key;
2665
2666 key = hci_find_link_key(hdev, bdaddr);
2667 if (!key)
2668 return -ENOENT;
2669
2670 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2671
2672 list_del_rcu(&key->list);
2673 kfree_rcu(key, rcu);
2674
2675 return 0;
2676}
2677
2678int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2679{
2680 struct smp_ltk *k;
2681 int removed = 0;
2682
2683 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2684 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2685 continue;
2686
2687 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2688
2689 list_del_rcu(&k->list);
2690 kfree_rcu(k, rcu);
2691 removed++;
2692 }
2693
2694 return removed ? 0 : -ENOENT;
2695}
2696
2697void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2698{
2699 struct smp_irk *k;
2700
2701 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2702 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2703 continue;
2704
2705 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2706
2707 list_del_rcu(&k->list);
2708 kfree_rcu(k, rcu);
2709 }
2710}
2711
2712bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2713{
2714 struct smp_ltk *k;
2715 struct smp_irk *irk;
2716 u8 addr_type;
2717
2718 if (type == BDADDR_BREDR) {
2719 if (hci_find_link_key(hdev, bdaddr))
2720 return true;
2721 return false;
2722 }
2723
	/* Convert to HCI addr type which struct smp_ltk uses */
2725 if (type == BDADDR_LE_PUBLIC)
2726 addr_type = ADDR_LE_DEV_PUBLIC;
2727 else
2728 addr_type = ADDR_LE_DEV_RANDOM;
2729
2730 irk = hci_get_irk(hdev, bdaddr, addr_type);
2731 if (irk) {
2732 bdaddr = &irk->bdaddr;
2733 addr_type = irk->addr_type;
2734 }
2735
2736 rcu_read_lock();
2737 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2738 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2739 rcu_read_unlock();
2740 return true;
2741 }
2742 }
2743 rcu_read_unlock();
2744
2745 return false;
2746}
2747
/* HCI command timer function */
2749static void hci_cmd_timeout(struct work_struct *work)
2750{
2751 struct hci_dev *hdev = container_of(work, struct hci_dev,
2752 cmd_timer.work);
2753
2754 if (hdev->sent_cmd) {
2755 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2756 u16 opcode = __le16_to_cpu(sent->opcode);
2757
2758 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2759 } else {
2760 bt_dev_err(hdev, "command tx timeout");
2761 }
2762
2763 if (hdev->cmd_timeout)
2764 hdev->cmd_timeout(hdev);
2765
2766 atomic_set(&hdev->cmd_cnt, 1);
2767 queue_work(hdev->workqueue, &hdev->cmd_work);
2768}
2769
2770struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2771 bdaddr_t *bdaddr, u8 bdaddr_type)
2772{
2773 struct oob_data *data;
2774
2775 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2776 if (bacmp(bdaddr, &data->bdaddr) != 0)
2777 continue;
2778 if (data->bdaddr_type != bdaddr_type)
2779 continue;
2780 return data;
2781 }
2782
2783 return NULL;
2784}
2785
2786int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2787 u8 bdaddr_type)
2788{
2789 struct oob_data *data;
2790
2791 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2792 if (!data)
2793 return -ENOENT;
2794
2795 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2796
2797 list_del(&data->list);
2798 kfree(data);
2799
2800 return 0;
2801}
2802
2803void hci_remote_oob_data_clear(struct hci_dev *hdev)
2804{
2805 struct oob_data *data, *n;
2806
2807 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2808 list_del(&data->list);
2809 kfree(data);
2810 }
2811}
2812
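/* Store remote out-of-band pairing data for bdaddr. The data->present
 * bitmask maintained below encodes which value pairs are valid:
 * 0x00 none, 0x01 P-192 only, 0x02 P-256 only, 0x03 both P-192 and
 * P-256.
 */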
2813int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2814 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2815 u8 *hash256, u8 *rand256)
2816{
2817 struct oob_data *data;
2818
2819 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2820 if (!data) {
2821 data = kmalloc(sizeof(*data), GFP_KERNEL);
2822 if (!data)
2823 return -ENOMEM;
2824
2825 bacpy(&data->bdaddr, bdaddr);
2826 data->bdaddr_type = bdaddr_type;
2827 list_add(&data->list, &hdev->remote_oob_data);
2828 }
2829
2830 if (hash192 && rand192) {
2831 memcpy(data->hash192, hash192, sizeof(data->hash192));
2832 memcpy(data->rand192, rand192, sizeof(data->rand192));
2833 if (hash256 && rand256)
2834 data->present = 0x03;
2835 } else {
2836 memset(data->hash192, 0, sizeof(data->hash192));
2837 memset(data->rand192, 0, sizeof(data->rand192));
2838 if (hash256 && rand256)
2839 data->present = 0x02;
2840 else
2841 data->present = 0x00;
2842 }
2843
2844 if (hash256 && rand256) {
2845 memcpy(data->hash256, hash256, sizeof(data->hash256));
2846 memcpy(data->rand256, rand256, sizeof(data->rand256));
2847 } else {
2848 memset(data->hash256, 0, sizeof(data->hash256));
2849 memset(data->rand256, 0, sizeof(data->rand256));
2850 if (hash192 && rand192)
2851 data->present = 0x01;
2852 }
2853
2854 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2855
2856 return 0;
2857}
2858
/* This function requires the caller holds hdev->lock */
2860struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2861{
2862 struct adv_info *adv_instance;
2863
2864 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2865 if (adv_instance->instance == instance)
2866 return adv_instance;
2867 }
2868
2869 return NULL;
2870}
2871
/* This function requires the caller holds hdev->lock */
2873struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2874{
2875 struct adv_info *cur_instance;
2876
2877 cur_instance = hci_find_adv_instance(hdev, instance);
2878 if (!cur_instance)
2879 return NULL;
2880
2881 if (cur_instance == list_last_entry(&hdev->adv_instances,
2882 struct adv_info, list))
2883 return list_first_entry(&hdev->adv_instances,
2884 struct adv_info, list);
2885 else
2886 return list_next_entry(cur_instance, list);
2887}
2888
/* This function requires the caller holds hdev->lock */
2890int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2891{
2892 struct adv_info *adv_instance;
2893
2894 adv_instance = hci_find_adv_instance(hdev, instance);
2895 if (!adv_instance)
2896 return -ENOENT;
2897
	BT_DBG("%s removing instance %d", hdev->name, instance);
2899
2900 if (hdev->cur_adv_instance == instance) {
2901 if (hdev->adv_instance_timeout) {
2902 cancel_delayed_work(&hdev->adv_instance_expire);
2903 hdev->adv_instance_timeout = 0;
2904 }
2905 hdev->cur_adv_instance = 0x00;
2906 }
2907
2908 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2909
2910 list_del(&adv_instance->list);
2911 kfree(adv_instance);
2912
2913 hdev->adv_instance_cnt--;
2914
2915 return 0;
2916}
2917
2918void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2919{
2920 struct adv_info *adv_instance, *n;
2921
2922 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2923 adv_instance->rpa_expired = rpa_expired;
2924}
2925
/* This function requires the caller holds hdev->lock */
2927void hci_adv_instances_clear(struct hci_dev *hdev)
2928{
2929 struct adv_info *adv_instance, *n;
2930
2931 if (hdev->adv_instance_timeout) {
2932 cancel_delayed_work(&hdev->adv_instance_expire);
2933 hdev->adv_instance_timeout = 0;
2934 }
2935
2936 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2937 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2938 list_del(&adv_instance->list);
2939 kfree(adv_instance);
2940 }
2941
2942 hdev->adv_instance_cnt = 0;
2943 hdev->cur_adv_instance = 0x00;
2944}
2945
2946static void adv_instance_rpa_expired(struct work_struct *work)
2947{
2948 struct adv_info *adv_instance = container_of(work, struct adv_info,
2949 rpa_expired_cb.work);
2950
2951 BT_DBG("");
2952
2953 adv_instance->rpa_expired = true;
2954}
2955
/* This function requires the caller holds hdev->lock */
2957int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2958 u16 adv_data_len, u8 *adv_data,
2959 u16 scan_rsp_len, u8 *scan_rsp_data,
2960 u16 timeout, u16 duration, s8 tx_power,
2961 u32 min_interval, u32 max_interval)
2962{
2963 struct adv_info *adv_instance;
2964
2965 adv_instance = hci_find_adv_instance(hdev, instance);
2966 if (adv_instance) {
2967 memset(adv_instance->adv_data, 0,
2968 sizeof(adv_instance->adv_data));
2969 memset(adv_instance->scan_rsp_data, 0,
2970 sizeof(adv_instance->scan_rsp_data));
2971 } else {
2972 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
2973 instance < 1 || instance > hdev->le_num_of_adv_sets)
2974 return -EOVERFLOW;
2975
2976 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2977 if (!adv_instance)
2978 return -ENOMEM;
2979
2980 adv_instance->pending = true;
2981 adv_instance->instance = instance;
2982 list_add(&adv_instance->list, &hdev->adv_instances);
2983 hdev->adv_instance_cnt++;
2984 }
2985
2986 adv_instance->flags = flags;
2987 adv_instance->adv_data_len = adv_data_len;
2988 adv_instance->scan_rsp_len = scan_rsp_len;
2989 adv_instance->min_interval = min_interval;
2990 adv_instance->max_interval = max_interval;
2991 adv_instance->tx_power = tx_power;
2992
2993 if (adv_data_len)
2994 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2995
2996 if (scan_rsp_len)
2997 memcpy(adv_instance->scan_rsp_data,
2998 scan_rsp_data, scan_rsp_len);
2999
3000 adv_instance->timeout = timeout;
3001 adv_instance->remaining_time = timeout;
3002
3003 if (duration == 0)
3004 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
3005 else
3006 adv_instance->duration = duration;
3007
3008 INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
3009 adv_instance_rpa_expired);
3010
	BT_DBG("%s for instance %d", hdev->name, instance);
3012
3013 return 0;
3014}
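
/* Usage sketch (illustrative values only, real callers sit in mgmt.c):
 * register advertising instance 1 with empty data, no timeout and the
 * controller defaults for duration, tx power and intervals.
 *
 *	err = hci_add_adv_instance(hdev, 0x01, 0, 0, NULL, 0, NULL, 0, 0,
 *				   hdev->adv_tx_power,
 *				   hdev->le_adv_min_interval,
 *				   hdev->le_adv_max_interval);
 */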
3015
/* This function requires the caller holds hdev->lock */
3017int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
3018 u16 adv_data_len, u8 *adv_data,
3019 u16 scan_rsp_len, u8 *scan_rsp_data)
3020{
3021 struct adv_info *adv_instance;
3022
3023 adv_instance = hci_find_adv_instance(hdev, instance);
3024
	/* If advertisement doesn't exist, we can't modify its data */
3026 if (!adv_instance)
3027 return -ENOENT;
3028
3029 if (adv_data_len) {
3030 memset(adv_instance->adv_data, 0,
3031 sizeof(adv_instance->adv_data));
3032 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3033 adv_instance->adv_data_len = adv_data_len;
3034 }
3035
3036 if (scan_rsp_len) {
3037 memset(adv_instance->scan_rsp_data, 0,
3038 sizeof(adv_instance->scan_rsp_data));
3039 memcpy(adv_instance->scan_rsp_data,
3040 scan_rsp_data, scan_rsp_len);
3041 adv_instance->scan_rsp_len = scan_rsp_len;
3042 }
3043
3044 return 0;
3045}
3046
/* This function requires the caller holds hdev->lock */
3048void hci_adv_monitors_clear(struct hci_dev *hdev)
3049{
3050 struct adv_monitor *monitor;
3051 int handle;
3052
3053 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
3054 hci_free_adv_monitor(monitor);
3055
3056 idr_destroy(&hdev->adv_monitors_idr);
3057}
3058
3059void hci_free_adv_monitor(struct adv_monitor *monitor)
3060{
3061 struct adv_pattern *pattern;
3062 struct adv_pattern *tmp;
3063
3064 if (!monitor)
3065 return;
3066
3067 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list)
3068 kfree(pattern);
3069
3070 kfree(monitor);
3071}
3072
/* This function requires the caller holds hdev->lock */
3074int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
3075{
3076 int min, max, handle;
3077
3078 if (!monitor)
3079 return -EINVAL;
3080
3081 min = HCI_MIN_ADV_MONITOR_HANDLE;
3082 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
3083 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
3084 GFP_KERNEL);
3085 if (handle < 0)
3086 return handle;
3087
3088 hdev->adv_monitors_cnt++;
3089 monitor->handle = handle;
3090
3091 hci_update_background_scan(hdev);
3092
3093 return 0;
3094}
3095
3096static int free_adv_monitor(int id, void *ptr, void *data)
3097{
3098 struct hci_dev *hdev = data;
3099 struct adv_monitor *monitor = ptr;
3100
3101 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3102 hci_free_adv_monitor(monitor);
3103 hdev->adv_monitors_cnt--;
3104
3105 return 0;
3106}
3107
/* This function requires the caller holds hdev->lock */
3109int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle)
3110{
3111 struct adv_monitor *monitor;
3112
3113 if (handle) {
3114 monitor = idr_find(&hdev->adv_monitors_idr, handle);
3115 if (!monitor)
3116 return -ENOENT;
3117
3118 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3119 hci_free_adv_monitor(monitor);
3120 hdev->adv_monitors_cnt--;
3121 } else {
		/* Remove all monitors if handle is 0. Clear the whole list */
3123 idr_for_each(&hdev->adv_monitors_idr, &free_adv_monitor, hdev);
3124 }
3125
3126 hci_update_background_scan(hdev);
3127
3128 return 0;
3129}
3130
/* This function requires the caller holds hdev->lock */
3132bool hci_is_adv_monitoring(struct hci_dev *hdev)
3133{
3134 return !idr_is_empty(&hdev->adv_monitors_idr);
3135}
3136
3137struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3138 bdaddr_t *bdaddr, u8 type)
3139{
3140 struct bdaddr_list *b;
3141
3142 list_for_each_entry(b, bdaddr_list, list) {
3143 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3144 return b;
3145 }
3146
3147 return NULL;
3148}
3149
3150struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3151 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3152 u8 type)
3153{
3154 struct bdaddr_list_with_irk *b;
3155
3156 list_for_each_entry(b, bdaddr_list, list) {
3157 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3158 return b;
3159 }
3160
3161 return NULL;
3162}
3163
3164struct bdaddr_list_with_flags *
3165hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3166 bdaddr_t *bdaddr, u8 type)
3167{
3168 struct bdaddr_list_with_flags *b;
3169
3170 list_for_each_entry(b, bdaddr_list, list) {
3171 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3172 return b;
3173 }
3174
3175 return NULL;
3176}
3177
3178void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3179{
3180 struct bdaddr_list *b, *n;
3181
3182 list_for_each_entry_safe(b, n, bdaddr_list, list) {
3183 list_del(&b->list);
3184 kfree(b);
3185 }
3186}
3187
3188int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3189{
3190 struct bdaddr_list *entry;
3191
3192 if (!bacmp(bdaddr, BDADDR_ANY))
3193 return -EBADF;
3194
3195 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3196 return -EEXIST;
3197
3198 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3199 if (!entry)
3200 return -ENOMEM;
3201
3202 bacpy(&entry->bdaddr, bdaddr);
3203 entry->bdaddr_type = type;
3204
3205 list_add(&entry->list, list);
3206
3207 return 0;
3208}
3209
3210int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3211 u8 type, u8 *peer_irk, u8 *local_irk)
3212{
3213 struct bdaddr_list_with_irk *entry;
3214
3215 if (!bacmp(bdaddr, BDADDR_ANY))
3216 return -EBADF;
3217
3218 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3219 return -EEXIST;
3220
3221 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3222 if (!entry)
3223 return -ENOMEM;
3224
3225 bacpy(&entry->bdaddr, bdaddr);
3226 entry->bdaddr_type = type;
3227
3228 if (peer_irk)
3229 memcpy(entry->peer_irk, peer_irk, 16);
3230
3231 if (local_irk)
3232 memcpy(entry->local_irk, local_irk, 16);
3233
3234 list_add(&entry->list, list);
3235
3236 return 0;
3237}
3238
3239int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3240 u8 type, u32 flags)
3241{
3242 struct bdaddr_list_with_flags *entry;
3243
3244 if (!bacmp(bdaddr, BDADDR_ANY))
3245 return -EBADF;
3246
3247 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3248 return -EEXIST;
3249
3250 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3251 if (!entry)
3252 return -ENOMEM;
3253
3254 bacpy(&entry->bdaddr, bdaddr);
3255 entry->bdaddr_type = type;
3256 entry->current_flags = flags;
3257
3258 list_add(&entry->list, list);
3259
3260 return 0;
3261}
3262
3263int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3264{
3265 struct bdaddr_list *entry;
3266
3267 if (!bacmp(bdaddr, BDADDR_ANY)) {
3268 hci_bdaddr_list_clear(list);
3269 return 0;
3270 }
3271
3272 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3273 if (!entry)
3274 return -ENOENT;
3275
3276 list_del(&entry->list);
3277 kfree(entry);
3278
3279 return 0;
3280}
3281
3282int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3283 u8 type)
3284{
3285 struct bdaddr_list_with_irk *entry;
3286
3287 if (!bacmp(bdaddr, BDADDR_ANY)) {
3288 hci_bdaddr_list_clear(list);
3289 return 0;
3290 }
3291
3292 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3293 if (!entry)
3294 return -ENOENT;
3295
3296 list_del(&entry->list);
3297 kfree(entry);
3298
3299 return 0;
3300}
3301
3302int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3303 u8 type)
3304{
3305 struct bdaddr_list_with_flags *entry;
3306
3307 if (!bacmp(bdaddr, BDADDR_ANY)) {
3308 hci_bdaddr_list_clear(list);
3309 return 0;
3310 }
3311
3312 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3313 if (!entry)
3314 return -ENOENT;
3315
3316 list_del(&entry->list);
3317 kfree(entry);
3318
3319 return 0;
3320}
3321
/* This function requires the caller holds hdev->lock */
3323struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3324 bdaddr_t *addr, u8 addr_type)
3325{
3326 struct hci_conn_params *params;
3327
3328 list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
3331 return params;
3332 }
3333 }
3334
3335 return NULL;
3336}
3337
/* This function requires the caller holds hdev->lock */
3339struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3340 bdaddr_t *addr, u8 addr_type)
3341{
3342 struct hci_conn_params *param;
3343
3344 switch (addr_type) {
3345 case ADDR_LE_DEV_PUBLIC_RESOLVED:
3346 addr_type = ADDR_LE_DEV_PUBLIC;
3347 break;
3348 case ADDR_LE_DEV_RANDOM_RESOLVED:
3349 addr_type = ADDR_LE_DEV_RANDOM;
3350 break;
3351 }
3352
3353 list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
3355 param->addr_type == addr_type)
3356 return param;
3357 }
3358
3359 return NULL;
3360}
3361
/* This function requires the caller holds hdev->lock */
3363struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3364 bdaddr_t *addr, u8 addr_type)
3365{
3366 struct hci_conn_params *params;
3367
3368 params = hci_conn_params_lookup(hdev, addr, addr_type);
3369 if (params)
3370 return params;
3371
3372 params = kzalloc(sizeof(*params), GFP_KERNEL);
3373 if (!params) {
3374 bt_dev_err(hdev, "out of memory");
3375 return NULL;
3376 }
3377
	bacpy(&params->addr, addr);
3379 params->addr_type = addr_type;
3380
	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);
3383
3384 params->conn_min_interval = hdev->le_conn_min_interval;
3385 params->conn_max_interval = hdev->le_conn_max_interval;
3386 params->conn_latency = hdev->le_conn_latency;
3387 params->supervision_timeout = hdev->le_supv_timeout;
3388 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3389
3390 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3391
3392 return params;
3393}
3394
3395static void hci_conn_params_free(struct hci_conn_params *params)
3396{
3397 if (params->conn) {
3398 hci_conn_drop(params->conn);
3399 hci_conn_put(params->conn);
3400 }
3401
	list_del(&params->action);
	list_del(&params->list);
3404 kfree(params);
3405}
3406
/* This function requires the caller holds hdev->lock */
3408void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3409{
3410 struct hci_conn_params *params;
3411
3412 params = hci_conn_params_lookup(hdev, addr, addr_type);
3413 if (!params)
3414 return;
3415
3416 hci_conn_params_free(params);
3417
3418 hci_update_background_scan(hdev);
3419
3420 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3421}
3422
/* This function requires the caller holds hdev->lock */
3424void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3425{
3426 struct hci_conn_params *params, *tmp;
3427
3428 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3429 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3430 continue;
3431
		/* If trying to establish one time connection to disabled
		 * device, leave the params, but mark them as just once.
		 */
3435 if (params->explicit_connect) {
3436 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3437 continue;
3438 }
3439
		list_del(&params->list);
3441 kfree(params);
3442 }
3443
3444 BT_DBG("All LE disabled connection parameters were removed");
3445}
3446
/* This function requires the caller holds hdev->lock */
3448static void hci_conn_params_clear_all(struct hci_dev *hdev)
3449{
3450 struct hci_conn_params *params, *tmp;
3451
3452 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3453 hci_conn_params_free(params);
3454
3455 BT_DBG("All LE connection parameters were removed");
3456}
3457
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * a static address has been configured, then use that address
 * instead of the public BR/EDR address.
 */
3471void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3472 u8 *bdaddr_type)
3473{
3474 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3475 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3476 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3477 bacmp(&hdev->static_addr, BDADDR_ANY))) {
3478 bacpy(bdaddr, &hdev->static_addr);
3479 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3480 } else {
3481 bacpy(bdaddr, &hdev->bdaddr);
3482 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3483 }
3484}
3485
3486static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3487{
3488 int i;
3489
3490 for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3491 clear_bit(i, hdev->suspend_tasks);
3492
3493 wake_up(&hdev->suspend_wait_q);
3494}
3495
3496static int hci_suspend_wait_event(struct hci_dev *hdev)
3497{
3498#define WAKE_COND \
3499 (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) == \
3500 __SUSPEND_NUM_TASKS)
3501
3502 int i;
3503 int ret = wait_event_timeout(hdev->suspend_wait_q,
3504 WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3505
3506 if (ret == 0) {
3507 bt_dev_err(hdev, "Timed out waiting for suspend events");
3508 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3509 if (test_bit(i, hdev->suspend_tasks))
3510 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
3511 clear_bit(i, hdev->suspend_tasks);
3512 }
3513
3514 ret = -ETIMEDOUT;
3515 } else {
3516 ret = 0;
3517 }
3518
3519 return ret;
3520}
3521
3522static void hci_prepare_suspend(struct work_struct *work)
3523{
3524 struct hci_dev *hdev =
3525 container_of(work, struct hci_dev, suspend_prepare);
3526
3527 hci_dev_lock(hdev);
3528 hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3529 hci_dev_unlock(hdev);
3530}
3531
3532static int hci_change_suspend_state(struct hci_dev *hdev,
3533 enum suspended_state next)
3534{
3535 hdev->suspend_state_next = next;
3536 set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3537 queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3538 return hci_suspend_wait_event(hdev);
3539}
3540
3541static void hci_clear_wake_reason(struct hci_dev *hdev)
3542{
3543 hci_dev_lock(hdev);
3544
3545 hdev->wake_reason = 0;
3546 bacpy(&hdev->wake_addr, BDADDR_ANY);
3547 hdev->wake_addr_type = 0;
3548
3549 hci_dev_unlock(hdev);
3550}
3551
3552static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3553 void *data)
3554{
3555 struct hci_dev *hdev =
3556 container_of(nb, struct hci_dev, suspend_notifier);
3557 int ret = 0;
3558 u8 state = BT_RUNNING;
3559
	/* If powering down, wait for completion. */
3561 if (mgmt_powering_down(hdev)) {
3562 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3563 ret = hci_suspend_wait_event(hdev);
3564 if (ret)
3565 goto done;
3566 }
3567
	/* Suspend notifier should only act on events when powered. */
3569 if (!hdev_is_powered(hdev))
3570 goto done;
3571
3572 if (action == PM_SUSPEND_PREPARE) {
		/* Suspend consists of two actions:
		 *  - First, disconnect everything and make the controller
		 *    not connectable (disabling scanning)
		 *  - Second, program the event filter/whitelist and enable
		 *    only the events needed for wake */
3578 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
3579 if (!ret)
3580 state = BT_SUSPEND_DISCONNECT;
3581
		/* Only configure whitelist if disconnect succeeded and wake
		 * isn't being prevented.
		 */
3585 if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
3586 ret = hci_change_suspend_state(hdev,
3587 BT_SUSPEND_CONFIGURE_WAKE);
3588 if (!ret)
3589 state = BT_SUSPEND_CONFIGURE_WAKE;
3590 }
3591
3592 hci_clear_wake_reason(hdev);
3593 mgmt_suspending(hdev, state);
3594
3595 } else if (action == PM_POST_SUSPEND) {
3596 ret = hci_change_suspend_state(hdev, BT_RUNNING);
3597
3598 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
3599 hdev->wake_addr_type);
3600 }
3601
3602done:
	/* We always allow suspend even if suspend preparation failed and
	 * attempt to recover in resume.
	 */
3606 if (ret)
3607 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3608 action, ret);
3609
3610 return NOTIFY_DONE;
3611}
3612
/* Alloc HCI device */
3614struct hci_dev *hci_alloc_dev(void)
3615{
3616 struct hci_dev *hdev;
3617
3618 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3619 if (!hdev)
3620 return NULL;
3621
3622 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3623 hdev->esco_type = (ESCO_HV1);
3624 hdev->link_mode = (HCI_LM_ACCEPT);
3625 hdev->num_iac = 0x01;
3626 hdev->io_capability = 0x03;
3627 hdev->manufacturer = 0xffff;
3628 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3629 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3630 hdev->adv_instance_cnt = 0;
3631 hdev->cur_adv_instance = 0x00;
3632 hdev->adv_instance_timeout = 0;
3633
3634 hdev->advmon_allowlist_duration = 300;
3635 hdev->advmon_no_filter_duration = 500;
3636 hdev->enable_advmon_interleave_scan = 0x00;
3637
3638 hdev->sniff_max_interval = 800;
3639 hdev->sniff_min_interval = 80;
3640
3641 hdev->le_adv_channel_map = 0x07;
3642 hdev->le_adv_min_interval = 0x0800;
3643 hdev->le_adv_max_interval = 0x0800;
3644 hdev->le_scan_interval = 0x0060;
3645 hdev->le_scan_window = 0x0030;
3646 hdev->le_scan_int_suspend = 0x0400;
3647 hdev->le_scan_window_suspend = 0x0012;
3648 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3649 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
3650 hdev->le_scan_int_connect = 0x0060;
3651 hdev->le_scan_window_connect = 0x0060;
3652 hdev->le_conn_min_interval = 0x0018;
3653 hdev->le_conn_max_interval = 0x0028;
3654 hdev->le_conn_latency = 0x0000;
3655 hdev->le_supv_timeout = 0x002a;
3656 hdev->le_def_tx_len = 0x001b;
3657 hdev->le_def_tx_time = 0x0148;
3658 hdev->le_max_tx_len = 0x001b;
3659 hdev->le_max_tx_time = 0x0148;
3660 hdev->le_max_rx_len = 0x001b;
3661 hdev->le_max_rx_time = 0x0148;
3662 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3663 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3664 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3665 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3666 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3667 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
3668 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
3669 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
3670 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
3671
3672 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3673 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3674 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3675 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3676 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3677 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3678
3679
3680 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3681 hdev->def_page_scan_int = 0x0800;
3682 hdev->def_page_scan_window = 0x0012;
3683
3684 mutex_init(&hdev->lock);
3685 mutex_init(&hdev->req_lock);
3686
3687 INIT_LIST_HEAD(&hdev->mgmt_pending);
3688 INIT_LIST_HEAD(&hdev->blacklist);
3689 INIT_LIST_HEAD(&hdev->whitelist);
3690 INIT_LIST_HEAD(&hdev->uuids);
3691 INIT_LIST_HEAD(&hdev->link_keys);
3692 INIT_LIST_HEAD(&hdev->long_term_keys);
3693 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3694 INIT_LIST_HEAD(&hdev->remote_oob_data);
3695 INIT_LIST_HEAD(&hdev->le_white_list);
3696 INIT_LIST_HEAD(&hdev->le_resolv_list);
3697 INIT_LIST_HEAD(&hdev->le_conn_params);
3698 INIT_LIST_HEAD(&hdev->pend_le_conns);
3699 INIT_LIST_HEAD(&hdev->pend_le_reports);
3700 INIT_LIST_HEAD(&hdev->conn_hash.list);
3701 INIT_LIST_HEAD(&hdev->adv_instances);
3702 INIT_LIST_HEAD(&hdev->blocked_keys);
3703
3704 INIT_WORK(&hdev->rx_work, hci_rx_work);
3705 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3706 INIT_WORK(&hdev->tx_work, hci_tx_work);
3707 INIT_WORK(&hdev->power_on, hci_power_on);
3708 INIT_WORK(&hdev->error_reset, hci_error_reset);
3709 INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
3710
3711 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3712
3713 skb_queue_head_init(&hdev->rx_q);
3714 skb_queue_head_init(&hdev->cmd_q);
3715 skb_queue_head_init(&hdev->raw_q);
3716
3717 init_waitqueue_head(&hdev->req_wait_q);
3718 init_waitqueue_head(&hdev->suspend_wait_q);
3719
3720 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3721
3722 hci_request_setup(hdev);
3723
3724 hci_init_sysfs(hdev);
3725 discovery_init(hdev);
3726
3727 return hdev;
3728}
3729EXPORT_SYMBOL(hci_alloc_dev);
3730
/* Free HCI device */
3732void hci_free_dev(struct hci_dev *hdev)
3733{
	/* will free via device release */
3735 put_device(&hdev->dev);
3736}
3737EXPORT_SYMBOL(hci_free_dev);
3738
/* Register HCI device */
3740int hci_register_dev(struct hci_dev *hdev)
3741{
3742 int id, error;
3743
3744 if (!hdev->open || !hdev->close || !hdev->send)
3745 return -EINVAL;
3746
	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
3750 switch (hdev->dev_type) {
3751 case HCI_PRIMARY:
3752 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3753 break;
3754 case HCI_AMP:
3755 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3756 break;
3757 default:
3758 return -EINVAL;
3759 }
3760
3761 if (id < 0)
3762 return id;
3763
3764 sprintf(hdev->name, "hci%d", id);
3765 hdev->id = id;
3766
3767 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3768
3769 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3770 if (!hdev->workqueue) {
3771 error = -ENOMEM;
3772 goto err;
3773 }
3774
3775 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3776 hdev->name);
3777 if (!hdev->req_workqueue) {
3778 destroy_workqueue(hdev->workqueue);
3779 error = -ENOMEM;
3780 goto err;
3781 }
3782
3783 if (!IS_ERR_OR_NULL(bt_debugfs))
3784 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3785
3786 dev_set_name(&hdev->dev, "%s", hdev->name);
3787
3788 error = device_add(&hdev->dev);
3789 if (error < 0)
3790 goto err_wqueue;
3791
3792 hci_leds_init(hdev);
3793
3794 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3795 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3796 hdev);
3797 if (hdev->rfkill) {
3798 if (rfkill_register(hdev->rfkill) < 0) {
3799 rfkill_destroy(hdev->rfkill);
3800 hdev->rfkill = NULL;
3801 }
3802 }
3803
3804 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3805 hci_dev_set_flag(hdev, HCI_RFKILLED);
3806
3807 hci_dev_set_flag(hdev, HCI_SETUP);
3808 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3809
3810 if (hdev->dev_type == HCI_PRIMARY) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
3814 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3815 }
3816
3817 write_lock(&hci_dev_list_lock);
3818 list_add(&hdev->list, &hci_dev_list);
3819 write_unlock(&hci_dev_list_lock);
3820
	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
3824 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3825 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3826
3827 hci_sock_dev_event(hdev, HCI_DEV_REG);
3828 hci_dev_hold(hdev);
3829
3830 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3831 error = register_pm_notifier(&hdev->suspend_notifier);
3832 if (error)
3833 goto err_wqueue;
3834
3835 queue_work(hdev->req_workqueue, &hdev->power_on);
3836
3837 idr_init(&hdev->adv_monitors_idr);
3838
3839 return id;
3840
3841err_wqueue:
3842 destroy_workqueue(hdev->workqueue);
3843 destroy_workqueue(hdev->req_workqueue);
3844err:
3845 ida_simple_remove(&hci_index_ida, hdev->id);
3846
3847 return error;
3848}
3849EXPORT_SYMBOL(hci_register_dev);
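
/* Usage sketch (hypothetical transport driver, names are illustrative):
 * allocate an hdev, wire up the mandatory open/close/send callbacks and
 * register it with the core.
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->open  = foo_open;
 *	hdev->close = foo_close;
 *	hdev->send  = foo_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */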
3850
/* Unregister HCI device */
3852void hci_unregister_dev(struct hci_dev *hdev)
3853{
3854 int id;
3855
3856 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3857
3858 hci_dev_set_flag(hdev, HCI_UNREGISTER);
3859
3860 id = hdev->id;
3861
3862 write_lock(&hci_dev_list_lock);
3863 list_del(&hdev->list);
3864 write_unlock(&hci_dev_list_lock);
3865
3866 cancel_work_sync(&hdev->power_on);
3867
3868 hci_suspend_clear_tasks(hdev);
3869 unregister_pm_notifier(&hdev->suspend_notifier);
3870 cancel_work_sync(&hdev->suspend_prepare);
3871
3872 hci_dev_do_close(hdev);
3873
3874 if (!test_bit(HCI_INIT, &hdev->flags) &&
3875 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3876 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3877 hci_dev_lock(hdev);
3878 mgmt_index_removed(hdev);
3879 hci_dev_unlock(hdev);
3880 }
3881
	/* mgmt_index_removed should take care of emptying the
	 * pending list */
3884 BUG_ON(!list_empty(&hdev->mgmt_pending));
3885
3886 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3887
3888 if (hdev->rfkill) {
3889 rfkill_unregister(hdev->rfkill);
3890 rfkill_destroy(hdev->rfkill);
3891 }
3892
3893 device_del(&hdev->dev);
3894
3895 debugfs_remove_recursive(hdev->debugfs);
3896 kfree_const(hdev->hw_info);
3897 kfree_const(hdev->fw_info);
3898
3899 destroy_workqueue(hdev->workqueue);
3900 destroy_workqueue(hdev->req_workqueue);
3901
3902 hci_dev_lock(hdev);
3903 hci_bdaddr_list_clear(&hdev->blacklist);
3904 hci_bdaddr_list_clear(&hdev->whitelist);
3905 hci_uuids_clear(hdev);
3906 hci_link_keys_clear(hdev);
3907 hci_smp_ltks_clear(hdev);
3908 hci_smp_irks_clear(hdev);
3909 hci_remote_oob_data_clear(hdev);
3910 hci_adv_instances_clear(hdev);
3911 hci_adv_monitors_clear(hdev);
3912 hci_bdaddr_list_clear(&hdev->le_white_list);
3913 hci_bdaddr_list_clear(&hdev->le_resolv_list);
3914 hci_conn_params_clear_all(hdev);
3915 hci_discovery_filter_clear(hdev);
3916 hci_blocked_keys_clear(hdev);
3917 hci_dev_unlock(hdev);
3918
3919 hci_dev_put(hdev);
3920
3921 ida_simple_remove(&hci_index_ida, id);
3922}
3923EXPORT_SYMBOL(hci_unregister_dev);
3924
/* Suspend HCI device */
3926int hci_suspend_dev(struct hci_dev *hdev)
3927{
3928 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3929 return 0;
3930}
3931EXPORT_SYMBOL(hci_suspend_dev);
3932
/* Resume HCI device */
3934int hci_resume_dev(struct hci_dev *hdev)
3935{
3936 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3937 return 0;
3938}
3939EXPORT_SYMBOL(hci_resume_dev);
3940
/* Reset HCI device */
3942int hci_reset_dev(struct hci_dev *hdev)
3943{
3944 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3945 struct sk_buff *skb;
3946
3947 skb = bt_skb_alloc(3, GFP_ATOMIC);
3948 if (!skb)
3949 return -ENOMEM;
3950
3951 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3952 skb_put_data(skb, hw_err, 3);
3953
	/* Send Hardware Error to upper stack */
3955 return hci_recv_frame(hdev, skb);
3956}
3957EXPORT_SYMBOL(hci_reset_dev);
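
/* Usage sketch (hypothetical driver error path): a transport driver that
 * detects a dead firmware state can ask the core to restart the stack by
 * injecting the hardware error event above:
 *
 *	if (firmware_dead)
 *		hci_reset_dev(hdev);
 */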
3958
/* Receive frame from HCI drivers */
3960int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3961{
3962 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3963 && !test_bit(HCI_INIT, &hdev->flags))) {
3964 kfree_skb(skb);
3965 return -ENXIO;
3966 }
3967
3968 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3969 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3970 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
3971 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
3972 kfree_skb(skb);
3973 return -EINVAL;
3974 }
3975
	/* Incoming skb */
3977 bt_cb(skb)->incoming = 1;
3978
	/* Time stamp */
3980 __net_timestamp(skb);
3981
3982 skb_queue_tail(&hdev->rx_q, skb);
3983 queue_work(hdev->workqueue, &hdev->rx_work);
3984
3985 return 0;
3986}
3987EXPORT_SYMBOL(hci_recv_frame);
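
/* Usage sketch (hypothetical driver RX path, "data" and "len" are
 * illustrative): wrap the bytes received from the transport in an skb,
 * tag the packet type and hand the result to the core:
 *
 *	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, data, len);
 *	return hci_recv_frame(hdev, skb);
 */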
3988
/* Receive diagnostic message from HCI drivers */
3990int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3991{
	/* Mark as diagnostic packet */
3993 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3994
	/* Time stamp */
3996 __net_timestamp(skb);
3997
3998 skb_queue_tail(&hdev->rx_q, skb);
3999 queue_work(hdev->workqueue, &hdev->rx_work);
4000
4001 return 0;
4002}
4003EXPORT_SYMBOL(hci_recv_diag);
4004
4005void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
4006{
4007 va_list vargs;
4008
4009 va_start(vargs, fmt);
4010 kfree_const(hdev->hw_info);
4011 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4012 va_end(vargs);
4013}
4014EXPORT_SYMBOL(hci_set_hw_info);
4015
4016void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
4017{
4018 va_list vargs;
4019
4020 va_start(vargs, fmt);
4021 kfree_const(hdev->fw_info);
4022 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4023 va_end(vargs);
4024}
4025EXPORT_SYMBOL(hci_set_fw_info);
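
/* Usage sketch: drivers typically record hardware and firmware details
 * once during setup, e.g. (format strings and values illustrative):
 *
 *	hci_set_hw_info(hdev, "rev %u", hw_rev);
 *	hci_set_fw_info(hdev, "version %u.%u", major, minor);
 */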
4026
/* ---- Interface to upper protocols ---- */
4028
4029int hci_register_cb(struct hci_cb *cb)
4030{
4031 BT_DBG("%p name %s", cb, cb->name);
4032
4033 mutex_lock(&hci_cb_list_lock);
4034 list_add_tail(&cb->list, &hci_cb_list);
4035 mutex_unlock(&hci_cb_list_lock);
4036
4037 return 0;
4038}
4039EXPORT_SYMBOL(hci_register_cb);
4040
4041int hci_unregister_cb(struct hci_cb *cb)
4042{
4043 BT_DBG("%p name %s", cb, cb->name);
4044
4045 mutex_lock(&hci_cb_list_lock);
4046 list_del(&cb->list);
4047 mutex_unlock(&hci_cb_list_lock);
4048
4049 return 0;
4050}
4051EXPORT_SYMBOL(hci_unregister_cb);
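
/* Usage sketch (hypothetical protocol layer): register a callback block
 * so the core reports link events; "foo_connect_cfm" is illustrative and
 * only the callbacks of interest need to be filled in.
 *
 *	static struct hci_cb foo_cb = {
 *		.name		= "foo",
 *		.connect_cfm	= foo_connect_cfm,
 *	};
 *
 *	hci_register_cb(&foo_cb);
 *	...
 *	hci_unregister_cb(&foo_cb);
 */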
4052
4053static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4054{
4055 int err;
4056
4057 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
4058 skb->len);
4059
	/* Time stamp */
4061 __net_timestamp(skb);
4062
	/* Send copy to monitor */
4064 hci_send_to_monitor(hdev, skb);
4065
4066 if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
4068 hci_send_to_sock(hdev, skb);
4069 }
4070
	/* Get rid of skb owner, prior to sending to the driver. */
4072 skb_orphan(skb);
4073
4074 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
4075 kfree_skb(skb);
4076 return;
4077 }
4078
4079 err = hdev->send(hdev, skb);
4080 if (err < 0) {
4081 bt_dev_err(hdev, "sending frame failed (%d)", err);
4082 kfree_skb(skb);
4083 }
4084}
4085
/* Send HCI command */
4087int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4088 const void *param)
4089{
4090 struct sk_buff *skb;
4091
4092 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4093
4094 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4095 if (!skb) {
4096 bt_dev_err(hdev, "no memory for command");
4097 return -ENOMEM;
4098 }
4099
	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
4103 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
4104
4105 skb_queue_tail(&hdev->cmd_q, skb);
4106 queue_work(hdev->workqueue, &hdev->cmd_work);
4107
4108 return 0;
4109}
4110
4111int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
4112 const void *param)
4113{
4114 struct sk_buff *skb;
4115
4116 if (hci_opcode_ogf(opcode) != 0x3f) {
		/* A controller receiving a command shall respond with either
		 * a Command Status Event or a Command Complete Event.
		 * Therefore, all standard HCI commands must be sent via the
		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
		 * Vendor specific commands (OGF 0x3f) are the one exception:
		 * they may not generate any response at all, so they are the
		 * only opcodes accepted by this fire-and-forget interface.
		 */
4125 bt_dev_err(hdev, "unresponded command not supported");
4126 return -EINVAL;
4127 }
4128
4129 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4130 if (!skb) {
4131 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
4132 opcode);
4133 return -ENOMEM;
4134 }
4135
4136 hci_send_frame(hdev, skb);
4137
4138 return 0;
4139}
4140EXPORT_SYMBOL(__hci_cmd_send);
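
/* Usage sketch: __hci_cmd_send() is limited to vendor commands (OGF
 * 0x3f). The OCF 0x0001 and the parameter buffer below are made up for
 * illustration:
 *
 *	static const u8 param[] = { 0x01 };
 *
 *	err = __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
 *			     sizeof(param), param);
 */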
4141
/* Get data from the previously sent command */
4143void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4144{
4145 struct hci_command_hdr *hdr;
4146
4147 if (!hdev->sent_cmd)
4148 return NULL;
4149
4150 hdr = (void *) hdev->sent_cmd->data;
4151
4152 if (hdr->opcode != cpu_to_le16(opcode))
4153 return NULL;
4154
4155 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4156
4157 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4158}
4159
/* Send HCI command and wait for command complete event */
4161struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
4162 const void *param, u32 timeout)
4163{
4164 struct sk_buff *skb;
4165
4166 if (!test_bit(HCI_UP, &hdev->flags))
4167 return ERR_PTR(-ENETDOWN);
4168
4169 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
4170
4171 hci_req_sync_lock(hdev);
4172 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
4173 hci_req_sync_unlock(hdev);
4174
4175 return skb;
4176}
4177EXPORT_SYMBOL(hci_cmd_sync);
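
/* Usage sketch: issue a command and block for its completion; the
 * returned skb holds the command complete parameters and must be freed
 * by the caller. HCI_OP_READ_LOCAL_VERSION is just an example opcode.
 *
 *	struct sk_buff *skb;
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	kfree_skb(skb);
 */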
4178
/* Send ACL data */
4180static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4181{
4182 struct hci_acl_hdr *hdr;
4183 int len = skb->len;
4184
4185 skb_push(skb, HCI_ACL_HDR_SIZE);
4186 skb_reset_transport_header(skb);
4187 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4188 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4189 hdr->dlen = cpu_to_le16(len);
4190}
4191
4192static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4193 struct sk_buff *skb, __u16 flags)
4194{
4195 struct hci_conn *conn = chan->conn;
4196 struct hci_dev *hdev = conn->hdev;
4197 struct sk_buff *list;
4198
4199 skb->len = skb_headlen(skb);
4200 skb->data_len = 0;
4201
4202 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4203
4204 switch (hdev->dev_type) {
4205 case HCI_PRIMARY:
4206 hci_add_acl_hdr(skb, conn->handle, flags);
4207 break;
4208 case HCI_AMP:
4209 hci_add_acl_hdr(skb, chan->handle, flags);
4210 break;
4211 default:
4212 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4213 return;
4214 }
4215
4216 list = skb_shinfo(skb)->frag_list;
4217 if (!list) {
		/* Non fragmented */
4219 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4220
4221 skb_queue_tail(queue, skb);
4222 } else {
		/* Fragmented */
4224 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4225
4226 skb_shinfo(skb)->frag_list = NULL;
4227
		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
4233 spin_lock_bh(&queue->lock);
4234
4235 __skb_queue_tail(queue, skb);
4236
4237 flags &= ~ACL_START;
4238 flags |= ACL_CONT;
4239 do {
4240 skb = list; list = list->next;
4241
4242 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4243 hci_add_acl_hdr(skb, conn->handle, flags);
4244
4245 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4246
4247 __skb_queue_tail(queue, skb);
4248 } while (list);
4249
4250 spin_unlock_bh(&queue->lock);
4251 }
4252}
4253
4254void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4255{
4256 struct hci_dev *hdev = chan->conn->hdev;
4257
4258 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4259
4260 hci_queue_acl(chan, &chan->data_q, skb, flags);
4261
4262 queue_work(hdev->workqueue, &hdev->tx_work);
4263}
4264
/* Send SCO data */
4266void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4267{
4268 struct hci_dev *hdev = conn->hdev;
4269 struct hci_sco_hdr hdr;
4270
4271 BT_DBG("%s len %d", hdev->name, skb->len);
4272
4273 hdr.handle = cpu_to_le16(conn->handle);
4274 hdr.dlen = skb->len;
4275
4276 skb_push(skb, HCI_SCO_HDR_SIZE);
4277 skb_reset_transport_header(skb);
4278 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4279
4280 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
4281
4282 skb_queue_tail(&conn->data_q, skb);
4283 queue_work(hdev->workqueue, &hdev->tx_work);
4284}
4285
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
4289static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4290 int *quote)
4291{
4292 struct hci_conn_hash *h = &hdev->conn_hash;
4293 struct hci_conn *conn = NULL, *c;
4294 unsigned int num = 0, min = ~0;
4295
	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
4298
4299 rcu_read_lock();
4300
4301 list_for_each_entry_rcu(c, &h->list, list) {
4302 if (c->type != type || skb_queue_empty(&c->data_q))
4303 continue;
4304
4305 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4306 continue;
4307
4308 num++;
4309
4310 if (c->sent < min) {
4311 min = c->sent;
4312 conn = c;
4313 }
4314
4315 if (hci_conn_num(hdev, type) == num)
4316 break;
4317 }
4318
4319 rcu_read_unlock();
4320
4321 if (conn) {
4322 int cnt, q;
4323
4324 switch (conn->type) {
4325 case ACL_LINK:
4326 cnt = hdev->acl_cnt;
4327 break;
4328 case SCO_LINK:
4329 case ESCO_LINK:
4330 cnt = hdev->sco_cnt;
4331 break;
4332 case LE_LINK:
4333 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4334 break;
4335 default:
4336 cnt = 0;
4337 bt_dev_err(hdev, "unknown link type %d", conn->type);
4338 }
4339
4340 q = cnt / num;
4341 *quote = q ? q : 1;
4342 } else
4343 *quote = 0;
4344
4345 BT_DBG("conn %p quote %d", conn, *quote);
4346 return conn;
4347}
4348
4349static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4350{
4351 struct hci_conn_hash *h = &hdev->conn_hash;
4352 struct hci_conn *c;
4353
4354 bt_dev_err(hdev, "link tx timeout");
4355
4356 rcu_read_lock();
4357
	/* Kill stalled connections */
4359 list_for_each_entry_rcu(c, &h->list, list) {
4360 if (c->type == type && c->sent) {
4361 bt_dev_err(hdev, "killing stalled connection %pMR",
4362 &c->dst);
4363 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4364 }
4365 }
4366
4367 rcu_read_unlock();
4368}
4369
4370static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4371 int *quote)
4372{
4373 struct hci_conn_hash *h = &hdev->conn_hash;
4374 struct hci_chan *chan = NULL;
4375 unsigned int num = 0, min = ~0, cur_prio = 0;
4376 struct hci_conn *conn;
4377 int cnt, q, conn_num = 0;
4378
4379 BT_DBG("%s", hdev->name);
4380
4381 rcu_read_lock();
4382
4383 list_for_each_entry_rcu(conn, &h->list, list) {
4384 struct hci_chan *tmp;
4385
4386 if (conn->type != type)
4387 continue;
4388
4389 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4390 continue;
4391
4392 conn_num++;
4393
4394 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4395 struct sk_buff *skb;
4396
4397 if (skb_queue_empty(&tmp->data_q))
4398 continue;
4399
4400 skb = skb_peek(&tmp->data_q);
4401 if (skb->priority < cur_prio)
4402 continue;
4403
4404 if (skb->priority > cur_prio) {
4405 num = 0;
4406 min = ~0;
4407 cur_prio = skb->priority;
4408 }
4409
4410 num++;
4411
4412 if (conn->sent < min) {
4413 min = conn->sent;
4414 chan = tmp;
4415 }
4416 }
4417
4418 if (hci_conn_num(hdev, type) == conn_num)
4419 break;
4420 }
4421
4422 rcu_read_unlock();
4423
4424 if (!chan)
4425 return NULL;
4426
4427 switch (chan->conn->type) {
4428 case ACL_LINK:
4429 cnt = hdev->acl_cnt;
4430 break;
4431 case AMP_LINK:
4432 cnt = hdev->block_cnt;
4433 break;
4434 case SCO_LINK:
4435 case ESCO_LINK:
4436 cnt = hdev->sco_cnt;
4437 break;
4438 case LE_LINK:
4439 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4440 break;
4441 default:
4442 cnt = 0;
4443 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4444 }
4445
4446 q = cnt / num;
4447 *quote = q ? q : 1;
4448 BT_DBG("chan %p quote %d", chan, *quote);
4449 return chan;
4450}
4451
4452static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4453{
4454 struct hci_conn_hash *h = &hdev->conn_hash;
4455 struct hci_conn *conn;
4456 int num = 0;
4457
4458 BT_DBG("%s", hdev->name);
4459
4460 rcu_read_lock();
4461
4462 list_for_each_entry_rcu(conn, &h->list, list) {
4463 struct hci_chan *chan;
4464
4465 if (conn->type != type)
4466 continue;
4467
4468 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4469 continue;
4470
4471 num++;
4472
4473 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4474 struct sk_buff *skb;
4475
4476 if (chan->sent) {
4477 chan->sent = 0;
4478 continue;
4479 }
4480
4481 if (skb_queue_empty(&chan->data_q))
4482 continue;
4483
4484 skb = skb_peek(&chan->data_q);
4485 if (skb->priority >= HCI_PRIO_MAX - 1)
4486 continue;
4487
4488 skb->priority = HCI_PRIO_MAX - 1;
4489
4490 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4491 skb->priority);
4492 }
4493
4494 if (hci_conn_num(hdev, type) == num)
4495 break;
4496 }
4497
4498 rcu_read_unlock();
4499
4500}
4501
4502static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4503{
	/* Calculate count of blocks used by this packet */
4505 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4506}
4507
4508static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4509{
4510 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
4513 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4514 HCI_ACL_TX_TIMEOUT))
4515 hci_link_tx_to(hdev, ACL_LINK);
4516 }
4517}
4518
/* Schedule SCO */
4520static void hci_sched_sco(struct hci_dev *hdev)
4521{
4522 struct hci_conn *conn;
4523 struct sk_buff *skb;
4524 int quote;
4525
4526 BT_DBG("%s", hdev->name);
4527
4528 if (!hci_conn_num(hdev, SCO_LINK))
4529 return;
4530
	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4532 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4533 BT_DBG("skb %p len %d", skb, skb->len);
4534 hci_send_frame(hdev, skb);
4535
4536 conn->sent++;
4537 if (conn->sent == ~0)
4538 conn->sent = 0;
4539 }
4540 }
4541}
4542
4543static void hci_sched_esco(struct hci_dev *hdev)
4544{
4545 struct hci_conn *conn;
4546 struct sk_buff *skb;
4547 int quote;
4548
4549 BT_DBG("%s", hdev->name);
4550
4551 if (!hci_conn_num(hdev, ESCO_LINK))
4552 return;
4553
	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
4556 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4557 BT_DBG("skb %p len %d", skb, skb->len);
4558 hci_send_frame(hdev, skb);
4559
4560 conn->sent++;
4561 if (conn->sent == ~0)
4562 conn->sent = 0;
4563 }
4564 }
4565}
4566
4567static void hci_sched_acl_pkt(struct hci_dev *hdev)
4568{
4569 unsigned int cnt = hdev->acl_cnt;
4570 struct hci_chan *chan;
4571 struct sk_buff *skb;
4572 int quote;
4573
4574 __check_timeout(hdev, cnt);
4575
4576 while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4578 u32 priority = (skb_peek(&chan->data_q))->priority;
4579 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4580 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4581 skb->len, skb->priority);
4582
			/* Stop if priority has changed */
4584 if (skb->priority < priority)
4585 break;
4586
4587 skb = skb_dequeue(&chan->data_q);
4588
4589 hci_conn_enter_active_mode(chan->conn,
4590 bt_cb(skb)->force_active);
4591
4592 hci_send_frame(hdev, skb);
4593 hdev->acl_last_tx = jiffies;
4594
4595 hdev->acl_cnt--;
4596 chan->sent++;
4597 chan->conn->sent++;
4598
			/* Send pending SCO packets right away */
4600 hci_sched_sco(hdev);
4601 hci_sched_esco(hdev);
4602 }
4603 }
4604
4605 if (cnt != hdev->acl_cnt)
4606 hci_prio_recalculate(hdev, ACL_LINK);
4607}
4608
4609static void hci_sched_acl_blk(struct hci_dev *hdev)
4610{
4611 unsigned int cnt = hdev->block_cnt;
4612 struct hci_chan *chan;
4613 struct sk_buff *skb;
4614 int quote;
4615 u8 type;
4616
4617 __check_timeout(hdev, cnt);
4618
4619 BT_DBG("%s", hdev->name);
4620
4621 if (hdev->dev_type == HCI_AMP)
4622 type = AMP_LINK;
4623 else
4624 type = ACL_LINK;
4625
4626 while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
4628 u32 priority = (skb_peek(&chan->data_q))->priority;
4629 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4630 int blocks;
4631
4632 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4633 skb->len, skb->priority);
4634
			/* Stop if priority has changed */
4636 if (skb->priority < priority)
4637 break;
4638
4639 skb = skb_dequeue(&chan->data_q);
4640
4641 blocks = __get_blocks(hdev, skb);
4642 if (blocks > hdev->block_cnt)
4643 return;
4644
4645 hci_conn_enter_active_mode(chan->conn,
4646 bt_cb(skb)->force_active);
4647
4648 hci_send_frame(hdev, skb);
4649 hdev->acl_last_tx = jiffies;
4650
4651 hdev->block_cnt -= blocks;
4652 quote -= blocks;
4653
4654 chan->sent += blocks;
4655 chan->conn->sent += blocks;
4656 }
4657 }
4658
4659 if (cnt != hdev->block_cnt)
4660 hci_prio_recalculate(hdev, type);
4661}
4662
4663static void hci_sched_acl(struct hci_dev *hdev)
4664{
4665 BT_DBG("%s", hdev->name);
4666
	/* No ACL link over BR/EDR controller */
4668 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4669 return;
4670
	/* No AMP link over AMP controller */
4672 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4673 return;
4674
4675 switch (hdev->flow_ctl_mode) {
4676 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4677 hci_sched_acl_pkt(hdev);
4678 break;
4679
4680 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4681 hci_sched_acl_blk(hdev);
4682 break;
4683 }
4684}
4685
4686static void hci_sched_le(struct hci_dev *hdev)
4687{
4688 struct hci_chan *chan;
4689 struct sk_buff *skb;
4690 int quote, cnt, tmp;
4691
4692 BT_DBG("%s", hdev->name);
4693
4694 if (!hci_conn_num(hdev, LE_LINK))
4695 return;
4696
4697 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4698
4699 __check_timeout(hdev, cnt);
4700
4701 tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4703 u32 priority = (skb_peek(&chan->data_q))->priority;
4704 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4705 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4706 skb->len, skb->priority);
4707
			/* Stop if priority has changed */
4709 if (skb->priority < priority)
4710 break;
4711
4712 skb = skb_dequeue(&chan->data_q);
4713
4714 hci_send_frame(hdev, skb);
4715 hdev->le_last_tx = jiffies;
4716
4717 cnt--;
4718 chan->sent++;
4719 chan->conn->sent++;
4720
			/* Send pending SCO packets right away */
4722 hci_sched_sco(hdev);
4723 hci_sched_esco(hdev);
4724 }
4725 }
4726
4727 if (hdev->le_pkts)
4728 hdev->le_cnt = cnt;
4729 else
4730 hdev->acl_cnt = cnt;
4731
4732 if (cnt != tmp)
4733 hci_prio_recalculate(hdev, LE_LINK);
4734}
4735
4736static void hci_tx_work(struct work_struct *work)
4737{
4738 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4739 struct sk_buff *skb;
4740
4741 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4742 hdev->sco_cnt, hdev->le_cnt);
4743
4744 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
4746 hci_sched_sco(hdev);
4747 hci_sched_esco(hdev);
4748 hci_sched_acl(hdev);
4749 hci_sched_le(hdev);
4750 }
4751
	/* Send next queued raw (unknown type) packet */
4753 while ((skb = skb_dequeue(&hdev->raw_q)))
4754 hci_send_frame(hdev, skb);
4755}
4756
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
4760static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4761{
4762 struct hci_acl_hdr *hdr = (void *) skb->data;
4763 struct hci_conn *conn;
4764 __u16 handle, flags;
4765
4766 skb_pull(skb, HCI_ACL_HDR_SIZE);
4767
4768 handle = __le16_to_cpu(hdr->handle);
4769 flags = hci_flags(handle);
4770 handle = hci_handle(handle);
4771
4772 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4773 handle, flags);
4774
4775 hdev->stat.acl_rx++;
4776
4777 hci_dev_lock(hdev);
4778 conn = hci_conn_hash_lookup_handle(hdev, handle);
4779 hci_dev_unlock(hdev);
4780
4781 if (conn) {
4782 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4783
		/* Send to upper protocol */
4785 l2cap_recv_acldata(conn, skb, flags);
4786 return;
4787 } else {
4788 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4789 handle);
4790 }
4791
4792 kfree_skb(skb);
4793}
4794
/* SCO data packet */
4796static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4797{
4798 struct hci_sco_hdr *hdr = (void *) skb->data;
4799 struct hci_conn *conn;
4800 __u16 handle, flags;
4801
4802 skb_pull(skb, HCI_SCO_HDR_SIZE);
4803
4804 handle = __le16_to_cpu(hdr->handle);
4805 flags = hci_flags(handle);
4806 handle = hci_handle(handle);
4807
4808 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4809 handle, flags);
4810
4811 hdev->stat.sco_rx++;
4812
4813 hci_dev_lock(hdev);
4814 conn = hci_conn_hash_lookup_handle(hdev, handle);
4815 hci_dev_unlock(hdev);
4816
4817 if (conn) {
		/* Send to upper protocol */
4819 bt_cb(skb)->sco.pkt_status = flags & 0x03;
4820 sco_recv_scodata(conn, skb);
4821 return;
4822 } else {
4823 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4824 handle);
4825 }
4826
4827 kfree_skb(skb);
4828}
4829
4830static bool hci_req_is_complete(struct hci_dev *hdev)
4831{
4832 struct sk_buff *skb;
4833
4834 skb = skb_peek(&hdev->cmd_q);
4835 if (!skb)
4836 return true;
4837
4838 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4839}
4840
4841static void hci_resend_last(struct hci_dev *hdev)
4842{
4843 struct hci_command_hdr *sent;
4844 struct sk_buff *skb;
4845 u16 opcode;
4846
4847 if (!hdev->sent_cmd)
4848 return;
4849
4850 sent = (void *) hdev->sent_cmd->data;
4851 opcode = __le16_to_cpu(sent->opcode);
4852 if (opcode == HCI_OP_RESET)
4853 return;
4854
4855 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4856 if (!skb)
4857 return;
4858
4859 skb_queue_head(&hdev->cmd_q, skb);
4860 queue_work(hdev->workqueue, &hdev->cmd_work);
4861}
4862
4863void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4864 hci_req_complete_t *req_complete,
4865 hci_req_complete_skb_t *req_complete_skb)
4866{
4867 struct sk_buff *skb;
4868 unsigned long flags;
4869
4870 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4871
	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
4875 if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
4882 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4883 hci_resend_last(hdev);
4884
4885 return;
4886 }
4887
	/* If we reach this point this event matches the last command sent */
4889 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
4890
	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
4894 if (!status && !hci_req_is_complete(hdev))
4895 return;
4896
	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
4901 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4902 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4903 return;
4904 }
4905
4906 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4907 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4908 return;
4909 }
4910
	/* Remove all pending commands belonging to this request */
4912 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4913 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4914 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4915 __skb_queue_head(&hdev->cmd_q, skb);
4916 break;
4917 }
4918
4919 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4920 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4921 else
4922 *req_complete = bt_cb(skb)->hci.req_complete;
4923 kfree_skb(skb);
4924 }
4925 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4926}
4927
4928static void hci_rx_work(struct work_struct *work)
4929{
4930 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4931 struct sk_buff *skb;
4932
4933 BT_DBG("%s", hdev->name);
4934
4935 while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
4937 hci_send_to_monitor(hdev, skb);
4938
4939 if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
4941 hci_send_to_sock(hdev, skb);
4942 }
4943
		/* If the device has been opened in HCI_USER_CHANNEL,
		 * the userspace has exclusive access to device.
		 * When device is HCI_INIT, we still need to process
		 * the data packets to the driver in order
		 * to complete its setup procedure.
		 */
4950 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4951 !test_bit(HCI_INIT, &hdev->flags)) {
4952 kfree_skb(skb);
4953 continue;
4954 }
4955
4956 if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
4958 switch (hci_skb_pkt_type(skb)) {
4959 case HCI_ACLDATA_PKT:
4960 case HCI_SCODATA_PKT:
4961 case HCI_ISODATA_PKT:
4962 kfree_skb(skb);
4963 continue;
4964 }
4965 }
4966
		/* Process frame */
4968 switch (hci_skb_pkt_type(skb)) {
4969 case HCI_EVENT_PKT:
4970 BT_DBG("%s Event packet", hdev->name);
4971 hci_event_packet(hdev, skb);
4972 break;
4973
4974 case HCI_ACLDATA_PKT:
4975 BT_DBG("%s ACL data packet", hdev->name);
4976 hci_acldata_packet(hdev, skb);
4977 break;
4978
4979 case HCI_SCODATA_PKT:
4980 BT_DBG("%s SCO data packet", hdev->name);
4981 hci_scodata_packet(hdev, skb);
4982 break;
4983
4984 default:
4985 kfree_skb(skb);
4986 break;
4987 }
4988 }
4989}
4990
4991static void hci_cmd_work(struct work_struct *work)
4992{
4993 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4994 struct sk_buff *skb;
4995
4996 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4997 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4998
	/* Send queued commands */
5000 if (atomic_read(&hdev->cmd_cnt)) {
5001 skb = skb_dequeue(&hdev->cmd_q);
5002 if (!skb)
5003 return;
5004
5005 kfree_skb(hdev->sent_cmd);
5006
5007 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5008 if (hdev->sent_cmd) {
5009 if (hci_req_status_pend(hdev))
5010 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
5011 atomic_dec(&hdev->cmd_cnt);
5012 hci_send_frame(hdev, skb);
5013 if (test_bit(HCI_RESET, &hdev->flags))
5014 cancel_delayed_work(&hdev->cmd_timer);
5015 else
5016 schedule_delayed_work(&hdev->cmd_timer,
5017 HCI_CMD_TIMEOUT);
5018 } else {
5019 skb_queue_head(&hdev->cmd_q, skb);
5020 queue_work(hdev->workqueue, &hdev->cmd_work);
5021 }
5022 }
5023}
5024