/* Bluetooth HCI core. */
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_sync_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_sync_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	bool enable;
	int err;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active or in user channel operation, then there is no
	 * need for the vendor callback. Instead just store the desired
	 * value and the setting will be programmed when the controller
	 * gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    (!test_bit(HCI_RUNNING, &hdev->flags) ||
	     hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
		goto done;

	hci_req_sync_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_sync_unlock(hdev);

	if (err < 0)
		return err;

done:
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}

static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}

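/* The controller bring-up below is split into staged HCI requests
 * (hci_reset_req and hci_init1_req through hci_init4_req); each stage
 * issues commands whose replies feed the decisions of the next stage.
 */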
static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
	return 0;
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it is placed conditionally in the second
	 * init sequence.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	return 0;
}

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		bredr_init(req);
		break;
	case HCI_AMP:
		amp_init1(req);
		break;
	default:
		bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
		break;
	}

	return 0;
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version Information
					    * Complete
					    */

		/* Enable the events relevant to link layer encryption */
		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with
		 * RSSI events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}

	return 0;
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

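/* Program LE host support on dual-mode controllers. LE-only
 * controllers get HCI_LE_ENABLED implicitly in le_setup(), so this is
 * a no-op for them; the command is also skipped when the host setting
 * already matches what the controller reports.
 */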
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	bool changed = false;

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
		changed = true;
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
		changed = true;
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
		events[2] |= 0x80;
		changed = true;
	}

	/* Some Broadcom based controllers indicate support for Set Event
	 * Mask Page 2 command, but then actually do not support it. Since
	 * the default value is all bits set to zero, the command is only
	 * required if the event mask has to be changed. In case no change
	 * to the event mask is needed, skip this command.
	 */
	if (changed)
		hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
			    sizeof(events), events);
}

static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			/* LE Remote Connection Parameter Request */
			events[0] |= 0x20;

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			/* LE Direct Advertising Report */
			events[1] |= 0x04;

		/* If the controller supports Channel Selection Algorithm #2
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
			/* LE Channel Selection Algorithm */
			events[2] |= 0x08;

		/* If the controller supports the LE Set Scan Enable command,
		 * enable the corresponding advertising report event.
		 */
		if (hdev->commands[26] & 0x08)
			events[0] |= 0x02;	/* LE Advertising Report */

		/* If the controller supports the LE Create Connection
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[26] & 0x10)
			events[0] |= 0x01;	/* LE Connection Complete */

		/* If the controller supports the LE Connection Update
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x04)
			/* LE Connection Update Complete */
			events[0] |= 0x04;

		/* If the controller supports the LE Read Remote Used Features
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x20)
			/* LE Read Remote Used Features Complete */
			events[0] |= 0x08;

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			/* LE Read Local P-256 Public Key Complete */
			events[0] |= 0x80;

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		/* If the controller supports the LE Set Default PHY or
		 * LE Set PHY commands, enable the corresponding event.
		 */
		if (hdev->commands[35] & (0x20 | 0x40))
			events[1] |= 0x08;	/* LE PHY Update Complete */

		/* If the controller supports LE Set Extended Scan Parameters
		 * and LE Set Extended Scan Enable commands, enable the
		 * corresponding event.
		 */
		if (use_ext_scan(hdev))
			/* LE Extended Advertising Report */
			events[1] |= 0x10;

		/* If the controller supports the LE Extended Create Connection
		 * command, enable the corresponding event.
		 */
		if (use_ext_conn(hdev))
			/* LE Enhanced Connection Complete */
			events[1] |= 0x02;

		/* If the controller supports the LE Extended Advertising
		 * command, enable the corresponding event.
		 */
		if (ext_adv_capable(hdev))
			/* LE Advertising Set Terminated */
			events[2] |= 0x02;

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		/* Read LE Advertising Channel TX Power */
		if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
			/* The HCI spec forbids mixing of legacy and extended
			 * advertising commands wherein READ_ADV_TX_POWER is
			 * also included. So do not call it if extended adv
			 * is supported otherwise controller will return
			 * COMMAND_DISALLOWED for extended commands.
			 */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

		if (hdev->commands[34] & 0x40) {
			/* Read LE Resolving List Size */
			hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[34] & 0x20) {
			/* Clear LE Resolving List */
			hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		if (ext_adv_capable(hdev)) {
			/* Read LE Number of Supported Advertising Sets */
			hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
				    0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If it's not supported assume that the
	 * controller does not have actual support for stored link keys
	 * which makes this command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}

	/* Set Suggested Default Data Length to maximum if supported */
	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
		struct hci_cp_le_write_def_data_len cp;

		cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
		cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
		hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
	}

	/* Set Default PHY parameters if command is supported */
	if (hdev->commands[35] & 0x20) {
		struct hci_cp_le_set_default_phy cp;

		cp.all_phys = 0x00;
		cp.tx_phys = hdev->le_tx_def_phys;
		cp.rx_phys = hdev->le_rx_def_phys;

		hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
	}

	return 0;
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_PRIMARY)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

	return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

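/* Re-insert @ie into the resolve list, keeping the list sorted by
 * signal strength (smallest abs(RSSI) first) so that names are
 * resolved for the closest devices first. Entries with a name request
 * already pending are skipped over rather than reordered.
 */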
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR
		 * (going through done so the device reference is dropped).
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/**
 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
 *				       (BD_ADDR) for a HCI device from
 *				       a firmware node property.
 * @hdev:	The HCI device
 *
 * Search the firmware node for 'local-bd-address'.
 *
 * All-zero BD addresses are rejected, because those could be properties
 * that exist in the firmware tables, but were not updated by the firmware. For
 * example, the DTS could define 'local-bd-address', with zero BD addresses.
 */
static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
{
	struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
	bdaddr_t ba;
	int ret;

	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
					    (u8 *)&ba, sizeof(ba));
	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
		return;

	bacpy(&hdev->public_addr, &ba);
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_PRIMARY &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
		bool invalid_bdaddr;

		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set the quirk to mark the
		 * BD_ADDR invalid before creating the HCI device or in
		 * its setup callback.
		 */
		invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
					  &hdev->quirks);

		if (ret)
			goto setup_failed;

		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
			if (!bacmp(&hdev->public_addr, BDADDR_ANY))
				hci_dev_get_bd_addr_from_property(hdev);

			if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
			    hdev->set_bdaddr) {
				ret = hdev->set_bdaddr(hdev,
						       &hdev->public_addr);

				/* If setting of the BD_ADDR from the device
				 * property succeeds, then treat the address
				 * as valid even if the invalid BD_ADDR
				 * quirk indicates otherwise.
				 */
				if (!ret)
					invalid_bdaddr = false;
			}
		}

setup_failed:
		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * For the invalid BD_ADDR quirk it is possible that
		 * it becomes a valid address if the bootloader does
		 * provide it (see above).
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    invalid_bdaddr)
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		hci_leds_update_powered(hdev, true);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hci_dev_test_flag(hdev, HCI_MGMT) &&
		    hdev->dev_type == HCI_PRIMARY) {
			ret = __hci_req_hci_power_on(hdev);
			mgmt_power_on(hdev, ret);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_sync_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

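/* Power the controller down: flush pending work, disconnect all
 * connections, optionally issue HCI_Reset (HCI_QUIRK_RESET_ON_CLOSE)
 * and finally invoke the driver's close callback. Calling this while
 * the device is already down is a harmless no-op.
 */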
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_request_cancel_all(hdev);
	hci_req_sync_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_sync_unlock(hdev);
		return 0;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		struct adv_info *adv_instance;

		cancel_delayed_work_sync(&hdev->rpa_expired);

		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_sync_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);

	hci_req_sync_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}

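/* Handler for the legacy HCISET* ioctls. These bypass the management
 * interface, so they are refused for devices in user channel operation,
 * for unconfigured devices and for anything that is not a BR/EDR
 * capable primary controller.
 */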
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		hci_req_sync_lock(hdev);
		err = __hci_req_hci_power_on(hdev);
		hci_req_sync_unlock(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * in place, it means that the controller is not usable.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be send.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_blocked_keys_clear(struct hci_dev *hdev)
{
	struct blocked_key *b;

	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);
		kfree_rcu(b, rcu);
	}
}

bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
	bool blocked = false;
	struct blocked_key *b;

	rcu_read_lock();
	list_for_each_entry(b, &hdev->blocked_keys, list) {
		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
			blocked = true;
			break;
		}
	}

	rcu_read_unlock();
	return blocked;
}

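/* Look up the stored BR/EDR link key for @bdaddr. Keys found on the
 * blocked keys list are reported and treated as if no key was stored.
 */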
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

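/* Decide whether a new BR/EDR link key should be stored persistently,
 * based on the key type and on the local and remote authentication
 * requirements of the connection it was created on.
 */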
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

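/* Resolve a Resolvable Private Address to a stored IRK: first try the
 * cached RPA of each IRK, then fall back to cryptographically matching
 * the RPA against every known IRK via smp_irk_matches().
 */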
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key
	 */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

2645void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2646{
2647 struct smp_irk *k;
2648
2649 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2650 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2651 continue;
2652
2653 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2654
2655 list_del_rcu(&k->list);
2656 kfree_rcu(k, rcu);
2657 }
2658}
2659
2660bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2661{
2662 struct smp_ltk *k;
2663 struct smp_irk *irk;
2664 u8 addr_type;
2665
2666 if (type == BDADDR_BREDR) {
2667 if (hci_find_link_key(hdev, bdaddr))
2668 return true;
2669 return false;
2670 }
2671
2672
2673 if (type == BDADDR_LE_PUBLIC)
2674 addr_type = ADDR_LE_DEV_PUBLIC;
2675 else
2676 addr_type = ADDR_LE_DEV_RANDOM;
2677
2678 irk = hci_get_irk(hdev, bdaddr, addr_type);
2679 if (irk) {
2680 bdaddr = &irk->bdaddr;
2681 addr_type = irk->addr_type;
2682 }
2683
2684 rcu_read_lock();
2685 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2686 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2687 rcu_read_unlock();
2688 return true;
2689 }
2690 }
2691 rcu_read_unlock();
2692
2693 return false;
2694}
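
/* Illustrative sketch (assumption): a caller such as the mgmt pairing path
 * can use this check to refuse a redundant pairing attempt, e.g.:
 *
 *	if (hci_bdaddr_is_paired(hdev, &addr, BDADDR_LE_PUBLIC))
 *		return -EALREADY;
 *
 * The -EALREADY policy above is hypothetical; real callers map the result
 * onto their own mgmt status codes.
 */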

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	if (hdev->cmd_timeout)
		hdev->cmd_timeout(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
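
/* Note (editorial): data->present is a bitmask of which OOB value pairs
 * are stored: bit 0 for the P-192 pair and bit 1 for the P-256 pair, so
 * 0x01 means P-192 only, 0x02 P-256 only and 0x03 both. For example,
 * adding P-192 data alone yields present == 0x01:
 *
 *	hci_add_remote_oob_data(hdev, &bdaddr, BDADDR_BREDR,
 *				hash192, rand192, NULL, NULL);
 *	data = hci_find_remote_oob_data(hdev, &bdaddr, BDADDR_BREDR);
 *	WARN_ON(data->present != 0x01);
 */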

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}
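
/* Note (editorial): hci_get_next_instance() treats the instance list as a
 * ring, wrapping from the last entry back to the first. A scheduler that
 * rotates through instances could therefore do, under hdev->lock:
 *
 *	struct adv_info *next;
 *
 *	next = hci_get_next_instance(hdev, hdev->cur_adv_instance);
 *	if (next)
 *		hdev->cur_adv_instance = next->instance;
 */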

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing instance %d", hdev->name, instance);

	if (hdev->cur_adv_instance == instance) {
		if (hdev->adv_instance_timeout) {
			cancel_delayed_work(&hdev->adv_instance_expire);
			hdev->adv_instance_timeout = 0;
		}
		hdev->cur_adv_instance = 0x00;
	}

	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}

void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
{
	struct adv_info *adv_instance, *n;

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
		adv_instance->rpa_expired = rpa_expired;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}

static void adv_instance_rpa_expired(struct work_struct *work)
{
	struct adv_info *adv_instance = container_of(work, struct adv_info,
						     rpa_expired_cb.work);

	BT_DBG("");

	adv_instance->rpa_expired = true;
}

/* This function requires the caller holds hdev->lock */
int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
			 u16 adv_data_len, u8 *adv_data,
			 u16 scan_rsp_len, u8 *scan_rsp_data,
			 u16 timeout, u16 duration)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (adv_instance) {
		memset(adv_instance->adv_data, 0,
		       sizeof(adv_instance->adv_data));
		memset(adv_instance->scan_rsp_data, 0,
		       sizeof(adv_instance->scan_rsp_data));
	} else {
		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
		    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
			return -EOVERFLOW;

		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
		if (!adv_instance)
			return -ENOMEM;

		adv_instance->pending = true;
		adv_instance->instance = instance;
		list_add(&adv_instance->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv_instance->flags = flags;
	adv_instance->adv_data_len = adv_data_len;
	adv_instance->scan_rsp_len = scan_rsp_len;

	if (adv_data_len)
		memcpy(adv_instance->adv_data, adv_data, adv_data_len);

	if (scan_rsp_len)
		memcpy(adv_instance->scan_rsp_data,
		       scan_rsp_data, scan_rsp_len);

	adv_instance->timeout = timeout;
	adv_instance->remaining_time = timeout;

	if (duration == 0)
		adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
	else
		adv_instance->duration = duration;

	adv_instance->tx_power = HCI_TX_POWER_INVALID;

	INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
			  adv_instance_rpa_expired);

	BT_DBG("%s for instance %d", hdev->name, instance);

	return 0;
}
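
/* Illustrative sketch (assumption): registering a one-shot advertising
 * instance with a timeout of 30 and the default duration might look like
 * this, under hdev->lock. The payload below is a hypothetical flags-only
 * AD element (length 2, type 0x01, value 0x06).
 *
 *	u8 adv_data[] = { 0x02, 0x01, 0x06 };
 *	int err;
 *
 *	err = hci_add_adv_instance(hdev, 0x01, 0, sizeof(adv_data),
 *				   adv_data, 0, NULL, 30, 0);
 *	if (err < 0)
 *		return err;
 */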

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
				struct list_head *bdaddr_list,
				bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list_with_irk *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct bdaddr_list *b, *n;

	list_for_each_entry_safe(b, n, bdaddr_list, list) {
		list_del(&b->list);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
				 u8 type, u8 *peer_irk, u8 *local_irk)
{
	struct bdaddr_list_with_irk *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	if (peer_irk)
		memcpy(entry->peer_irk, peer_irk, 16);

	if (local_irk)
		memcpy(entry->local_irk, local_irk, 16);

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
				 u8 type)
{
	struct bdaddr_list_with_irk *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}
/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		bt_dev_err(hdev, "out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
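
/* Illustrative sketch (assumption): callers typically look up or create
 * the parameter entry and then adjust connection policy, all under
 * hdev->lock; the real mgmt code layers extra bookkeeping (the pending
 * connection/report lists) on top of this.
 *
 *	struct hci_conn_params *params;
 *
 *	params = hci_conn_params_add(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC);
 *	if (!params)
 *		return -ENOMEM;
 */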

static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish one time connection to disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
static void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	BT_DBG("All LE connection parameters were removed");
}

/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and a
 * static address has been configured, then use that address instead of
 * the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0018;
	hdev->le_conn_max_interval = 0x0028;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;
	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_resolv_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);
	INIT_LIST_HEAD(&hdev->blocked_keys);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
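
/* Illustrative sketch (assumption): a transport driver typically pairs
 * these helpers as follows; 'my_open', 'my_close' and 'my_send' are
 * hypothetical driver callbacks, and the bus type is just an example.
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	int err;
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_UART;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */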

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
						      hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hci_leds_init(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_PRIMARY) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	cancel_work_sync(&hdev->power_on);

	hci_dev_do_close(hdev);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list
	 */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);
	kfree_const(hdev->hw_info);
	kfree_const(hdev->fw_info);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_blocked_keys_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	skb_put_data(skb, hw_err, 3);

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
	    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
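
/* Illustrative sketch (assumption): a driver's RX path tags each buffer
 * with its HCI packet type before handing it to the core; 'data' and
 * 'len' are hypothetical transport payload variables.
 *
 *	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, data, len);
 *
 *	return hci_recv_frame(hdev, skb);
 */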

/* Receive diagnostic message from HCI drivers */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);

void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->hw_info);
	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_hw_info);

void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->fw_info);
	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_fw_info);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		bt_dev_err(hdev, "sending frame failed (%d)", err);
		kfree_skb(skb);
	}
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command");
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param)
{
	struct sk_buff *skb;

	if (hci_opcode_ogf(opcode) != 0x3f) {
		/* A controller receiving a command shall respond with
		 * either a Command Status Event or a Command Complete
		 * Event. Therefore, all standard HCI commands must be
		 * sent via the standard API that waits for such a
		 * response. Only vendor-specific commands (OGF 0x3f)
		 * may legitimately complete without any event, since
		 * the vendor specification defines their behavior, so
		 * only those are allowed through this path.
		 */
		bt_dev_err(hdev, "unresponded command not supported");
		return -EINVAL;
	}

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		return -ENOMEM;
	}

	hci_send_frame(hdev, skb);

	return 0;
}
EXPORT_SYMBOL(__hci_cmd_send);
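
/* Illustrative sketch (assumption): a driver finishing its setup() stage
 * could fire a vendor-specific command (OGF 0x3f) for which the firmware
 * sends no completion event; the opcode 0xfc0e and the payload below are
 * hypothetical.
 *
 *	u8 param = 0x01;
 *	int err;
 *
 *	err = __hci_cmd_send(hdev, 0xfc0e, sizeof(param), &param);
 *	if (err < 0)
 *		return err;
 */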

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send HCI command and wait for command complete event */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
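
/* Illustrative sketch (assumption): reading the controller version
 * synchronously; the returned skb carries the event parameters and must
 * be freed by the caller.
 *
 *	struct sk_buff *skb;
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	kfree_skb(skb);
 */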

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			bt_dev_err(hdev, "unknown link type %d", conn->type);
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	bt_dev_err(hdev, "link tx timeout");

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			bt_dev_err(hdev, "killing stalled connection %pMR",
				   &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
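
/* Note (editorial): the quota is the controller's free buffer count split
 * evenly across the channels queued at the highest pending priority, with
 * a floor of one. For example, with hdev->acl_cnt == 8 free ACL buffers
 * and num == 3 top-priority channels, each selected channel may send
 * 8 / 3 == 2 packets per scheduling round.
 */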

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds)
		 */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

	__check_timeout(hdev, cnt);

	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * the userspace has exclusive access to device.
		 * When device is HCI_INIT, we still need to process
		 * the data packets to the driver in order
		 * to complete its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			if (hci_req_status_pend(hdev))
				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}