/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;
*/

/* Bluetooth HCI core. */
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

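/* "dut_mode" exposes the Device Under Test mode of the controller:
 * reading returns Y/N, writing a boolean enables DUT mode via the
 * Enable Device Under Test Mode command or leaves it by resetting
 * the controller.
 */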
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_sync_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_sync_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

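/* "vendor_diag" toggles the vendor specific diagnostic interface of the
 * controller through the driver's set_diag callback. The debugfs entry
 * is only created when the driver provides that callback.
 */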
static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active, then there is no need for the vendor callback.
	 *
	 * Instead just store the desired value and the setting will be
	 * programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !test_bit(HCI_RUNNING, &hdev->flags))
		goto done;

	hci_req_sync_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_sync_unlock(hdev);

	if (err < 0)
		return err;

done:
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}

static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}

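/* Queue an HCI Reset command; HCI_RESET is set so that the event
 * handler knows a reset is in progress.
 */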
static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
	return 0;
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	return 0;
}

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		bredr_init(req);
		break;
	case HCI_AMP:
		amp_init1(req);
		break;
	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

	return 0;
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

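/* Build the page 1 event mask from what the controller actually
 * supports: events for unsupported features stay masked out so the
 * controller never sends them.
 */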
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version
					    * Information Complete
					    */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh
					    * Complete
					    */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

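/* Second stage init: common BR/EDR and LE setup. AMP controllers only
 * run the reduced amp_init2() sequence.
 */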
static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}

	return 0;
}

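/* Program the default link policy from the features the controller
 * actually supports (role switch, hold, sniff, park).
 */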
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

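/* Third stage init: BR/EDR commands that depend on the supported
 * command bits read in stage two, plus the full LE setup including the
 * LE event mask and white list handling.
 */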
static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Set Scan Enable command,
		 * enable the corresponding advertising report event.
		 */
		if (hdev->commands[26] & 0x08)
			events[0] |= 0x02;	/* LE Advertising Report */

		/* If the controller supports the LE Create Connection
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[26] & 0x10)
			events[0] |= 0x01;	/* LE Connection Complete */

		/* If the controller supports the LE Connection Update
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x04)
			events[0] |= 0x04;	/* LE Connection Update
						 * Complete
						 */

		/* If the controller supports the LE Read Remote Used Features
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x20)
			events[0] |= 0x08;	/* LE Read Remote Used
						 * Features Complete
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	return 0;
}

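/* Fourth stage init: optional commands that are only issued when the
 * corresponding supported command bit or feature is present.
 */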
static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * advertises its support for the Delete Stored Link Key command in
	 * the local supported commands announced in the response to the
	 * Read Local Supported Commands command.
	 *
	 * However some controllers indicate that they support handling
	 * deleting stored link keys, but they don't. The quirk lets a
	 * driver just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}

	return 0;
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_PRIMARY)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

	return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

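/* Copy up to num entries from the inquiry cache into the flat
 * inquiry_info array used by the HCIINQUIRY ioctl.
 */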
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

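/* HCIINQUIRY ioctl handler: flushes a stale inquiry cache, runs the
 * inquiry synchronously when needed and copies the cached results back
 * to userspace.
 */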
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

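/* Bring the device up: run the vendor setup stage on the first power
 * on, program the public address if needed and execute the HCI init
 * sequence. The request lock is taken internally.
 */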
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers only have a static random
		 * address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_PRIMARY &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		hci_leds_update_powered(hdev, true);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hci_dev_test_flag(hdev, HCI_MGMT) &&
		    hdev->dev_type == HCI_PRIMARY) {
			ret = __hci_req_hci_power_on(hdev);
			mgmt_power_on(hdev, ret);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_sync_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

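/* Tear the device down: stop pending work, flush queues, power off the
 * transport and reset all volatile state. Safe to call when the device
 * is already down.
 */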
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_request_cancel_all(hdev);
	hci_req_sync_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_sync_unlock(hdev);
		return 0;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_sync_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

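/* Soft reset: drop pending data, flush connections and the inquiry
 * cache, then send HCI Reset. The device stays up afterwards.
 */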
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);

	hci_req_sync_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

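/* Keep the HCI_CONNECTABLE and HCI_DISCOVERABLE flags in sync with a
 * scan mode change that was made through the legacy ioctl interface
 * rather than through mgmt.
 */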
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		hci_req_sync_lock(hdev);
		err = __hci_req_hci_power_on(hdev);
		hci_req_sync_unlock(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		BT_ERR("%s hardware error 0x%2.2x", hdev->name,
		       hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure and not supported */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently
	 */
	return false;
}

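/* Map an LTK type to the connection role it is used for: SMP_LTK keys
 * were distributed for use in the master role, everything else is used
 * in the slave role.
 */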
static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key
	 */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

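/* Store remote OOB pairing data; data->present encodes which of the
 * P-192 (0x01), P-256 (0x02) or both (0x03) value sets are valid.
 */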
2530int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2531 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2532 u8 *hash256, u8 *rand256)
2533{
2534 struct oob_data *data;
2535
2536 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2537 if (!data) {
2538 data = kmalloc(sizeof(*data), GFP_KERNEL);
2539 if (!data)
2540 return -ENOMEM;
2541
2542 bacpy(&data->bdaddr, bdaddr);
2543 data->bdaddr_type = bdaddr_type;
2544 list_add(&data->list, &hdev->remote_oob_data);
2545 }
2546
2547 if (hash192 && rand192) {
2548 memcpy(data->hash192, hash192, sizeof(data->hash192));
2549 memcpy(data->rand192, rand192, sizeof(data->rand192));
2550 if (hash256 && rand256)
2551 data->present = 0x03;
2552 } else {
2553 memset(data->hash192, 0, sizeof(data->hash192));
2554 memset(data->rand192, 0, sizeof(data->rand192));
2555 if (hash256 && rand256)
2556 data->present = 0x02;
2557 else
2558 data->present = 0x00;
2559 }
2560
2561 if (hash256 && rand256) {
2562 memcpy(data->hash256, hash256, sizeof(data->hash256));
2563 memcpy(data->rand256, rand256, sizeof(data->rand256));
2564 } else {
2565 memset(data->hash256, 0, sizeof(data->hash256));
2566 memset(data->rand256, 0, sizeof(data->rand256));
2567 if (hash192 && rand192)
2568 data->present = 0x01;
2569 }
2570
2571 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2572
2573 return 0;
2574}
2575
2576
2577struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2578{
2579 struct adv_info *adv_instance;
2580
2581 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2582 if (adv_instance->instance == instance)
2583 return adv_instance;
2584 }
2585
2586 return NULL;
2587}
2588
2589/* This function requires the caller holds hdev->lock */
2590struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2591{
2592 struct adv_info *cur_instance;
2593
2594 cur_instance = hci_find_adv_instance(hdev, instance);
2595 if (!cur_instance)
2596 return NULL;
2597
2598 if (cur_instance == list_last_entry(&hdev->adv_instances,
2599 struct adv_info, list))
2600 return list_first_entry(&hdev->adv_instances,
2601 struct adv_info, list);
2602 else
2603 return list_next_entry(cur_instance, list);
2604}
2605
2606/* This function requires the caller holds hdev->lock */
2607int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2608{
2609 struct adv_info *adv_instance;
2610
2611 adv_instance = hci_find_adv_instance(hdev, instance);
2612 if (!adv_instance)
2613 return -ENOENT;
2614
2615 BT_DBG("%s removing %dMR", hdev->name, instance);
2616
2617 if (hdev->cur_adv_instance == instance) {
2618 if (hdev->adv_instance_timeout) {
2619 cancel_delayed_work(&hdev->adv_instance_expire);
2620 hdev->adv_instance_timeout = 0;
2621 }
2622 hdev->cur_adv_instance = 0x00;
2623 }
2624
2625 list_del(&adv_instance->list);
2626 kfree(adv_instance);
2627
2628 hdev->adv_instance_cnt--;
2629
2630 return 0;
2631}
2632
2633/* This function requires the caller holds hdev->lock */
2634void hci_adv_instances_clear(struct hci_dev *hdev)
2635{
2636 struct adv_info *adv_instance, *n;
2637
2638 if (hdev->adv_instance_timeout) {
2639 cancel_delayed_work(&hdev->adv_instance_expire);
2640 hdev->adv_instance_timeout = 0;
2641 }
2642
2643 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2644 list_del(&adv_instance->list);
2645 kfree(adv_instance);
2646 }
2647
2648 hdev->adv_instance_cnt = 0;
2649 hdev->cur_adv_instance = 0x00;
2650}
2651
2652/* This function requires the caller holds hdev->lock */
2653int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2654 u16 adv_data_len, u8 *adv_data,
2655 u16 scan_rsp_len, u8 *scan_rsp_data,
2656 u16 timeout, u16 duration)
2657{
2658 struct adv_info *adv_instance;
2659
2660 adv_instance = hci_find_adv_instance(hdev, instance);
2661 if (adv_instance) {
2662 memset(adv_instance->adv_data, 0,
2663 sizeof(adv_instance->adv_data));
2664 memset(adv_instance->scan_rsp_data, 0,
2665 sizeof(adv_instance->scan_rsp_data));
2666 } else {
2667 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2668 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2669 return -EOVERFLOW;
2670
2671 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2672 if (!adv_instance)
2673 return -ENOMEM;
2674
2675 adv_instance->pending = true;
2676 adv_instance->instance = instance;
2677 list_add(&adv_instance->list, &hdev->adv_instances);
2678 hdev->adv_instance_cnt++;
2679 }
2680
2681 adv_instance->flags = flags;
2682 adv_instance->adv_data_len = adv_data_len;
2683 adv_instance->scan_rsp_len = scan_rsp_len;
2684
2685 if (adv_data_len)
2686 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2687
2688 if (scan_rsp_len)
2689 memcpy(adv_instance->scan_rsp_data,
2690 scan_rsp_data, scan_rsp_len);
2691
2692 adv_instance->timeout = timeout;
2693 adv_instance->remaining_time = timeout;
2694
2695 if (duration == 0)
2696 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2697 else
2698 adv_instance->duration = duration;
2699
2700 BT_DBG("%s for %dMR", hdev->name, instance);
2701
2702 return 0;
2703}
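/* Example (illustrative only, with a made-up payload): registering
 * instance 1 with no flags, a 10 second timeout and the default
 * duration, from a context that holds hdev->lock:
 *
 *	u8 ad[] = { 0x02, 0x01, 0x06 };	// Flags AD: LE General Discoverable
 *
 *	err = hci_add_adv_instance(hdev, 0x01, 0, sizeof(ad), ad,
 *				   0, NULL, 10, 0);
 */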
2704
2705struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2706 bdaddr_t *bdaddr, u8 type)
2707{
2708 struct bdaddr_list *b;
2709
2710 list_for_each_entry(b, bdaddr_list, list) {
2711 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2712 return b;
2713 }
2714
2715 return NULL;
2716}
2717
2718void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2719{
2720 struct bdaddr_list *b, *n;
2721
2722 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2723 list_del(&b->list);
2724 kfree(b);
2725 }
2726}
2727
2728int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2729{
2730 struct bdaddr_list *entry;
2731
2732 if (!bacmp(bdaddr, BDADDR_ANY))
2733 return -EBADF;
2734
2735 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2736 return -EEXIST;
2737
2738 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2739 if (!entry)
2740 return -ENOMEM;
2741
2742 bacpy(&entry->bdaddr, bdaddr);
2743 entry->bdaddr_type = type;
2744
2745 list_add(&entry->list, list);
2746
2747 return 0;
2748}
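/* Example (illustrative only): adding a BR/EDR device to the whitelist;
 * -EEXIST is returned if the address is already on the list:
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 */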
2749
2750int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2751{
2752 struct bdaddr_list *entry;
2753
2754 if (!bacmp(bdaddr, BDADDR_ANY)) {
2755 hci_bdaddr_list_clear(list);
2756 return 0;
2757 }
2758
2759 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2760 if (!entry)
2761 return -ENOENT;
2762
2763 list_del(&entry->list);
2764 kfree(entry);
2765
2766 return 0;
2767}
2768
2769/* This function requires the caller holds hdev->lock */
2770struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2771 bdaddr_t *addr, u8 addr_type)
2772{
2773 struct hci_conn_params *params;
2774
2775 list_for_each_entry(params, &hdev->le_conn_params, list) {
2776 if (bacmp(&params->addr, addr) == 0 &&
2777 params->addr_type == addr_type) {
2778 return params;
2779 }
2780 }
2781
2782 return NULL;
2783}
2784
2785/* This function requires the caller holds hdev->lock */
2786struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2787 bdaddr_t *addr, u8 addr_type)
2788{
2789 struct hci_conn_params *param;
2790
2791 list_for_each_entry(param, list, action) {
2792 if (bacmp(&param->addr, addr) == 0 &&
2793 param->addr_type == addr_type)
2794 return param;
2795 }
2796
2797 return NULL;
2798}
2799
2800/* This function requires the caller holds hdev->lock */
2801struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2802 bdaddr_t *addr, u8 addr_type)
2803{
2804 struct hci_conn_params *params;
2805
2806 params = hci_conn_params_lookup(hdev, addr, addr_type);
2807 if (params)
2808 return params;
2809
2810 params = kzalloc(sizeof(*params), GFP_KERNEL);
2811 if (!params) {
2812 BT_ERR("Out of memory");
2813 return NULL;
2814 }
2815
2816 bacpy(&params->addr, addr);
2817 params->addr_type = addr_type;
2818
2819 list_add(&params->list, &hdev->le_conn_params);
2820 INIT_LIST_HEAD(&params->action);
2821
2822 params->conn_min_interval = hdev->le_conn_min_interval;
2823 params->conn_max_interval = hdev->le_conn_max_interval;
2824 params->conn_latency = hdev->le_conn_latency;
2825 params->supervision_timeout = hdev->le_supv_timeout;
2826 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2827
2828 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2829
2830 return params;
2831}
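/* Example (illustrative only): marking an LE device for automatic
 * reconnection; the caller must hold hdev->lock:
 *
 *	params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *	if (params)
 *		params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 */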
2832
2833static void hci_conn_params_free(struct hci_conn_params *params)
2834{
2835 if (params->conn) {
2836 hci_conn_drop(params->conn);
2837 hci_conn_put(params->conn);
2838 }
2839
2840 list_del(&params->action);
2841 list_del(&params->list);
2842 kfree(params);
2843}
2844
2845/* This function requires the caller holds hdev->lock */
2846void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2847{
2848 struct hci_conn_params *params;
2849
2850 params = hci_conn_params_lookup(hdev, addr, addr_type);
2851 if (!params)
2852 return;
2853
2854 hci_conn_params_free(params);
2855
2856 hci_update_background_scan(hdev);
2857
2858 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2859}
2860
2861/* This function requires the caller holds hdev->lock */
2862void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2863{
2864 struct hci_conn_params *params, *tmp;
2865
2866 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2867 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2868 continue;
2869
2870 /* If trying to establish one time connection to disabled
2871 * device, leave the params, but mark them as just once.
2872 */
2873 if (params->explicit_connect) {
2874 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2875 continue;
2876 }
2877
2878 list_del(&params->list);
2879 kfree(params);
2880 }
2881
2882 BT_DBG("All LE disabled connection parameters were removed");
2883}
2884
2885/* This function requires the caller holds hdev->lock */
2886static void hci_conn_params_clear_all(struct hci_dev *hdev)
2887{
2888 struct hci_conn_params *params, *tmp;
2889
2890 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2891 hci_conn_params_free(params);
2892
2893 BT_DBG("All LE connection parameters were removed");
2894}
2895
2896/* Copy the Identity Address of the controller.
2897 *
2898 * If the controller has a public BD_ADDR, then by default use that one.
2899 * If this is a LE only controller without a public address, default to
2900 * the static random address.
2901 *
2902 * For debugging purposes it is possible to force controllers with a
2903 * public address to use the static random address instead.
2904 *
2905 * In case BR/EDR has been disabled on a dual-mode controller and
2906 * a static address has been configured, then use that address
2907 * instead of the public BR/EDR address.
2908 */
2909void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2910 u8 *bdaddr_type)
2911{
2912 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2913 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2914 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2915 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2916 bacpy(bdaddr, &hdev->static_addr);
2917 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2918 } else {
2919 bacpy(bdaddr, &hdev->bdaddr);
2920 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2921 }
2922}
2923
2924/* Alloc HCI device */
2925struct hci_dev *hci_alloc_dev(void)
2926{
2927 struct hci_dev *hdev;
2928
2929 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2930 if (!hdev)
2931 return NULL;
2932
2933 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2934 hdev->esco_type = (ESCO_HV1);
2935 hdev->link_mode = (HCI_LM_ACCEPT);
2936 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2937 hdev->io_capability = 0x03; /* No Input No Output */
2938 hdev->manufacturer = 0xffff; /* Default to internal use */
2939 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2940 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2941 hdev->adv_instance_cnt = 0;
2942 hdev->cur_adv_instance = 0x00;
2943 hdev->adv_instance_timeout = 0;
2944
2945 hdev->sniff_max_interval = 800;
2946 hdev->sniff_min_interval = 80;
2947
2948 hdev->le_adv_channel_map = 0x07;
2949 hdev->le_adv_min_interval = 0x0800;
2950 hdev->le_adv_max_interval = 0x0800;
2951 hdev->le_scan_interval = 0x0060;
2952 hdev->le_scan_window = 0x0030;
2953 hdev->le_conn_min_interval = 0x0028;
2954 hdev->le_conn_max_interval = 0x0038;
2955 hdev->le_conn_latency = 0x0000;
2956 hdev->le_supv_timeout = 0x002a;
2957 hdev->le_def_tx_len = 0x001b;
2958 hdev->le_def_tx_time = 0x0148;
2959 hdev->le_max_tx_len = 0x001b;
2960 hdev->le_max_tx_time = 0x0148;
2961 hdev->le_max_rx_len = 0x001b;
2962 hdev->le_max_rx_time = 0x0148;
2963
2964 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2965 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2966 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2967 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2968
2969 mutex_init(&hdev->lock);
2970 mutex_init(&hdev->req_lock);
2971
2972 INIT_LIST_HEAD(&hdev->mgmt_pending);
2973 INIT_LIST_HEAD(&hdev->blacklist);
2974 INIT_LIST_HEAD(&hdev->whitelist);
2975 INIT_LIST_HEAD(&hdev->uuids);
2976 INIT_LIST_HEAD(&hdev->link_keys);
2977 INIT_LIST_HEAD(&hdev->long_term_keys);
2978 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2979 INIT_LIST_HEAD(&hdev->remote_oob_data);
2980 INIT_LIST_HEAD(&hdev->le_white_list);
2981 INIT_LIST_HEAD(&hdev->le_conn_params);
2982 INIT_LIST_HEAD(&hdev->pend_le_conns);
2983 INIT_LIST_HEAD(&hdev->pend_le_reports);
2984 INIT_LIST_HEAD(&hdev->conn_hash.list);
2985 INIT_LIST_HEAD(&hdev->adv_instances);
2986
2987 INIT_WORK(&hdev->rx_work, hci_rx_work);
2988 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2989 INIT_WORK(&hdev->tx_work, hci_tx_work);
2990 INIT_WORK(&hdev->power_on, hci_power_on);
2991 INIT_WORK(&hdev->error_reset, hci_error_reset);
2992
2993 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2994
2995 skb_queue_head_init(&hdev->rx_q);
2996 skb_queue_head_init(&hdev->cmd_q);
2997 skb_queue_head_init(&hdev->raw_q);
2998
2999 init_waitqueue_head(&hdev->req_wait_q);
3000
3001 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3002
3003 hci_request_setup(hdev);
3004
3005 hci_init_sysfs(hdev);
3006 discovery_init(hdev);
3007
3008 return hdev;
3009}
3010EXPORT_SYMBOL(hci_alloc_dev);
3011
3012/* Free HCI device */
3013void hci_free_dev(struct hci_dev *hdev)
3014{
3015 /* will free via device release */
3016 put_device(&hdev->dev);
3017}
3018EXPORT_SYMBOL(hci_free_dev);
3019
3020/* Register HCI device */
3021int hci_register_dev(struct hci_dev *hdev)
3022{
3023 int id, error;
3024
3025
3026 if (!hdev->open || !hdev->close ||
3027 !(hdev->send || hdev->rh_reserved_send))
3028 return -EINVAL;
3029
3030 /* Do not allow HCI_AMP devices to register at index 0,
3031 * so the index can be used as the AMP controller ID.
3032 */
3033 switch (hdev->dev_type) {
3034 case HCI_PRIMARY:
3035 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3036 break;
3037 case HCI_AMP:
3038 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3039 break;
3040 default:
3041 return -EINVAL;
3042 }
3043
3044 if (id < 0)
3045 return id;
3046
3047 sprintf(hdev->name, "hci%d", id);
3048 hdev->id = id;
3049
3050 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3051
3052 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3053 WQ_MEM_RECLAIM, 1, hdev->name);
3054 if (!hdev->workqueue) {
3055 error = -ENOMEM;
3056 goto err;
3057 }
3058
3059 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3060 WQ_MEM_RECLAIM, 1, hdev->name);
3061 if (!hdev->req_workqueue) {
3062 destroy_workqueue(hdev->workqueue);
3063 error = -ENOMEM;
3064 goto err;
3065 }
3066
3067 if (!IS_ERR_OR_NULL(bt_debugfs))
3068 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3069
3070 dev_set_name(&hdev->dev, "%s", hdev->name);
3071
3072 error = device_add(&hdev->dev);
3073 if (error < 0)
3074 goto err_wqueue;
3075
3076 hci_leds_init(hdev);
3077
3078 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3079 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3080 hdev);
3081 if (hdev->rfkill) {
3082 if (rfkill_register(hdev->rfkill) < 0) {
3083 rfkill_destroy(hdev->rfkill);
3084 hdev->rfkill = NULL;
3085 }
3086 }
3087
3088 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3089 hci_dev_set_flag(hdev, HCI_RFKILLED);
3090
3091 hci_dev_set_flag(hdev, HCI_SETUP);
3092 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3093
3094 if (hdev->dev_type == HCI_PRIMARY) {
3095 /* Assume BR/EDR support until proven otherwise (such as
3096 * through reading supported features during init).
3097 */
3098 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3099 }
3100
3101 write_lock(&hci_dev_list_lock);
3102 list_add(&hdev->list, &hci_dev_list);
3103 write_unlock(&hci_dev_list_lock);
3104
3105 /* Devices that are marked for raw-only usage are unconfigured
3106 * and should not be included in normal operation.
3107 */
3108 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3109 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3110
3111 hci_sock_dev_event(hdev, HCI_DEV_REG);
3112 hci_dev_hold(hdev);
3113
3114 queue_work(hdev->req_workqueue, &hdev->power_on);
3115
3116 return id;
3117
3118err_wqueue:
3119 destroy_workqueue(hdev->workqueue);
3120 destroy_workqueue(hdev->req_workqueue);
3121err:
3122 ida_simple_remove(&hci_index_ida, hdev->id);
3123
3124 return error;
3125}
3126EXPORT_SYMBOL(hci_register_dev);
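/* Example (illustrative only; my_open/my_close/my_send are hypothetical
 * driver callbacks): the usual transport driver registration sequence:
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */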
3127
3128/* Unregister HCI device */
3129void hci_unregister_dev(struct hci_dev *hdev)
3130{
3131 int id;
3132
3133 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3134
3135 hci_dev_set_flag(hdev, HCI_UNREGISTER);
3136
3137 id = hdev->id;
3138
3139 write_lock(&hci_dev_list_lock);
3140 list_del(&hdev->list);
3141 write_unlock(&hci_dev_list_lock);
3142
3143 cancel_work_sync(&hdev->power_on);
3144
3145 hci_dev_do_close(hdev);
3146
3147 if (!test_bit(HCI_INIT, &hdev->flags) &&
3148 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3149 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3150 hci_dev_lock(hdev);
3151 mgmt_index_removed(hdev);
3152 hci_dev_unlock(hdev);
3153 }
3154
3155 /* mgmt_index_removed should take care of emptying the
3156 * pending list */
3157 BUG_ON(!list_empty(&hdev->mgmt_pending));
3158
3159 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3160
3161 if (hdev->rfkill) {
3162 rfkill_unregister(hdev->rfkill);
3163 rfkill_destroy(hdev->rfkill);
3164 }
3165
3166 device_del(&hdev->dev);
3167
3168 debugfs_remove_recursive(hdev->debugfs);
3169 kfree_const(hdev->hw_info);
3170 kfree_const(hdev->fw_info);
3171
3172 destroy_workqueue(hdev->workqueue);
3173 destroy_workqueue(hdev->req_workqueue);
3174
3175 hci_dev_lock(hdev);
3176 hci_bdaddr_list_clear(&hdev->blacklist);
3177 hci_bdaddr_list_clear(&hdev->whitelist);
3178 hci_uuids_clear(hdev);
3179 hci_link_keys_clear(hdev);
3180 hci_smp_ltks_clear(hdev);
3181 hci_smp_irks_clear(hdev);
3182 hci_remote_oob_data_clear(hdev);
3183 hci_adv_instances_clear(hdev);
3184 hci_bdaddr_list_clear(&hdev->le_white_list);
3185 hci_conn_params_clear_all(hdev);
3186 hci_discovery_filter_clear(hdev);
3187 hci_dev_unlock(hdev);
3188
3189 hci_dev_put(hdev);
3190
3191 ida_simple_remove(&hci_index_ida, id);
3192}
3193EXPORT_SYMBOL(hci_unregister_dev);
3194
3195/* Suspend HCI device */
3196int hci_suspend_dev(struct hci_dev *hdev)
3197{
3198 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3199 return 0;
3200}
3201EXPORT_SYMBOL(hci_suspend_dev);
3202
3203/* Resume HCI device */
3204int hci_resume_dev(struct hci_dev *hdev)
3205{
3206 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3207 return 0;
3208}
3209EXPORT_SYMBOL(hci_resume_dev);
3210
3211/* Reset HCI device */
3212int hci_reset_dev(struct hci_dev *hdev)
3213{
3214 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3215 struct sk_buff *skb;
3216
3217 skb = bt_skb_alloc(3, GFP_ATOMIC);
3218 if (!skb)
3219 return -ENOMEM;
3220
3221 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3222 memcpy(skb_put(skb, 3), hw_err, 3);
3223
3224 /* Send Hardware Error to upper stack */
3225 return hci_recv_frame(hdev, skb);
3226}
3227EXPORT_SYMBOL(hci_reset_dev);
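/* Note: hci_reset_dev() works by injecting a synthetic Hardware Error
 * event (HCI_EV_HARDWARE_ERROR) into the RX path, so the stack tears
 * the controller down through its normal error handling machinery.
 */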
3228
3229/* Receive frame from HCI drivers */
3230int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3231{
3232 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3233 && !test_bit(HCI_INIT, &hdev->flags))) {
3234 kfree_skb(skb);
3235 return -ENXIO;
3236 }
3237
3238 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3239 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3240 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3241 kfree_skb(skb);
3242 return -EINVAL;
3243 }
3244
3245 /* Incoming skb */
3246 bt_cb(skb)->incoming = 1;
3247
3248 /* Time stamp */
3249 __net_timestamp(skb);
3250
3251 skb_queue_tail(&hdev->rx_q, skb);
3252 queue_work(hdev->workqueue, &hdev->rx_work);
3253
3254 return 0;
3255}
3256EXPORT_SYMBOL(hci_recv_frame);
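/* Example (illustrative only): a driver handing a received event to the
 * core; the packet type must be set first, and the skb is consumed even
 * on error:
 *
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	err = hci_recv_frame(hdev, skb);
 */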
3257
3258/* Receive diagnostic message from HCI drivers */
3259int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3260{
3261 /* Mark as diagnostic packet */
3262 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3263
3264 /* Time stamp */
3265 __net_timestamp(skb);
3266
3267 skb_queue_tail(&hdev->rx_q, skb);
3268 queue_work(hdev->workqueue, &hdev->rx_work);
3269
3270 return 0;
3271}
3272EXPORT_SYMBOL(hci_recv_diag);
3273
3274void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3275{
3276 va_list vargs;
3277
3278 va_start(vargs, fmt);
3279 kfree_const(hdev->hw_info);
3280 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3281 va_end(vargs);
3282}
3283EXPORT_SYMBOL(hci_set_hw_info);
3284
3285void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3286{
3287 va_list vargs;
3288
3289 va_start(vargs, fmt);
3290 kfree_const(hdev->fw_info);
3291 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3292 va_end(vargs);
3293}
3294EXPORT_SYMBOL(hci_set_fw_info);
3295
3296/* ---- Interface to upper protocols ---- */
3297
3298int hci_register_cb(struct hci_cb *cb)
3299{
3300 BT_DBG("%p name %s", cb, cb->name);
3301
3302 mutex_lock(&hci_cb_list_lock);
3303 list_add_tail(&cb->list, &hci_cb_list);
3304 mutex_unlock(&hci_cb_list_lock);
3305
3306 return 0;
3307}
3308EXPORT_SYMBOL(hci_register_cb);
3309
3310int hci_unregister_cb(struct hci_cb *cb)
3311{
3312 BT_DBG("%p name %s", cb, cb->name);
3313
3314 mutex_lock(&hci_cb_list_lock);
3315 list_del(&cb->list);
3316 mutex_unlock(&hci_cb_list_lock);
3317
3318 return 0;
3319}
3320EXPORT_SYMBOL(hci_unregister_cb);
3321
3322static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3323{
3324 int err;
3325
3326 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3327 skb->len);
3328
3329 /* Time stamp */
3330 __net_timestamp(skb);
3331
3332 /* Send copy to monitor */
3333 hci_send_to_monitor(hdev, skb);
3334
3335 if (atomic_read(&hdev->promisc)) {
3336 /* Send copy to the sockets */
3337 hci_send_to_sock(hdev, skb);
3338 }
3339
3340 /* Get rid of skb owner, prior to sending to the driver. */
3341 skb_orphan(skb);
3342
3343 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3344 kfree_skb(skb);
3345 return;
3346 }
3347
3348
3349 /* Some driver builds provide the reserved ->rh_reserved_send()
3350 * hook instead of ->send(). Prefer it when set; unlike ->send(),
3351 * it takes only the skb and expects skb->dev to point back at the
3352 * owning hci_dev.
3353 */
3354 if (hdev->rh_reserved_send) {
3355 skb->dev = (void *) hdev;
3356 err = hdev->rh_reserved_send(skb);
3357 if (err < 0) {
3358 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3359 kfree_skb(skb);
3360 }
3361 return;
3362 }
3363
3364 err = hdev->send(hdev, skb);
3365 if (err < 0) {
3366 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3367 kfree_skb(skb);
3368 }
3369}
3370
3371/* Send HCI command */
3372int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3373 const void *param)
3374{
3375 struct sk_buff *skb;
3376
3377 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3378
3379 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3380 if (!skb) {
3381 BT_ERR("%s no memory for command", hdev->name);
3382 return -ENOMEM;
3383 }
3384
3385 /* Stand-alone HCI commands must be flagged as
3386 * single-command requests.
3387 */
3388 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3389
3390 skb_queue_tail(&hdev->cmd_q, skb);
3391 queue_work(hdev->workqueue, &hdev->cmd_work);
3392
3393 return 0;
3394}
3395
3396/* Get data from the previously sent command */
3397void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3398{
3399 struct hci_command_hdr *hdr;
3400
3401 if (!hdev->sent_cmd)
3402 return NULL;
3403
3404 hdr = (void *) hdev->sent_cmd->data;
3405
3406 if (hdr->opcode != cpu_to_le16(opcode))
3407 return NULL;
3408
3409 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3410
3411 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3412}
3413
3414/* Send HCI command and wait for command complete event */
3415struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3416 const void *param, u32 timeout)
3417{
3418 struct sk_buff *skb;
3419
3420 if (!test_bit(HCI_UP, &hdev->flags))
3421 return ERR_PTR(-ENETDOWN);
3422
3423 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3424
3425 hci_req_sync_lock(hdev);
3426 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3427 hci_req_sync_unlock(hdev);
3428
3429 return skb;
3430}
3431EXPORT_SYMBOL(hci_cmd_sync);
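/* Example (illustrative only): reading the local version information
 * synchronously and releasing the response:
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	rp = (struct hci_rp_read_local_version *)skb->data;
 *	...
 *	kfree_skb(skb);
 */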
3432
3433/* Send ACL data */
3434static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3435{
3436 struct hci_acl_hdr *hdr;
3437 int len = skb->len;
3438
3439 skb_push(skb, HCI_ACL_HDR_SIZE);
3440 skb_reset_transport_header(skb);
3441 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3442 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3443 hdr->dlen = cpu_to_le16(len);
3444}
3445
3446static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3447 struct sk_buff *skb, __u16 flags)
3448{
3449 struct hci_conn *conn = chan->conn;
3450 struct hci_dev *hdev = conn->hdev;
3451 struct sk_buff *list;
3452
3453 skb->len = skb_headlen(skb);
3454 skb->data_len = 0;
3455
3456 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3457
3458 switch (hdev->dev_type) {
3459 case HCI_PRIMARY:
3460 hci_add_acl_hdr(skb, conn->handle, flags);
3461 break;
3462 case HCI_AMP:
3463 hci_add_acl_hdr(skb, chan->handle, flags);
3464 break;
3465 default:
3466 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3467 return;
3468 }
3469
3470 list = skb_shinfo(skb)->frag_list;
3471 if (!list) {
3472 /* Non fragmented */
3473 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3474
3475 skb_queue_tail(queue, skb);
3476 } else {
3477 /* Fragmented */
3478 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3479
3480 skb_shinfo(skb)->frag_list = NULL;
3481
3482 /* Queue all fragments atomically. We need to use spin_lock_bh
3483 * here because of 6LoWPAN links, as there this function is
3484 * called from softirq and using normal spin lock could cause
3485 * deadlocks.
3486 */
3487 spin_lock_bh(&queue->lock);
3488
3489 __skb_queue_tail(queue, skb);
3490
3491 flags &= ~ACL_START;
3492 flags |= ACL_CONT;
3493 do {
3494 skb = list; list = list->next;
3495
3496 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3497 hci_add_acl_hdr(skb, conn->handle, flags);
3498
3499 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3500
3501 __skb_queue_tail(queue, skb);
3502 } while (list);
3503
3504 spin_unlock_bh(&queue->lock);
3505 }
3506}
3507
3508void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3509{
3510 struct hci_dev *hdev = chan->conn->hdev;
3511
3512 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3513
3514 hci_queue_acl(chan, &chan->data_q, skb, flags);
3515
3516 queue_work(hdev->workqueue, &hdev->tx_work);
3517}
3518
3519/* Send SCO data */
3520void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3521{
3522 struct hci_dev *hdev = conn->hdev;
3523 struct hci_sco_hdr hdr;
3524
3525 BT_DBG("%s len %d", hdev->name, skb->len);
3526
3527 hdr.handle = cpu_to_le16(conn->handle);
3528 hdr.dlen = skb->len;
3529
3530 skb_push(skb, HCI_SCO_HDR_SIZE);
3531 skb_reset_transport_header(skb);
3532 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3533
3534 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3535
3536 skb_queue_tail(&conn->data_q, skb);
3537 queue_work(hdev->workqueue, &hdev->tx_work);
3538}
3539
3540/* ---- HCI TX task (outgoing data) ---- */
3541
3542/* HCI Connection scheduler */
3543static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3544 int *quote)
3545{
3546 struct hci_conn_hash *h = &hdev->conn_hash;
3547 struct hci_conn *conn = NULL, *c;
3548 unsigned int num = 0, min = ~0;
3549
3550 /* We don't have to lock device here. Connections are always
3551 * added and removed with TX task disabled. */
3552
3553 rcu_read_lock();
3554
3555 list_for_each_entry_rcu(c, &h->list, list) {
3556 if (c->type != type || skb_queue_empty(&c->data_q))
3557 continue;
3558
3559 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3560 continue;
3561
3562 num++;
3563
3564 if (c->sent < min) {
3565 min = c->sent;
3566 conn = c;
3567 }
3568
3569 if (hci_conn_num(hdev, type) == num)
3570 break;
3571 }
3572
3573 rcu_read_unlock();
3574
3575 if (conn) {
3576 int cnt, q;
3577
3578 switch (conn->type) {
3579 case ACL_LINK:
3580 cnt = hdev->acl_cnt;
3581 break;
3582 case SCO_LINK:
3583 case ESCO_LINK:
3584 cnt = hdev->sco_cnt;
3585 break;
3586 case LE_LINK:
3587 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3588 break;
3589 default:
3590 cnt = 0;
3591 BT_ERR("Unknown link type");
3592 }
3593
3594 q = cnt / num;
3595 *quote = q ? q : 1;
3596 } else
3597 *quote = 0;
3598
3599 BT_DBG("conn %p quote %d", conn, *quote);
3600 return conn;
3601}
3602
3603static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3604{
3605 struct hci_conn_hash *h = &hdev->conn_hash;
3606 struct hci_conn *c;
3607
3608 BT_ERR("%s link tx timeout", hdev->name);
3609
3610 rcu_read_lock();
3611
3612 /* Kill stalled connections */
3613 list_for_each_entry_rcu(c, &h->list, list) {
3614 if (c->type == type && c->sent) {
3615 BT_ERR("%s killing stalled connection %pMR",
3616 hdev->name, &c->dst);
3617 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3618 }
3619 }
3620
3621 rcu_read_unlock();
3622}
3623
3624static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3625 int *quote)
3626{
3627 struct hci_conn_hash *h = &hdev->conn_hash;
3628 struct hci_chan *chan = NULL;
3629 unsigned int num = 0, min = ~0, cur_prio = 0;
3630 struct hci_conn *conn;
3631 int cnt, q, conn_num = 0;
3632
3633 BT_DBG("%s", hdev->name);
3634
3635 rcu_read_lock();
3636
3637 list_for_each_entry_rcu(conn, &h->list, list) {
3638 struct hci_chan *tmp;
3639
3640 if (conn->type != type)
3641 continue;
3642
3643 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3644 continue;
3645
3646 conn_num++;
3647
3648 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3649 struct sk_buff *skb;
3650
3651 if (skb_queue_empty(&tmp->data_q))
3652 continue;
3653
3654 skb = skb_peek(&tmp->data_q);
3655 if (skb->priority < cur_prio)
3656 continue;
3657
3658 if (skb->priority > cur_prio) {
3659 num = 0;
3660 min = ~0;
3661 cur_prio = skb->priority;
3662 }
3663
3664 num++;
3665
3666 if (conn->sent < min) {
3667 min = conn->sent;
3668 chan = tmp;
3669 }
3670 }
3671
3672 if (hci_conn_num(hdev, type) == conn_num)
3673 break;
3674 }
3675
3676 rcu_read_unlock();
3677
3678 if (!chan)
3679 return NULL;
3680
3681 switch (chan->conn->type) {
3682 case ACL_LINK:
3683 cnt = hdev->acl_cnt;
3684 break;
3685 case AMP_LINK:
3686 cnt = hdev->block_cnt;
3687 break;
3688 case SCO_LINK:
3689 case ESCO_LINK:
3690 cnt = hdev->sco_cnt;
3691 break;
3692 case LE_LINK:
3693 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3694 break;
3695 default:
3696 cnt = 0;
3697 BT_ERR("Unknown link type");
3698 }
3699
3700 q = cnt / num;
3701 *quote = q ? q : 1;
3702 BT_DBG("chan %p quote %d", chan, *quote);
3703 return chan;
3704}
3705
3706static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3707{
3708 struct hci_conn_hash *h = &hdev->conn_hash;
3709 struct hci_conn *conn;
3710 int num = 0;
3711
3712 BT_DBG("%s", hdev->name);
3713
3714 rcu_read_lock();
3715
3716 list_for_each_entry_rcu(conn, &h->list, list) {
3717 struct hci_chan *chan;
3718
3719 if (conn->type != type)
3720 continue;
3721
3722 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3723 continue;
3724
3725 num++;
3726
3727 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3728 struct sk_buff *skb;
3729
3730 if (chan->sent) {
3731 chan->sent = 0;
3732 continue;
3733 }
3734
3735 if (skb_queue_empty(&chan->data_q))
3736 continue;
3737
3738 skb = skb_peek(&chan->data_q);
3739 if (skb->priority >= HCI_PRIO_MAX - 1)
3740 continue;
3741
3742 skb->priority = HCI_PRIO_MAX - 1;
3743
3744 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3745 skb->priority);
3746 }
3747
3748 if (hci_conn_num(hdev, type) == num)
3749 break;
3750 }
3751
3752 rcu_read_unlock();
3753
3754}
3755
3756static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3757{
3758 /* Calculate count of blocks used by this packet */
3759 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3760}
3761
3762static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3763{
3764 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3765 /* ACL tx timeout must be longer than maximum
3766 * link supervision timeout (40.9 seconds) */
3767 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3768 HCI_ACL_TX_TIMEOUT))
3769 hci_link_tx_to(hdev, ACL_LINK);
3770 }
3771}
3772
3773static void hci_sched_acl_pkt(struct hci_dev *hdev)
3774{
3775 unsigned int cnt = hdev->acl_cnt;
3776 struct hci_chan *chan;
3777 struct sk_buff *skb;
3778 int quote;
3779
3780 __check_timeout(hdev, cnt);
3781
3782 while (hdev->acl_cnt &&
3783 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3784 u32 priority = (skb_peek(&chan->data_q))->priority;
3785 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3786 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3787 skb->len, skb->priority);
3788
3789 /* Stop if priority has changed */
3790 if (skb->priority < priority)
3791 break;
3792
3793 skb = skb_dequeue(&chan->data_q);
3794
3795 hci_conn_enter_active_mode(chan->conn,
3796 bt_cb(skb)->force_active);
3797
3798 hci_send_frame(hdev, skb);
3799 hdev->acl_last_tx = jiffies;
3800
3801 hdev->acl_cnt--;
3802 chan->sent++;
3803 chan->conn->sent++;
3804 }
3805 }
3806
3807 if (cnt != hdev->acl_cnt)
3808 hci_prio_recalculate(hdev, ACL_LINK);
3809}
3810
3811static void hci_sched_acl_blk(struct hci_dev *hdev)
3812{
3813 unsigned int cnt = hdev->block_cnt;
3814 struct hci_chan *chan;
3815 struct sk_buff *skb;
3816 int quote;
3817 u8 type;
3818
3819 __check_timeout(hdev, cnt);
3820
3821 BT_DBG("%s", hdev->name);
3822
3823 if (hdev->dev_type == HCI_AMP)
3824 type = AMP_LINK;
3825 else
3826 type = ACL_LINK;
3827
3828 while (hdev->block_cnt > 0 &&
3829 (chan = hci_chan_sent(hdev, type, &quote))) {
3830 u32 priority = (skb_peek(&chan->data_q))->priority;
3831 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3832 int blocks;
3833
3834 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3835 skb->len, skb->priority);
3836
3837 /* Stop if priority has changed */
3838 if (skb->priority < priority)
3839 break;
3840
3841 skb = skb_dequeue(&chan->data_q);
3842
3843 blocks = __get_blocks(hdev, skb);
3844 if (blocks > hdev->block_cnt)
3845 return;
3846
3847 hci_conn_enter_active_mode(chan->conn,
3848 bt_cb(skb)->force_active);
3849
3850 hci_send_frame(hdev, skb);
3851 hdev->acl_last_tx = jiffies;
3852
3853 hdev->block_cnt -= blocks;
3854 quote -= blocks;
3855
3856 chan->sent += blocks;
3857 chan->conn->sent += blocks;
3858 }
3859 }
3860
3861 if (cnt != hdev->block_cnt)
3862 hci_prio_recalculate(hdev, type);
3863}
3864
3865static void hci_sched_acl(struct hci_dev *hdev)
3866{
3867 BT_DBG("%s", hdev->name);
3868
3869 /* No ACL link over BR/EDR controller */
3870 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3871 return;
3872
3873 /* No AMP link over AMP controller */
3874 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3875 return;
3876
3877 switch (hdev->flow_ctl_mode) {
3878 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3879 hci_sched_acl_pkt(hdev);
3880 break;
3881
3882 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3883 hci_sched_acl_blk(hdev);
3884 break;
3885 }
3886}
3887
3888/* Schedule SCO */
3889static void hci_sched_sco(struct hci_dev *hdev)
3890{
3891 struct hci_conn *conn;
3892 struct sk_buff *skb;
3893 int quote;
3894
3895 BT_DBG("%s", hdev->name);
3896
3897 if (!hci_conn_num(hdev, SCO_LINK))
3898 return;
3899
3900 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3901 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3902 BT_DBG("skb %p len %d", skb, skb->len);
3903 hci_send_frame(hdev, skb);
3904
3905 conn->sent++;
3906 if (conn->sent == ~0)
3907 conn->sent = 0;
3908 }
3909 }
3910}
3911
3912static void hci_sched_esco(struct hci_dev *hdev)
3913{
3914 struct hci_conn *conn;
3915 struct sk_buff *skb;
3916 int quote;
3917
3918 BT_DBG("%s", hdev->name);
3919
3920 if (!hci_conn_num(hdev, ESCO_LINK))
3921 return;
3922
3923 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3924 "e))) {
3925 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3926 BT_DBG("skb %p len %d", skb, skb->len);
3927 hci_send_frame(hdev, skb);
3928
3929 conn->sent++;
3930 if (conn->sent == ~0)
3931 conn->sent = 0;
3932 }
3933 }
3934}
3935
3936static void hci_sched_le(struct hci_dev *hdev)
3937{
3938 struct hci_chan *chan;
3939 struct sk_buff *skb;
3940 int quote, cnt, tmp;
3941
3942 BT_DBG("%s", hdev->name);
3943
3944 if (!hci_conn_num(hdev, LE_LINK))
3945 return;
3946
3947 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3948 /* LE tx timeout must be longer than maximum
3949 * link supervision timeout (40.9 seconds) */
3950 if (!hdev->le_cnt && hdev->le_pkts &&
3951 time_after(jiffies, hdev->le_last_tx + HZ * 45))
3952 hci_link_tx_to(hdev, LE_LINK);
3953 }
3954
3955 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3956 tmp = cnt;
3957 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3958 u32 priority = (skb_peek(&chan->data_q))->priority;
3959 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3960 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3961 skb->len, skb->priority);
3962
3963 /* Stop if priority has changed */
3964 if (skb->priority < priority)
3965 break;
3966
3967 skb = skb_dequeue(&chan->data_q);
3968
3969 hci_send_frame(hdev, skb);
3970 hdev->le_last_tx = jiffies;
3971
3972 cnt--;
3973 chan->sent++;
3974 chan->conn->sent++;
3975 }
3976 }
3977
3978 if (hdev->le_pkts)
3979 hdev->le_cnt = cnt;
3980 else
3981 hdev->acl_cnt = cnt;
3982
3983 if (cnt != tmp)
3984 hci_prio_recalculate(hdev, LE_LINK);
3985}
3986
3987static void hci_tx_work(struct work_struct *work)
3988{
3989 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3990 struct sk_buff *skb;
3991
3992 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3993 hdev->sco_cnt, hdev->le_cnt);
3994
3995 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3996 /* Schedule queues and send stuff to HCI driver */
3997 hci_sched_acl(hdev);
3998 hci_sched_sco(hdev);
3999 hci_sched_esco(hdev);
4000 hci_sched_le(hdev);
4001 }
4002
4003 /* Send next queued raw (unknown type) packet */
4004 while ((skb = skb_dequeue(&hdev->raw_q)))
4005 hci_send_frame(hdev, skb);
4006}
4007
4008/* ----- HCI RX task (incoming data processing) ----- */
4009
4010/* ACL data packet */
4011static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4012{
4013 struct hci_acl_hdr *hdr = (void *) skb->data;
4014 struct hci_conn *conn;
4015 __u16 handle, flags;
4016
4017 skb_pull(skb, HCI_ACL_HDR_SIZE);
4018
4019 handle = __le16_to_cpu(hdr->handle);
4020 flags = hci_flags(handle);
4021 handle = hci_handle(handle);
4022
4023 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4024 handle, flags);
4025
4026 hdev->stat.acl_rx++;
4027
4028 hci_dev_lock(hdev);
4029 conn = hci_conn_hash_lookup_handle(hdev, handle);
4030 hci_dev_unlock(hdev);
4031
4032 if (conn) {
4033 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4034
4035 /* Send to upper protocol */
4036 l2cap_recv_acldata(conn, skb, flags);
4037 return;
4038 } else {
4039 BT_ERR("%s ACL packet for unknown connection handle %d",
4040 hdev->name, handle);
4041 }
4042
4043 kfree_skb(skb);
4044}
4045
4046/* SCO data packet */
4047static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4048{
4049 struct hci_sco_hdr *hdr = (void *) skb->data;
4050 struct hci_conn *conn;
4051 __u16 handle;
4052
4053 skb_pull(skb, HCI_SCO_HDR_SIZE);
4054
4055 handle = __le16_to_cpu(hdr->handle);
4056
4057 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4058
4059 hdev->stat.sco_rx++;
4060
4061 hci_dev_lock(hdev);
4062 conn = hci_conn_hash_lookup_handle(hdev, handle);
4063 hci_dev_unlock(hdev);
4064
4065 if (conn) {
4066 /* Send to upper protocol */
4067 sco_recv_scodata(conn, skb);
4068 return;
4069 } else {
4070 BT_ERR("%s SCO packet for unknown connection handle %d",
4071 hdev->name, handle);
4072 }
4073
4074 kfree_skb(skb);
4075}
4076
4077static bool hci_req_is_complete(struct hci_dev *hdev)
4078{
4079 struct sk_buff *skb;
4080
4081 skb = skb_peek(&hdev->cmd_q);
4082 if (!skb)
4083 return true;
4084
4085 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4086}
4087
4088static void hci_resend_last(struct hci_dev *hdev)
4089{
4090 struct hci_command_hdr *sent;
4091 struct sk_buff *skb;
4092 u16 opcode;
4093
4094 if (!hdev->sent_cmd)
4095 return;
4096
4097 sent = (void *) hdev->sent_cmd->data;
4098 opcode = __le16_to_cpu(sent->opcode);
4099 if (opcode == HCI_OP_RESET)
4100 return;
4101
4102 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4103 if (!skb)
4104 return;
4105
4106 skb_queue_head(&hdev->cmd_q, skb);
4107 queue_work(hdev->workqueue, &hdev->cmd_work);
4108}
4109
4110void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4111 hci_req_complete_t *req_complete,
4112 hci_req_complete_skb_t *req_complete_skb)
4113{
4114 struct sk_buff *skb;
4115 unsigned long flags;
4116
4117 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4118
4119 /* If the completed command doesn't match the last one that was
4120 * sent we need to do special handling of it.
4121 */
4122 if (!hci_sent_cmd_data(hdev, opcode)) {
4123 /* Some CSR based controllers generate a spontaneous
4124 * reset complete event during init and any pending
4125 * command will never be completed. In such a case we
4126 * need to resend whatever was the last sent
4127 * command.
4128 */
4129 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4130 hci_resend_last(hdev);
4131
4132 return;
4133 }
4134
4135 /* If the command succeeded and there's still more commands in
4136 * this request the request is not yet complete.
4137 */
4138 if (!status && !hci_req_is_complete(hdev))
4139 return;
4140
4141 /* If this was the last command in a request the complete
4142 * callback would be found in hdev->sent_cmd instead of the
4143 * command queue (hdev->cmd_q).
4144 */
4145 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4146 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4147 return;
4148 }
4149
4150 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4151 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4152 return;
4153 }
4154
4155 /* Remove all pending commands belonging to this request */
4156 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4157 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4158 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4159 __skb_queue_head(&hdev->cmd_q, skb);
4160 break;
4161 }
4162
4163 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4164 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4165 else
4166 *req_complete = bt_cb(skb)->hci.req_complete;
4167 kfree_skb(skb);
4168 }
4169 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4170}
4171
4172static void hci_rx_work(struct work_struct *work)
4173{
4174 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4175 struct sk_buff *skb;
4176
4177 BT_DBG("%s", hdev->name);
4178
4179 while ((skb = skb_dequeue(&hdev->rx_q))) {
4180 /* Send copy to monitor */
4181 hci_send_to_monitor(hdev, skb);
4182
4183 if (atomic_read(&hdev->promisc)) {
4184 /* Send copy to the sockets */
4185 hci_send_to_sock(hdev, skb);
4186 }
4187
4188 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4189 kfree_skb(skb);
4190 continue;
4191 }
4192
4193 if (test_bit(HCI_INIT, &hdev->flags)) {
4194 /* Don't process data packets in these states. */
4195 switch (hci_skb_pkt_type(skb)) {
4196 case HCI_ACLDATA_PKT:
4197 case HCI_SCODATA_PKT:
4198 kfree_skb(skb);
4199 continue;
4200 }
4201 }
4202
4203 /* Process frame */
4204 switch (hci_skb_pkt_type(skb)) {
4205 case HCI_EVENT_PKT:
4206 BT_DBG("%s Event packet", hdev->name);
4207 hci_event_packet(hdev, skb);
4208 break;
4209
4210 case HCI_ACLDATA_PKT:
4211 BT_DBG("%s ACL data packet", hdev->name);
4212 hci_acldata_packet(hdev, skb);
4213 break;
4214
4215 case HCI_SCODATA_PKT:
4216 BT_DBG("%s SCO data packet", hdev->name);
4217 hci_scodata_packet(hdev, skb);
4218 break;
4219
4220 default:
4221 kfree_skb(skb);
4222 break;
4223 }
4224 }
4225}
4226
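/* Note: cmd_cnt acts as a credit counter for outstanding commands: it
 * is consumed here when a command is handed to the driver and given
 * back when the controller completes it (or hci_cmd_timeout() fires),
 * so the core keeps at most one command in flight at a time.
 */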
4227static void hci_cmd_work(struct work_struct *work)
4228{
4229 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4230 struct sk_buff *skb;
4231
4232 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4233 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4234
4235 /* Send queued commands */
4236 if (atomic_read(&hdev->cmd_cnt)) {
4237 skb = skb_dequeue(&hdev->cmd_q);
4238 if (!skb)
4239 return;
4240
4241 kfree_skb(hdev->sent_cmd);
4242
4243 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4244 if (hdev->sent_cmd) {
4245 atomic_dec(&hdev->cmd_cnt);
4246 hci_send_frame(hdev, skb);
4247 if (test_bit(HCI_RESET, &hdev->flags))
4248 cancel_delayed_work(&hdev->cmd_timer);
4249 else
4250 schedule_delayed_work(&hdev->cmd_timer,
4251 HCI_CMD_TIMEOUT);
4252 } else {
4253 skb_queue_head(&hdev->cmd_q, skb);
4254 queue_work(hdev->workqueue, &hdev->cmd_work);
4255 }
4256 }
4257}
4258