1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <linux/export.h>
29#include <linux/idr.h>
30
31#include <linux/rfkill.h>
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
/* Deferred work handlers, defined later in this file */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list, protected by its reader/writer lock */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list, protected by its reader/writer lock */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID numbering: allocates the hciX index for new controllers */
static DEFINE_IDA(hci_index_ida);
50
51
52
/* Notify HCI sockets about a device-level event (up/down/reg/unreg). */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58
59
/* Completion callback for synchronous requests: store the result, mark
 * the request done and wake the thread sleeping in __hci_req_sync() /
 * __hci_cmd_sync_ev(). Only acts while a request is actually pending.
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
70
/* Abort a pending synchronous request with the given (positive) errno;
 * the sleeping waiter sees HCI_REQ_CANCELED and returns -err.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
81
/* Detach and validate the last received event (hdev->recv_evt).
 *
 * If @event is non-zero the skb is returned as-is (header pulled) when
 * the event code matches. Otherwise the event must be a Command Complete
 * whose opcode matches @opcode; the skb is returned with both the event
 * header and the cmd_complete header pulled, leaving only the parameters.
 *
 * Consumes hdev->recv_evt in all cases; on any mismatch or truncation
 * the skb is freed and ERR_PTR(-ENODATA) is returned.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	/* Take ownership of the stored event under the dev lock */
	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event: return it verbatim on match */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
136
/* Send a single HCI command and sleep until its completion event arrives
 * or @timeout (in jiffies) expires.
 *
 * @event: expected completion event code, or 0 for Command Complete.
 * Returns the event parameters as an skb (caller frees), or an ERR_PTR:
 * -EINTR on signal, -ETIMEDOUT on timeout, or the translated HCI status.
 * Caller must hold the request lock (this is the __ variant).
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	/* Must be set before hci_req_run() so the completion callback
	 * cannot observe a stale status.
	 */
	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	/* Register on the wait queue before sleeping so a wakeup between
	 * hci_req_run() and schedule_timeout() is not lost.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status byte -> negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds a positive errno from hci_req_cancel() */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
190
/* Convenience wrapper around __hci_cmd_sync_ev() expecting a plain
 * Command Complete event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
197
198
/* Execute an HCI request built by @func and wait synchronously for it
 * to finish, time out, or be cancelled. Caller must hold the request
 * lock (hci_req_sync() is the locking variant).
 *
 * Returns 0 on success, or a negative errno (-EINTR, -ETIMEDOUT,
 * translated controller status, or hci_req_run() failure).
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				      unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	/* Let the caller queue its commands into the request */
	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* -ENODATA means the request contained no commands at all
		 * (a request built entirely of conditionals may legitimately
		 * queue nothing). Treat this as success rather than error.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	/* Queue ourselves before sleeping so the completion wakeup
	 * cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status byte -> negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds a positive errno from hci_req_cancel() */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
261
262static int hci_req_sync(struct hci_dev *hdev,
263 void (*req)(struct hci_request *req,
264 unsigned long opt),
265 unsigned long opt, __u32 timeout)
266{
267 int ret;
268
269 if (!test_bit(HCI_UP, &hdev->flags))
270 return -ENETDOWN;
271
272
273 hci_req_lock(hdev);
274 ret = __hci_req_sync(hdev, req, opt, timeout);
275 hci_req_unlock(hdev);
276
277 return ret;
278}
279
/* Request builder: queue an HCI_Reset. The HCI_RESET flag marks a reset
 * in flight so the completion path can clear controller state.
 */
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
288
/* Stage-1 init commands common to BR/EDR controllers */
static void bredr_init(struct hci_request *req)
{
	/* BR/EDR uses packet-based flow control */
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
302
/* Stage-1 init commands for AMP (alternate MAC/PHY) controllers */
static void amp_init(struct hci_request *req)
{
	/* AMP controllers use block-based flow control */
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}
316
/* First init stage: optionally reset the controller, then dispatch to
 * the type-specific (BR/EDR vs AMP) command sequence.
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset here only if the driver doesn't already reset on close */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
341
342static void bredr_setup(struct hci_request *req)
343{
344 __le16 param;
345 __u8 flt_type;
346
347
348 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
349
350
351 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
352
353
354 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
355
356
357 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
358
359
360 flt_type = HCI_FLT_CLEAR_ALL;
361 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
362
363
364 param = __constant_cpu_to_le16(0x7d00);
365 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
366
367
368 if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
369 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
370 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
371 }
372}
373
/* Stage-2 LE setup: read LE controller parameters, and enable LE on
 * LE-only controllers where there is no BR/EDR side to gate it.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
397
398static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
399{
400 if (lmp_ext_inq_capable(hdev))
401 return 0x02;
402
403 if (lmp_inq_rssi_capable(hdev))
404 return 0x01;
405
406 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
407 hdev->lmp_subver == 0x0757)
408 return 0x01;
409
410 if (hdev->manufacturer == 15) {
411 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
412 return 0x01;
413 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
414 return 0x01;
415 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
416 return 0x01;
417 }
418
419 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
420 hdev->lmp_subver == 0x1805)
421 return 0x01;
422
423 return 0x00;
424}
425
/* Queue a Write Inquiry Mode command using the best mode the
 * controller supports (see hci_get_inquiry_mode()).
 */
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
434
/* Build and queue the Set Event Mask (and, for LE controllers, the LE
 * Set Event Mask) based on the controller's LMP feature bits.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles don't accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
503
/* Second init stage: transport-specific setup, event masks, and
 * feature-conditional configuration commands.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* NOTE(review): manufacturer 31 appears to be excluded from Read
	 * Local Commands here — presumably a controller quirk; confirm
	 * before relying on it.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear any stale EIR data */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
557
558static void hci_setup_link_policy(struct hci_request *req)
559{
560 struct hci_dev *hdev = req->hdev;
561 struct hci_cp_write_def_link_policy cp;
562 u16 link_policy = 0;
563
564 if (lmp_rswitch_capable(hdev))
565 link_policy |= HCI_LP_RSWITCH;
566 if (lmp_hold_capable(hdev))
567 link_policy |= HCI_LP_HOLD;
568 if (lmp_sniff_capable(hdev))
569 link_policy |= HCI_LP_SNIFF;
570 if (lmp_park_capable(hdev))
571 link_policy |= HCI_LP_PARK;
572
573 cp.policy = cpu_to_le16(link_policy);
574 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
575}
576
/* Queue Write LE Host Supported when the host's desired LE state
 * differs from what the controller currently reports. Only meaningful
 * on dual-mode (BR/EDR-capable) controllers.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support the explicit enablement command */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	/* Only send the command if the host LE state needs changing */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
597
/* Third init stage: commands that depend on results from stage two
 * (supported-commands bitmap, max feature page).
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Delete Stored Link Key is gated on the supported-commands bitmap
	 * (octet 6 bit 7) rather than a feature bit, since some controllers
	 * misreport support for it.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Write Default Link Policy: octet 5 bit 4 of supported commands */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read feature pages 2..max_page (page 1 was read in stage two) */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
638
/* Run the three-stage controller initialization sequence. AMP (and any
 * non-BR/EDR) controllers only get stage one; stages two and three are
 * BR/EDR-specific. Returns 0 or the first failing stage's errno.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only BR/EDR controllers need the remaining init stages */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}
660
/* Request builder: queue Write Scan Enable with @opt as the scan mode */
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
670
/* Request builder: queue Write Authentication Enable with @opt */
static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
680
/* Request builder: queue Write Encryption Mode with @opt */
static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
690
/* Request builder: queue Write Default Link Policy with @opt,
 * converted to the little-endian wire format.
 */
static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
700
701
702
/* Look up an HCI device by index. On success the device's refcount is
 * incremented (caller must hci_dev_put()); returns NULL if no device
 * with that index exists or the index is negative.
 */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	/* Walk the global device list under the read lock */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
722
723
724
725bool hci_discovery_active(struct hci_dev *hdev)
726{
727 struct discovery_state *discov = &hdev->discovery;
728
729 switch (discov->state) {
730 case DISCOVERY_FINDING:
731 case DISCOVERY_RESOLVING:
732 return true;
733
734 default:
735 return false;
736 }
737}
738
/* Transition the discovery state machine and emit mgmt "discovering"
 * events on the edges user space cares about (started / fully stopped).
 * No-op if the state is unchanged.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* STARTING -> STOPPED means discovery never actually began,
		 * so user space was never told it started.
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
764
/* Free every inquiry cache entry and reset the secondary (unknown /
 * resolve) lists. Entries live only on "all" plus at most one of the
 * secondary lists, so freeing via "all" and re-initializing the other
 * heads releases everything.
 */
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
778
/* Find the cache entry for @bdaddr in the "all" list, or NULL. */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
794
/* Find @bdaddr among entries whose remote name is still unknown
 * (the "unknown" list), or NULL.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
810
/* Find an entry on the name-resolve list. With @bdaddr == BDADDR_ANY
 * this acts as a wildcard and returns the first entry whose name_state
 * matches @state; otherwise it matches on the exact address.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
829
/* Re-insert @ie into the resolve list keeping it sorted by signal
 * strength (strongest |RSSI| resolved first); entries already being
 * resolved (NAME_PENDING) stay ahead regardless of RSSI.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Find the last position before a non-pending entry with an equal
	 * or stronger |RSSI|; insert after it.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
848
/* Insert or refresh an inquiry result in the cache.
 *
 * @name_known: caller already knows the remote name for this result.
 * @ssp: out-param, set true if either this result or a cached entry
 *       reported SSP support.
 *
 * Returns true when the entry's name is (now) known or being resolved,
 * false when the name is still unknown (i.e. name resolution is needed)
 * or allocation failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry result invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed for an entry awaiting name resolution:
		 * re-sort it within the resolve list.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache; allocate and link a new one */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name became known for an entry that was queued as unknown:
	 * drop it from the secondary list.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
906
907static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
908{
909 struct discovery_state *cache = &hdev->discovery;
910 struct inquiry_info *info = (struct inquiry_info *) buf;
911 struct inquiry_entry *e;
912 int copied = 0;
913
914 list_for_each_entry(e, &cache->all, all) {
915 struct inquiry_data *data = &e->data;
916
917 if (copied >= num)
918 break;
919
920 bacpy(&info->bdaddr, &data->bdaddr);
921 info->pscan_rep_mode = data->pscan_rep_mode;
922 info->pscan_period_mode = data->pscan_period_mode;
923 info->pscan_mode = data->pscan_mode;
924 memcpy(info->dev_class, data->dev_class, 3);
925 info->clock_offset = data->clock_offset;
926
927 info++;
928 copied++;
929 }
930
931 BT_DBG("cache %p, copied %d", cache, copied);
932 return copied;
933}
934
/* Request builder: queue an Inquiry command using the parameters in the
 * struct hci_inquiry_req passed via @opt. Skipped if an inquiry is
 * already running.
 */
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
952
/* wait_on_bit() action: sleep until woken; non-zero (signal pending)
 * aborts the wait.
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
958
959int hci_inquiry(void __user *arg)
960{
961 __u8 __user *ptr = arg;
962 struct hci_inquiry_req ir;
963 struct hci_dev *hdev;
964 int err = 0, do_inquiry = 0, max_rsp;
965 long timeo;
966 __u8 *buf;
967
968 if (copy_from_user(&ir, ptr, sizeof(ir)))
969 return -EFAULT;
970
971 hdev = hci_dev_get(ir.dev_id);
972 if (!hdev)
973 return -ENODEV;
974
975 hci_dev_lock(hdev);
976 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
977 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
978 hci_inquiry_cache_flush(hdev);
979 do_inquiry = 1;
980 }
981 hci_dev_unlock(hdev);
982
983 timeo = ir.length * msecs_to_jiffies(2000);
984
985 if (do_inquiry) {
986 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
987 timeo);
988 if (err < 0)
989 goto done;
990
991
992
993
994 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
995 TASK_INTERRUPTIBLE))
996 return -EINTR;
997 }
998
999
1000
1001
1002 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1003
1004
1005
1006
1007 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1008 if (!buf) {
1009 err = -ENOMEM;
1010 goto done;
1011 }
1012
1013 hci_dev_lock(hdev);
1014 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1015 hci_dev_unlock(hdev);
1016
1017 BT_DBG("num_rsp %d", ir.num_rsp);
1018
1019 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1020 ptr += sizeof(ir);
1021 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1022 ir.num_rsp))
1023 err = -EFAULT;
1024 } else
1025 err = -EFAULT;
1026
1027 kfree(buf);
1028
1029done:
1030 hci_dev_put(hdev);
1031 return err;
1032}
1033
/* Build LE advertising data into @ptr: an optional flags field, an
 * optional TX-power field, and the (possibly shortened) local name.
 * Returns the number of bytes written.
 *
 * NOTE(review): assumes @ptr has at least HCI_MAX_AD_LENGTH bytes —
 * the name is clamped to that budget but flags/TX-power are not
 * re-checked; confirm callers always pass a full-size buffer.
 */
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		/* AD structure: length (2), type, flags byte */
		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Remaining budget minus the 2-byte length/type header */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* AD length byte counts the type byte plus the name */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
1091
/* Regenerate the LE advertising data and queue LE Set Advertising Data
 * if it differs from what the controller already has. No-op on
 * controllers without LE.
 */
void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	/* Skip the command if the data is unchanged */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
1116
1117
1118
/* Power on the HCI device with the given index: open the transport,
 * run driver setup and controller initialization, and announce the
 * device as up. On any failure the transport is torn down again.
 * Returns 0 or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to open a device that is being unregistered */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the transport (driver callback) */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Driver-specific setup runs only on first-time power-on */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		/* Non-BR/EDR controllers are treated as raw unless
		 * high-speed support was enabled.
		 */
		if (hdev->dev_type != HCI_BREDR && !enable_hs)
			set_bit(HCI_RAW, &hdev->flags);

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed: drain work, purge queues and close the
		 * transport again.
		 */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1210
/* Power down the device: cancel pending work and requests, flush
 * queues and the connection hash, optionally reset the controller, and
 * close the transport. Releases the reference taken at open time.
 * Safe to call on an already-down device (returns 0 early).
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Stop TX/RX processing before tearing down state */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset the device if the driver requests it on close */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Drain the command work after the optional reset request */
	flush_work(&hdev->cmd_work);

	/* Drop remaining queued packets */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point the transport is closed; the driver will not
	 * deliver any further events.
	 */
	hdev->close(hdev);

	/* Clear transient flags; keep only the persistent dev_flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio state is no longer valid */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1305
1306int hci_dev_close(__u16 dev)
1307{
1308 struct hci_dev *hdev;
1309 int err;
1310
1311 hdev = hci_dev_get(dev);
1312 if (!hdev)
1313 return -ENODEV;
1314
1315 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1316 cancel_delayed_work(&hdev->power_off);
1317
1318 err = hci_dev_do_close(hdev);
1319
1320 hci_dev_put(hdev);
1321 return err;
1322}
1323
/* HCIDEVRESET ioctl: drop all queued traffic, flush caches and
 * connections, reset controller counters and (for non-raw devices)
 * issue an HCI_Reset. No-op (success) if the device is down.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control credit counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1361
1362int hci_dev_reset_stat(__u16 dev)
1363{
1364 struct hci_dev *hdev;
1365 int ret = 0;
1366
1367 hdev = hci_dev_get(dev);
1368 if (!hdev)
1369 return -ENODEV;
1370
1371 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1372
1373 hci_dev_put(hdev);
1374
1375 return ret;
1376}
1377
/* Dispatch the HCISET* family of ioctls. dev_opt carries either a
 * single value or, for the MTU commands, two packed __u16 fields
 * (pkts in the low half, mtu in the high half).
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Encryption requires authentication; enable it
			 * first if it is off.
			 */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two __u16: [0] = pkts, [1] = mtu */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
1452
/* HCIGETDEVLIST ioctl: copy up to dev_num {id, flags} pairs for the
 * registered devices to user space. As a side effect, touching a
 * device via this ioctl cancels its pending auto-power-off and marks
 * non-mgmt devices pairable (legacy ioctl behavior).
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation: at least one entry, at most two pages */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy ioctl users expect non-mgmt devices pairable */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1499
/* Fill a struct hci_dev_info for the device named in the request and
 * copy it back to user space.
 *
 * Returns 0 on success, -EFAULT on copy failure or -ENODEV if the
 * requested device id does not exist.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Legacy ioctl user space is taking over power management;
	 * abort any pending automatic power-off.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Non-mgmt (legacy) user space expects devices pairable */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only controller: report LE buffer info in the ACL
		 * fields; SCO does not apply.
		 */
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1548
1549
1550
1551static int hci_rfkill_set_block(void *data, bool blocked)
1552{
1553 struct hci_dev *hdev = data;
1554
1555 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1556
1557 if (!blocked)
1558 return 0;
1559
1560 hci_dev_do_close(hdev);
1561
1562 return 0;
1563}
1564
/* rfkill integration: only the block/unblock hook is needed */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1568
/* Deferred power-on handler (hdev->power_on work item).
 *
 * Brings the device up and, if it was only powered for initial setup
 * (HCI_AUTO_OFF), schedules it to power back off after a timeout
 * unless user space claims it first.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_open(hdev->id);
	if (err < 0) {
		/* Let mgmt user space know the power-on request failed */
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	/* First successful power-on completes setup: announce the index */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1589
/* Deferred power-off handler (hdev->power_off delayed work item). */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1599
/* Discoverable-timeout handler (hdev->discov_off delayed work item).
 *
 * Turns inquiry scan back off (page scan only) once the discoverable
 * period expires and clears the stored timeout.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1617
1618int hci_uuids_clear(struct hci_dev *hdev)
1619{
1620 struct bt_uuid *uuid, *tmp;
1621
1622 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1623 list_del(&uuid->list);
1624 kfree(uuid);
1625 }
1626
1627 return 0;
1628}
1629
1630int hci_link_keys_clear(struct hci_dev *hdev)
1631{
1632 struct list_head *p, *n;
1633
1634 list_for_each_safe(p, n, &hdev->link_keys) {
1635 struct link_key *key;
1636
1637 key = list_entry(p, struct link_key, list);
1638
1639 list_del(p);
1640 kfree(key);
1641 }
1642
1643 return 0;
1644}
1645
1646int hci_smp_ltks_clear(struct hci_dev *hdev)
1647{
1648 struct smp_ltk *k, *tmp;
1649
1650 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1651 list_del(&k->list);
1652 kfree(k);
1653 }
1654
1655 return 0;
1656}
1657
1658struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1659{
1660 struct link_key *k;
1661
1662 list_for_each_entry(k, &hdev->link_keys, list)
1663 if (bacmp(bdaddr, &k->bdaddr) == 0)
1664 return k;
1665
1666 return NULL;
1667}
1668
/* Decide whether a newly created link key should be stored persistently
 * or discarded when the connection goes down.
 *
 * Returns true if the key may be kept, false if it must be flushed.
 * NOTE(review): the numeric key_type/auth constants below follow the
 * HCI link key type and authentication requirement encodings — confirm
 * against the Bluetooth Core spec when changing them.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key types (< 0x03) are always persistent */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure and must never be stored */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key with no previous one: don't store */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case (no connection context available) */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1704
1705struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1706{
1707 struct smp_ltk *k;
1708
1709 list_for_each_entry(k, &hdev->long_term_keys, list) {
1710 if (k->ediv != ediv ||
1711 memcmp(rand, k->rand, sizeof(k->rand)))
1712 continue;
1713
1714 return k;
1715 }
1716
1717 return NULL;
1718}
1719
1720struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1721 u8 addr_type)
1722{
1723 struct smp_ltk *k;
1724
1725 list_for_each_entry(k, &hdev->long_term_keys, list)
1726 if (addr_type == k->bdaddr_type &&
1727 bacmp(bdaddr, &k->bdaddr) == 0)
1728 return k;
1729
1730 return NULL;
1731}
1732
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * @new_key: nonzero if the controller reported this as a newly created
 *           key, in which case mgmt is notified and persistence is
 *           evaluated via hci_persistent_key().
 *
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key type known" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key — treat it as a plain combination key.
	 */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys get flushed when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1785
/* Store (or update) an SMP long term key for @bdaddr/@addr_type.
 *
 * Only STK and LTK types are accepted; anything else is silently
 * ignored.  When @new_key is set and the key is an LTK, mgmt is
 * notified.  Returns 0 on success or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1822
1823int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1824{
1825 struct link_key *key;
1826
1827 key = hci_find_link_key(hdev, bdaddr);
1828 if (!key)
1829 return -ENOENT;
1830
1831 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1832
1833 list_del(&key->list);
1834 kfree(key);
1835
1836 return 0;
1837}
1838
1839int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1840{
1841 struct smp_ltk *k, *tmp;
1842
1843 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1844 if (bacmp(bdaddr, &k->bdaddr))
1845 continue;
1846
1847 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1848
1849 list_del(&k->list);
1850 kfree(k);
1851 }
1852
1853 return 0;
1854}
1855
1856
/* HCI command timer (hdev->cmd_timer): fires when the controller fails
 * to answer a command in time.  Logs the stuck opcode, then restores
 * the command credit so the queue can make progress again.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	/* Allow one outstanding command again and kick the queue */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1873
1874struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1875 bdaddr_t *bdaddr)
1876{
1877 struct oob_data *data;
1878
1879 list_for_each_entry(data, &hdev->remote_oob_data, list)
1880 if (bacmp(bdaddr, &data->bdaddr) == 0)
1881 return data;
1882
1883 return NULL;
1884}
1885
1886int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1887{
1888 struct oob_data *data;
1889
1890 data = hci_find_remote_oob_data(hdev, bdaddr);
1891 if (!data)
1892 return -ENOENT;
1893
1894 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1895
1896 list_del(&data->list);
1897 kfree(data);
1898
1899 return 0;
1900}
1901
1902int hci_remote_oob_data_clear(struct hci_dev *hdev)
1903{
1904 struct oob_data *data, *n;
1905
1906 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1907 list_del(&data->list);
1908 kfree(data);
1909 }
1910
1911 return 0;
1912}
1913
/* Store (or update) remote OOB pairing data (hash + randomizer) for
 * @bdaddr.  Returns 0 on success or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		/* No entry yet for this address; create one.  All fields
		 * are fully overwritten below, so kmalloc is enough.
		 */
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
1937
1938struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1939{
1940 struct bdaddr_list *b;
1941
1942 list_for_each_entry(b, &hdev->blacklist, list)
1943 if (bacmp(bdaddr, &b->bdaddr) == 0)
1944 return b;
1945
1946 return NULL;
1947}
1948
1949int hci_blacklist_clear(struct hci_dev *hdev)
1950{
1951 struct list_head *p, *n;
1952
1953 list_for_each_safe(p, n, &hdev->blacklist) {
1954 struct bdaddr_list *b;
1955
1956 b = list_entry(p, struct bdaddr_list, list);
1957
1958 list_del(p);
1959 kfree(b);
1960 }
1961
1962 return 0;
1963}
1964
/* Add @bdaddr to the device blacklist and notify mgmt.
 *
 * Returns -EBADF for the wildcard BDADDR_ANY, -EEXIST if already
 * blacklisted, -ENOMEM on allocation failure, otherwise the result of
 * mgmt_device_blocked().
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
1985
1986int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1987{
1988 struct bdaddr_list *entry;
1989
1990 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1991 return hci_blacklist_clear(hdev);
1992
1993 entry = hci_blacklist_lookup(hdev, bdaddr);
1994 if (!entry)
1995 return -ENOENT;
1996
1997 list_del(&entry->list);
1998 kfree(entry);
1999
2000 return mgmt_device_unblocked(hdev, bdaddr, type);
2001}
2002
2003static void inquiry_complete(struct hci_dev *hdev, u8 status)
2004{
2005 if (status) {
2006 BT_ERR("Failed to start inquiry: status %d", status);
2007
2008 hci_dev_lock(hdev);
2009 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2010 hci_dev_unlock(hdev);
2011 return;
2012 }
2013}
2014
/* Completion callback for the LE-scan-disable request.
 *
 * For pure LE discovery the procedure is now finished; for interleaved
 * discovery the BR/EDR inquiry phase is started next.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Start the BR/EDR phase with a fresh inquiry cache */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
2057
/* LE scan timeout handler (hdev->le_scan_disable delayed work item).
 *
 * Sends the command to stop LE scanning; the rest of the discovery
 * state machine continues in le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
2078
2079
/* Allocate and initialize an HCI device structure.
 *
 * Sets conservative BR/EDR defaults, initializes all locks, lists,
 * work items, queues and the command timer.  The caller registers the
 * device with hci_register_dev() and releases it with hci_free_dev().
 * Returns NULL on allocation failure.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Minimal baseline packet types every controller supports */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	/* 0x03 = NoInputNoOutput IO capability by default */
	hdev->io_capability = 0x03;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Sniff interval bounds in baseband slots (0.625 ms units) */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
2131EXPORT_SYMBOL(hci_alloc_dev);
2132
2133
/* Release an HCI device structure.
 *
 * Drops the embedded struct device reference; the actual kfree happens
 * via the device's release callback once the last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
2139EXPORT_SYMBOL(hci_free_dev);
2140
2141
/* Register an HCI device with the core.
 *
 * Assigns an index (AMP controllers never get index 0, which is
 * reserved for a BR/EDR controller), creates the per-device
 * workqueues, sysfs entries and rfkill switch, puts the device on the
 * global list and schedules the initial power-on.
 *
 * Returns the assigned index on success or a negative errno.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		/* rfkill is best-effort; registration failure just means
		 * the device has no kill switch.
		 */
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
2224EXPORT_SYMBOL(hci_register_dev);
2225
2226
/* Unregister an HCI device from the core.
 *
 * Tears down in roughly the reverse order of hci_register_dev():
 * remove from the global list, close the device, notify mgmt,
 * unregister rfkill and sysfs, destroy workqueues, free all stored
 * keys/lists, drop the core's reference and release the index.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Save the id; hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Only announce the removal if the index was ever announced */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
2284
2285
/* Notify listeners (HCI sockets) that the device is suspending. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
2291EXPORT_SYMBOL(hci_suspend_dev);
2292
2293
/* Notify listeners (HCI sockets) that the device has resumed. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
2299EXPORT_SYMBOL(hci_resume_dev);
2300
2301
2302int hci_recv_frame(struct sk_buff *skb)
2303{
2304 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2305 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2306 && !test_bit(HCI_INIT, &hdev->flags))) {
2307 kfree_skb(skb);
2308 return -ENXIO;
2309 }
2310
2311
2312 bt_cb(skb)->incoming = 1;
2313
2314
2315 __net_timestamp(skb);
2316
2317 skb_queue_tail(&hdev->rx_q, skb);
2318 queue_work(hdev->workqueue, &hdev->rx_work);
2319
2320 return 0;
2321}
2322EXPORT_SYMBOL(hci_recv_frame);
2323
2324static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2325 int count, __u8 index)
2326{
2327 int len = 0;
2328 int hlen = 0;
2329 int remain = count;
2330 struct sk_buff *skb;
2331 struct bt_skb_cb *scb;
2332
2333 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2334 index >= NUM_REASSEMBLY)
2335 return -EILSEQ;
2336
2337 skb = hdev->reassembly[index];
2338
2339 if (!skb) {
2340 switch (type) {
2341 case HCI_ACLDATA_PKT:
2342 len = HCI_MAX_FRAME_SIZE;
2343 hlen = HCI_ACL_HDR_SIZE;
2344 break;
2345 case HCI_EVENT_PKT:
2346 len = HCI_MAX_EVENT_SIZE;
2347 hlen = HCI_EVENT_HDR_SIZE;
2348 break;
2349 case HCI_SCODATA_PKT:
2350 len = HCI_MAX_SCO_SIZE;
2351 hlen = HCI_SCO_HDR_SIZE;
2352 break;
2353 }
2354
2355 skb = bt_skb_alloc(len, GFP_ATOMIC);
2356 if (!skb)
2357 return -ENOMEM;
2358
2359 scb = (void *) skb->cb;
2360 scb->expect = hlen;
2361 scb->pkt_type = type;
2362
2363 skb->dev = (void *) hdev;
2364 hdev->reassembly[index] = skb;
2365 }
2366
2367 while (count) {
2368 scb = (void *) skb->cb;
2369 len = min_t(uint, scb->expect, count);
2370
2371 memcpy(skb_put(skb, len), data, len);
2372
2373 count -= len;
2374 data += len;
2375 scb->expect -= len;
2376 remain = count;
2377
2378 switch (type) {
2379 case HCI_EVENT_PKT:
2380 if (skb->len == HCI_EVENT_HDR_SIZE) {
2381 struct hci_event_hdr *h = hci_event_hdr(skb);
2382 scb->expect = h->plen;
2383
2384 if (skb_tailroom(skb) < scb->expect) {
2385 kfree_skb(skb);
2386 hdev->reassembly[index] = NULL;
2387 return -ENOMEM;
2388 }
2389 }
2390 break;
2391
2392 case HCI_ACLDATA_PKT:
2393 if (skb->len == HCI_ACL_HDR_SIZE) {
2394 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2395 scb->expect = __le16_to_cpu(h->dlen);
2396
2397 if (skb_tailroom(skb) < scb->expect) {
2398 kfree_skb(skb);
2399 hdev->reassembly[index] = NULL;
2400 return -ENOMEM;
2401 }
2402 }
2403 break;
2404
2405 case HCI_SCODATA_PKT:
2406 if (skb->len == HCI_SCO_HDR_SIZE) {
2407 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2408 scb->expect = h->dlen;
2409
2410 if (skb_tailroom(skb) < scb->expect) {
2411 kfree_skb(skb);
2412 hdev->reassembly[index] = NULL;
2413 return -ENOMEM;
2414 }
2415 }
2416 break;
2417 }
2418
2419 if (scb->expect == 0) {
2420
2421
2422 bt_cb(skb)->pkt_type = type;
2423 hci_recv_frame(skb);
2424
2425 hdev->reassembly[index] = NULL;
2426 return remain;
2427 }
2428 }
2429
2430 return remain;
2431}
2432
2433int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2434{
2435 int rem = 0;
2436
2437 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2438 return -EILSEQ;
2439
2440 while (count) {
2441 rem = hci_reassembly(hdev, type, data, count, type - 1);
2442 if (rem < 0)
2443 return rem;
2444
2445 data += (count - rem);
2446 count = rem;
2447 }
2448
2449 return rem;
2450}
2451EXPORT_SYMBOL(hci_recv_fragment);
2452
2453#define STREAM_REASSEMBLY 0
2454
/* Feed a raw byte stream (packet-type indicator byte followed by the
 * packet, e.g. H4-style transports) into the stream reassembly slot.
 *
 * Returns the number of unconsumed bytes or a negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame: first byte is the type */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			/* Continuation: reuse the in-progress type */
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
2487
2488
2489
/* Register a protocol callback set (e.g. L2CAP, SCO) with the core.
 * Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
2501
/* Unregister a previously registered protocol callback set.
 * Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
2513
/* Hand one outgoing frame to the driver.
 *
 * Time-stamps the skb, mirrors it to the monitor interface and (in
 * promiscuous mode) to raw HCI sockets, then passes ownership to the
 * driver's send callback.  Returns the driver's result or -ENODEV.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2541
/* Initialize an asynchronous HCI request builder for @hdev. */
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}
2548
/* Submit a built request to the command queue.
 *
 * @complete is attached to the last queued command and runs when the
 * whole request has finished.  Returns 0 on success, the builder's
 * deferred error if one occurred, or -ENODATA for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2580
/* Build an skb carrying one HCI command (header + @plen parameter
 * bytes).  Returns NULL on allocation failure.
 */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}
2606
2607
/* Send a single stand-alone HCI command.
 * Returns 0 on queueing success or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as a request start
	 * so the command processing treats them as their own request.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2631
2632
/* Queue a command onto an asynchronous request, optionally marking it
 * as completed by a specific event (@event) rather than Command
 * Complete.  Errors are deferred into req->err and reported by
 * hci_req_run().
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no
	 * point in queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first queued command marks the start of the request */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
2662
/* Queue a command onto an asynchronous request, completed by the
 * normal Command Complete/Status event (event = 0).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
2668
2669
2670void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2671{
2672 struct hci_command_hdr *hdr;
2673
2674 if (!hdev->sent_cmd)
2675 return NULL;
2676
2677 hdr = (void *) hdev->sent_cmd->data;
2678
2679 if (hdr->opcode != cpu_to_le16(opcode))
2680 return NULL;
2681
2682 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2683
2684 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2685}
2686
2687
/* Prepend an ACL data header (handle+flags, data length) to @skb. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2699
/* Queue an outgoing ACL packet (plus any frag_list fragments) on
 * @queue with ACL headers added.
 *
 * BR/EDR uses the connection handle, AMP the channel handle.  The
 * first fragment carries the caller's @flags; continuations are
 * re-flagged ACL_CONT.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2758
/* Queue ACL data on the channel's data queue and kick the TX worker. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2771
2772
/* Prepend a SCO header, queue the packet on the connection's data
 * queue and kick the TX worker.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
2793
2794
2795
2796
/* HCI connection scheduler: pick the connection of @type with pending
 * data and the lowest in-flight count ("least sent first"), and
 * compute its fair share of the available controller buffers in
 * *@quote (at least 1 when a connection is chosen, 0 otherwise).
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Early exit once every connection of this type is seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Buffer pool depends on the link type; LE falls back to
		 * the ACL pool when the controller has no LE buffers.
		 */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2856
2857static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2858{
2859 struct hci_conn_hash *h = &hdev->conn_hash;
2860 struct hci_conn *c;
2861
2862 BT_ERR("%s link tx timeout", hdev->name);
2863
2864 rcu_read_lock();
2865
2866
2867 list_for_each_entry_rcu(c, &h->list, list) {
2868 if (c->type == type && c->sent) {
2869 BT_ERR("%s killing stalled connection %pMR",
2870 hdev->name, &c->dst);
2871 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2872 }
2873 }
2874
2875 rcu_read_unlock();
2876}
2877
/* Channel-level scheduler: across all channels of connections of @type,
 * find the highest-priority queued skb and, within that priority level,
 * the channel whose connection has the fewest outstanding packets.
 * On success *quote receives the connection's fair share of the free
 * buffer count (at least 1); returns NULL (quote untouched) when no
 * channel has data queued.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority matters for
			 * selecting this channel.
			 */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Found a strictly higher priority level:
			 * restart the fairness bookkeeping at it.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Prefer the least-loaded connection at the
			 * current priority level.
			 */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type visited: stop early. */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Select the buffer pool matching the link type. */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* No dedicated LE buffer pool: share the ACL pool. */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share per competing channel, minimum one packet. */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2959
/* Anti-starvation pass run after a scheduling round for @type links:
 * channels that got to transmit have their per-round counter reset,
 * while starved channels (sent == 0 but data queued) get their head
 * skb promoted to HCI_PRIO_MAX - 1 so they win the next round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* This channel was serviced last round: clear
			 * its counter and leave its priorities alone.
			 */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) the promotion level. */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			/* Starved channel: boost its head skb. */
			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type visited: stop early. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3009
3010static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3011{
3012
3013 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3014}
3015
3016static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3017{
3018 if (!test_bit(HCI_RAW, &hdev->flags)) {
3019
3020
3021 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3022 HCI_ACL_TX_TIMEOUT))
3023 hci_link_tx_to(hdev, ACL_LINK);
3024 }
3025}
3026
/* Packet-based ACL scheduling: while controller buffer credits
 * (hdev->acl_cnt) remain, pick the best channel via hci_chan_sent()
 * and drain up to its quota of same-or-higher-priority frames, then
 * rebalance channel priorities if anything was transmitted.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the head skb when the quota was granted. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if the queue's head priority dropped
			 * below the level the quota was granted for.
			 */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: run the anti-starvation pass. */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3064
3065static void hci_sched_acl_blk(struct hci_dev *hdev)
3066{
3067 unsigned int cnt = hdev->block_cnt;
3068 struct hci_chan *chan;
3069 struct sk_buff *skb;
3070 int quote;
3071 u8 type;
3072
3073 __check_timeout(hdev, cnt);
3074
3075 BT_DBG("%s", hdev->name);
3076
3077 if (hdev->dev_type == HCI_AMP)
3078 type = AMP_LINK;
3079 else
3080 type = ACL_LINK;
3081
3082 while (hdev->block_cnt > 0 &&
3083 (chan = hci_chan_sent(hdev, type, "e))) {
3084 u32 priority = (skb_peek(&chan->data_q))->priority;
3085 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3086 int blocks;
3087
3088 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3089 skb->len, skb->priority);
3090
3091
3092 if (skb->priority < priority)
3093 break;
3094
3095 skb = skb_dequeue(&chan->data_q);
3096
3097 blocks = __get_blocks(hdev, skb);
3098 if (blocks > hdev->block_cnt)
3099 return;
3100
3101 hci_conn_enter_active_mode(chan->conn,
3102 bt_cb(skb)->force_active);
3103
3104 hci_send_frame(skb);
3105 hdev->acl_last_tx = jiffies;
3106
3107 hdev->block_cnt -= blocks;
3108 quote -= blocks;
3109
3110 chan->sent += blocks;
3111 chan->conn->sent += blocks;
3112 }
3113 }
3114
3115 if (cnt != hdev->block_cnt)
3116 hci_prio_recalculate(hdev, type);
3117}
3118
3119static void hci_sched_acl(struct hci_dev *hdev)
3120{
3121 BT_DBG("%s", hdev->name);
3122
3123
3124 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3125 return;
3126
3127
3128 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3129 return;
3130
3131 switch (hdev->flow_ctl_mode) {
3132 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3133 hci_sched_acl_pkt(hdev);
3134 break;
3135
3136 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3137 hci_sched_acl_blk(hdev);
3138 break;
3139 }
3140}
3141
3142
3143static void hci_sched_sco(struct hci_dev *hdev)
3144{
3145 struct hci_conn *conn;
3146 struct sk_buff *skb;
3147 int quote;
3148
3149 BT_DBG("%s", hdev->name);
3150
3151 if (!hci_conn_num(hdev, SCO_LINK))
3152 return;
3153
3154 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
3155 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3156 BT_DBG("skb %p len %d", skb, skb->len);
3157 hci_send_frame(skb);
3158
3159 conn->sent++;
3160 if (conn->sent == ~0)
3161 conn->sent = 0;
3162 }
3163 }
3164}
3165
3166static void hci_sched_esco(struct hci_dev *hdev)
3167{
3168 struct hci_conn *conn;
3169 struct sk_buff *skb;
3170 int quote;
3171
3172 BT_DBG("%s", hdev->name);
3173
3174 if (!hci_conn_num(hdev, ESCO_LINK))
3175 return;
3176
3177 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3178 "e))) {
3179 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3180 BT_DBG("skb %p len %d", skb, skb->len);
3181 hci_send_frame(skb);
3182
3183 conn->sent++;
3184 if (conn->sent == ~0)
3185 conn->sent = 0;
3186 }
3187 }
3188}
3189
/* LE scheduling: like packet-based ACL scheduling, but credits come
 * from the dedicated LE buffer pool when the controller advertises one
 * (le_pkts != 0) and are borrowed from the ACL pool otherwise.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* Dedicated LE pool exhausted with no completions for
		 * 45 seconds: treat the link as stalled and kill it.
		 */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Borrow ACL credits when there is no dedicated LE pool. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Priority of the head skb when the quota was granted. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if the head priority dropped below the
			 * level the quota was granted for.
			 */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write remaining credits back to the pool they came from. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: run the anti-starvation pass. */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3240
/* TX work item: run each per-link-type scheduler in turn, then flush
 * raw frames from hdev->raw_q unconditionally (raw frames bypass all
 * flow control here).
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send frames to the HCI driver. */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet. */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
3263
3264
3265
3266
3267static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3268{
3269 struct hci_acl_hdr *hdr = (void *) skb->data;
3270 struct hci_conn *conn;
3271 __u16 handle, flags;
3272
3273 skb_pull(skb, HCI_ACL_HDR_SIZE);
3274
3275 handle = __le16_to_cpu(hdr->handle);
3276 flags = hci_flags(handle);
3277 handle = hci_handle(handle);
3278
3279 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3280 handle, flags);
3281
3282 hdev->stat.acl_rx++;
3283
3284 hci_dev_lock(hdev);
3285 conn = hci_conn_hash_lookup_handle(hdev, handle);
3286 hci_dev_unlock(hdev);
3287
3288 if (conn) {
3289 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3290
3291
3292 l2cap_recv_acldata(conn, skb, flags);
3293 return;
3294 } else {
3295 BT_ERR("%s ACL packet for unknown connection handle %d",
3296 hdev->name, handle);
3297 }
3298
3299 kfree_skb(skb);
3300}
3301
3302
3303static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3304{
3305 struct hci_sco_hdr *hdr = (void *) skb->data;
3306 struct hci_conn *conn;
3307 __u16 handle;
3308
3309 skb_pull(skb, HCI_SCO_HDR_SIZE);
3310
3311 handle = __le16_to_cpu(hdr->handle);
3312
3313 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3314
3315 hdev->stat.sco_rx++;
3316
3317 hci_dev_lock(hdev);
3318 conn = hci_conn_hash_lookup_handle(hdev, handle);
3319 hci_dev_unlock(hdev);
3320
3321 if (conn) {
3322
3323 sco_recv_scodata(conn, skb);
3324 return;
3325 } else {
3326 BT_ERR("%s SCO packet for unknown connection handle %d",
3327 hdev->name, handle);
3328 }
3329
3330 kfree_skb(skb);
3331}
3332
3333static bool hci_req_is_complete(struct hci_dev *hdev)
3334{
3335 struct sk_buff *skb;
3336
3337 skb = skb_peek(&hdev->cmd_q);
3338 if (!skb)
3339 return true;
3340
3341 return bt_cb(skb)->req.start;
3342}
3343
3344static void hci_resend_last(struct hci_dev *hdev)
3345{
3346 struct hci_command_hdr *sent;
3347 struct sk_buff *skb;
3348 u16 opcode;
3349
3350 if (!hdev->sent_cmd)
3351 return;
3352
3353 sent = (void *) hdev->sent_cmd->data;
3354 opcode = __le16_to_cpu(sent->opcode);
3355 if (opcode == HCI_OP_RESET)
3356 return;
3357
3358 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3359 if (!skb)
3360 return;
3361
3362 skb_queue_head(&hdev->cmd_q, skb);
3363 queue_work(hdev->workqueue, &hdev->cmd_work);
3364}
3365
/* Handle completion of the command @opcode with @status.  Determines
 * whether the request the command belonged to is now finished and, if
 * so, discards the request's remaining queued commands and invokes its
 * completion callback exactly once.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* The completed command does not match the last one sent:
	 * special handling is needed.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* A reset-complete arriving during init that we did not
		 * send would leave the pending command unanswered
		 * forever, so resend the last command as a workaround
		 * (NOTE(review): presumably targets controllers that
		 * emit a spontaneous reset complete — confirm).
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* Command succeeded and more commands of this request remain
	 * queued: the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command of the request, its completion
	 * callback lives on hdev->sent_cmd rather than on an entry of
	 * the command queue.
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* Clear the callback so a second invocation of
			 * this function cannot run it twice.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Discard the remaining queued commands of this request; stop
	 * at the first command that starts the next request.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			/* Start of the next request: put it back. */
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* The last discarded entry carries the callback. */
		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
3431
/* RX work item: drain hdev->rx_q, mirroring each frame to the monitor
 * and — in promiscuous mode — to raw sockets, then dispatching by
 * packet type to the event/ACL/SCO handlers.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to the monitor interface. */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to raw sockets. */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw mode: the kernel stack does not process frames. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Data packets are dropped during init; only
			 * events are processed in this state.
			 */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Dispatch by packet type; handlers consume the skb. */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
3486
/* Command work item: when the controller has a free command slot
 * (cmd_cnt > 0), dequeue the next command, keep a clone of it in
 * hdev->sent_cmd for later completion matching, transmit it and manage
 * the command timeout.  If cloning fails, the command is requeued and
 * the work rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands. */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the reference to the previously sent command. */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* The timeout timer is stopped while HCI_RESET
			 * is set; otherwise it is (re)armed.
			 */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue and retry later. */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
3518
3519u8 bdaddr_to_le(u8 bdaddr_type)
3520{
3521 switch (bdaddr_type) {
3522 case BDADDR_LE_PUBLIC:
3523 return ADDR_LE_DEV_PUBLIC;
3524
3525 default:
3526
3527 return ADDR_LE_DEV_RANDOM;
3528 }
3529}
3530