/* Bluetooth HCI connection handling. */

#include <linux/export.h>
#include <linux/debugfs.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "hci_request.h"
#include "smp.h"
#include "a2mp.h"

struct sco_param {
	u16 pkt_type;
	u16 max_latency;
	u8  retrans_effort;
};

static const struct sco_param esco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a, 0x01 }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007, 0x01 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007, 0x01 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff, 0x01 }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff, 0x01 }, /* D0 */
};

static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff, 0xff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff, 0xff }, /* D0 */
};

static const struct sco_param esco_param_msbc[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d, 0x02 }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008, 0x02 }, /* T1 */
};
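
/* Undo the bookkeeping done for a pending hci_connect_le_scan() attempt:
 * depending on the auto_connect policy, the connection parameters are
 * either deleted or moved back to the pend_le_conns/pend_le_reports lists
 * and the background scan is updated accordingly.
 */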
static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
{
	struct hci_conn_params *params;
	struct hci_dev *hdev = conn->hdev;
	struct smp_irk *irk;
	bdaddr_t *bdaddr;
	u8 bdaddr_type;

	bdaddr = &conn->dst;
	bdaddr_type = conn->dst_type;

	/* The pending connection lists are keyed by identity address, so
	 * resolve the destination through the IRK if one is known.
	 */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
					   bdaddr_type);
	if (!params || !params->explicit_connect)
		return;

	/* The connection attempt was still in the scanning phase.  If the
	 * params are not associated with any other autoconnect action,
	 * remove them completely; otherwise just clear explicit_connect so
	 * they are no longer marked as waiting for a connection.
	 */
	params->explicit_connect = false;

	list_del_init(&params->action);

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_EXPLICIT:
		hci_conn_params_del(hdev, bdaddr, bdaddr_type);
		/* return instead of break to avoid touching the freed params */
		return;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		break;
	default:
		break;
	}

	hci_update_background_scan(hdev);
}
113
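/* Final teardown of a connection object: flush its channels, remove it from
 * the connection hash, notify the driver, remove the sysfs and debugfs
 * entries and drop the references taken in hci_conn_add().
 */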
114static void hci_conn_cleanup(struct hci_conn *conn)
115{
116 struct hci_dev *hdev = conn->hdev;
117
118 if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
119 hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
120
121 hci_chan_list_flush(conn);
122
123 hci_conn_hash_del(hdev, conn);
124
125 if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
126 switch (conn->setting & SCO_AIRMODE_MASK) {
127 case SCO_AIRMODE_CVSD:
128 case SCO_AIRMODE_TRANSP:
129 if (hdev->notify)
130 hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
131 break;
132 }
133 } else {
134 if (hdev->notify)
135 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
136 }
137
138 hci_conn_del_sysfs(conn);
139
140 debugfs_remove_recursive(conn->debugfs);
141
142 hci_dev_put(hdev);
143
144 hci_conn_put(conn);
145}
146
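/* Work callback scheduled from hci_connect_le_scan_remove(); it re-checks
 * that the connection is still in the hash before cleaning it up, then drops
 * the temporary hdev/conn references taken when the work was scheduled.
 */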
147static void le_scan_cleanup(struct work_struct *work)
148{
149 struct hci_conn *conn = container_of(work, struct hci_conn,
150 le_scan_cleanup);
151 struct hci_dev *hdev = conn->hdev;
152 struct hci_conn *c = NULL;
153
154 BT_DBG("%s hcon %p", hdev->name, conn);
155
156 hci_dev_lock(hdev);

	/* Check that the hci_conn is still around */
159 rcu_read_lock();
160 list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
161 if (c == conn)
162 break;
163 }
164 rcu_read_unlock();
165
166 if (c == conn) {
167 hci_connect_le_scan_cleanup(conn);
168 hci_conn_cleanup(conn);
169 }
170
171 hci_dev_unlock(hdev);
172 hci_dev_put(hdev);
173 hci_conn_put(conn);
174}
175
static void hci_connect_le_scan_remove(struct hci_conn *conn)
{
	BT_DBG("%s hcon %p", conn->hdev->name, conn);

	/* We can't call hci_conn_del/hci_conn_cleanup here since that
	 * could deadlock with another hci_conn_del() that is holding
	 * hci_dev_lock while waiting for our disc_work to finish.
	 * Instead, grab temporary extra references to the hci_dev and
	 * hci_conn and perform the necessary cleanup in a separate work
	 * callback.
	 */
	hci_dev_hold(conn->hdev);
	hci_conn_get(conn);

	/* Even though we hold a reference to the hdev, many other
	 * things might get cleaned up meanwhile, including the hdev's
	 * own workqueue, so we can't use that for scheduling.
	 */
	schedule_work(&conn->le_scan_cleanup);
}
197
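/* Kick off an outgoing BR/EDR ACL connection by sending HCI Create
 * Connection, seeding the page scan parameters and clock offset from the
 * inquiry cache when a recent entry is available.
 */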
198static void hci_acl_create_connection(struct hci_conn *conn)
199{
200 struct hci_dev *hdev = conn->hdev;
201 struct inquiry_entry *ie;
202 struct hci_cp_create_conn cp;
203
204 BT_DBG("hcon %p", conn);

	/* If an inquiry is in progress, cancel it first and defer the
	 * connection attempt: the Inquiry Cancel / Inquiry Complete
	 * handlers will retry it via hci_conn_check_pending().
	 */
	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
		/* Queue the connection in BT_CONNECT2 state until the
		 * inquiry has been cancelled.
		 */
218 conn->state = BT_CONNECT2;
219 hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
220 return;
221 }
222
223 conn->state = BT_CONNECT;
224 conn->out = true;
225 conn->role = HCI_ROLE_MASTER;
226
227 conn->attempt++;
228
229 conn->link_policy = hdev->link_policy;
230
231 memset(&cp, 0, sizeof(cp));
232 bacpy(&cp.bdaddr, &conn->dst);
233 cp.pscan_rep_mode = 0x02;
234
235 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
236 if (ie) {
237 if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
238 cp.pscan_rep_mode = ie->data.pscan_rep_mode;
239 cp.pscan_mode = ie->data.pscan_mode;
240 cp.clock_offset = ie->data.clock_offset |
241 cpu_to_le16(0x8000);
242 }
243
244 memcpy(conn->dev_class, ie->data.dev_class, 3);
245 }
246
247 cp.pkt_type = cpu_to_le16(conn->pkt_type);
248 if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
249 cp.role_switch = 0x01;
250 else
251 cp.role_switch = 0x00;
252
253 hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
254}
255
256int hci_disconnect(struct hci_conn *conn, __u8 reason)
257{
258 BT_DBG("hcon %p", conn);

	/* When we are the central of an established ACL connection, read
	 * the current clock offset before disconnecting so that an
	 * up-to-date value is cached for a possible future reconnection.
	 */
265 if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
266 (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
267 struct hci_dev *hdev = conn->hdev;
268 struct hci_cp_read_clock_offset clkoff_cp;
269
270 clkoff_cp.handle = cpu_to_le16(conn->handle);
271 hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
272 &clkoff_cp);
273 }
274
275 return hci_abort_conn(conn, reason);
276}
277
278static void hci_add_sco(struct hci_conn *conn, __u16 handle)
279{
280 struct hci_dev *hdev = conn->hdev;
281 struct hci_cp_add_sco cp;
282
283 BT_DBG("hcon %p", conn);
284
285 conn->state = BT_CONNECT;
286 conn->out = true;
287
288 conn->attempt++;
289
290 cp.handle = cpu_to_le16(handle);
291 cp.pkt_type = cpu_to_le16(conn->pkt_type);
292
293 hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
294}
295
296static bool find_next_esco_param(struct hci_conn *conn,
297 const struct sco_param *esco_param, int size)
298{
299 for (; conn->attempt <= size; conn->attempt++) {
300 if (lmp_esco_2m_capable(conn->link) ||
301 (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
302 break;
303 BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
304 conn, conn->attempt);
305 }
306
307 return conn->attempt <= size;
308}
309
310bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
311{
312 struct hci_dev *hdev = conn->hdev;
313 struct hci_cp_setup_sync_conn cp;
314 const struct sco_param *param;
315
316 BT_DBG("hcon %p", conn);
317
318 conn->state = BT_CONNECT;
319 conn->out = true;
320
321 conn->attempt++;
322
323 cp.handle = cpu_to_le16(handle);
324
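	/* 0x00001f40 is 8000 bytes per second, i.e. a 64 kbit/s voice
	 * channel in each direction.
	 */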
325 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
326 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
327 cp.voice_setting = cpu_to_le16(conn->setting);
328
329 switch (conn->setting & SCO_AIRMODE_MASK) {
330 case SCO_AIRMODE_TRANSP:
331 if (!find_next_esco_param(conn, esco_param_msbc,
332 ARRAY_SIZE(esco_param_msbc)))
333 return false;
334 param = &esco_param_msbc[conn->attempt - 1];
335 break;
336 case SCO_AIRMODE_CVSD:
337 if (lmp_esco_capable(conn->link)) {
338 if (!find_next_esco_param(conn, esco_param_cvsd,
339 ARRAY_SIZE(esco_param_cvsd)))
340 return false;
341 param = &esco_param_cvsd[conn->attempt - 1];
342 } else {
343 if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
344 return false;
345 param = &sco_param_cvsd[conn->attempt - 1];
346 }
347 break;
348 default:
349 return false;
350 }
351
352 cp.retrans_effort = param->retrans_effort;
353 cp.pkt_type = __cpu_to_le16(param->pkt_type);
354 cp.max_latency = __cpu_to_le16(param->max_latency);
355
356 if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
357 return false;
358
359 return true;
360}
361
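/* Issue an LE Connection Update for the given connection and, if stored
 * connection parameters exist for the peer, update those as well.  Returns
 * 0x01 when matching stored parameters were found and updated, 0x00
 * otherwise.
 */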
362u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
363 u16 to_multiplier)
364{
365 struct hci_dev *hdev = conn->hdev;
366 struct hci_conn_params *params;
367 struct hci_cp_le_conn_update cp;
368
369 hci_dev_lock(hdev);
370
371 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
372 if (params) {
373 params->conn_min_interval = min;
374 params->conn_max_interval = max;
375 params->conn_latency = latency;
376 params->supervision_timeout = to_multiplier;
377 }
378
379 hci_dev_unlock(hdev);
380
381 memset(&cp, 0, sizeof(cp));
382 cp.handle = cpu_to_le16(conn->handle);
383 cp.conn_interval_min = cpu_to_le16(min);
384 cp.conn_interval_max = cpu_to_le16(max);
385 cp.conn_latency = cpu_to_le16(latency);
386 cp.supervision_timeout = cpu_to_le16(to_multiplier);
387 cp.min_ce_len = cpu_to_le16(0x0000);
388 cp.max_ce_len = cpu_to_le16(0x0000);
389
390 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
391
392 if (params)
393 return 0x01;
394
395 return 0x00;
396}
397
398void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
399 __u8 ltk[16], __u8 key_size)
400{
401 struct hci_dev *hdev = conn->hdev;
402 struct hci_cp_le_start_enc cp;
403
404 BT_DBG("hcon %p", conn);
405
406 memset(&cp, 0, sizeof(cp));
407
408 cp.handle = cpu_to_le16(conn->handle);
409 cp.rand = rand;
410 cp.ediv = ediv;
411 memcpy(cp.ltk, ltk, key_size);
412
413 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
414}
415
416
417void hci_sco_setup(struct hci_conn *conn, __u8 status)
418{
419 struct hci_conn *sco = conn->link;
420
421 if (!sco)
422 return;
423
424 BT_DBG("hcon %p", conn);
425
426 if (!status) {
427 if (lmp_esco_capable(conn->hdev))
428 hci_setup_sync(sco, conn->handle);
429 else
430 hci_add_sco(sco, conn->handle);
431 } else {
432 hci_connect_cfm(sco, status);
433 hci_conn_del(sco);
434 }
435}
436
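/* Disconnect timer: runs once the last hci_conn_drop() reference is gone
 * and the disconnect timeout expires.  LE connections that are still in the
 * scanning stage are unwound via hci_connect_le_scan_remove(); everything
 * else is aborted through hci_abort_conn().
 */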
437static void hci_conn_timeout(struct work_struct *work)
438{
439 struct hci_conn *conn = container_of(work, struct hci_conn,
440 disc_work.work);
441 int refcnt = atomic_read(&conn->refcnt);
442
443 BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
444
445 WARN_ON(refcnt < 0);

	/* Only tear the connection down once nothing holds a reference to
	 * it anymore; the WARN_ON above catches refcounting bugs without
	 * crashing.
	 */
	if (refcnt > 0)
		return;

	/* LE connections in scanning state need special handling */
458 if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
459 test_bit(HCI_CONN_SCANNING, &conn->flags)) {
460 hci_connect_le_scan_remove(conn);
461 return;
462 }
463
464 hci_abort_conn(conn, hci_proto_disconn_ind(conn));
465}
466
/* Enter sniff mode */
468static void hci_conn_idle(struct work_struct *work)
469{
470 struct hci_conn *conn = container_of(work, struct hci_conn,
471 idle_work.work);
472 struct hci_dev *hdev = conn->hdev;
473
474 BT_DBG("hcon %p mode %d", conn, conn->mode);
475
476 if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
477 return;
478
479 if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
480 return;
481
482 if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
483 struct hci_cp_sniff_subrate cp;
484 cp.handle = cpu_to_le16(conn->handle);
485 cp.max_latency = cpu_to_le16(0);
486 cp.min_remote_timeout = cpu_to_le16(0);
487 cp.min_local_timeout = cpu_to_le16(0);
488 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
489 }
490
491 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
492 struct hci_cp_sniff_mode cp;
493 cp.handle = cpu_to_le16(conn->handle);
494 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
495 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
496 cp.attempt = cpu_to_le16(4);
497 cp.timeout = cpu_to_le16(1);
498 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
499 }
500}
501
502static void hci_conn_auto_accept(struct work_struct *work)
503{
504 struct hci_conn *conn = container_of(work, struct hci_conn,
505 auto_accept_work.work);
506
507 hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
508 &conn->dst);
509}
510
511static void le_disable_advertising(struct hci_dev *hdev)
512{
513 if (ext_adv_capable(hdev)) {
514 struct hci_cp_le_set_ext_adv_enable cp;
515
516 cp.enable = 0x00;
517 cp.num_of_sets = 0x00;
518
519 hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
520 &cp);
521 } else {
522 u8 enable = 0x00;
523 hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
524 &enable);
525 }
526}
527
528static void le_conn_timeout(struct work_struct *work)
529{
530 struct hci_conn *conn = container_of(work, struct hci_conn,
531 le_conn_timeout.work);
532 struct hci_dev *hdev = conn->hdev;
533
534 BT_DBG("");

	/* If we were waiting for a connection as peripheral (directed
	 * advertising), the timeout means nobody connected to us: disable
	 * advertising and report the failure.  As central, simply abort
	 * the connection attempt.
	 */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* Disable LE Advertising */
		le_disable_advertising(hdev);
544 hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
545 return;
546 }
547
548 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
549}
550
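/* Allocate and initialise a new hci_conn object for the given link type,
 * add it to the connection hash and take a reference on the hci_dev.  The
 * refcount starts at zero, so callers take the first hci_conn_hold()
 * themselves.
 */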
551struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
552 u8 role)
553{
554 struct hci_conn *conn;
555
556 BT_DBG("%s dst %pMR", hdev->name, dst);
557
558 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
559 if (!conn)
560 return NULL;
561
562 bacpy(&conn->dst, dst);
563 bacpy(&conn->src, &hdev->bdaddr);
564 conn->hdev = hdev;
565 conn->type = type;
566 conn->role = role;
567 conn->mode = HCI_CM_ACTIVE;
568 conn->state = BT_OPEN;
569 conn->auth_type = HCI_AT_GENERAL_BONDING;
570 conn->io_capability = hdev->io_capability;
571 conn->remote_auth = 0xff;
572 conn->key_type = 0xff;
573 conn->rssi = HCI_RSSI_INVALID;
574 conn->tx_power = HCI_TX_POWER_INVALID;
575 conn->max_tx_power = HCI_TX_POWER_INVALID;
576
577 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
578 conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Set default authenticated payload timeout */
	conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
582
583 if (conn->role == HCI_ROLE_MASTER)
584 conn->out = true;
585
586 switch (type) {
587 case ACL_LINK:
588 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
589 break;
590 case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
593 break;
594 case SCO_LINK:
595 if (lmp_esco_capable(hdev))
596 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
597 (hdev->esco_type & EDR_ESCO_MASK);
598 else
599 conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
600 break;
601 case ESCO_LINK:
602 conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
603 break;
604 }
605
606 skb_queue_head_init(&conn->data_q);
607
608 INIT_LIST_HEAD(&conn->chan_list);
609
610 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
611 INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
612 INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
613 INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
614 INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);
615
616 atomic_set(&conn->refcnt, 0);
617
618 hci_dev_hold(hdev);
619
620 hci_conn_hash_add(hdev, conn);

	/* SCO and eSCO links are only announced to the driver once their
	 * synchronous setup has completed, so only send HCI_NOTIFY_CONN_ADD
	 * for the other link types here.
	 */
	if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
627 if (hdev->notify)
628 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
629 }
630
631 hci_conn_init_sysfs(conn);
632
633 return conn;
634}
635
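/* Tear down a connection object: cancel its pending work, give unacked
 * packet credits back to the device, detach any linked SCO/ACL peer and
 * finally run hci_conn_cleanup().
 */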
636int hci_conn_del(struct hci_conn *conn)
637{
638 struct hci_dev *hdev = conn->hdev;
639
640 BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
641
642 cancel_delayed_work_sync(&conn->disc_work);
643 cancel_delayed_work_sync(&conn->auto_accept_work);
644 cancel_delayed_work_sync(&conn->idle_work);
645
646 if (conn->type == ACL_LINK) {
647 struct hci_conn *sco = conn->link;
648 if (sco)
649 sco->link = NULL;

		/* Return unacknowledged ACL packet credits to the device */
		hdev->acl_cnt += conn->sent;
653 } else if (conn->type == LE_LINK) {
654 cancel_delayed_work(&conn->le_conn_timeout);
655
656 if (hdev->le_pkts)
657 hdev->le_cnt += conn->sent;
658 else
659 hdev->acl_cnt += conn->sent;
660 } else {
661 struct hci_conn *acl = conn->link;
662 if (acl) {
663 acl->link = NULL;
664 hci_conn_drop(acl);
665 }
666 }
667
668 if (conn->amp_mgr)
669 amp_mgr_put(conn->amp_mgr);
670
671 skb_queue_purge(&conn->data_q);

	/* Remove the connection from the list and clean up its remaining
	 * state.  This is a separate helper because some paths (e.g. the
	 * LE scan cleanup work) need only this part of the teardown.
	 */
	hci_conn_cleanup(conn);
679
680 return 0;
681}
682
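/* Pick the local adapter to use for a connection to @dst.  If @src is a
 * real address, the adapter owning that (identity) address is chosen;
 * otherwise the first suitable powered-up primary adapter that is not @dst
 * itself.  The returned hci_dev is held and must be released with
 * hci_dev_put().
 */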
683struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
684{
685 int use_src = bacmp(src, BDADDR_ANY);
686 struct hci_dev *hdev = NULL, *d;
687
688 BT_DBG("%pMR -> %pMR", src, dst);
689
690 read_lock(&hci_dev_list_lock);
691
692 list_for_each_entry(d, &hci_dev_list, list) {
693 if (!test_bit(HCI_UP, &d->flags) ||
694 hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
695 d->dev_type != HCI_PRIMARY)
696 continue;
697
		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
704 bdaddr_t id_addr;
705 u8 id_addr_type;
706
707 if (src_type == BDADDR_BREDR) {
708 if (!lmp_bredr_capable(d))
709 continue;
710 bacpy(&id_addr, &d->bdaddr);
711 id_addr_type = BDADDR_BREDR;
712 } else {
713 if (!lmp_le_capable(d))
714 continue;
715
716 hci_copy_identity_address(d, &id_addr,
717 &id_addr_type);
718
719
720 if (id_addr_type == ADDR_LE_DEV_PUBLIC)
721 id_addr_type = BDADDR_LE_PUBLIC;
722 else
723 id_addr_type = BDADDR_LE_RANDOM;
724 }
725
726 if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
727 hdev = d; break;
728 }
729 } else {
730 if (bacmp(&d->bdaddr, dst)) {
731 hdev = d; break;
732 }
733 }
734 }
735
736 if (hdev)
737 hdev = hci_dev_hold(hdev);
738
739 read_unlock(&hci_dev_list_lock);
740 return hdev;
741}
742EXPORT_SYMBOL(hci_get_route);
743
/* This function requires the caller holds hdev->lock */
745void hci_le_conn_failed(struct hci_conn *conn, u8 status)
746{
747 struct hci_dev *hdev = conn->hdev;
748 struct hci_conn_params *params;
749
750 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
751 conn->dst_type);
752 if (params && params->conn) {
753 hci_conn_drop(params->conn);
754 hci_conn_put(params->conn);
755 params->conn = NULL;
756 }
757
758 conn->state = BT_CLOSED;

	/* If the status indicates successful cancellation of the connection
	 * attempt (i.e. Unknown Connection Identifier), don't notify
	 * userspace of a failure, since we'll go back to trying to connect.
	 * The only exception is explicit connect requests, where a timeout
	 * followed by cancel does count as a real failure.
	 */
766 if (status != HCI_ERROR_UNKNOWN_CONN_ID ||
767 (params && params->explicit_connect))
768 mgmt_connect_failed(hdev, &conn->dst, conn->type,
769 conn->dst_type, status);
770
771 hci_connect_cfm(conn, status);
772
773 hci_conn_del(conn);

	/* The suspend path waits for all connections to be gone; if this
	 * was the last one, wake up the suspend wait queue so the pending
	 * disconnect task can complete.
	 */
780 if (list_empty(&hdev->conn_hash.list) &&
781 test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
782 wake_up(&hdev->suspend_wait_q);
783 }

	/* Since we may have temporarily stopped the background scanning in
	 * favor of connection establishment, we should restart it.
	 */
	hci_update_background_scan(hdev);

	/* Re-enable advertising in case this was a failed connection
	 * attempt as a peripheral.
	 */
	hci_req_reenable_advertising(hdev);
794}
795
796static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
797{
798 struct hci_conn *conn;
799
800 hci_dev_lock(hdev);
801
802 conn = hci_lookup_le_connect(hdev);
803
804 if (hdev->adv_instance_cnt)
805 hci_req_resume_adv_instances(hdev);
806
807 if (!status) {
808 hci_connect_le_scan_cleanup(conn);
809 goto done;
810 }
811
812 bt_dev_err(hdev, "request failed to create LE connection: "
813 "status 0x%2.2x", status);
814
815 if (!conn)
816 goto done;
817
818 hci_le_conn_failed(conn, status);
819
820done:
821 hci_dev_unlock(hdev);
822}
823
824static bool conn_use_rpa(struct hci_conn *conn)
825{
826 struct hci_dev *hdev = conn->hdev;
827
828 return hci_dev_test_flag(hdev, HCI_PRIVACY);
829}
830
831static void set_ext_conn_params(struct hci_conn *conn,
832 struct hci_cp_le_ext_conn_param *p)
833{
834 struct hci_dev *hdev = conn->hdev;
835
836 memset(p, 0, sizeof(*p));
837
838 p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
839 p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
840 p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
841 p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
842 p->conn_latency = cpu_to_le16(conn->le_conn_latency);
843 p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
844 p->min_ce_len = cpu_to_le16(0x0000);
845 p->max_ce_len = cpu_to_le16(0x0000);
846}
847
848static void hci_req_add_le_create_conn(struct hci_request *req,
849 struct hci_conn *conn,
850 bdaddr_t *direct_rpa)
851{
852 struct hci_dev *hdev = conn->hdev;
853 u8 own_addr_type;
854
	/* If a direct RPA was given, the connection must be made from that
	 * address, so program it as the controller's random address first
	 * (unless it is already set).
	 */
	if (direct_rpa) {
		if (bacmp(&req->hdev->random_addr, direct_rpa))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    direct_rpa);

		/* direct address is always RPA */
		own_addr_type = ADDR_LE_DEV_RANDOM;
	} else {
		/* Update random address, but set require_privacy to false
		 * so that we never connect with a non-resolvable address.
		 */
		if (hci_update_random_address(req, false, conn_use_rpa(conn),
					      &own_addr_type))
			return;
	}
873
874 if (use_ext_conn(hdev)) {
875 struct hci_cp_le_ext_create_conn *cp;
876 struct hci_cp_le_ext_conn_param *p;
877 u8 data[sizeof(*cp) + sizeof(*p) * 3];
878 u32 plen;
879
880 cp = (void *) data;
881 p = (void *) cp->data;
882
883 memset(cp, 0, sizeof(*cp));
884
885 bacpy(&cp->peer_addr, &conn->dst);
886 cp->peer_addr_type = conn->dst_type;
887 cp->own_addr_type = own_addr_type;
888
889 plen = sizeof(*cp);
890
891 if (scan_1m(hdev)) {
892 cp->phys |= LE_SCAN_PHY_1M;
893 set_ext_conn_params(conn, p);
894
895 p++;
896 plen += sizeof(*p);
897 }
898
899 if (scan_2m(hdev)) {
900 cp->phys |= LE_SCAN_PHY_2M;
901 set_ext_conn_params(conn, p);
902
903 p++;
904 plen += sizeof(*p);
905 }
906
907 if (scan_coded(hdev)) {
908 cp->phys |= LE_SCAN_PHY_CODED;
909 set_ext_conn_params(conn, p);
910
911 plen += sizeof(*p);
912 }
913
914 hci_req_add(req, HCI_OP_LE_EXT_CREATE_CONN, plen, data);
915
916 } else {
917 struct hci_cp_le_create_conn cp;
918
919 memset(&cp, 0, sizeof(cp));
920
921 cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
922 cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);
923
924 bacpy(&cp.peer_addr, &conn->dst);
925 cp.peer_addr_type = conn->dst_type;
926 cp.own_address_type = own_addr_type;
927 cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
928 cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
929 cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
930 cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
931 cp.min_ce_len = cpu_to_le16(0x0000);
932 cp.max_ce_len = cpu_to_le16(0x0000);
933
934 hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
935 }
936
937 conn->state = BT_CONNECT;
938 clear_bit(HCI_CONN_SCANNING, &conn->flags);
939}
940
941static void hci_req_directed_advertising(struct hci_request *req,
942 struct hci_conn *conn)
943{
944 struct hci_dev *hdev = req->hdev;
945 u8 own_addr_type;
946 u8 enable;
947
948 if (ext_adv_capable(hdev)) {
949 struct hci_cp_le_set_ext_adv_params cp;
950 bdaddr_t random_addr;
951
952
953
954
955 if (hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
956 &own_addr_type, &random_addr) < 0)
957 return;
958
959 memset(&cp, 0, sizeof(cp));
960
961 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
962 cp.own_addr_type = own_addr_type;
963 cp.channel_map = hdev->le_adv_channel_map;
964 cp.tx_power = HCI_TX_POWER_INVALID;
965 cp.primary_phy = HCI_ADV_PHY_1M;
966 cp.secondary_phy = HCI_ADV_PHY_1M;
967 cp.handle = 0;
968 cp.own_addr_type = own_addr_type;
969 cp.peer_addr_type = conn->dst_type;
970 bacpy(&cp.peer_addr, &conn->dst);
971
972
973
974
975
976
977
978
979
980 __hci_req_remove_ext_adv_instance(req, cp.handle);
981
982 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
983
984 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
985 bacmp(&random_addr, BDADDR_ANY) &&
986 bacmp(&random_addr, &hdev->random_addr)) {
987 struct hci_cp_le_set_adv_set_rand_addr cp;
988
989 memset(&cp, 0, sizeof(cp));
990
991 cp.handle = 0;
992 bacpy(&cp.bdaddr, &random_addr);
993
994 hci_req_add(req,
995 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
996 sizeof(cp), &cp);
997 }
998
999 __hci_req_enable_ext_advertising(req, 0x00);
1000 } else {
1001 struct hci_cp_le_set_adv_param cp;
1002
		/* Clear the HCI_LE_ADV bit temporarily so that the
		 * hci_update_random_address call below knows that it's safe
		 * to go ahead and change the random address at this point
		 * in the request.
		 */
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
1009
1010
1011
1012
1013 if (hci_update_random_address(req, false, conn_use_rpa(conn),
1014 &own_addr_type) < 0)
1015 return;
1016
1017 memset(&cp, 0, sizeof(cp));
1018
1019
1020
1021
1022
1023 cp.min_interval = cpu_to_le16(0x0020);
1024 cp.max_interval = cpu_to_le16(0x0020);
1025
1026 cp.type = LE_ADV_DIRECT_IND;
1027 cp.own_address_type = own_addr_type;
1028 cp.direct_addr_type = conn->dst_type;
1029 bacpy(&cp.direct_addr, &conn->dst);
1030 cp.channel_map = hdev->le_adv_channel_map;
1031
1032 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1033
1034 enable = 0x01;
1035 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
1036 &enable);
1037 }
1038
1039 conn->state = BT_CONNECT;
1040}
1041
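/* Build and run an HCI request that either starts directed advertising
 * (peripheral role) or issues LE Create Connection (central role) towards
 * @dst.  Advertising instances are paused and scanning is stopped as needed
 * while the attempt is in flight.
 */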
1042struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
1043 u8 dst_type, u8 sec_level, u16 conn_timeout,
1044 u8 role, bdaddr_t *direct_rpa)
1045{
1046 struct hci_conn_params *params;
1047 struct hci_conn *conn;
1048 struct smp_irk *irk;
1049 struct hci_request req;
1050 int err;
1051
	/* Forwarded to hci_req_add_le_scan_disable() below: the scan is
	 * being disabled on behalf of an LE connection that may rely on
	 * resolvable private addresses.
	 */
	bool rpa_le_conn = true;

	/* Make sure LE is actually enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1059 if (lmp_le_capable(hdev))
1060 return ERR_PTR(-ECONNREFUSED);
1061
1062 return ERR_PTR(-EOPNOTSUPP);
1063 }

	/* The controller can handle only one LE connection attempt at a
	 * time, so return -EBUSY if another one is already in progress.
	 */
	if (hci_lookup_le_connect(hdev))
1069 return ERR_PTR(-EBUSY);

	/* If there's already a connection object but it's not in scanning
	 * state it must already be established, in which case we can't do
	 * anything else except report a failure to connect.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1077 if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
1078 return ERR_PTR(-EBUSY);
1079 }

	/* When given an identity address with an existing identity
	 * resolving key, the connection needs to be made to the peer's
	 * resolvable random address instead.  The RPA is stored in the
	 * connection object so that failures are handled against the
	 * address actually used on the air; it is resolved back to the
	 * identity address later.
	 */
	irk = hci_find_irk_by_addr(hdev, dst, dst_type);
1091 if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
1092 dst = &irk->rpa;
1093 dst_type = ADDR_LE_DEV_RANDOM;
1094 }
1095
1096 if (conn) {
1097 bacpy(&conn->dst, dst);
1098 } else {
1099 conn = hci_conn_add(hdev, LE_LINK, dst, role);
1100 if (!conn)
1101 return ERR_PTR(-ENOMEM);
1102 hci_conn_hold(conn);
1103 conn->pending_sec_level = sec_level;
1104 }
1105
1106 conn->dst_type = dst_type;
1107 conn->sec_level = BT_SECURITY_LOW;
1108 conn->conn_timeout = conn_timeout;
1109
1110 hci_req_init(&req, hdev);

	/* Disable advertising if we're active. For central role
	 * connections most controllers will refuse to connect if
	 * advertising is enabled, and for peripheral role connections we
	 * anyway have to disable it in order to start directed
	 * advertising.  Any registered advertising instances are paused
	 * here and resumed once the connection attempt finishes.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_pause_adv_instances(&req);
1121

	/* If requested to connect as peripheral use directed advertising */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* If we're active scanning most controllers are unable
		 * to initiate advertising.  Simply reject the attempt.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
		    hdev->le_scan_type == LE_SCAN_ACTIVE) {
1129 hci_req_purge(&req);
1130 hci_conn_del(conn);
1131 return ERR_PTR(-EBUSY);
1132 }
1133
1134 hci_req_directed_advertising(&req, conn);
1135 goto create_conn;
1136 }
1137
1138 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
1139 if (params) {
1140 conn->le_conn_min_interval = params->conn_min_interval;
1141 conn->le_conn_max_interval = params->conn_max_interval;
1142 conn->le_conn_latency = params->conn_latency;
1143 conn->le_supv_timeout = params->supervision_timeout;
1144 } else {
1145 conn->le_conn_min_interval = hdev->le_conn_min_interval;
1146 conn->le_conn_max_interval = hdev->le_conn_max_interval;
1147 conn->le_conn_latency = hdev->le_conn_latency;
1148 conn->le_supv_timeout = hdev->le_supv_timeout;
1149 }

	/* If the controller is scanning, stop it first since some
	 * controllers cannot scan and create a connection at the same
	 * time.  Mark the scan as interrupted so the scan-disable
	 * completion handler restores the right discovery state.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
		hci_req_add_le_scan_disable(&req, rpa_le_conn);
1159 hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
1160 }
1161
1162 hci_req_add_le_create_conn(&req, conn, direct_rpa);
1163
1164create_conn:
1165 err = hci_req_run(&req, create_le_conn_complete);
1166 if (err) {
1167 hci_conn_del(conn);
1168
1169 if (hdev->adv_instance_cnt)
1170 hci_req_resume_adv_instances(hdev);
1171
1172 return ERR_PTR(err);
1173 }
1174
1175 return conn;
1176}
1177
1178static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1179{
1180 struct hci_conn *conn;
1181
1182 conn = hci_conn_hash_lookup_le(hdev, addr, type);
1183 if (!conn)
1184 return false;
1185
1186 if (conn->state != BT_CONNECTED)
1187 return false;
1188
1189 return true;
1190}
1191
/* This function requires the caller holds hdev->lock */
1193static int hci_explicit_conn_params_set(struct hci_dev *hdev,
1194 bdaddr_t *addr, u8 addr_type)
1195{
1196 struct hci_conn_params *params;
1197
1198 if (is_connected(hdev, addr, addr_type))
1199 return -EISCONN;
1200
1201 params = hci_conn_params_lookup(hdev, addr, addr_type);
1202 if (!params) {
1203 params = hci_conn_params_add(hdev, addr, addr_type);
1204 if (!params)
1205 return -ENOMEM;

		/* If we created new params, mark them to be deleted in
		 * hci_connect_le_scan_cleanup.  It's a different case than
		 * "set auto_connect".
		 */
		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
1212 }

	/* We're trying to connect, so make sure the params are on the
	 * pend_le_conns list.
	 */
	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
		list_del_init(&params->action);
		list_add(&params->action, &hdev->pend_le_conns);
1220 }
1221
1222 params->explicit_connect = true;
1223
1224 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
1225 params->auto_connect);
1226
1227 return 0;
1228}
1229
/* This function requires the caller holds hdev->lock */
1231struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1232 u8 dst_type, u8 sec_level,
1233 u16 conn_timeout,
1234 enum conn_reasons conn_reason)
1235{
1236 struct hci_conn *conn;
1237
1238
1239 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1240 if (lmp_le_capable(hdev))
1241 return ERR_PTR(-ECONNREFUSED);
1242
1243 return ERR_PTR(-EOPNOTSUPP);
1244 }
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255 conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1256 if (conn) {
1257 if (conn->pending_sec_level < sec_level)
1258 conn->pending_sec_level = sec_level;
1259 goto done;
1260 }
1261
1262 BT_DBG("requesting refresh of dst_addr");
1263
1264 conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
1265 if (!conn)
1266 return ERR_PTR(-ENOMEM);
1267
1268 if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
1269 hci_conn_del(conn);
1270 return ERR_PTR(-EBUSY);
1271 }
1272
1273 conn->state = BT_CONNECT;
1274 set_bit(HCI_CONN_SCANNING, &conn->flags);
1275 conn->dst_type = dst_type;
1276 conn->sec_level = BT_SECURITY_LOW;
1277 conn->pending_sec_level = sec_level;
1278 conn->conn_timeout = conn_timeout;
1279 conn->conn_reason = conn_reason;
1280
1281 hci_update_background_scan(hdev);
1282
1283done:
1284 hci_conn_hold(conn);
1285 return conn;
1286}
1287
1288struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1289 u8 sec_level, u8 auth_type,
1290 enum conn_reasons conn_reason)
1291{
1292 struct hci_conn *acl;
1293
1294 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1295 if (lmp_bredr_capable(hdev))
1296 return ERR_PTR(-ECONNREFUSED);
1297
1298 return ERR_PTR(-EOPNOTSUPP);
1299 }
1300
1301 acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
1302 if (!acl) {
1303 acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
1304 if (!acl)
1305 return ERR_PTR(-ENOMEM);
1306 }
1307
1308 hci_conn_hold(acl);
1309
1310 acl->conn_reason = conn_reason;
1311 if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
1312 acl->sec_level = BT_SECURITY_LOW;
1313 acl->pending_sec_level = sec_level;
1314 acl->auth_type = auth_type;
1315 hci_acl_create_connection(acl);
1316 }
1317
1318 return acl;
1319}
1320
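/* Set up a SCO/eSCO connection to @dst: make sure an ACL link exists first,
 * create and link the SCO hci_conn to it, and start the synchronous
 * connection setup once the ACL link is active.
 */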
1321struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
1322 __u16 setting)
1323{
1324 struct hci_conn *acl;
1325 struct hci_conn *sco;
1326
1327 acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
1328 CONN_REASON_SCO_CONNECT);
1329 if (IS_ERR(acl))
1330 return acl;
1331
1332 sco = hci_conn_hash_lookup_ba(hdev, type, dst);
1333 if (!sco) {
1334 sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
1335 if (!sco) {
1336 hci_conn_drop(acl);
1337 return ERR_PTR(-ENOMEM);
1338 }
1339 }
1340
1341 acl->link = sco;
1342 sco->link = acl;
1343
1344 hci_conn_hold(sco);
1345
1346 sco->setting = setting;
1347
1348 if (acl->state == BT_CONNECTED &&
1349 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
1350 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
1351 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
1352
		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
1356 return sco;
1357 }
1358
1359 hci_sco_setup(acl, 0x00);
1360 }
1361
1362 return sco;
1363}
1364
/* Check link security requirement */
1366int hci_conn_check_link_mode(struct hci_conn *conn)
1367{
1368 BT_DBG("hcon %p", conn);

	/* In Secure Connections Only mode the link must use Secure
	 * Connections: AES-CCM encryption with a P-256 authenticated
	 * combination key.
	 */
	if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
1375 if (!hci_conn_sc_enabled(conn) ||
1376 !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
1377 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
1378 return 0;
1379 }

	/* Security level 4 (FIPS) additionally requires that the link is
	 * encrypted with AES-CCM; legacy E0 encryption is not sufficient.
	 */
1391 if (conn->sec_level == BT_SECURITY_FIPS &&
1392 !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
1393 bt_dev_err(conn->hdev,
1394 "Invalid security: Missing AES-CCM usage");
1395 return 0;
1396 }
1397
1398 if (hci_conn_ssp_enabled(conn) &&
1399 !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1400 return 0;
1401
1402 return 1;
1403}
1404
/* Authenticate remote device */
1406static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
1407{
1408 BT_DBG("hcon %p", conn);
1409
1410 if (conn->pending_sec_level > sec_level)
1411 sec_level = conn->pending_sec_level;
1412
1413 if (sec_level > conn->sec_level)
1414 conn->pending_sec_level = sec_level;
1415 else if (test_bit(HCI_CONN_AUTH, &conn->flags))
1416 return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);
1420
1421 conn->auth_type = auth_type;
1422
1423 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1424 struct hci_cp_auth_requested cp;
1425
1426 cp.handle = cpu_to_le16(conn->handle);
1427 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
1428 sizeof(cp), &cp);

		/* If we're already encrypted set the REAUTH_PEND flag,
		 * otherwise set the ENCRYPT_PEND.
		 */
		if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1434 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1435 else
1436 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1437 }
1438
1439 return 0;
1440}
1441
1442
1443static void hci_conn_encrypt(struct hci_conn *conn)
1444{
1445 BT_DBG("hcon %p", conn);
1446
1447 if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1448 struct hci_cp_set_conn_encrypt cp;
1449 cp.handle = cpu_to_le16(conn->handle);
1450 cp.encrypt = 0x01;
1451 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1452 &cp);
1453 }
1454}
1455
1456
1457int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
1458 bool initiator)
1459{
1460 BT_DBG("hcon %p", conn);
1461
1462 if (conn->type == LE_LINK)
1463 return smp_conn_security(conn, sec_level);

	/* For SDP we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the
	 * link key.
	 */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	/* An authenticated FIPS approved combination key has sufficient
	 * security for security level 4.
	 */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
	    sec_level == BT_SECURITY_FIPS)
		goto encrypt;

	/* An authenticated combination key has sufficient security for
	 * security level 3.
	 */
	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
	    sec_level == BT_SECURITY_HIGH)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	 * security levels 1 and 2.
	 */
	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key always has sufficient security for security
	 * levels 1 or 2.  High security additionally requires that the
	 * key was generated with a 16 digit PIN (pre-2.1 devices).
	 */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
	     conn->pin_length == 16))
		goto encrypt;

1507auth:
1508 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
1509 return 0;
1510
1511 if (initiator)
1512 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1513
1514 if (!hci_conn_auth(conn, sec_level, auth_type))
1515 return 0;
1516
1517encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
		/* Ensure that the encryption key size has been read,
		 * otherwise stall the upper layer responses.
		 */
		if (!conn->enc_key_size)
			return 0;

		/* Nothing else needed, all requirements are met */
		return 1;
1527 }
1528
1529 hci_conn_encrypt(conn);
1530 return 0;
1531}
1532EXPORT_SYMBOL(hci_conn_security);
1533
1534
1535int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
1536{
1537 BT_DBG("hcon %p", conn);

	/* Accept if non-secure or higher security level is required */
	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
		return 1;

	/* Accept if secure or higher security level is already present */
	if (conn->sec_level == BT_SECURITY_HIGH ||
	    conn->sec_level == BT_SECURITY_FIPS)
		return 1;

	/* Reject not secure link */
	return 0;
1550}
1551EXPORT_SYMBOL(hci_conn_check_secure);
1552
1553
1554int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1555{
1556 BT_DBG("hcon %p", conn);
1557
1558 if (role == conn->role)
1559 return 1;
1560
1561 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1562 struct hci_cp_switch_role cp;
1563 bacpy(&cp.bdaddr, &conn->dst);
1564 cp.role = role;
1565 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
1566 }
1567
1568 return 0;
1569}
1570EXPORT_SYMBOL(hci_conn_switch_role);
1571
1572
1573void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
1574{
1575 struct hci_dev *hdev = conn->hdev;
1576
1577 BT_DBG("hcon %p mode %d", conn, conn->mode);
1578
1579 if (conn->mode != HCI_CM_SNIFF)
1580 goto timer;
1581
1582 if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
1583 goto timer;
1584
1585 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
1586 struct hci_cp_exit_sniff_mode cp;
1587 cp.handle = cpu_to_le16(conn->handle);
1588 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
1589 }
1590
1591timer:
1592 if (hdev->idle_timeout > 0)
1593 queue_delayed_work(hdev->workqueue, &conn->idle_work,
1594 msecs_to_jiffies(hdev->idle_timeout));
1595}
1596
1597
1598void hci_conn_hash_flush(struct hci_dev *hdev)
1599{
1600 struct hci_conn_hash *h = &hdev->conn_hash;
1601 struct hci_conn *c, *n;
1602
1603 BT_DBG("hdev %s", hdev->name);
1604
1605 list_for_each_entry_safe(c, n, &h->list, list) {
1606 c->state = BT_CLOSED;
1607
1608 hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
1609 hci_conn_del(c);
1610 }
1611}
1612
1613
1614void hci_conn_check_pending(struct hci_dev *hdev)
1615{
1616 struct hci_conn *conn;
1617
1618 BT_DBG("hdev %s", hdev->name);
1619
1620 hci_dev_lock(hdev);
1621
1622 conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
1623 if (conn)
1624 hci_acl_create_connection(conn);
1625
1626 hci_dev_unlock(hdev);
1627}
1628
1629static u32 get_link_mode(struct hci_conn *conn)
1630{
1631 u32 link_mode = 0;
1632
1633 if (conn->role == HCI_ROLE_MASTER)
1634 link_mode |= HCI_LM_MASTER;
1635
1636 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1637 link_mode |= HCI_LM_ENCRYPT;
1638
1639 if (test_bit(HCI_CONN_AUTH, &conn->flags))
1640 link_mode |= HCI_LM_AUTH;
1641
1642 if (test_bit(HCI_CONN_SECURE, &conn->flags))
1643 link_mode |= HCI_LM_SECURE;
1644
1645 if (test_bit(HCI_CONN_FIPS, &conn->flags))
1646 link_mode |= HCI_LM_FIPS;
1647
1648 return link_mode;
1649}
1650
1651int hci_get_conn_list(void __user *arg)
1652{
1653 struct hci_conn *c;
1654 struct hci_conn_list_req req, *cl;
1655 struct hci_conn_info *ci;
1656 struct hci_dev *hdev;
1657 int n = 0, size, err;
1658
1659 if (copy_from_user(&req, arg, sizeof(req)))
1660 return -EFAULT;
1661
1662 if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1663 return -EINVAL;
1664
1665 size = sizeof(req) + req.conn_num * sizeof(*ci);
1666
1667 cl = kmalloc(size, GFP_KERNEL);
1668 if (!cl)
1669 return -ENOMEM;
1670
1671 hdev = hci_dev_get(req.dev_id);
1672 if (!hdev) {
1673 kfree(cl);
1674 return -ENODEV;
1675 }
1676
1677 ci = cl->conn_info;
1678
1679 hci_dev_lock(hdev);
1680 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1681 bacpy(&(ci + n)->bdaddr, &c->dst);
1682 (ci + n)->handle = c->handle;
1683 (ci + n)->type = c->type;
1684 (ci + n)->out = c->out;
1685 (ci + n)->state = c->state;
1686 (ci + n)->link_mode = get_link_mode(c);
1687 if (++n >= req.conn_num)
1688 break;
1689 }
1690 hci_dev_unlock(hdev);
1691
1692 cl->dev_id = hdev->id;
1693 cl->conn_num = n;
1694 size = sizeof(req) + n * sizeof(*ci);
1695
1696 hci_dev_put(hdev);
1697
1698 err = copy_to_user(arg, cl, size);
1699 kfree(cl);
1700
1701 return err ? -EFAULT : 0;
1702}
1703
1704int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1705{
1706 struct hci_conn_info_req req;
1707 struct hci_conn_info ci;
1708 struct hci_conn *conn;
1709 char __user *ptr = arg + sizeof(req);
1710
1711 if (copy_from_user(&req, arg, sizeof(req)))
1712 return -EFAULT;
1713
1714 hci_dev_lock(hdev);
1715 conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1716 if (conn) {
1717 bacpy(&ci.bdaddr, &conn->dst);
1718 ci.handle = conn->handle;
1719 ci.type = conn->type;
1720 ci.out = conn->out;
1721 ci.state = conn->state;
1722 ci.link_mode = get_link_mode(conn);
1723 }
1724 hci_dev_unlock(hdev);
1725
1726 if (!conn)
1727 return -ENOENT;
1728
1729 return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
1730}
1731
1732int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1733{
1734 struct hci_auth_info_req req;
1735 struct hci_conn *conn;
1736
1737 if (copy_from_user(&req, arg, sizeof(req)))
1738 return -EFAULT;
1739
1740 hci_dev_lock(hdev);
1741 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1742 if (conn)
1743 req.type = conn->auth_type;
1744 hci_dev_unlock(hdev);
1745
1746 if (!conn)
1747 return -ENOENT;
1748
1749 return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
1750}
1751
1752struct hci_chan *hci_chan_create(struct hci_conn *conn)
1753{
1754 struct hci_dev *hdev = conn->hdev;
1755 struct hci_chan *chan;
1756
1757 BT_DBG("%s hcon %p", hdev->name, conn);
1758
1759 if (test_bit(HCI_CONN_DROP, &conn->flags)) {
1760 BT_DBG("Refusing to create new hci_chan");
1761 return NULL;
1762 }
1763
1764 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1765 if (!chan)
1766 return NULL;
1767
1768 chan->conn = hci_conn_get(conn);
1769 skb_queue_head_init(&chan->data_q);
1770 chan->state = BT_CONNECTED;
1771
1772 list_add_rcu(&chan->list, &conn->chan_list);
1773
1774 return chan;
1775}
1776
1777void hci_chan_del(struct hci_chan *chan)
1778{
1779 struct hci_conn *conn = chan->conn;
1780 struct hci_dev *hdev = conn->hdev;
1781
1782 BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
1783
1784 list_del_rcu(&chan->list);
1785
1786 synchronize_rcu();

	/* Prevent new hci_chan's from being created for this hci_conn */
	set_bit(HCI_CONN_DROP, &conn->flags);
1790
1791 hci_conn_put(conn);
1792
1793 skb_queue_purge(&chan->data_q);
1794 kfree(chan);
1795}
1796
1797void hci_chan_list_flush(struct hci_conn *conn)
1798{
1799 struct hci_chan *chan, *n;
1800
1801 BT_DBG("hcon %p", conn);
1802
1803 list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1804 hci_chan_del(chan);
1805}
1806
1807static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1808 __u16 handle)
1809{
1810 struct hci_chan *hchan;
1811
1812 list_for_each_entry(hchan, &hcon->chan_list, list) {
1813 if (hchan->handle == handle)
1814 return hchan;
1815 }
1816
1817 return NULL;
1818}
1819
1820struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1821{
1822 struct hci_conn_hash *h = &hdev->conn_hash;
1823 struct hci_conn *hcon;
1824 struct hci_chan *hchan = NULL;
1825
1826 rcu_read_lock();
1827
1828 list_for_each_entry_rcu(hcon, &h->list, list) {
1829 hchan = __hci_chan_lookup_handle(hcon, handle);
1830 if (hchan)
1831 break;
1832 }
1833
1834 rcu_read_unlock();
1835
1836 return hchan;
1837}
1838
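/* Translate the connection's negotiated packet types (and, for LE, the
 * current TX/RX PHYs) into a BT_PHY_* bitmask.  Note that for BR/EDR the
 * EDR bits in pkt_type are "disallowed packet type" bits, hence the
 * negated tests below.
 */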
1839u32 hci_conn_get_phy(struct hci_conn *conn)
1840{
1841 u32 phys = 0;
1842
1843
1844
1845
1846
1847 switch (conn->type) {
1848 case SCO_LINK:
1849
1850
1851
1852 phys |= BT_PHY_BR_1M_1SLOT;
1853
1854 break;
1855
1856 case ACL_LINK:
1857
1858
1859
1860 phys |= BT_PHY_BR_1M_1SLOT;
1861
1862 if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
1863 phys |= BT_PHY_BR_1M_3SLOT;
1864
1865 if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
1866 phys |= BT_PHY_BR_1M_5SLOT;
1867
1868
1869
1870
1871 if (!(conn->pkt_type & HCI_2DH1))
1872 phys |= BT_PHY_EDR_2M_1SLOT;
1873
1874 if (!(conn->pkt_type & HCI_2DH3))
1875 phys |= BT_PHY_EDR_2M_3SLOT;
1876
1877 if (!(conn->pkt_type & HCI_2DH5))
1878 phys |= BT_PHY_EDR_2M_5SLOT;
1879
1880
1881
1882
1883 if (!(conn->pkt_type & HCI_3DH1))
1884 phys |= BT_PHY_EDR_3M_1SLOT;
1885
1886 if (!(conn->pkt_type & HCI_3DH3))
1887 phys |= BT_PHY_EDR_3M_3SLOT;
1888
1889 if (!(conn->pkt_type & HCI_3DH5))
1890 phys |= BT_PHY_EDR_3M_5SLOT;
1891
1892 break;
1893
1894 case ESCO_LINK:
1895
1896 phys |= BT_PHY_BR_1M_1SLOT;
1897
1898 if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
1899 phys |= BT_PHY_BR_1M_3SLOT;
1900
1901
1902 if (!(conn->pkt_type & ESCO_2EV3))
1903 phys |= BT_PHY_EDR_2M_1SLOT;
1904
1905 if (!(conn->pkt_type & ESCO_2EV5))
1906 phys |= BT_PHY_EDR_2M_3SLOT;
1907
1908
1909 if (!(conn->pkt_type & ESCO_3EV3))
1910 phys |= BT_PHY_EDR_3M_1SLOT;
1911
1912 if (!(conn->pkt_type & ESCO_3EV5))
1913 phys |= BT_PHY_EDR_3M_3SLOT;
1914
1915 break;
1916
1917 case LE_LINK:
1918 if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
1919 phys |= BT_PHY_LE_1M_TX;
1920
1921 if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
1922 phys |= BT_PHY_LE_1M_RX;
1923
1924 if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
1925 phys |= BT_PHY_LE_2M_TX;
1926
1927 if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
1928 phys |= BT_PHY_LE_2M_RX;
1929
1930 if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
1931 phys |= BT_PHY_LE_CODED_TX;
1932
1933 if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
1934 phys |= BT_PHY_LE_CODED_RX;
1935
1936 break;
1937 }
1938
1939 return phys;
1940}
1941