/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;
*/

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"
#include "hci_request.h"

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

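	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */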
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

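	/* Do not allow empty requests */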
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->hci.req_complete = complete;
	bt_cb(skb)->hci.req_complete_skb = complete_skb;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	bt_cb(skb)->hci.opcode = opcode;

	return skb;
}

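/* Queue a command to an asynchronous HCI request */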
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

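	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */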
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_start = true;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 white_list_entries = 0;

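	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */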
	list_for_each_entry(b, &hdev->le_white_list, list) {
		struct hci_cp_le_del_from_white_list cp;

		if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
					      &b->bdaddr, b->bdaddr_type) ||
		    hci_pend_le_action_lookup(&hdev->pend_le_reports,
					      &b->bdaddr, b->bdaddr_type)) {
			white_list_entries++;
			continue;
		}

		cp.bdaddr_type = b->bdaddr_type;
		bacpy(&cp.bdaddr, &b->bdaddr);

		hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
			    sizeof(cp), &cp);
	}

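	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * white list.
	 */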
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
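			/* Select filter policy to accept all advertising */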
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
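			/* White list can not be used with RPAs */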
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

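	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */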
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
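			/* Select filter policy to accept all advertising */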
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
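			/* White list can not be used with RPAs */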
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

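	/* Select filter policy to use the white list */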
	return 0x01;
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

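	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */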
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

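	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */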
	filter_policy = update_white_list(req);

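	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */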
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

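	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */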
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

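	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */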
	if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

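	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */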
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
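			/* The non-resolvable private address is generated
			 * randomly and shall not resemble the public address.
			 */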
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

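			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */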
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

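	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */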
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

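	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */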
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void __hci_update_page_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
		return;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

void hci_update_page_scan(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_update_page_scan(&req);
	hci_req_run(&req, NULL);
}

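/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we start
 * the background scanning, otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */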
void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

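	/* No point in doing scanning if LE support hasn't been enabled */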
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

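	/* If discovery is active don't interfere with it */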
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

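	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */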
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
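		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */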
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
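		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */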
		if (hci_lookup_le_connect(hdev))
			return;

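		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */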
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

static void update_background_scan_complete(struct hci_dev *hdev, u8 status,
					    u16 opcode)
{
	if (status)
		BT_DBG("HCI request failed to update background scanning: "
		       "status 0x%2.2x", status);
}

void hci_update_background_scan(struct hci_dev *hdev)
{
	int err;
	struct hci_request req;

	hci_req_init(&req, hdev);

	__hci_update_background_scan(&req);

	err = hci_req_run(&req, update_background_scan_complete);
	if (err && err != -ENODATA)
		BT_ERR("Failed to run HCI request: err %d", err);
}

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

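			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * allowed values that's suitable for SCO.
			 */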
			rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		BT_ERR("Failed to run HCI request: err %d", err);
		return err;
	}

	return 0;
}