1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
/* Serialize synchronous HCI request processing on @hdev.
 * The macro argument is parenthesized so any lvalue expression
 * (e.g. conn->hdev) can be passed safely.
 */
#define hci_req_sync_lock(hdev)   mutex_lock(&(hdev)->req_lock)
#define hci_req_sync_unlock(hdev) mutex_unlock(&(hdev)->req_lock)
25
/* A batch of HCI commands to be sent to a controller as one unit. */
struct hci_request {
	/* Controller this request targets. */
	struct hci_dev *hdev;
	/* Queue of command skbs accumulated before the request is run. */
	struct sk_buff_head cmd_q;

	/* Sticky error recorded while building the request; presumably set
	 * by the hci_req_add*() helpers on allocation failure and checked
	 * when the request is run — TODO confirm against hci_request.c.
	 */
	int err;
};
35
/* Prepare @req for queueing commands against @hdev. */
void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
/* Submit the queued commands with a plain completion callback. */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
/* Submit the queued commands with an skb-aware completion callback. */
int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete);
/* Queue one HCI command (@opcode, @plen bytes of @param) on @req. */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param);
/* As hci_req_add(), but completion is tied to @event rather than the
 * default command-complete/status event — NOTE(review): confirm semantics
 * in the implementation.
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event);
/* Look up the completion handlers to invoke for @opcode/@status; results
 * are returned through @req_complete and @req_complete_skb.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb);

/* Build (via the @req callback) and run a request synchronously, waiting
 * up to @timeout. hci_req_sync() takes req_lock itself; presumably the
 * double-underscore variant expects the caller to hold it (see
 * hci_req_sync_lock) — TODO confirm.
 */
int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status);
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status);
/* Abort a pending synchronous request with error @err. */
void hci_req_sync_cancel(struct hci_dev *hdev, int err);

/* Allocate an skb holding a single HCI command packet. */
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param);
57
/* Power-on initialization sequence for @hdev. */
int __hci_req_hci_power_on(struct hci_dev *hdev);

/* NOTE(review): the __hci_req_* helpers below append commands to an
 * existing request; the non-prefixed counterparts appear to build and run
 * a request themselves — confirm against hci_request.c.
 */
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable);
void __hci_req_update_name(struct hci_request *req);
void __hci_req_update_eir(struct hci_request *req);

/* LE scanning control. */
void hci_req_add_le_scan_disable(struct hci_request *req);
void hci_req_add_le_passive_scan(struct hci_request *req);

/* Advertising control; @instance selects an advertising instance. */
void hci_req_reenable_advertising(struct hci_dev *hdev);
void __hci_req_enable_advertising(struct hci_request *req);
void __hci_req_disable_advertising(struct hci_request *req);
void __hci_req_update_adv_data(struct hci_request *req, u8 instance);
int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance);
void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance);

/* Advertising instance scheduling/removal; @force overrides checks that
 * would otherwise skip the operation — TODO confirm which checks.
 */
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force);
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
				u8 instance, bool force);

/* Queue a class-of-device update on @req. */
void __hci_req_update_class(struct hci_request *req);

/* Queue commands to stop discovery; returns whether anything was queued —
 * NOTE(review): inferred from the bool return, confirm.
 */
bool hci_req_stop_discovery(struct hci_request *req);
83
/* Schedule an asynchronous scan-state update: queues @hdev's scan_update
 * work item on its request workqueue. Safe to call from contexts that
 * cannot run the update synchronously.
 */
static inline void hci_req_update_scan(struct hci_dev *hdev)
{
	queue_work(hdev->req_workqueue, &hdev->scan_update);
}
88
/* Queue a scan-parameter update on an existing request. */
void __hci_req_update_scan(struct hci_request *req);

/* Select/refresh the device's own address, returning the address type
 * through @own_addr_type. @use_rpa requests a resolvable private address;
 * @require_privacy forbids falling back to the identity address —
 * NOTE(review): inferred from parameter names, confirm.
 */
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      bool use_rpa, u8 *own_addr_type);

/* Tear down @conn with disconnect @reason; __ variant queues onto @req. */
int hci_abort_conn(struct hci_conn *conn, u8 reason);
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason);
97
/* Schedule an asynchronous background-scan update: queues @hdev's
 * bg_scan_update work item on its request workqueue.
 */
static inline void hci_update_background_scan(struct hci_dev *hdev)
{
	queue_work(hdev->req_workqueue, &hdev->bg_scan_update);
}
102
/* Initialize the request machinery for @hdev (called at registration)
 * and cancel all outstanding request work (called at teardown) —
 * NOTE(review): call sites inferred from names, confirm.
 */
void hci_request_setup(struct hci_dev *hdev);
void hci_request_cancel_all(struct hci_dev *hdev);
105