This source file includes the following definitions.
- hci_req_update_scan
- hci_update_background_scan
- eir_append_data
- eir_append_le16
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23 #include <asm/unaligned.h>
24
25 #define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock)
26 #define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock)
27
/* A batch of HCI commands being assembled for submission to the
 * controller.  Initialized with hci_req_init() and executed with
 * hci_req_run()/hci_req_run_skb(); commands are appended via
 * hci_req_add()/hci_req_add_ev().
 */
struct hci_request {
	struct hci_dev *hdev;		/* controller this request targets */
	struct sk_buff_head cmd_q;	/* queued command skbs, sent in order */

	/* If anything goes wrong while building the request (e.g. a
	 * command could not be queued), the error value is stored here
	 * so hci_req_run() can refuse to execute a broken request.
	 */
	int err;
};
37
38 void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
39 void hci_req_purge(struct hci_request *req);
40 bool hci_req_status_pend(struct hci_dev *hdev);
41 int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
42 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete);
43 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
44 const void *param);
45 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
46 const void *param, u8 event);
47 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
48 hci_req_complete_t *req_complete,
49 hci_req_complete_skb_t *req_complete_skb);
50
51 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
52 unsigned long opt),
53 unsigned long opt, u32 timeout, u8 *hci_status);
54 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
55 unsigned long opt),
56 unsigned long opt, u32 timeout, u8 *hci_status);
57 void hci_req_sync_cancel(struct hci_dev *hdev, int err);
58
59 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
60 const void *param);
61
62 int __hci_req_hci_power_on(struct hci_dev *hdev);
63
64 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable);
65 void __hci_req_update_name(struct hci_request *req);
66 void __hci_req_update_eir(struct hci_request *req);
67
68 void hci_req_add_le_scan_disable(struct hci_request *req);
69 void hci_req_add_le_passive_scan(struct hci_request *req);
70
71 void hci_req_reenable_advertising(struct hci_dev *hdev);
72 void __hci_req_enable_advertising(struct hci_request *req);
73 void __hci_req_disable_advertising(struct hci_request *req);
74 void __hci_req_update_adv_data(struct hci_request *req, u8 instance);
75 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance);
76 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance);
77
78 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
79 bool force);
80 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
81 struct hci_request *req, u8 instance,
82 bool force);
83
84 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance);
85 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance);
86 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance);
87 void __hci_req_clear_ext_adv_sets(struct hci_request *req);
88 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
89 bool use_rpa, struct adv_info *adv_instance,
90 u8 *own_addr_type, bdaddr_t *rand_addr);
91
92 void __hci_req_update_class(struct hci_request *req);
93
94
95 bool hci_req_stop_discovery(struct hci_request *req);
96
/* Schedule an asynchronous scan-mode update for @hdev.
 *
 * Only queues the hdev->scan_update work item on the request workqueue;
 * the actual HCI traffic happens later in the work handler (see
 * __hci_req_update_scan()), so this is safe to call from contexts that
 * must not block on req_lock.
 */
static inline void hci_req_update_scan(struct hci_dev *hdev)
{
	queue_work(hdev->req_workqueue, &hdev->scan_update);
}
101
102 void __hci_req_update_scan(struct hci_request *req);
103
104 int hci_update_random_address(struct hci_request *req, bool require_privacy,
105 bool use_rpa, u8 *own_addr_type);
106
107 int hci_abort_conn(struct hci_conn *conn, u8 reason);
108 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
109 u8 reason);
110
/* Schedule an asynchronous background (LE passive) scan update for @hdev.
 *
 * Only queues the hdev->bg_scan_update work item on the request
 * workqueue; the update itself runs later in the work handler, so the
 * caller never blocks here.
 */
static inline void hci_update_background_scan(struct hci_dev *hdev)
{
	queue_work(hdev->req_workqueue, &hdev->bg_scan_update);
}
115
116 void hci_request_setup(struct hci_dev *hdev);
117 void hci_request_cancel_all(struct hci_dev *hdev);
118
119 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len);
120
121 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type,
122 u8 *data, u8 data_len)
123 {
124 eir[eir_len++] = sizeof(type) + data_len;
125 eir[eir_len++] = type;
126 memcpy(&eir[eir_len], data, data_len);
127 eir_len += data_len;
128
129 return eir_len;
130 }
131
132 static inline u16 eir_append_le16(u8 *eir, u16 eir_len, u8 type, u16 data)
133 {
134 eir[eir_len++] = sizeof(type) + sizeof(data);
135 eir[eir_len++] = type;
136 put_unaligned_le16(data, &eir[eir_len]);
137 eir_len += sizeof(data);
138
139 return eir_len;
140 }