Searched refs:ar (Results 1 - 200 of 231) sorted by relevance

/linux-4.1.27/net/rxrpc/
Makefile
7 ar-accept.o \
8 ar-ack.o \
9 ar-call.o \
10 ar-connection.o \
11 ar-connevent.o \
12 ar-error.o \
13 ar-input.o \
14 ar-key.o \
15 ar-local.o \
16 ar-output.o \
17 ar-peer.o \
18 ar-recvmsg.o \
19 ar-security.o \
20 ar-skbuff.o \
21 ar-transport.o
23 af-rxrpc-$(CONFIG_PROC_FS) += ar-proc.o
ar-skbuff.c
0 /* ar-skbuff.c: socket buffer destruction handling
17 #include "ar-internal.h"
ar-internal.h
440 * ar-accept.c
447 * ar-ack.c
461 * ar-call.c
485 * ar-connection.c
506 * ar-connevent.c
513 * ar-error.c
519 * ar-input.c
528 * ar-local.c
537 * ar-key.c
548 * ar-output.c
558 * ar-peer.c
566 * ar-proc.c
573 * ar-recvmsg.c
579 * ar-security.c
591 * ar-skbuff.c
596 * ar-transport.c
sysctl.c
15 #include "ar-internal.h"
ar-error.c
23 #include "ar-internal.h"
ar-proc.c
15 #include "ar-internal.h"
ar-security.c
20 #include "ar-internal.h"
ar-connevent.c
23 #include "ar-internal.h"
ar-local.c
21 #include "ar-internal.h"
ar-peer.c
24 #include "ar-internal.h"
ar-transport.c
18 #include "ar-internal.h"
ar-recvmsg.c
17 #include "ar-internal.h"
ar-accept.c
24 #include "ar-internal.h"
/linux-4.1.27/drivers/net/wireless/ath/ath6kl/
core.c
52 void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb) ath6kl_core_tx_complete() argument
54 ath6kl_htc_tx_complete(ar, skb); ath6kl_core_tx_complete()
58 void ath6kl_core_rx_complete(struct ath6kl *ar, struct sk_buff *skb, u8 pipe) ath6kl_core_rx_complete() argument
60 ath6kl_htc_rx_complete(ar, skb, pipe); ath6kl_core_rx_complete()
64 int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type) ath6kl_core_init() argument
72 ath6kl_htc_mbox_attach(ar); ath6kl_core_init()
75 ath6kl_htc_pipe_attach(ar); ath6kl_core_init()
82 ar->ath6kl_wq = create_singlethread_workqueue("ath6kl"); ath6kl_core_init()
83 if (!ar->ath6kl_wq) ath6kl_core_init()
86 ret = ath6kl_bmi_init(ar); ath6kl_core_init()
95 ret = ath6kl_hif_power_on(ar); ath6kl_core_init()
99 ret = ath6kl_bmi_get_target_info(ar, &targ_info); ath6kl_core_init()
103 ar->version.target_ver = le32_to_cpu(targ_info.version); ath6kl_core_init()
104 ar->target_type = le32_to_cpu(targ_info.type); ath6kl_core_init()
105 ar->wiphy->hw_version = le32_to_cpu(targ_info.version); ath6kl_core_init()
107 ret = ath6kl_init_hw_params(ar); ath6kl_core_init()
111 ar->htc_target = ath6kl_htc_create(ar); ath6kl_core_init()
113 if (!ar->htc_target) { ath6kl_core_init()
118 ar->testmode = testmode; ath6kl_core_init()
120 ret = ath6kl_init_fetch_firmwares(ar); ath6kl_core_init()
130 if (ar->target_type == TARGET_TYPE_AR6004 && ath6kl_core_init()
131 ar->fw_api <= 4) { ath6kl_core_init()
133 ar->fw_capabilities); ath6kl_core_init()
135 ar->fw_capabilities); ath6kl_core_init()
137 if (ar->hw.id == AR6004_HW_1_3_VERSION) ath6kl_core_init()
139 ar->fw_capabilities); ath6kl_core_init()
143 set_bit(WMI_ENABLED, &ar->flag); ath6kl_core_init()
144 ar->wmi = ath6kl_wmi_init(ar); ath6kl_core_init()
145 if (!ar->wmi) { ath6kl_core_init()
151 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi); ath6kl_core_init()
154 ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */ ath6kl_core_init()
155 ar->ac_stream_pri_map[WMM_AC_BE] = 1; ath6kl_core_init()
156 ar->ac_stream_pri_map[WMM_AC_VI] = 2; ath6kl_core_init()
157 ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */ ath6kl_core_init()
160 ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS); ath6kl_core_init()
162 ath6kl_cookie_init(ar); ath6kl_core_init()
164 ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER | ath6kl_core_init()
170 ar->suspend_mode = suspend_mode; ath6kl_core_init()
172 ar->suspend_mode = 0; ath6kl_core_init()
177 ar->wow_suspend_mode = wow_mode; ath6kl_core_init()
179 ar->wow_suspend_mode = 0; ath6kl_core_init()
182 ar->conf_flags |= ATH6KL_CONF_UART_DEBUG; ath6kl_core_init()
184 set_bit(FIRST_BOOT, &ar->flag); ath6kl_core_init()
186 ath6kl_debug_init(ar); ath6kl_core_init()
188 ret = ath6kl_init_hw_start(ar); ath6kl_core_init()
195 ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep); ath6kl_core_init()
196 ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]); ath6kl_core_init()
198 ret = ath6kl_cfg80211_init(ar); ath6kl_core_init()
202 ret = ath6kl_debug_init_fs(ar); ath6kl_core_init()
204 wiphy_unregister(ar->wiphy); ath6kl_core_init()
208 for (i = 0; i < ar->vif_max; i++) ath6kl_core_init()
209 ar->avail_idx_map |= BIT(i); ath6kl_core_init()
214 wdev = ath6kl_interface_add(ar, "wlan%d", NET_NAME_ENUM, ath6kl_core_init()
222 wiphy_unregister(ar->wiphy); ath6kl_core_init()
226 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n", ath6kl_core_init()
227 __func__, wdev->netdev->name, wdev->netdev, ar); ath6kl_core_init()
229 ar->fw_recovery.enable = !!recovery_enable; ath6kl_core_init()
230 if (!ar->fw_recovery.enable) ath6kl_core_init()
235 ar->fw_capabilities)) ath6kl_core_init()
236 ar->fw_recovery.hb_poll = heart_beat_poll; ath6kl_core_init()
238 ath6kl_recovery_init(ar); ath6kl_core_init()
243 ath6kl_debug_cleanup(ar); ath6kl_core_init()
244 ath6kl_htc_flush_rx_buf(ar->htc_target); ath6kl_core_init()
245 ath6kl_cleanup_amsdu_rxbufs(ar); ath6kl_core_init()
246 ath6kl_wmi_shutdown(ar->wmi); ath6kl_core_init()
247 clear_bit(WMI_ENABLED, &ar->flag); ath6kl_core_init()
248 ar->wmi = NULL; ath6kl_core_init()
250 ath6kl_htc_cleanup(ar->htc_target); ath6kl_core_init()
252 ath6kl_hif_power_off(ar); ath6kl_core_init()
254 ath6kl_bmi_cleanup(ar); ath6kl_core_init()
256 destroy_workqueue(ar->ath6kl_wq); ath6kl_core_init()
264 struct ath6kl *ar; ath6kl_core_create() local
267 ar = ath6kl_cfg80211_create(); ath6kl_core_create()
268 if (!ar) ath6kl_core_create()
271 ar->p2p = !!ath6kl_p2p; ath6kl_core_create()
272 ar->dev = dev; ath6kl_core_create()
274 ar->vif_max = 1; ath6kl_core_create()
276 ar->max_norm_iface = 1; ath6kl_core_create()
278 spin_lock_init(&ar->lock); ath6kl_core_create()
279 spin_lock_init(&ar->mcastpsq_lock); ath6kl_core_create()
280 spin_lock_init(&ar->list_lock); ath6kl_core_create()
282 init_waitqueue_head(&ar->event_wq); ath6kl_core_create()
283 sema_init(&ar->sem, 1); ath6kl_core_create()
285 INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue); ath6kl_core_create()
286 INIT_LIST_HEAD(&ar->vif_list); ath6kl_core_create()
288 clear_bit(WMI_ENABLED, &ar->flag); ath6kl_core_create()
289 clear_bit(SKIP_SCAN, &ar->flag); ath6kl_core_create()
290 clear_bit(DESTROY_IN_PROGRESS, &ar->flag); ath6kl_core_create()
292 ar->tx_pwr = 0; ath6kl_core_create()
293 ar->intra_bss = 1; ath6kl_core_create()
294 ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD; ath6kl_core_create()
296 ar->state = ATH6KL_STATE_OFF; ath6kl_core_create()
298 memset((u8 *)ar->sta_list, 0, ath6kl_core_create()
303 spin_lock_init(&ar->sta_list[ctr].psq_lock); ath6kl_core_create()
304 skb_queue_head_init(&ar->sta_list[ctr].psq); ath6kl_core_create()
305 skb_queue_head_init(&ar->sta_list[ctr].apsdq); ath6kl_core_create()
306 ar->sta_list[ctr].mgmt_psq_len = 0; ath6kl_core_create()
307 INIT_LIST_HEAD(&ar->sta_list[ctr].mgmt_psq); ath6kl_core_create()
308 ar->sta_list[ctr].aggr_conn = ath6kl_core_create()
310 if (!ar->sta_list[ctr].aggr_conn) { ath6kl_core_create()
312 ath6kl_core_destroy(ar); ath6kl_core_create()
317 skb_queue_head_init(&ar->mcastpsq); ath6kl_core_create()
319 memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3); ath6kl_core_create()
321 return ar; ath6kl_core_create()
325 void ath6kl_core_cleanup(struct ath6kl *ar) ath6kl_core_cleanup() argument
327 ath6kl_hif_power_off(ar); ath6kl_core_cleanup()
329 ath6kl_recovery_cleanup(ar); ath6kl_core_cleanup()
331 destroy_workqueue(ar->ath6kl_wq); ath6kl_core_cleanup()
333 if (ar->htc_target) ath6kl_core_cleanup()
334 ath6kl_htc_cleanup(ar->htc_target); ath6kl_core_cleanup()
336 ath6kl_cookie_cleanup(ar); ath6kl_core_cleanup()
338 ath6kl_cleanup_amsdu_rxbufs(ar); ath6kl_core_cleanup()
340 ath6kl_bmi_cleanup(ar); ath6kl_core_cleanup()
342 ath6kl_debug_cleanup(ar); ath6kl_core_cleanup()
344 kfree(ar->fw_board); ath6kl_core_cleanup()
345 kfree(ar->fw_otp); ath6kl_core_cleanup()
346 vfree(ar->fw); ath6kl_core_cleanup()
347 kfree(ar->fw_patch); ath6kl_core_cleanup()
348 kfree(ar->fw_testscript); ath6kl_core_cleanup()
350 ath6kl_cfg80211_cleanup(ar); ath6kl_core_cleanup()
354 void ath6kl_core_destroy(struct ath6kl *ar) ath6kl_core_destroy() argument
356 ath6kl_cfg80211_destroy(ar); ath6kl_core_destroy()
recovery.c
23 struct ath6kl *ar = container_of(work, struct ath6kl, ath6kl_recovery_work() local
26 ar->state = ATH6KL_STATE_RECOVERY; ath6kl_recovery_work()
28 del_timer_sync(&ar->fw_recovery.hb_timer); ath6kl_recovery_work()
30 ath6kl_init_hw_restart(ar); ath6kl_recovery_work()
32 ar->state = ATH6KL_STATE_ON; ath6kl_recovery_work()
33 clear_bit(WMI_CTRL_EP_FULL, &ar->flag); ath6kl_recovery_work()
35 ar->fw_recovery.err_reason = 0; ath6kl_recovery_work()
37 if (ar->fw_recovery.hb_poll) ath6kl_recovery_work()
38 mod_timer(&ar->fw_recovery.hb_timer, jiffies + ath6kl_recovery_work()
39 msecs_to_jiffies(ar->fw_recovery.hb_poll)); ath6kl_recovery_work()
42 void ath6kl_recovery_err_notify(struct ath6kl *ar, enum ath6kl_fw_err reason) ath6kl_recovery_err_notify() argument
44 if (!ar->fw_recovery.enable) ath6kl_recovery_err_notify()
50 set_bit(reason, &ar->fw_recovery.err_reason); ath6kl_recovery_err_notify()
52 if (!test_bit(RECOVERY_CLEANUP, &ar->flag) && ath6kl_recovery_err_notify()
53 ar->state != ATH6KL_STATE_RECOVERY) ath6kl_recovery_err_notify()
54 queue_work(ar->ath6kl_wq, &ar->fw_recovery.recovery_work); ath6kl_recovery_err_notify()
57 void ath6kl_recovery_hb_event(struct ath6kl *ar, u32 cookie) ath6kl_recovery_hb_event() argument
59 if (cookie == ar->fw_recovery.seq_num) ath6kl_recovery_hb_event()
60 ar->fw_recovery.hb_pending = false; ath6kl_recovery_hb_event()
65 struct ath6kl *ar = (struct ath6kl *) data; ath6kl_recovery_hb_timer() local
68 if (test_bit(RECOVERY_CLEANUP, &ar->flag) || ath6kl_recovery_hb_timer()
69 (ar->state == ATH6KL_STATE_RECOVERY)) ath6kl_recovery_hb_timer()
72 if (ar->fw_recovery.hb_pending) ath6kl_recovery_hb_timer()
73 ar->fw_recovery.hb_misscnt++; ath6kl_recovery_hb_timer()
75 ar->fw_recovery.hb_misscnt = 0; ath6kl_recovery_hb_timer()
77 if (ar->fw_recovery.hb_misscnt > ATH6KL_HB_RESP_MISS_THRES) { ath6kl_recovery_hb_timer()
78 ar->fw_recovery.hb_misscnt = 0; ath6kl_recovery_hb_timer()
79 ar->fw_recovery.seq_num = 0; ath6kl_recovery_hb_timer()
80 ar->fw_recovery.hb_pending = false; ath6kl_recovery_hb_timer()
81 ath6kl_recovery_err_notify(ar, ATH6KL_FW_HB_RESP_FAILURE); ath6kl_recovery_hb_timer()
85 ar->fw_recovery.seq_num++; ath6kl_recovery_hb_timer()
86 ar->fw_recovery.hb_pending = true; ath6kl_recovery_hb_timer()
88 err = ath6kl_wmi_get_challenge_resp_cmd(ar->wmi, ath6kl_recovery_hb_timer()
89 ar->fw_recovery.seq_num, 0); ath6kl_recovery_hb_timer()
94 mod_timer(&ar->fw_recovery.hb_timer, jiffies + ath6kl_recovery_hb_timer()
95 msecs_to_jiffies(ar->fw_recovery.hb_poll)); ath6kl_recovery_hb_timer()
98 void ath6kl_recovery_init(struct ath6kl *ar) ath6kl_recovery_init() argument
100 struct ath6kl_fw_recovery *recovery = &ar->fw_recovery; ath6kl_recovery_init()
102 clear_bit(RECOVERY_CLEANUP, &ar->flag); ath6kl_recovery_init()
106 ar->fw_recovery.hb_pending = false; ath6kl_recovery_init()
107 ar->fw_recovery.hb_timer.function = ath6kl_recovery_hb_timer; ath6kl_recovery_init()
108 ar->fw_recovery.hb_timer.data = (unsigned long) ar; ath6kl_recovery_init()
109 init_timer_deferrable(&ar->fw_recovery.hb_timer); ath6kl_recovery_init()
111 if (ar->fw_recovery.hb_poll) ath6kl_recovery_init()
112 mod_timer(&ar->fw_recovery.hb_timer, jiffies + ath6kl_recovery_init()
113 msecs_to_jiffies(ar->fw_recovery.hb_poll)); ath6kl_recovery_init()
116 void ath6kl_recovery_cleanup(struct ath6kl *ar) ath6kl_recovery_cleanup() argument
118 if (!ar->fw_recovery.enable) ath6kl_recovery_cleanup()
121 set_bit(RECOVERY_CLEANUP, &ar->flag); ath6kl_recovery_cleanup()
123 del_timer_sync(&ar->fw_recovery.hb_timer); ath6kl_recovery_cleanup()
124 cancel_work_sync(&ar->fw_recovery.recovery_work); ath6kl_recovery_cleanup()
127 void ath6kl_recovery_suspend(struct ath6kl *ar) ath6kl_recovery_suspend() argument
129 if (!ar->fw_recovery.enable) ath6kl_recovery_suspend()
132 ath6kl_recovery_cleanup(ar); ath6kl_recovery_suspend()
134 if (!ar->fw_recovery.err_reason) ath6kl_recovery_suspend()
138 ar->fw_recovery.err_reason = 0; ath6kl_recovery_suspend()
139 WARN_ON(ar->state != ATH6KL_STATE_ON); ath6kl_recovery_suspend()
140 ar->state = ATH6KL_STATE_RECOVERY; ath6kl_recovery_suspend()
141 ath6kl_init_hw_restart(ar); ath6kl_recovery_suspend()
142 ar->state = ATH6KL_STATE_ON; ath6kl_recovery_suspend()
145 void ath6kl_recovery_resume(struct ath6kl *ar) ath6kl_recovery_resume() argument
147 if (!ar->fw_recovery.enable) ath6kl_recovery_resume()
150 clear_bit(RECOVERY_CLEANUP, &ar->flag); ath6kl_recovery_resume()
152 if (!ar->fw_recovery.hb_poll) ath6kl_recovery_resume()
155 ar->fw_recovery.hb_pending = false; ath6kl_recovery_resume()
156 ar->fw_recovery.seq_num = 0; ath6kl_recovery_resume()
157 ar->fw_recovery.hb_misscnt = 0; ath6kl_recovery_resume()
158 mod_timer(&ar->fw_recovery.hb_timer, ath6kl_recovery_resume()
159 jiffies + msecs_to_jiffies(ar->fw_recovery.hb_poll)); ath6kl_recovery_resume()
hif-ops.h
24 static inline int hif_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf, hif_read_write_sync() argument
32 return ar->hif_ops->read_write_sync(ar, addr, buf, len, request); hif_read_write_sync()
35 static inline int hif_write_async(struct ath6kl *ar, u32 address, u8 *buffer, hif_write_async() argument
43 return ar->hif_ops->write_async(ar, address, buffer, length, hif_write_async()
46 static inline void ath6kl_hif_irq_enable(struct ath6kl *ar) ath6kl_hif_irq_enable() argument
50 return ar->hif_ops->irq_enable(ar); ath6kl_hif_irq_enable()
53 static inline void ath6kl_hif_irq_disable(struct ath6kl *ar) ath6kl_hif_irq_disable() argument
57 return ar->hif_ops->irq_disable(ar); ath6kl_hif_irq_disable()
60 static inline struct hif_scatter_req *hif_scatter_req_get(struct ath6kl *ar) hif_scatter_req_get() argument
62 return ar->hif_ops->scatter_req_get(ar); hif_scatter_req_get()
65 static inline void hif_scatter_req_add(struct ath6kl *ar, hif_scatter_req_add() argument
68 return ar->hif_ops->scatter_req_add(ar, s_req); hif_scatter_req_add()
71 static inline int ath6kl_hif_enable_scatter(struct ath6kl *ar) ath6kl_hif_enable_scatter() argument
73 return ar->hif_ops->enable_scatter(ar); ath6kl_hif_enable_scatter()
76 static inline int ath6kl_hif_scat_req_rw(struct ath6kl *ar, ath6kl_hif_scat_req_rw() argument
79 return ar->hif_ops->scat_req_rw(ar, scat_req); ath6kl_hif_scat_req_rw()
82 static inline void ath6kl_hif_cleanup_scatter(struct ath6kl *ar) ath6kl_hif_cleanup_scatter() argument
84 return ar->hif_ops->cleanup_scatter(ar); ath6kl_hif_cleanup_scatter()
87 static inline int ath6kl_hif_suspend(struct ath6kl *ar, ath6kl_hif_suspend() argument
92 return ar->hif_ops->suspend(ar, wow); ath6kl_hif_suspend()
99 static inline int ath6kl_hif_diag_read32(struct ath6kl *ar, u32 address, ath6kl_hif_diag_read32() argument
102 return ar->hif_ops->diag_read32(ar, address, value); ath6kl_hif_diag_read32()
109 static inline int ath6kl_hif_diag_write32(struct ath6kl *ar, u32 address, ath6kl_hif_diag_write32() argument
112 return ar->hif_ops->diag_write32(ar, address, value); ath6kl_hif_diag_write32()
115 static inline int ath6kl_hif_bmi_read(struct ath6kl *ar, u8 *buf, u32 len) ath6kl_hif_bmi_read() argument
117 return ar->hif_ops->bmi_read(ar, buf, len); ath6kl_hif_bmi_read()
120 static inline int ath6kl_hif_bmi_write(struct ath6kl *ar, u8 *buf, u32 len) ath6kl_hif_bmi_write() argument
122 return ar->hif_ops->bmi_write(ar, buf, len); ath6kl_hif_bmi_write()
125 static inline int ath6kl_hif_resume(struct ath6kl *ar) ath6kl_hif_resume() argument
129 return ar->hif_ops->resume(ar); ath6kl_hif_resume()
132 static inline int ath6kl_hif_power_on(struct ath6kl *ar) ath6kl_hif_power_on() argument
136 return ar->hif_ops->power_on(ar); ath6kl_hif_power_on()
139 static inline int ath6kl_hif_power_off(struct ath6kl *ar) ath6kl_hif_power_off() argument
143 return ar->hif_ops->power_off(ar); ath6kl_hif_power_off()
146 static inline void ath6kl_hif_stop(struct ath6kl *ar) ath6kl_hif_stop() argument
150 ar->hif_ops->stop(ar); ath6kl_hif_stop()
153 static inline int ath6kl_hif_pipe_send(struct ath6kl *ar, ath6kl_hif_pipe_send() argument
159 return ar->hif_ops->pipe_send(ar, pipe, hdr_buf, buf); ath6kl_hif_pipe_send()
162 static inline void ath6kl_hif_pipe_get_default(struct ath6kl *ar, ath6kl_hif_pipe_get_default() argument
167 ar->hif_ops->pipe_get_default(ar, ul_pipe, dl_pipe); ath6kl_hif_pipe_get_default()
170 static inline int ath6kl_hif_pipe_map_service(struct ath6kl *ar, ath6kl_hif_pipe_map_service() argument
176 return ar->hif_ops->pipe_map_service(ar, service_id, ul_pipe, dl_pipe); ath6kl_hif_pipe_map_service()
179 static inline u16 ath6kl_hif_pipe_get_free_queue_number(struct ath6kl *ar, ath6kl_hif_pipe_get_free_queue_number() argument
184 return ar->hif_ops->pipe_get_free_queue_number(ar, pipe); ath6kl_hif_pipe_get_free_queue_number()
init.c
250 static int ath6kl_set_host_app_area(struct ath6kl *ar) ath6kl_set_host_app_area() argument
257 address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_app_host_interest)); ath6kl_set_host_app_area()
258 address = TARG_VTOP(ar->target_type, address); ath6kl_set_host_app_area()
260 if (ath6kl_diag_read32(ar, address, &data)) ath6kl_set_host_app_area()
263 address = TARG_VTOP(ar->target_type, data); ath6kl_set_host_app_area()
265 if (ath6kl_diag_write(ar, address, (u8 *) &host_app_area, ath6kl_set_host_app_area()
272 static inline void set_ac2_ep_map(struct ath6kl *ar, set_ac2_ep_map() argument
276 ar->ac2ep_map[ac] = ep; set_ac2_ep_map()
277 ar->ep2ac_map[ep] = ac; set_ac2_ep_map()
281 static int ath6kl_connectservice(struct ath6kl *ar, ath6kl_connectservice() argument
290 status = ath6kl_htc_conn_service(ar->htc_target, con_req, &response); ath6kl_connectservice()
299 if (test_bit(WMI_ENABLED, &ar->flag)) ath6kl_connectservice()
300 ath6kl_wmi_set_control_ep(ar->wmi, response.endpoint); ath6kl_connectservice()
301 ar->ctrl_ep = response.endpoint; ath6kl_connectservice()
304 set_ac2_ep_map(ar, WMM_AC_BE, response.endpoint); ath6kl_connectservice()
307 set_ac2_ep_map(ar, WMM_AC_BK, response.endpoint); ath6kl_connectservice()
310 set_ac2_ep_map(ar, WMM_AC_VI, response.endpoint); ath6kl_connectservice()
313 set_ac2_ep_map(ar, WMM_AC_VO, response.endpoint); ath6kl_connectservice()
323 static int ath6kl_init_service_ep(struct ath6kl *ar) ath6kl_init_service_ep() argument
346 if (ath6kl_connectservice(ar, &connect, "WMI CONTROL")) ath6kl_init_service_ep()
376 if (ath6kl_connectservice(ar, &connect, "WMI DATA BE")) ath6kl_init_service_ep()
381 if (ath6kl_connectservice(ar, &connect, "WMI DATA BK")) ath6kl_init_service_ep()
386 if (ath6kl_connectservice(ar, &connect, "WMI DATA VI")) ath6kl_init_service_ep()
397 if (ath6kl_connectservice(ar, &connect, "WMI DATA VO")) ath6kl_init_service_ep()
415 static int ath6kl_set_htc_params(struct ath6kl *ar, u32 mbox_isr_yield_val, ath6kl_set_htc_params() argument
421 blk_size = ar->mbox_info.block_size; ath6kl_set_htc_params()
427 status = ath6kl_bmi_write_hi32(ar, hi_mbox_io_block_sz, blk_size); ath6kl_set_htc_params()
435 ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_mbox_io_block_sz))); ath6kl_set_htc_params()
439 status = ath6kl_bmi_write_hi32(ar, hi_mbox_isr_yield_limit, ath6kl_set_htc_params()
451 static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx) ath6kl_target_config_wlan_params() argument
460 ret = ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi, idx, ath6kl_target_config_wlan_params()
461 ar->rx_meta_ver, 0, 0); ath6kl_target_config_wlan_params()
467 if (ar->conf_flags & ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN) { ath6kl_target_config_wlan_params()
468 ret = ath6kl_wmi_pmparams_cmd(ar->wmi, idx, 0, 1, 0, 0, 1, ath6kl_target_config_wlan_params()
477 if (!(ar->conf_flags & ATH6KL_CONF_IGNORE_ERP_BARKER)) { ath6kl_target_config_wlan_params()
478 ret = ath6kl_wmi_set_lpreamble_cmd(ar->wmi, idx, 0, ath6kl_target_config_wlan_params()
487 ret = ath6kl_wmi_set_keepalive_cmd(ar->wmi, idx, ath6kl_target_config_wlan_params()
494 ret = ath6kl_wmi_disctimeout_cmd(ar->wmi, idx, ath6kl_target_config_wlan_params()
501 if (!(ar->conf_flags & ATH6KL_CONF_ENABLE_TX_BURST)) { ath6kl_target_config_wlan_params()
502 ret = ath6kl_wmi_set_wmm_txop(ar->wmi, idx, WMI_TXOP_DISABLED); ath6kl_target_config_wlan_params()
509 if (ar->p2p && (ar->vif_max == 1 || idx)) { ath6kl_target_config_wlan_params()
510 ret = ath6kl_wmi_info_req_cmd(ar->wmi, idx, ath6kl_target_config_wlan_params()
518 ar->p2p = false; ath6kl_target_config_wlan_params()
522 if (ar->p2p && (ar->vif_max == 1 || idx)) { ath6kl_target_config_wlan_params()
524 ret = ath6kl_wmi_probe_report_req_cmd(ar->wmi, idx, true); ath6kl_target_config_wlan_params()
535 int ath6kl_configure_target(struct ath6kl *ar) ath6kl_configure_target() argument
541 param = !!(ar->conf_flags & ATH6KL_CONF_UART_DEBUG); ath6kl_configure_target()
542 if (ath6kl_bmi_write_hi32(ar, hi_serial_enable, param)) { ath6kl_configure_target()
557 for (i = 0; i < ar->vif_max; i++) ath6kl_configure_target()
570 ar->fw_capabilities)) { ath6kl_configure_target()
571 for (i = 0; i < ar->vif_max; i++) ath6kl_configure_target()
575 for (i = 0; i < ar->max_norm_iface; i++) ath6kl_configure_target()
579 for (i = ar->max_norm_iface; i < ar->vif_max; i++) ath6kl_configure_target()
583 if (ar->p2p && ar->vif_max == 1) ath6kl_configure_target()
587 if (ath6kl_bmi_write_hi32(ar, hi_app_host_interest, ath6kl_configure_target()
596 if (ath6kl_bmi_read_hi32(ar, hi_option_flag, &param) != 0) { ath6kl_configure_target()
601 param |= (ar->vif_max << HI_OPTION_NUM_DEV_SHIFT); ath6kl_configure_target()
608 if (ath6kl_bmi_write_hi32(ar, hi_option_flag, param) != 0) { ath6kl_configure_target()
624 if ((ar->target_type == TARGET_TYPE_AR6003) || ath6kl_configure_target()
625 (ar->version.target_ver == AR6004_HW_1_3_VERSION) || ath6kl_configure_target()
626 (ar->version.target_ver == AR6004_HW_3_0_VERSION)) { ath6kl_configure_target()
627 param = ar->hw.board_ext_data_addr; ath6kl_configure_target()
628 ram_reserved_size = ar->hw.reserved_ram_size; ath6kl_configure_target()
630 if (ath6kl_bmi_write_hi32(ar, hi_board_ext_data, param) != 0) { ath6kl_configure_target()
635 if (ath6kl_bmi_write_hi32(ar, hi_end_ram_reserve_sz, ath6kl_configure_target()
643 if (ath6kl_set_htc_params(ar, MBOX_YIELD_LIMIT, 0)) ath6kl_configure_target()
648 status = ath6kl_bmi_write_hi32(ar, hi_dbg_uart_txpin, ath6kl_configure_target()
649 ar->hw.uarttx_pin); ath6kl_configure_target()
654 if (ar->hw.refclk_hz != 0) { ath6kl_configure_target()
655 status = ath6kl_bmi_write_hi32(ar, hi_refclk_hz, ath6kl_configure_target()
656 ar->hw.refclk_hz); ath6kl_configure_target()
665 static int ath6kl_get_fw(struct ath6kl *ar, const char *filename, ath6kl_get_fw() argument
671 ret = request_firmware(&fw_entry, filename, ar->dev); ath6kl_get_fw()
693 static bool check_device_tree(struct ath6kl *ar) check_device_tree() argument
709 "%s/bdata.%s.bin", ar->hw.fw.dir, board_id); check_device_tree()
711 ret = ath6kl_get_fw(ar, board_filename, &ar->fw_board, check_device_tree()
712 &ar->fw_board_len); check_device_tree()
723 static bool check_device_tree(struct ath6kl *ar) check_device_tree() argument
729 static int ath6kl_fetch_board_file(struct ath6kl *ar) ath6kl_fetch_board_file() argument
734 if (ar->fw_board != NULL) ath6kl_fetch_board_file()
737 if (WARN_ON(ar->hw.fw_board == NULL)) ath6kl_fetch_board_file()
740 filename = ar->hw.fw_board; ath6kl_fetch_board_file()
742 ret = ath6kl_get_fw(ar, filename, &ar->fw_board, ath6kl_fetch_board_file()
743 &ar->fw_board_len); ath6kl_fetch_board_file()
749 if (check_device_tree(ar)) { ath6kl_fetch_board_file()
758 filename = ar->hw.fw_default_board; ath6kl_fetch_board_file()
760 ret = ath6kl_get_fw(ar, filename, &ar->fw_board, ath6kl_fetch_board_file()
761 &ar->fw_board_len); ath6kl_fetch_board_file()
774 static int ath6kl_fetch_otp_file(struct ath6kl *ar) ath6kl_fetch_otp_file() argument
779 if (ar->fw_otp != NULL) ath6kl_fetch_otp_file()
782 if (ar->hw.fw.otp == NULL) { ath6kl_fetch_otp_file()
789 ar->hw.fw.dir, ar->hw.fw.otp); ath6kl_fetch_otp_file()
791 ret = ath6kl_get_fw(ar, filename, &ar->fw_otp, ath6kl_fetch_otp_file()
792 &ar->fw_otp_len); ath6kl_fetch_otp_file()
802 static int ath6kl_fetch_testmode_file(struct ath6kl *ar) ath6kl_fetch_testmode_file() argument
807 if (ar->testmode == 0) ath6kl_fetch_testmode_file()
810 ath6kl_dbg(ATH6KL_DBG_BOOT, "testmode %d\n", ar->testmode); ath6kl_fetch_testmode_file()
812 if (ar->testmode == 2) { ath6kl_fetch_testmode_file()
813 if (ar->hw.fw.utf == NULL) { ath6kl_fetch_testmode_file()
819 ar->hw.fw.dir, ar->hw.fw.utf); ath6kl_fetch_testmode_file()
821 if (ar->hw.fw.tcmd == NULL) { ath6kl_fetch_testmode_file()
827 ar->hw.fw.dir, ar->hw.fw.tcmd); ath6kl_fetch_testmode_file()
830 set_bit(TESTMODE, &ar->flag); ath6kl_fetch_testmode_file()
832 ret = ath6kl_get_fw(ar, filename, &ar->fw, &ar->fw_len); ath6kl_fetch_testmode_file()
835 ar->testmode, filename, ret); ath6kl_fetch_testmode_file()
842 static int ath6kl_fetch_fw_file(struct ath6kl *ar) ath6kl_fetch_fw_file() argument
847 if (ar->fw != NULL) ath6kl_fetch_fw_file()
851 if (WARN_ON(ar->hw.fw.fw == NULL)) ath6kl_fetch_fw_file()
855 ar->hw.fw.dir, ar->hw.fw.fw); ath6kl_fetch_fw_file()
857 ret = ath6kl_get_fw(ar, filename, &ar->fw, &ar->fw_len); ath6kl_fetch_fw_file()
867 static int ath6kl_fetch_patch_file(struct ath6kl *ar) ath6kl_fetch_patch_file() argument
872 if (ar->fw_patch != NULL) ath6kl_fetch_patch_file()
875 if (ar->hw.fw.patch == NULL) ath6kl_fetch_patch_file()
879 ar->hw.fw.dir, ar->hw.fw.patch); ath6kl_fetch_patch_file()
881 ret = ath6kl_get_fw(ar, filename, &ar->fw_patch, ath6kl_fetch_patch_file()
882 &ar->fw_patch_len); ath6kl_fetch_patch_file()
892 static int ath6kl_fetch_testscript_file(struct ath6kl *ar) ath6kl_fetch_testscript_file() argument
897 if (ar->testmode != 2) ath6kl_fetch_testscript_file()
900 if (ar->fw_testscript != NULL) ath6kl_fetch_testscript_file()
903 if (ar->hw.fw.testscript == NULL) ath6kl_fetch_testscript_file()
907 ar->hw.fw.dir, ar->hw.fw.testscript); ath6kl_fetch_testscript_file()
909 ret = ath6kl_get_fw(ar, filename, &ar->fw_testscript, ath6kl_fetch_testscript_file()
910 &ar->fw_testscript_len); ath6kl_fetch_testscript_file()
920 static int ath6kl_fetch_fw_api1(struct ath6kl *ar) ath6kl_fetch_fw_api1() argument
924 ret = ath6kl_fetch_otp_file(ar); ath6kl_fetch_fw_api1()
928 ret = ath6kl_fetch_fw_file(ar); ath6kl_fetch_fw_api1()
932 ret = ath6kl_fetch_patch_file(ar); ath6kl_fetch_fw_api1()
936 ret = ath6kl_fetch_testscript_file(ar); ath6kl_fetch_fw_api1()
943 static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name) ath6kl_fetch_fw_apin() argument
953 snprintf(filename, sizeof(filename), "%s/%s", ar->hw.fw.dir, name); ath6kl_fetch_fw_apin()
955 ret = request_firmware(&fw, filename, ar->dev); ath6kl_fetch_fw_apin()
996 strlcpy(ar->wiphy->fw_version, data, ath6kl_fetch_fw_apin()
997 sizeof(ar->wiphy->fw_version)); ath6kl_fetch_fw_apin()
1001 ar->wiphy->fw_version); ath6kl_fetch_fw_apin()
1007 ar->fw_otp = kmemdup(data, ie_len, GFP_KERNEL); ath6kl_fetch_fw_apin()
1009 if (ar->fw_otp == NULL) { ath6kl_fetch_fw_apin()
1014 ar->fw_otp_len = ie_len; ath6kl_fetch_fw_apin()
1021 if (ar->fw != NULL) ath6kl_fetch_fw_apin()
1024 ar->fw = vmalloc(ie_len); ath6kl_fetch_fw_apin()
1026 if (ar->fw == NULL) { ath6kl_fetch_fw_apin()
1031 memcpy(ar->fw, data, ie_len); ath6kl_fetch_fw_apin()
1032 ar->fw_len = ie_len; ath6kl_fetch_fw_apin()
1038 ar->fw_patch = kmemdup(data, ie_len, GFP_KERNEL); ath6kl_fetch_fw_apin()
1040 if (ar->fw_patch == NULL) { ath6kl_fetch_fw_apin()
1045 ar->fw_patch_len = ie_len; ath6kl_fetch_fw_apin()
1049 ar->hw.reserved_ram_size = le32_to_cpup(val); ath6kl_fetch_fw_apin()
1053 ar->hw.reserved_ram_size); ath6kl_fetch_fw_apin()
1068 __set_bit(i, ar->fw_capabilities); ath6kl_fetch_fw_apin()
1072 ar->fw_capabilities, ath6kl_fetch_fw_apin()
1073 sizeof(ar->fw_capabilities)); ath6kl_fetch_fw_apin()
1080 ar->hw.dataset_patch_addr = le32_to_cpup(val); ath6kl_fetch_fw_apin()
1084 ar->hw.dataset_patch_addr); ath6kl_fetch_fw_apin()
1091 ar->hw.board_addr = le32_to_cpup(val); ath6kl_fetch_fw_apin()
1095 ar->hw.board_addr); ath6kl_fetch_fw_apin()
1102 ar->vif_max = min_t(unsigned int, le32_to_cpup(val), ath6kl_fetch_fw_apin()
1105 if (ar->vif_max > 1 && !ar->p2p) ath6kl_fetch_fw_apin()
1106 ar->max_norm_iface = 2; ath6kl_fetch_fw_apin()
1109 "found vif max ie %d\n", ar->vif_max); ath6kl_fetch_fw_apin()
1128 int ath6kl_init_fetch_firmwares(struct ath6kl *ar) ath6kl_init_fetch_firmwares() argument
1132 ret = ath6kl_fetch_board_file(ar); ath6kl_init_fetch_firmwares()
1136 ret = ath6kl_fetch_testmode_file(ar); ath6kl_init_fetch_firmwares()
1140 ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API5_FILE); ath6kl_init_fetch_firmwares()
1142 ar->fw_api = 5; ath6kl_init_fetch_firmwares()
1146 ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API4_FILE); ath6kl_init_fetch_firmwares()
1148 ar->fw_api = 4; ath6kl_init_fetch_firmwares()
1152 ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API3_FILE); ath6kl_init_fetch_firmwares()
1154 ar->fw_api = 3; ath6kl_init_fetch_firmwares()
1158 ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API2_FILE); ath6kl_init_fetch_firmwares()
1160 ar->fw_api = 2; ath6kl_init_fetch_firmwares()
1164 ret = ath6kl_fetch_fw_api1(ar); ath6kl_init_fetch_firmwares()
1168 ar->fw_api = 1; ath6kl_init_fetch_firmwares()
1171 ath6kl_dbg(ATH6KL_DBG_BOOT, "using fw api %d\n", ar->fw_api); ath6kl_init_fetch_firmwares()
1176 static int ath6kl_upload_board_file(struct ath6kl *ar) ath6kl_upload_board_file() argument
1182 if (WARN_ON(ar->fw_board == NULL)) ath6kl_upload_board_file()
1190 if (ar->hw.board_addr != 0) { ath6kl_upload_board_file()
1191 board_address = ar->hw.board_addr; ath6kl_upload_board_file()
1192 ath6kl_bmi_write_hi32(ar, hi_board_data, ath6kl_upload_board_file()
1195 ret = ath6kl_bmi_read_hi32(ar, hi_board_data, &board_address); ath6kl_upload_board_file()
1203 ret = ath6kl_bmi_read_hi32(ar, hi_board_ext_data, &board_ext_address); ath6kl_upload_board_file()
1209 if (ar->target_type == TARGET_TYPE_AR6003 && ath6kl_upload_board_file()
1215 switch (ar->target_type) { ath6kl_upload_board_file()
1219 if (ar->fw_board_len > (board_data_size + board_ext_data_size)) ath6kl_upload_board_file()
1232 ar->fw_board_len == (board_data_size + board_ext_data_size)) { ath6kl_upload_board_file()
1238 ret = ath6kl_bmi_write(ar, board_ext_address, ath6kl_upload_board_file()
1239 ar->fw_board + board_data_size, ath6kl_upload_board_file()
1250 ath6kl_bmi_write_hi32(ar, hi_board_ext_data_config, param); ath6kl_upload_board_file()
1253 if (ar->fw_board_len < board_data_size) { ath6kl_upload_board_file()
1254 ath6kl_err("Too small board file: %zu\n", ar->fw_board_len); ath6kl_upload_board_file()
1262 ret = ath6kl_bmi_write(ar, board_address, ar->fw_board, ath6kl_upload_board_file()
1271 if ((ar->version.target_ver == AR6004_HW_1_3_VERSION) || ath6kl_upload_board_file()
1272 (ar->version.target_ver == AR6004_HW_3_0_VERSION)) ath6kl_upload_board_file()
1277 ath6kl_bmi_write_hi32(ar, hi_board_data_initialized, param); ath6kl_upload_board_file()
1282 static int ath6kl_upload_otp(struct ath6kl *ar) ath6kl_upload_otp() argument
1288 if (ar->fw_otp == NULL) ath6kl_upload_otp()
1291 address = ar->hw.app_load_addr; ath6kl_upload_otp()
1294 ar->fw_otp_len); ath6kl_upload_otp()
1296 ret = ath6kl_bmi_fast_download(ar, address, ar->fw_otp, ath6kl_upload_otp()
1297 ar->fw_otp_len); ath6kl_upload_otp()
1304 ret = ath6kl_bmi_read_hi32(ar, hi_app_start, &address); ath6kl_upload_otp()
1311 if (ar->hw.app_start_override_addr == 0) { ath6kl_upload_otp()
1312 ar->hw.app_start_override_addr = address; ath6kl_upload_otp()
1318 ar->hw.app_start_override_addr); ath6kl_upload_otp()
1322 ar->hw.app_start_override_addr); ath6kl_upload_otp()
1324 ath6kl_bmi_execute(ar, ar->hw.app_start_override_addr, &param); ath6kl_upload_otp()
1329 static int ath6kl_upload_firmware(struct ath6kl *ar) ath6kl_upload_firmware() argument
1334 if (WARN_ON(ar->fw == NULL)) ath6kl_upload_firmware()
1337 address = ar->hw.app_load_addr; ath6kl_upload_firmware()
1340 address, ar->fw_len); ath6kl_upload_firmware()
1342 ret = ath6kl_bmi_fast_download(ar, address, ar->fw, ar->fw_len); ath6kl_upload_firmware()
1353 if (ar->target_type != TARGET_TYPE_AR6004) { ath6kl_upload_firmware()
1354 address = ar->hw.app_start_override_addr; ath6kl_upload_firmware()
1355 ath6kl_bmi_set_app_start(ar, address); ath6kl_upload_firmware()
1360 static int ath6kl_upload_patch(struct ath6kl *ar) ath6kl_upload_patch() argument
1365 if (ar->fw_patch == NULL) ath6kl_upload_patch()
1368 address = ar->hw.dataset_patch_addr; ath6kl_upload_patch()
1371 address, ar->fw_patch_len); ath6kl_upload_patch()
1373 ret = ath6kl_bmi_write(ar, address, ar->fw_patch, ar->fw_patch_len); ath6kl_upload_patch()
1379 ath6kl_bmi_write_hi32(ar, hi_dset_list_head, address); ath6kl_upload_patch()
1384 static int ath6kl_upload_testscript(struct ath6kl *ar) ath6kl_upload_testscript() argument
1389 if (ar->testmode != 2) ath6kl_upload_testscript()
1392 if (ar->fw_testscript == NULL) ath6kl_upload_testscript()
1395 address = ar->hw.testscript_addr; ath6kl_upload_testscript()
1398 address, ar->fw_testscript_len); ath6kl_upload_testscript()
1400 ret = ath6kl_bmi_write(ar, address, ar->fw_testscript, ath6kl_upload_testscript()
1401 ar->fw_testscript_len); ath6kl_upload_testscript()
1407 ath6kl_bmi_write_hi32(ar, hi_ota_testscript, address); ath6kl_upload_testscript()
1409 if ((ar->version.target_ver != AR6004_HW_1_3_VERSION) && ath6kl_upload_testscript()
1410 (ar->version.target_ver != AR6004_HW_3_0_VERSION)) ath6kl_upload_testscript()
1411 ath6kl_bmi_write_hi32(ar, hi_end_ram_reserve_sz, 4096); ath6kl_upload_testscript()
1413 ath6kl_bmi_write_hi32(ar, hi_test_apps_related, 1); ath6kl_upload_testscript()
1418 static int ath6kl_init_upload(struct ath6kl *ar) ath6kl_init_upload() argument
1423 if (ar->target_type != TARGET_TYPE_AR6003 && ath6kl_init_upload()
1424 ar->target_type != TARGET_TYPE_AR6004) ath6kl_init_upload()
1429 status = ath6kl_bmi_reg_read(ar, address, &param); ath6kl_init_upload()
1436 status = ath6kl_bmi_reg_write(ar, address, param); ath6kl_init_upload()
1441 status = ath6kl_bmi_reg_read(ar, address, &param); ath6kl_init_upload()
1448 status = ath6kl_bmi_reg_write(ar, address, param); ath6kl_init_upload()
1457 if (ar->target_type != TARGET_TYPE_AR6004) { ath6kl_init_upload()
1458 status = ath6kl_bmi_reg_write(ar, ATH6KL_ANALOG_PLL_REGISTER, ath6kl_init_upload()
1468 status = ath6kl_bmi_reg_write(ar, address, param); ath6kl_init_upload()
1476 status = ath6kl_bmi_reg_write(ar, address, param); ath6kl_init_upload()
1481 if (ar->hw.flags & ATH6KL_HW_SDIO_CRC_ERROR_WAR) { ath6kl_init_upload()
1486 status = ath6kl_bmi_reg_write(ar, address, param); ath6kl_init_upload()
1493 status = ath6kl_bmi_reg_write(ar, address, param); ath6kl_init_upload()
1498 status = ath6kl_bmi_reg_write(ar, address, param); ath6kl_init_upload()
1503 status = ath6kl_bmi_reg_write(ar, address, param); ath6kl_init_upload()
1508 status = ath6kl_bmi_reg_write(ar, address, param); ath6kl_init_upload()
1514 status = ath6kl_upload_board_file(ar); ath6kl_init_upload()
1519 status = ath6kl_upload_otp(ar); ath6kl_init_upload()
1524 status = ath6kl_upload_firmware(ar); ath6kl_init_upload()
1528 status = ath6kl_upload_patch(ar); ath6kl_init_upload()
1533 status = ath6kl_upload_testscript(ar); ath6kl_init_upload()
1539 status = ath6kl_bmi_reg_write(ar, address, sleep); ath6kl_init_upload()
1545 status = ath6kl_bmi_reg_write(ar, address, param); ath6kl_init_upload()
1552 int ath6kl_init_hw_params(struct ath6kl *ar) ath6kl_init_hw_params() argument
1560 if (hw->id == ar->version.target_ver) ath6kl_init_hw_params()
1566 ar->version.target_ver); ath6kl_init_hw_params()
1570 ar->hw = *hw; ath6kl_init_hw_params()
1574 ar->version.target_ver, ar->target_type, ath6kl_init_hw_params()
1575 ar->hw.dataset_patch_addr, ar->hw.app_load_addr); ath6kl_init_hw_params()
1578 ar->hw.app_start_override_addr, ar->hw.board_ext_data_addr, ath6kl_init_hw_params()
1579 ar->hw.reserved_ram_size); ath6kl_init_hw_params()
1582 ar->hw.refclk_hz, ar->hw.uarttx_pin); ath6kl_init_hw_params()
1637 static void ath6kl_init_get_fwcaps(struct ath6kl *ar, char *buf, size_t buf_len) ath6kl_init_get_fwcaps() argument
1639 u8 *data = (u8 *) ar->fw_capabilities; ath6kl_init_get_fwcaps()
1648 if (index >= sizeof(ar->fw_capabilities) * 4) ath6kl_init_get_fwcaps()
1674 static int ath6kl_init_hw_reset(struct ath6kl *ar) ath6kl_init_hw_reset() argument
1678 return ath6kl_diag_write32(ar, RESET_CONTROL_ADDRESS, ath6kl_init_hw_reset()
1682 static int __ath6kl_init_hw_start(struct ath6kl *ar) __ath6kl_init_hw_start() argument
1690 ret = ath6kl_hif_power_on(ar); __ath6kl_init_hw_start()
1694 ret = ath6kl_configure_target(ar); __ath6kl_init_hw_start()
1698 ret = ath6kl_init_upload(ar); __ath6kl_init_hw_start()
1703 ret = ath6kl_bmi_done(ar); __ath6kl_init_hw_start()
1712 ret = ath6kl_htc_wait_target(ar->htc_target); __ath6kl_init_hw_start()
1722 ath6kl_init_hw_reset(ar); __ath6kl_init_hw_start()
1729 ret = ath6kl_init_service_ep(ar); __ath6kl_init_hw_start()
1736 ath6kl_htc_credit_setup(ar->htc_target, &ar->credit_state_info); __ath6kl_init_hw_start()
1739 ret = ath6kl_htc_start(ar->htc_target); __ath6kl_init_hw_start()
1742 ath6kl_cookie_cleanup(ar); __ath6kl_init_hw_start()
1747 timeleft = wait_event_interruptible_timeout(ar->event_wq, __ath6kl_init_hw_start()
1749 &ar->flag), __ath6kl_init_hw_start()
1752 clear_bit(WMI_READY, &ar->flag); __ath6kl_init_hw_start()
1761 if (test_and_clear_bit(FIRST_BOOT, &ar->flag)) { __ath6kl_init_hw_start()
1763 ar->hw.name, __ath6kl_init_hw_start()
1764 ath6kl_init_get_hif_name(ar->hif_type), __ath6kl_init_hw_start()
1765 ar->wiphy->fw_version, __ath6kl_init_hw_start()
1766 ar->fw_api, __ath6kl_init_hw_start()
1767 test_bit(TESTMODE, &ar->flag) ? " testmode" : ""); __ath6kl_init_hw_start()
1768 ath6kl_init_get_fwcaps(ar, buf, sizeof(buf)); __ath6kl_init_hw_start()
1772 if (ar->version.abi_ver != ATH6KL_ABI_VERSION) { __ath6kl_init_hw_start()
1774 ATH6KL_ABI_VERSION, ar->version.abi_ver); __ath6kl_init_hw_start()
1783 if ((ath6kl_set_host_app_area(ar)) != 0) __ath6kl_init_hw_start()
1786 for (i = 0; i < ar->vif_max; i++) { __ath6kl_init_hw_start()
1787 ret = ath6kl_target_config_wlan_params(ar, i); __ath6kl_init_hw_start()
1795 ath6kl_htc_stop(ar->htc_target); __ath6kl_init_hw_start()
1797 ath6kl_hif_cleanup_scatter(ar); __ath6kl_init_hw_start()
1799 ath6kl_hif_power_off(ar); __ath6kl_init_hw_start()
1804 int ath6kl_init_hw_start(struct ath6kl *ar) ath6kl_init_hw_start() argument
1808 err = __ath6kl_init_hw_start(ar); ath6kl_init_hw_start()
1811 ar->state = ATH6KL_STATE_ON; ath6kl_init_hw_start()
1815 static int __ath6kl_init_hw_stop(struct ath6kl *ar) __ath6kl_init_hw_stop() argument
1821 ath6kl_htc_stop(ar->htc_target); __ath6kl_init_hw_stop()
1823 ath6kl_hif_stop(ar); __ath6kl_init_hw_stop()
1825 ath6kl_bmi_reset(ar); __ath6kl_init_hw_stop()
1827 ret = ath6kl_hif_power_off(ar); __ath6kl_init_hw_stop()
1834 int ath6kl_init_hw_stop(struct ath6kl *ar) ath6kl_init_hw_stop() argument
1838 err = __ath6kl_init_hw_stop(ar); ath6kl_init_hw_stop()
1841 ar->state = ATH6KL_STATE_OFF; ath6kl_init_hw_stop()
1845 void ath6kl_init_hw_restart(struct ath6kl *ar) ath6kl_init_hw_restart() argument
1847 clear_bit(WMI_READY, &ar->flag); ath6kl_init_hw_restart()
1849 ath6kl_cfg80211_stop_all(ar); ath6kl_init_hw_restart()
1851 if (__ath6kl_init_hw_stop(ar)) { ath6kl_init_hw_restart()
1856 if (__ath6kl_init_hw_start(ar)) { ath6kl_init_hw_restart()
1862 void ath6kl_stop_txrx(struct ath6kl *ar) ath6kl_stop_txrx() argument
1867 set_bit(DESTROY_IN_PROGRESS, &ar->flag); ath6kl_stop_txrx()
1869 if (down_interruptible(&ar->sem)) { ath6kl_stop_txrx()
1875 aggr_reset_state(ar->sta_list[i].aggr_conn); ath6kl_stop_txrx()
1877 spin_lock_bh(&ar->list_lock); ath6kl_stop_txrx()
1878 list_for_each_entry_safe(vif, tmp_vif, &ar->vif_list, list) { ath6kl_stop_txrx()
1880 spin_unlock_bh(&ar->list_lock); ath6kl_stop_txrx()
1881 ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag)); ath6kl_stop_txrx()
1885 spin_lock_bh(&ar->list_lock); ath6kl_stop_txrx()
1887 spin_unlock_bh(&ar->list_lock); ath6kl_stop_txrx()
1889 clear_bit(WMI_READY, &ar->flag); ath6kl_stop_txrx()
1891 if (ar->fw_recovery.enable) ath6kl_stop_txrx()
1892 del_timer_sync(&ar->fw_recovery.hb_timer); ath6kl_stop_txrx()
1903 ath6kl_wmi_shutdown(ar->wmi); ath6kl_stop_txrx()
1905 clear_bit(WMI_ENABLED, &ar->flag); ath6kl_stop_txrx()
1906 if (ar->htc_target) { ath6kl_stop_txrx()
1908 ath6kl_htc_stop(ar->htc_target); ath6kl_stop_txrx()
1915 ath6kl_init_hw_reset(ar); ath6kl_stop_txrx()
1917 up(&ar->sem); ath6kl_stop_txrx()
bmi.c
23 int ath6kl_bmi_done(struct ath6kl *ar) ath6kl_bmi_done() argument
28 if (ar->bmi.done_sent) { ath6kl_bmi_done()
33 ar->bmi.done_sent = true; ath6kl_bmi_done()
35 ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid)); ath6kl_bmi_done()
44 int ath6kl_bmi_get_target_info(struct ath6kl *ar, ath6kl_bmi_get_target_info() argument
50 if (ar->bmi.done_sent) { ath6kl_bmi_get_target_info()
55 ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid)); ath6kl_bmi_get_target_info()
61 if (ar->hif_type == ATH6KL_HIF_TYPE_USB) { ath6kl_bmi_get_target_info()
62 ret = ath6kl_hif_bmi_read(ar, (u8 *)targ_info, ath6kl_bmi_get_target_info()
65 ret = ath6kl_hif_bmi_read(ar, (u8 *)&targ_info->version, ath6kl_bmi_get_target_info()
76 ret = ath6kl_hif_bmi_read(ar, ath6kl_bmi_get_target_info()
95 ret = ath6kl_hif_bmi_read(ar, ath6kl_bmi_get_target_info()
114 int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) ath6kl_bmi_read() argument
122 if (ar->bmi.done_sent) { ath6kl_bmi_read()
127 size = ar->bmi.max_data_size + sizeof(cid) + sizeof(addr) + sizeof(len); ath6kl_bmi_read()
128 if (size > ar->bmi.max_cmd_size) { ath6kl_bmi_read()
132 memset(ar->bmi.cmd_buf, 0, size); ath6kl_bmi_read()
141 rx_len = (len_remain < ar->bmi.max_data_size) ? ath6kl_bmi_read()
142 len_remain : ar->bmi.max_data_size; ath6kl_bmi_read()
144 memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); ath6kl_bmi_read()
146 memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); ath6kl_bmi_read()
148 memcpy(&(ar->bmi.cmd_buf[offset]), &rx_len, sizeof(rx_len)); ath6kl_bmi_read()
151 ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); ath6kl_bmi_read()
157 ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, rx_len); ath6kl_bmi_read()
163 memcpy(&buf[len - len_remain], ar->bmi.cmd_buf, rx_len); ath6kl_bmi_read()
170 int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) ath6kl_bmi_write() argument
180 if (ar->bmi.done_sent) { ath6kl_bmi_write()
185 if ((ar->bmi.max_data_size + header) > ar->bmi.max_cmd_size) { ath6kl_bmi_write()
190 if (WARN_ON(ar->bmi.max_data_size > sizeof(aligned_buf))) ath6kl_bmi_write()
193 memset(ar->bmi.cmd_buf, 0, ar->bmi.max_data_size + header); ath6kl_bmi_write()
202 if (len_remain < (ar->bmi.max_data_size - header)) { ath6kl_bmi_write()
212 tx_len = (ar->bmi.max_data_size - header); ath6kl_bmi_write()
216 memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); ath6kl_bmi_write()
218 memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); ath6kl_bmi_write()
220 memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len)); ath6kl_bmi_write()
222 memcpy(&(ar->bmi.cmd_buf[offset]), src, tx_len); ath6kl_bmi_write()
225 ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); ath6kl_bmi_write()
237 int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param) ath6kl_bmi_execute() argument
244 if (ar->bmi.done_sent) { ath6kl_bmi_execute()
250 if (size > ar->bmi.max_cmd_size) { ath6kl_bmi_execute()
254 memset(ar->bmi.cmd_buf, 0, size); ath6kl_bmi_execute()
260 memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); ath6kl_bmi_execute()
262 memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); ath6kl_bmi_execute()
264 memcpy(&(ar->bmi.cmd_buf[offset]), param, sizeof(*param)); ath6kl_bmi_execute()
267 ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); ath6kl_bmi_execute()
273 ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param)); ath6kl_bmi_execute()
279 memcpy(param, ar->bmi.cmd_buf, sizeof(*param)); ath6kl_bmi_execute()
284 int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr) ath6kl_bmi_set_app_start() argument
291 if (ar->bmi.done_sent) { ath6kl_bmi_set_app_start()
297 if (size > ar->bmi.max_cmd_size) { ath6kl_bmi_set_app_start()
301 memset(ar->bmi.cmd_buf, 0, size); ath6kl_bmi_set_app_start()
306 memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); ath6kl_bmi_set_app_start()
308 memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); ath6kl_bmi_set_app_start()
311 ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); ath6kl_bmi_set_app_start()
320 int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param) ath6kl_bmi_reg_read() argument
327 if (ar->bmi.done_sent) { ath6kl_bmi_reg_read()
333 if (size > ar->bmi.max_cmd_size) { ath6kl_bmi_reg_read()
337 memset(ar->bmi.cmd_buf, 0, size); ath6kl_bmi_reg_read()
342 memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); ath6kl_bmi_reg_read()
344 memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); ath6kl_bmi_reg_read()
347 ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); ath6kl_bmi_reg_read()
353 ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param)); ath6kl_bmi_reg_read()
358 memcpy(param, ar->bmi.cmd_buf, sizeof(*param)); ath6kl_bmi_reg_read()
363 int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param) ath6kl_bmi_reg_write() argument
370 if (ar->bmi.done_sent) { ath6kl_bmi_reg_write()
376 if (size > ar->bmi.max_cmd_size) { ath6kl_bmi_reg_write()
380 memset(ar->bmi.cmd_buf, 0, size); ath6kl_bmi_reg_write()
387 memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); ath6kl_bmi_reg_write()
389 memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); ath6kl_bmi_reg_write()
391 memcpy(&(ar->bmi.cmd_buf[offset]), &param, sizeof(param)); ath6kl_bmi_reg_write()
394 ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); ath6kl_bmi_reg_write()
403 int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len) ath6kl_bmi_lz_data() argument
412 if (ar->bmi.done_sent) { ath6kl_bmi_lz_data()
417 size = ar->bmi.max_data_size + header; ath6kl_bmi_lz_data()
418 if (size > ar->bmi.max_cmd_size) { ath6kl_bmi_lz_data()
422 memset(ar->bmi.cmd_buf, 0, size); ath6kl_bmi_lz_data()
429 tx_len = (len_remain < (ar->bmi.max_data_size - header)) ? ath6kl_bmi_lz_data()
430 len_remain : (ar->bmi.max_data_size - header); ath6kl_bmi_lz_data()
433 memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); ath6kl_bmi_lz_data()
435 memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len)); ath6kl_bmi_lz_data()
437 memcpy(&(ar->bmi.cmd_buf[offset]), &buf[len - len_remain], ath6kl_bmi_lz_data()
441 ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); ath6kl_bmi_lz_data()
454 int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr) ath6kl_bmi_lz_stream_start() argument
461 if (ar->bmi.done_sent) { ath6kl_bmi_lz_stream_start()
467 if (size > ar->bmi.max_cmd_size) { ath6kl_bmi_lz_stream_start()
471 memset(ar->bmi.cmd_buf, 0, size); ath6kl_bmi_lz_stream_start()
478 memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); ath6kl_bmi_lz_stream_start()
480 memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); ath6kl_bmi_lz_stream_start()
483 ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); ath6kl_bmi_lz_stream_start()
493 int ath6kl_bmi_fast_download(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) ath6kl_bmi_fast_download() argument
500 ret = ath6kl_bmi_lz_stream_start(ar, addr); ath6kl_bmi_fast_download()
509 ret = ath6kl_bmi_lz_data(ar, buf, last_word_offset); ath6kl_bmi_fast_download()
514 ret = ath6kl_bmi_lz_data(ar, (u8 *)&last_word, 4); ath6kl_bmi_fast_download()
519 ret = ath6kl_bmi_lz_stream_start(ar, 0x00); ath6kl_bmi_fast_download()
524 void ath6kl_bmi_reset(struct ath6kl *ar) ath6kl_bmi_reset() argument
526 ar->bmi.done_sent = false; ath6kl_bmi_reset()
529 int ath6kl_bmi_init(struct ath6kl *ar) ath6kl_bmi_init() argument
531 if (WARN_ON(ar->bmi.max_data_size == 0)) ath6kl_bmi_init()
535 ar->bmi.max_cmd_size = ar->bmi.max_data_size + (sizeof(u32) * 3); ath6kl_bmi_init()
537 ar->bmi.cmd_buf = kzalloc(ar->bmi.max_cmd_size, GFP_ATOMIC); ath6kl_bmi_init()
538 if (!ar->bmi.cmd_buf) ath6kl_bmi_init()
544 void ath6kl_bmi_cleanup(struct ath6kl *ar) ath6kl_bmi_cleanup() argument
546 kfree(ar->bmi.cmd_buf); ath6kl_bmi_cleanup()
547 ar->bmi.cmd_buf = NULL; ath6kl_bmi_cleanup()
main.c
28 struct ath6kl *ar = vif->ar; ath6kl_find_sta() local
38 if (memcmp(node_addr, ar->sta_list[i].mac, ETH_ALEN) == 0) { ath6kl_find_sta()
39 conn = &ar->sta_list[i]; ath6kl_find_sta()
47 struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid) ath6kl_find_sta_by_aid() argument
53 if (ar->sta_list[ctr].aid == aid) { ath6kl_find_sta_by_aid()
54 conn = &ar->sta_list[ctr]; ath6kl_find_sta_by_aid()
65 struct ath6kl *ar = vif->ar; ath6kl_add_new_sta() local
71 sta = &ar->sta_list[free_slot]; ath6kl_add_new_sta()
81 ar->sta_list_index = ar->sta_list_index | (1 << free_slot); ath6kl_add_new_sta()
82 ar->ap_stats.sta[free_slot].aid = cpu_to_le32(aid); ath6kl_add_new_sta()
86 static void ath6kl_sta_cleanup(struct ath6kl *ar, u8 i) ath6kl_sta_cleanup() argument
88 struct ath6kl_sta *sta = &ar->sta_list[i]; ath6kl_sta_cleanup()
106 memset(&ar->ap_stats.sta[sta->aid - 1], 0, ath6kl_sta_cleanup()
113 ar->sta_list_index = ar->sta_list_index & ~(1 << i); ath6kl_sta_cleanup()
117 static u8 ath6kl_remove_sta(struct ath6kl *ar, u8 *mac, u16 reason) ath6kl_remove_sta() argument
128 if (!is_zero_ether_addr(ar->sta_list[i].mac)) { ath6kl_remove_sta()
129 ath6kl_sta_cleanup(ar, i); ath6kl_remove_sta()
135 if (memcmp(ar->sta_list[i].mac, mac, ETH_ALEN) == 0) { ath6kl_remove_sta()
138 mac, ar->sta_list[i].aid, reason); ath6kl_remove_sta()
139 ath6kl_sta_cleanup(ar, i); ath6kl_remove_sta()
151 struct ath6kl *ar = devt; ath6kl_ac2_endpoint_id() local
152 return ar->ac2ep_map[ac]; ath6kl_ac2_endpoint_id()
155 struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar) ath6kl_alloc_cookie() argument
159 cookie = ar->cookie_list; ath6kl_alloc_cookie()
161 ar->cookie_list = cookie->arc_list_next; ath6kl_alloc_cookie()
162 ar->cookie_count--; ath6kl_alloc_cookie()
168 void ath6kl_cookie_init(struct ath6kl *ar) ath6kl_cookie_init() argument
172 ar->cookie_list = NULL; ath6kl_cookie_init()
173 ar->cookie_count = 0; ath6kl_cookie_init()
175 memset(ar->cookie_mem, 0, sizeof(ar->cookie_mem)); ath6kl_cookie_init()
178 ath6kl_free_cookie(ar, &ar->cookie_mem[i]); ath6kl_cookie_init()
181 void ath6kl_cookie_cleanup(struct ath6kl *ar) ath6kl_cookie_cleanup() argument
183 ar->cookie_list = NULL; ath6kl_cookie_cleanup()
184 ar->cookie_count = 0; ath6kl_cookie_cleanup()
187 void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie) ath6kl_free_cookie() argument
191 if (!ar || !cookie) ath6kl_free_cookie()
194 cookie->arc_list_next = ar->cookie_list; ath6kl_free_cookie()
195 ar->cookie_list = cookie; ath6kl_free_cookie()
196 ar->cookie_count++; ath6kl_free_cookie()
203 int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value) ath6kl_diag_read32() argument
207 ret = ath6kl_hif_diag_read32(ar, address, value); ath6kl_diag_read32()
221 int ath6kl_diag_write32(struct ath6kl *ar, u32 address, __le32 value) ath6kl_diag_write32() argument
225 ret = ath6kl_hif_diag_write32(ar, address, value); ath6kl_diag_write32()
236 int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length) ath6kl_diag_read() argument
245 ret = ath6kl_diag_read32(ar, address, &buf[count]); ath6kl_diag_read()
253 int ath6kl_diag_write(struct ath6kl *ar, u32 address, void *data, u32 length) ath6kl_diag_write() argument
263 ret = ath6kl_diag_write32(ar, address, buf[count]); ath6kl_diag_write()
271 int ath6kl_read_fwlogs(struct ath6kl *ar) ath6kl_read_fwlogs() argument
283 address = TARG_VTOP(ar->target_type, ath6kl_read_fwlogs()
284 ath6kl_get_hi_item_addr(ar, ath6kl_read_fwlogs()
287 ret = ath6kl_diag_read32(ar, address, &debug_hdr_addr); ath6kl_read_fwlogs()
298 address = TARG_VTOP(ar->target_type, debug_hdr_addr); ath6kl_read_fwlogs()
299 ret = ath6kl_diag_read(ar, address, &debug_hdr, sizeof(debug_hdr)); ath6kl_read_fwlogs()
303 address = TARG_VTOP(ar->target_type, ath6kl_read_fwlogs()
307 ret = ath6kl_diag_read(ar, address, &debug_buf, sizeof(debug_buf)); ath6kl_read_fwlogs()
314 address = TARG_VTOP(ar->target_type, ath6kl_read_fwlogs()
322 ret = ath6kl_diag_read(ar, address, ath6kl_read_fwlogs()
327 ath6kl_debug_fwlog_event(ar, buf, length); ath6kl_read_fwlogs()
330 address = TARG_VTOP(ar->target_type, ath6kl_read_fwlogs()
332 ret = ath6kl_diag_read(ar, address, &debug_buf, ath6kl_read_fwlogs()
362 ath6kl_wmi_addkey_cmd(vif->ar->wmi, vif->fw_vif_idx, ath6kl_install_static_wep_keys()
377 struct ath6kl *ar = vif->ar; ath6kl_connect_ap_mode_bss() local
382 ik = &ar->ap_mode_bkey; ath6kl_connect_ap_mode_bss()
403 ar->wmi, vif->fw_vif_idx, ik->key_index, ik->key_type, ath6kl_connect_ap_mode_bss()
414 if (ar->last_ch != channel) ath6kl_connect_ap_mode_bss()
418 ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, NONE_BSS_FILTER, 0); ath6kl_connect_ap_mode_bss()
510 ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx); ath6kl_disconnect()
525 struct ath6kl *ar = devt; ath6kl_ready_event() local
527 memcpy(ar->mac_addr, datap, ETH_ALEN); ath6kl_ready_event()
531 ar->mac_addr, sw_ver, abi_ver, cap); ath6kl_ready_event()
533 ar->version.wlan_ver = sw_ver; ath6kl_ready_event()
534 ar->version.abi_ver = abi_ver; ath6kl_ready_event()
535 ar->hw.cap = cap; ath6kl_ready_event()
537 if (strlen(ar->wiphy->fw_version) == 0) { ath6kl_ready_event()
538 snprintf(ar->wiphy->fw_version, ath6kl_ready_event()
539 sizeof(ar->wiphy->fw_version), ath6kl_ready_event()
541 (ar->version.wlan_ver & 0xf0000000) >> 28, ath6kl_ready_event()
542 (ar->version.wlan_ver & 0x0f000000) >> 24, ath6kl_ready_event()
543 (ar->version.wlan_ver & 0x00ff0000) >> 16, ath6kl_ready_event()
544 (ar->version.wlan_ver & 0x0000ffff)); ath6kl_ready_event()
548 set_bit(WMI_READY, &ar->flag); ath6kl_ready_event()
549 wake_up(&ar->event_wq); ath6kl_ready_event()
554 struct ath6kl *ar = vif->ar; ath6kl_scan_complete_evt() local
562 if (!ar->usr_bss_filter) { ath6kl_scan_complete_evt()
564 ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_scan_complete_evt()
573 struct ath6kl *ar = vif->ar; ath6kl_commit_ch_switch() local
585 ar->fw_capabilities)) ath6kl_commit_ch_switch()
586 ath6kl_wmi_set_ie_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_commit_ch_switch()
591 return ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx, ath6kl_commit_ch_switch()
599 static void ath6kl_check_ch_switch(struct ath6kl *ar, u16 channel) ath6kl_check_ch_switch() argument
604 if (!ar->want_ch_switch) ath6kl_check_ch_switch()
607 spin_lock_bh(&ar->list_lock); ath6kl_check_ch_switch()
608 list_for_each_entry(vif, &ar->vif_list, list) { ath6kl_check_ch_switch()
609 if (ar->want_ch_switch & (1 << vif->fw_vif_idx)) ath6kl_check_ch_switch()
613 ar->want_ch_switch &= ~(1 << vif->fw_vif_idx); ath6kl_check_ch_switch()
619 spin_unlock_bh(&ar->list_lock); ath6kl_check_ch_switch()
628 struct ath6kl *ar = vif->ar; ath6kl_connect_event() local
640 ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_connect_event()
642 ath6kl_check_ch_switch(ar, channel); ath6kl_connect_event()
657 if ((vif->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) { ath6kl_connect_event()
658 memset(ar->node_map, 0, sizeof(ar->node_map)); ath6kl_connect_event()
659 ar->node_num = 0; ath6kl_connect_event()
660 ar->next_ep_id = ENDPOINT_2; ath6kl_connect_event()
663 if (!ar->usr_bss_filter) { ath6kl_connect_event()
665 ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_connect_event()
673 struct ath6kl *ar = vif->ar; ath6kl_tkip_micerr_event() local
681 sta = ath6kl_find_sta_by_aid(ar, (keyid >> 2)); ath6kl_tkip_micerr_event()
701 struct ath6kl *ar = vif->ar; ath6kl_update_target_stats() local
736 stats->tx_ucast_rate = ath6kl_wmi_get_rate(ar->wmi, rate); ath6kl_update_target_stats()
755 stats->rx_ucast_rate = ath6kl_wmi_get_rate(ar->wmi, rate); ath6kl_update_target_stats()
807 wake_up(&ar->event_wq); ath6kl_update_target_stats()
819 struct ath6kl *ar = vif->ar; ath6kl_tgt_stats_event() local
820 struct wmi_ap_mode_stat *ap = &ar->ap_stats; ath6kl_tgt_stats_event()
849 struct ath6kl *ar = (struct ath6kl *) dev; ath6kl_wakeup_event() local
851 wake_up(&ar->event_wq); ath6kl_wakeup_event()
856 struct ath6kl *ar = (struct ath6kl *) devt; ath6kl_txpwr_rx_evt() local
858 ar->tx_pwr = tx_pwr; ath6kl_txpwr_rx_evt()
859 wake_up(&ar->event_wq); ath6kl_txpwr_rx_evt()
867 struct ath6kl *ar = vif->ar; ath6kl_pspoll_event() local
870 conn = ath6kl_find_sta_by_aid(ar, aid); ath6kl_pspoll_event()
895 ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_pspoll_event()
915 ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, conn->aid, 0); ath6kl_pspoll_event()
922 struct ath6kl *ar = vif->ar; ath6kl_dtimexpiry_event() local
933 if (!ar->sta_list_index) ath6kl_dtimexpiry_event()
936 spin_lock_bh(&ar->mcastpsq_lock); ath6kl_dtimexpiry_event()
937 mcastq_empty = skb_queue_empty(&ar->mcastpsq); ath6kl_dtimexpiry_event()
938 spin_unlock_bh(&ar->mcastpsq_lock); ath6kl_dtimexpiry_event()
946 spin_lock_bh(&ar->mcastpsq_lock); ath6kl_dtimexpiry_event()
947 while ((skb = skb_dequeue(&ar->mcastpsq)) != NULL) { ath6kl_dtimexpiry_event()
948 spin_unlock_bh(&ar->mcastpsq_lock); ath6kl_dtimexpiry_event()
952 spin_lock_bh(&ar->mcastpsq_lock); ath6kl_dtimexpiry_event()
954 spin_unlock_bh(&ar->mcastpsq_lock); ath6kl_dtimexpiry_event()
959 ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, MCAST_AID, 0); ath6kl_dtimexpiry_event()
966 struct ath6kl *ar = vif->ar; ath6kl_disconnect_event() local
972 ar->want_ch_switch |= 1 << vif->fw_vif_idx; ath6kl_disconnect_event()
974 ar->last_ch = le16_to_cpu(vif->profile.ch); ath6kl_disconnect_event()
991 if (!ath6kl_remove_sta(ar, bssid, prot_reason_status)) ath6kl_disconnect_event()
995 if (ar->sta_list_index == 0) { ath6kl_disconnect_event()
996 spin_lock_bh(&ar->mcastpsq_lock); ath6kl_disconnect_event()
997 skb_queue_purge(&ar->mcastpsq); ath6kl_disconnect_event()
998 spin_unlock_bh(&ar->mcastpsq_lock); ath6kl_disconnect_event()
1001 if (test_bit(WMI_READY, &ar->flag)) ath6kl_disconnect_event()
1002 ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_disconnect_event()
1034 if (!ar->usr_bss_filter && test_bit(WMI_READY, &ar->flag)) ath6kl_disconnect_event()
1035 ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_disconnect_event()
1049 ath6kl_check_ch_switch(ar, ar->last_ch); ath6kl_disconnect_event()
1061 ar->user_key_ctrl = 0; ath6kl_disconnect_event()
1067 ath6kl_tx_data_cleanup(ar); ath6kl_disconnect_event()
1070 struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar) ath6kl_vif_first() argument
1074 spin_lock_bh(&ar->list_lock); ath6kl_vif_first()
1075 if (list_empty(&ar->vif_list)) { ath6kl_vif_first()
1076 spin_unlock_bh(&ar->list_lock); ath6kl_vif_first()
1080 vif = list_first_entry(&ar->vif_list, struct ath6kl_vif, list); ath6kl_vif_first()
1082 spin_unlock_bh(&ar->list_lock); ath6kl_vif_first()
1127 struct ath6kl *ar = vif->ar; ath6kl_set_features() local
1131 (ar->rx_meta_ver != WMI_META_VERSION_2)) { ath6kl_set_features()
1132 ar->rx_meta_ver = WMI_META_VERSION_2; ath6kl_set_features()
1133 err = ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi, ath6kl_set_features()
1135 ar->rx_meta_ver, 0, 0); ath6kl_set_features()
1141 (ar->rx_meta_ver == WMI_META_VERSION_2)) { ath6kl_set_features()
1142 ar->rx_meta_ver = 0; ath6kl_set_features()
1143 err = ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi, ath6kl_set_features()
1145 ar->rx_meta_ver, 0, 0); ath6kl_set_features()
1166 if (!test_bit(WMI_READY, &vif->ar->flag) || ath6kl_set_multicast_list()
1181 vif->ar->fw_capabilities)) { ath6kl_set_multicast_list()
1182 mc_all_on = mc_all_on || (vif->ar->state == ATH6KL_STATE_ON); ath6kl_set_multicast_list()
1196 ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi, vif->fw_vif_idx, ath6kl_set_multicast_list()
1226 ret = ath6kl_wmi_add_del_mcast_filter_cmd(vif->ar->wmi,
1266 ret = ath6kl_wmi_add_del_mcast_filter_cmd(vif->ar->wmi, netdev_for_each_mc_addr()
1295 struct ath6kl *ar = ath6kl_priv(dev); init_netdev() local
1309 ar->fw_capabilities)) init_netdev()
H A Dhtc-ops.h
23 static inline void *ath6kl_htc_create(struct ath6kl *ar) ath6kl_htc_create() argument
25 return ar->htc_ops->create(ar); ath6kl_htc_create()
30 return target->dev->ar->htc_ops->wait_target(target); ath6kl_htc_wait_target()
35 return target->dev->ar->htc_ops->start(target); ath6kl_htc_start()
42 return target->dev->ar->htc_ops->conn_service(target, req, resp); ath6kl_htc_conn_service()
48 return target->dev->ar->htc_ops->tx(target, packet); ath6kl_htc_tx()
53 return target->dev->ar->htc_ops->stop(target); ath6kl_htc_stop()
58 return target->dev->ar->htc_ops->cleanup(target); ath6kl_htc_cleanup()
65 return target->dev->ar->htc_ops->flush_txep(target, endpoint, tag); ath6kl_htc_flush_txep()
70 return target->dev->ar->htc_ops->flush_rx_buf(target); ath6kl_htc_flush_rx_buf()
77 return target->dev->ar->htc_ops->activity_changed(target, endpoint, ath6kl_htc_activity_changed()
84 return target->dev->ar->htc_ops->get_rxbuf_num(target, endpoint); ath6kl_htc_get_rxbuf_num()
90 return target->dev->ar->htc_ops->add_rxbuf_multiple(target, pktq); ath6kl_htc_add_rxbuf_multiple()
96 return target->dev->ar->htc_ops->credit_setup(target, info); ath6kl_htc_credit_setup()
99 static inline void ath6kl_htc_tx_complete(struct ath6kl *ar, ath6kl_htc_tx_complete() argument
102 ar->htc_ops->tx_complete(ar, skb); ath6kl_htc_tx_complete()
106 static inline void ath6kl_htc_rx_complete(struct ath6kl *ar, ath6kl_htc_rx_complete() argument
109 ar->htc_ops->rx_complete(ar, skb, pipe); ath6kl_htc_rx_complete()
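The htc-ops.h wrappers above never touch a concrete HTC implementation: every call is forwarded through the function-pointer table reachable as ar->htc_ops (or target->dev->ar->htc_ops from an endpoint), so call sites stay agnostic about which back-end filled the table in. A minimal, self-contained sketch of that dispatch idiom in plain C follows; the names (hif_ops, device_ctx, demo_*) are invented for illustration and are not the driver's types.

#include <stdio.h>

struct hif_ops {
        int (*start)(void *priv);
        void (*stop)(void *priv);
};

struct device_ctx {
        const struct hif_ops *ops;
        void *priv;
};

/* Type-checked wrappers, in the spirit of the htc-ops.h inlines:
 * callers never learn which back-end filled in the table. */
static inline int ctx_start(struct device_ctx *c)
{
        return c->ops->start(c->priv);
}

static inline void ctx_stop(struct device_ctx *c)
{
        c->ops->stop(c->priv);
}

static int demo_start(void *priv)
{
        printf("start %s\n", (char *)priv);
        return 0;
}

static void demo_stop(void *priv)
{
        printf("stop %s\n", (char *)priv);
}

static const struct hif_ops demo_ops = {
        .start = demo_start,
        .stop  = demo_stop,
};

int main(void)
{
        struct device_ctx c = { .ops = &demo_ops, .priv = "demo" };

        ctx_start(&c);
        ctx_stop(&c);
        return 0;
}

The inline wrappers buy type-checked call sites while keeping back-end selection a one-pointer assignment at attach time.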
H A Ddebug.c
191 if (dev->ar->mbox_info.gmbox_addr != 0) { ath6kl_dump_registers()
264 void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war) ath6kl_debug_war() argument
268 ar->debug.war_stats.invalid_rate++; ath6kl_debug_war()
276 struct ath6kl *ar = file->private_data; read_file_war_stats() local
291 "Invalid rates", ar->debug.war_stats.invalid_rate); read_file_war_stats()
309 void ath6kl_debug_fwlog_event(struct ath6kl *ar, const void *buf, size_t len) ath6kl_debug_fwlog_event() argument
332 spin_lock(&ar->debug.fwlog_queue.lock); ath6kl_debug_fwlog_event()
334 __skb_queue_tail(&ar->debug.fwlog_queue, skb); ath6kl_debug_fwlog_event()
335 complete(&ar->debug.fwlog_completion); ath6kl_debug_fwlog_event()
338 while (skb_queue_len(&ar->debug.fwlog_queue) > ath6kl_debug_fwlog_event()
340 skb = __skb_dequeue(&ar->debug.fwlog_queue); ath6kl_debug_fwlog_event()
344 spin_unlock(&ar->debug.fwlog_queue.lock); ath6kl_debug_fwlog_event()
351 struct ath6kl *ar = inode->i_private; ath6kl_fwlog_open() local
353 if (ar->debug.fwlog_open) ath6kl_fwlog_open()
356 ar->debug.fwlog_open = true; ath6kl_fwlog_open()
364 struct ath6kl *ar = inode->i_private; ath6kl_fwlog_release() local
366 ar->debug.fwlog_open = false; ath6kl_fwlog_release()
374 struct ath6kl *ar = file->private_data; ath6kl_fwlog_read() local
385 ath6kl_read_fwlogs(ar); ath6kl_fwlog_read()
387 spin_lock(&ar->debug.fwlog_queue.lock); ath6kl_fwlog_read()
389 while ((skb = __skb_dequeue(&ar->debug.fwlog_queue))) { ath6kl_fwlog_read()
392 __skb_queue_head(&ar->debug.fwlog_queue, skb); ath6kl_fwlog_read()
403 spin_unlock(&ar->debug.fwlog_queue.lock); ath6kl_fwlog_read()
427 struct ath6kl *ar = file->private_data; ath6kl_fwlog_block_read() local
438 spin_lock(&ar->debug.fwlog_queue.lock); ath6kl_fwlog_block_read()
440 if (skb_queue_len(&ar->debug.fwlog_queue) == 0) { ath6kl_fwlog_block_read()
442 init_completion(&ar->debug.fwlog_completion); ath6kl_fwlog_block_read()
444 spin_unlock(&ar->debug.fwlog_queue.lock); ath6kl_fwlog_block_read()
447 &ar->debug.fwlog_completion); ath6kl_fwlog_block_read()
453 spin_lock(&ar->debug.fwlog_queue.lock); ath6kl_fwlog_block_read()
456 while ((skb = __skb_dequeue(&ar->debug.fwlog_queue))) { ath6kl_fwlog_block_read()
459 __skb_queue_head(&ar->debug.fwlog_queue, skb); ath6kl_fwlog_block_read()
470 spin_unlock(&ar->debug.fwlog_queue.lock); ath6kl_fwlog_block_read()
501 struct ath6kl *ar = file->private_data; ath6kl_fwlog_mask_read() local
505 len = snprintf(buf, sizeof(buf), "0x%x\n", ar->debug.fwlog_mask); ath6kl_fwlog_mask_read()
514 struct ath6kl *ar = file->private_data; ath6kl_fwlog_mask_write() local
517 ret = kstrtou32_from_user(user_buf, count, 0, &ar->debug.fwlog_mask); ath6kl_fwlog_mask_write()
521 ret = ath6kl_wmi_config_debug_module_cmd(ar->wmi, ath6kl_fwlog_mask_write()
523 ar->debug.fwlog_mask); ath6kl_fwlog_mask_write()
541 struct ath6kl *ar = file->private_data; read_file_tgt_stats() local
550 vif = ath6kl_vif_first(ar); read_file_tgt_stats()
560 if (down_interruptible(&ar->sem)) { read_file_tgt_stats()
567 if (ath6kl_wmi_get_stats_cmd(ar->wmi, 0)) { read_file_tgt_stats()
568 up(&ar->sem); read_file_tgt_stats()
573 left = wait_event_interruptible_timeout(ar->event_wq, read_file_tgt_stats()
577 up(&ar->sem); read_file_tgt_stats()
698 struct ath6kl *ar = file->private_data; read_file_credit_dist_stats() local
699 struct htc_target *target = ar->htc_target; read_file_credit_dist_stats()
776 struct ath6kl *ar = file->private_data; ath6kl_endpoint_stats_read() local
777 struct htc_target *target = ar->htc_target; ath6kl_endpoint_stats_read()
831 struct ath6kl *ar = file->private_data; ath6kl_endpoint_stats_write() local
832 struct htc_target *target = ar->htc_target; ath6kl_endpoint_stats_write()
886 struct ath6kl *ar = file->private_data; ath6kl_regread_read() local
890 if (ar->debug.dbgfs_diag_reg) ath6kl_regread_read()
892 ar->debug.dbgfs_diag_reg); ath6kl_regread_read()
904 struct ath6kl *ar = file->private_data; ath6kl_regread_write() local
916 ar->debug.dbgfs_diag_reg = reg_addr; ath6kl_regread_write()
931 struct ath6kl *ar = inode->i_private; ath6kl_regdump_open() local
940 if (!ar->debug.dbgfs_diag_reg) ath6kl_regdump_open()
954 addr = ar->debug.dbgfs_diag_reg; ath6kl_regdump_open()
956 status = ath6kl_diag_read32(ar, ath6kl_regdump_open()
957 TARG_VTOP(ar->target_type, addr), ath6kl_regdump_open()
972 status = ath6kl_diag_read32(ar, ath6kl_regdump_open()
973 TARG_VTOP(ar->target_type, addr), ath6kl_regdump_open()
1019 struct ath6kl *ar = file->private_data; ath6kl_lrssi_roam_write() local
1025 ar->lrssi_roam_threshold = lrssi_roam_threshold; ath6kl_lrssi_roam_write()
1027 ath6kl_wmi_set_roam_lrssi_cmd(ar->wmi, ar->lrssi_roam_threshold); ath6kl_lrssi_roam_write()
1036 struct ath6kl *ar = file->private_data; ath6kl_lrssi_roam_read() local
1040 len = snprintf(buf, sizeof(buf), "%u\n", ar->lrssi_roam_threshold); ath6kl_lrssi_roam_read()
1057 struct ath6kl *ar = file->private_data; ath6kl_regwrite_read() local
1062 ar->debug.diag_reg_addr_wr, ar->debug.diag_reg_val_wr); ath6kl_regwrite_read()
1071 struct ath6kl *ar = file->private_data; ath6kl_regwrite_write() local
1097 ar->debug.diag_reg_addr_wr = reg_addr; ath6kl_regwrite_write()
1098 ar->debug.diag_reg_val_wr = reg_val; ath6kl_regwrite_write()
1100 if (ath6kl_diag_write32(ar, ar->debug.diag_reg_addr_wr, ath6kl_regwrite_write()
1101 cpu_to_le32(ar->debug.diag_reg_val_wr))) ath6kl_regwrite_write()
1115 int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf, ath6kl_debug_roam_tbl_event() argument
1130 if (ar->debug.roam_tbl == NULL || ath6kl_debug_roam_tbl_event()
1131 ar->debug.roam_tbl_len < (unsigned int) len) { ath6kl_debug_roam_tbl_event()
1132 kfree(ar->debug.roam_tbl); ath6kl_debug_roam_tbl_event()
1133 ar->debug.roam_tbl = kmalloc(len, GFP_ATOMIC); ath6kl_debug_roam_tbl_event()
1134 if (ar->debug.roam_tbl == NULL) ath6kl_debug_roam_tbl_event()
1138 memcpy(ar->debug.roam_tbl, buf, len); ath6kl_debug_roam_tbl_event()
1139 ar->debug.roam_tbl_len = len; ath6kl_debug_roam_tbl_event()
1141 if (test_bit(ROAM_TBL_PEND, &ar->flag)) { ath6kl_debug_roam_tbl_event()
1142 clear_bit(ROAM_TBL_PEND, &ar->flag); ath6kl_debug_roam_tbl_event()
1143 wake_up(&ar->event_wq); ath6kl_debug_roam_tbl_event()
1152 struct ath6kl *ar = file->private_data; ath6kl_roam_table_read() local
1161 if (down_interruptible(&ar->sem)) ath6kl_roam_table_read()
1164 set_bit(ROAM_TBL_PEND, &ar->flag); ath6kl_roam_table_read()
1166 ret = ath6kl_wmi_get_roam_tbl_cmd(ar->wmi); ath6kl_roam_table_read()
1168 up(&ar->sem); ath6kl_roam_table_read()
1173 ar->event_wq, !test_bit(ROAM_TBL_PEND, &ar->flag), WMI_TIMEOUT); ath6kl_roam_table_read()
1174 up(&ar->sem); ath6kl_roam_table_read()
1179 if (ar->debug.roam_tbl == NULL) ath6kl_roam_table_read()
1182 tbl = (struct wmi_target_roam_tbl *) ar->debug.roam_tbl; ath6kl_roam_table_read()
1224 struct ath6kl *ar = file->private_data; ath6kl_force_roam_write() local
1238 ret = ath6kl_wmi_force_roam_cmd(ar->wmi, bssid); ath6kl_force_roam_write()
1256 struct ath6kl *ar = file->private_data; ath6kl_roam_mode_write() local
1278 ret = ath6kl_wmi_set_roam_mode_cmd(ar->wmi, mode); ath6kl_roam_mode_write()
1292 void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive) ath6kl_debug_set_keepalive() argument
1294 ar->debug.keepalive = keepalive; ath6kl_debug_set_keepalive()
1300 struct ath6kl *ar = file->private_data; ath6kl_keepalive_read() local
1304 len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.keepalive); ath6kl_keepalive_read()
1313 struct ath6kl *ar = file->private_data; ath6kl_keepalive_write() local
1321 ret = ath6kl_wmi_set_keepalive_cmd(ar->wmi, 0, val); ath6kl_keepalive_write()
1336 void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout) ath6kl_debug_set_disconnect_timeout() argument
1338 ar->debug.disc_timeout = timeout; ath6kl_debug_set_disconnect_timeout()
1345 struct ath6kl *ar = file->private_data; ath6kl_disconnect_timeout_read() local
1349 len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.disc_timeout); ath6kl_disconnect_timeout_read()
1358 struct ath6kl *ar = file->private_data; ath6kl_disconnect_timeout_write() local
1366 ret = ath6kl_wmi_disctimeout_cmd(ar->wmi, 0, val); ath6kl_disconnect_timeout_write()
1385 struct ath6kl *ar = file->private_data; ath6kl_create_qos_write() local
1394 vif = ath6kl_vif_first(ar); ath6kl_create_qos_write()
1547 ath6kl_wmi_create_pstream_cmd(ar->wmi, vif->fw_vif_idx, &pstream); ath6kl_create_qos_write()
1563 struct ath6kl *ar = file->private_data; ath6kl_delete_qos_write() local
1571 vif = ath6kl_vif_first(ar); ath6kl_delete_qos_write()
1593 ath6kl_wmi_delete_pstream_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_delete_qos_write()
1610 struct ath6kl *ar = file->private_data; ath6kl_bgscan_int_write() local
1616 vif = ath6kl_vif_first(ar); ath6kl_bgscan_int_write()
1633 ath6kl_wmi_scanparams_cmd(ar->wmi, 0, 0, 0, bgscan_int, 0, 0, 0, 3, ath6kl_bgscan_int_write()
1650 struct ath6kl *ar = file->private_data; ath6kl_listen_int_write() local
1656 vif = ath6kl_vif_first(ar); ath6kl_listen_int_write()
1672 ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_listen_int_write()
1682 struct ath6kl *ar = file->private_data; ath6kl_listen_int_read() local
1687 vif = ath6kl_vif_first(ar); ath6kl_listen_int_read()
1708 struct ath6kl *ar = file->private_data; ath6kl_power_params_write() local
1751 ath6kl_wmi_pmparams_cmd(ar->wmi, 0, idle_period, ps_poll_num, ath6kl_power_params_write()
1764 void ath6kl_debug_init(struct ath6kl *ar) ath6kl_debug_init() argument
1766 skb_queue_head_init(&ar->debug.fwlog_queue); ath6kl_debug_init()
1767 init_completion(&ar->debug.fwlog_completion); ath6kl_debug_init()
1773 ar->debug.fwlog_mask = 0; ath6kl_debug_init()
1781 int ath6kl_debug_init_fs(struct ath6kl *ar) ath6kl_debug_init_fs() argument
1783 ar->debugfs_phy = debugfs_create_dir("ath6kl", ath6kl_debug_init_fs()
1784 ar->wiphy->debugfsdir); ath6kl_debug_init_fs()
1785 if (!ar->debugfs_phy) ath6kl_debug_init_fs()
1788 debugfs_create_file("tgt_stats", S_IRUSR, ar->debugfs_phy, ar, ath6kl_debug_init_fs()
1791 if (ar->hif_type == ATH6KL_HIF_TYPE_SDIO) ath6kl_debug_init_fs()
1793 ar->debugfs_phy, ar, ath6kl_debug_init_fs()
1797 ar->debugfs_phy, ar, &fops_endpoint_stats); ath6kl_debug_init_fs()
1799 debugfs_create_file("fwlog", S_IRUSR, ar->debugfs_phy, ar, ath6kl_debug_init_fs()
1802 debugfs_create_file("fwlog_block", S_IRUSR, ar->debugfs_phy, ar, ath6kl_debug_init_fs()
1805 debugfs_create_file("fwlog_mask", S_IRUSR | S_IWUSR, ar->debugfs_phy, ath6kl_debug_init_fs()
1806 ar, &fops_fwlog_mask); ath6kl_debug_init_fs()
1808 debugfs_create_file("reg_addr", S_IRUSR | S_IWUSR, ar->debugfs_phy, ar, ath6kl_debug_init_fs()
1811 debugfs_create_file("reg_dump", S_IRUSR, ar->debugfs_phy, ar, ath6kl_debug_init_fs()
1815 ar->debugfs_phy, ar, &fops_lrssi_roam_threshold); ath6kl_debug_init_fs()
1818 ar->debugfs_phy, ar, &fops_diag_reg_write); ath6kl_debug_init_fs()
1820 debugfs_create_file("war_stats", S_IRUSR, ar->debugfs_phy, ar, ath6kl_debug_init_fs()
1823 debugfs_create_file("roam_table", S_IRUSR, ar->debugfs_phy, ar, ath6kl_debug_init_fs()
1826 debugfs_create_file("force_roam", S_IWUSR, ar->debugfs_phy, ar, ath6kl_debug_init_fs()
1829 debugfs_create_file("roam_mode", S_IWUSR, ar->debugfs_phy, ar, ath6kl_debug_init_fs()
1832 debugfs_create_file("keepalive", S_IRUSR | S_IWUSR, ar->debugfs_phy, ar, ath6kl_debug_init_fs()
1836 ar->debugfs_phy, ar, &fops_disconnect_timeout); ath6kl_debug_init_fs()
1838 debugfs_create_file("create_qos", S_IWUSR, ar->debugfs_phy, ar, ath6kl_debug_init_fs()
1841 debugfs_create_file("delete_qos", S_IWUSR, ar->debugfs_phy, ar, ath6kl_debug_init_fs()
1845 ar->debugfs_phy, ar, &fops_bgscan_int); ath6kl_debug_init_fs()
1848 ar->debugfs_phy, ar, &fops_listen_int); ath6kl_debug_init_fs()
1850 debugfs_create_file("power_params", S_IWUSR, ar->debugfs_phy, ar, ath6kl_debug_init_fs()
1856 void ath6kl_debug_cleanup(struct ath6kl *ar) ath6kl_debug_cleanup() argument
1858 skb_queue_purge(&ar->debug.fwlog_queue); ath6kl_debug_cleanup()
1859 complete(&ar->debug.fwlog_completion); ath6kl_debug_cleanup()
1860 kfree(ar->debug.roam_tbl); ath6kl_debug_cleanup()
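ath6kl_debug_fwlog_event() above takes the queue's own lock, appends the new entry, completes any blocked reader, and then frees entries from the head while the queue length exceeds its cap. A userspace analogue of that bounded drop-oldest queue, assuming invented names (log_ent, logq_push, LOGQ_MAX) and a pthread mutex in place of the skb queue lock:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

#define LOGQ_MAX 4

struct log_ent {
        struct log_ent *next;
        char *msg;
};

static struct log_ent *head, *tail;
static unsigned int qlen;
static pthread_mutex_t logq_lock = PTHREAD_MUTEX_INITIALIZER;

static void logq_push(const char *msg)
{
        struct log_ent *e = malloc(sizeof(*e));

        if (!e)
                return;
        e->next = NULL;
        e->msg = strdup(msg);

        pthread_mutex_lock(&logq_lock);
        if (tail)
                tail->next = e;
        else
                head = e;
        tail = e;
        qlen++;

        /* Bound the backlog: discard from the head first, mirroring the
         * __skb_dequeue()-and-free loop in ath6kl_debug_fwlog_event(). */
        while (qlen > LOGQ_MAX) {
                struct log_ent *old = head;

                head = old->next;
                if (!head)
                        tail = NULL;
                qlen--;
                free(old->msg);
                free(old);
        }
        pthread_mutex_unlock(&logq_lock);
}

int main(void)
{
        int i;

        for (i = 0; i < 10; i++)
                logq_push("firmware log event");
        return 0;
}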
H A Dcfg80211.c
148 struct ath6kl *ar = vif->ar; __ath6kl_cfg80211_sscan_stop() local
155 if (ar->state == ATH6KL_STATE_RECOVERY) __ath6kl_cfg80211_sscan_stop()
158 ath6kl_wmi_enable_sched_scan_cmd(ar->wmi, vif->fw_vif_idx, false); __ath6kl_cfg80211_sscan_stop()
165 struct ath6kl *ar = vif->ar; ath6kl_cfg80211_sscan_disable() local
173 cfg80211_sched_scan_stopped(ar->wiphy); ath6kl_cfg80211_sscan_disable()
287 struct ath6kl *ar = vif->ar; ath6kl_cfg80211_ready() local
289 if (!test_bit(WMI_READY, &ar->flag)) { ath6kl_cfg80211_ready()
325 struct ath6kl *ar = vif->ar; ath6kl_set_assoc_req_ies() local
335 ar->connect_ctrl_flags &= ~CONNECT_WPS_FLAG; ath6kl_set_assoc_req_ies()
356 ar->connect_ctrl_flags |= CONNECT_WPS_FLAG; ath6kl_set_assoc_req_ies()
362 ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_set_assoc_req_ies()
390 static bool ath6kl_is_valid_iftype(struct ath6kl *ar, enum nl80211_iftype type, ath6kl_is_valid_iftype() argument
398 if (ar->ibss_if_active || ((type == NL80211_IFTYPE_ADHOC) && ath6kl_is_valid_iftype()
399 ar->num_vif)) ath6kl_is_valid_iftype()
404 for (i = 0; i < ar->vif_max; i++) { ath6kl_is_valid_iftype()
405 if ((ar->avail_idx_map) & BIT(i)) { ath6kl_is_valid_iftype()
414 for (i = ar->max_norm_iface; i < ar->vif_max; i++) { ath6kl_is_valid_iftype()
415 if ((ar->avail_idx_map) & BIT(i)) { ath6kl_is_valid_iftype()
425 static bool ath6kl_is_tx_pending(struct ath6kl *ar) ath6kl_is_tx_pending() argument
427 return ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)] == 0; ath6kl_is_tx_pending()
435 if (WARN_ON(!test_bit(WMI_READY, &vif->ar->flag))) ath6kl_cfg80211_sta_bmiss_enhance()
442 vif->ar->fw_capabilities)) ath6kl_cfg80211_sta_bmiss_enhance()
448 err = ath6kl_wmi_sta_bmiss_enhance_cmd(vif->ar->wmi, ath6kl_cfg80211_sta_bmiss_enhance()
458 struct ath6kl *ar = ath6kl_priv(dev); ath6kl_cfg80211_connect() local
461 u8 nw_subtype = (ar->p2p) ? SUBTYPE_P2PDEV : SUBTYPE_NONE; ath6kl_cfg80211_connect()
471 if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) { ath6kl_cfg80211_connect()
476 if (test_bit(SKIP_SCAN, &ar->flag) && ath6kl_cfg80211_connect()
483 if (down_interruptible(&ar->sem)) { ath6kl_cfg80211_connect()
488 if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) { ath6kl_cfg80211_connect()
490 up(&ar->sem); ath6kl_cfg80211_connect()
494 if (ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)]) { ath6kl_cfg80211_connect()
498 wait_event_interruptible_timeout(ar->event_wq, ath6kl_cfg80211_connect()
499 ath6kl_is_tx_pending(ar), ath6kl_cfg80211_connect()
503 up(&ar->sem); ath6kl_cfg80211_connect()
510 up(&ar->sem); ath6kl_cfg80211_connect()
515 ar->connect_ctrl_flags &= ~CONNECT_WPS_FLAG; ath6kl_cfg80211_connect()
521 status = ath6kl_wmi_reconnect_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_cfg80211_connect()
525 up(&ar->sem); ath6kl_cfg80211_connect()
551 up(&ar->sem); ath6kl_cfg80211_connect()
573 up(&ar->sem); ath6kl_cfg80211_connect()
583 ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, sme->key_idx, ath6kl_cfg80211_connect()
592 if (!ar->usr_bss_filter) { ath6kl_cfg80211_connect()
594 if (ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_cfg80211_connect()
597 up(&ar->sem); ath6kl_cfg80211_connect()
624 status = ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_cfg80211_connect()
629 up(&ar->sem); ath6kl_cfg80211_connect()
634 status = ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type, ath6kl_cfg80211_connect()
641 ar->connect_ctrl_flags, nw_subtype); ath6kl_cfg80211_connect()
651 ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, 0, 0, ath6kl_cfg80211_connect()
654 up(&ar->sem); ath6kl_cfg80211_connect()
666 if ((!(ar->connect_ctrl_flags & CONNECT_DO_WPA_OFFLOAD)) && ath6kl_cfg80211_connect()
673 ar->connect_ctrl_flags &= ~CONNECT_DO_WPA_OFFLOAD; ath6kl_cfg80211_connect()
687 struct ath6kl *ar = vif->ar; ath6kl_add_bss_if_needed() local
701 bss = cfg80211_get_bss(ar->wiphy, chan, bssid, ath6kl_add_bss_if_needed()
720 bss = cfg80211_inform_bss(ar->wiphy, chan, ath6kl_add_bss_if_needed()
744 struct ath6kl *ar = vif->ar; ath6kl_cfg80211_connect_event() local
784 chan = ieee80211_get_channel(ar->wiphy, (int) channel); ath6kl_cfg80211_connect_event()
797 cfg80211_put_bss(ar->wiphy, bss); ath6kl_cfg80211_connect_event()
808 cfg80211_put_bss(ar->wiphy, bss); ath6kl_cfg80211_connect_event()
819 struct ath6kl *ar = ath6kl_priv(dev); ath6kl_cfg80211_disconnect() local
830 if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) { ath6kl_cfg80211_disconnect()
835 if (down_interruptible(&ar->sem)) { ath6kl_cfg80211_disconnect()
845 if (!test_bit(SKIP_SCAN, &ar->flag)) ath6kl_cfg80211_disconnect()
848 up(&ar->sem); ath6kl_cfg80211_disconnect()
859 struct ath6kl *ar = vif->ar; ath6kl_cfg80211_disconnect_event() local
906 ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx); ath6kl_cfg80211_disconnect_event()
909 static int ath6kl_set_probed_ssids(struct ath6kl *ar, ath6kl_set_probed_ssids() argument
972 ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, i, ath6kl_set_probed_ssids()
980 ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, i, ath6kl_set_probed_ssids()
991 struct ath6kl *ar = ath6kl_priv(vif->ndev); ath6kl_cfg80211_scan() local
1002 if (!ar->usr_bss_filter) { ath6kl_cfg80211_scan()
1004 ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_cfg80211_scan()
1012 ret = ath6kl_set_probed_ssids(ar, vif, request->ssids, ath6kl_cfg80211_scan()
1018 ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_cfg80211_scan()
1052 ret = ath6kl_wmi_beginscan_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_cfg80211_scan()
1071 struct ath6kl *ar = vif->ar; ath6kl_cfg80211_scan_complete_event() local
1085 ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_cfg80211_scan_complete_event()
1106 ieee80211_get_channel(vif->ar->wiphy, freq), ath6kl_cfg80211_ch_switch_notify()
1120 struct ath6kl *ar = ath6kl_priv(ndev); ath6kl_cfg80211_add_key() local
1133 return ath6kl_wmi_add_krk_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_cfg80211_add_key()
1202 ar->ap_mode_bkey.valid = true; ath6kl_cfg80211_add_key()
1203 ar->ap_mode_bkey.key_index = key_index; ath6kl_cfg80211_add_key()
1204 ar->ap_mode_bkey.key_type = key_type; ath6kl_cfg80211_add_key()
1205 ar->ap_mode_bkey.key_len = key->key_len; ath6kl_cfg80211_add_key()
1206 memcpy(ar->ap_mode_bkey.key, key->key, key->key_len); ath6kl_cfg80211_add_key()
1233 return ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, key_index, ath6kl_cfg80211_add_key()
1244 struct ath6kl *ar = ath6kl_priv(ndev); ath6kl_cfg80211_del_key() local
1267 return ath6kl_wmi_deletekey_cmd(ar->wmi, vif->fw_vif_idx, key_index); ath6kl_cfg80211_del_key()
1310 struct ath6kl *ar = ath6kl_priv(ndev); ath6kl_cfg80211_set_default_key() local
1347 return ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_cfg80211_set_default_key()
1370 struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy); ath6kl_cfg80211_set_wiphy_params() local
1377 vif = ath6kl_vif_first(ar); ath6kl_cfg80211_set_wiphy_params()
1385 ret = ath6kl_wmi_set_rts_cmd(ar->wmi, wiphy->rts_threshold); ath6kl_cfg80211_set_wiphy_params()
1400 struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy); ath6kl_cfg80211_set_txpower() local
1407 vif = ath6kl_vif_first(ar); ath6kl_cfg80211_set_txpower()
1418 ar->tx_pwr = dbm; ath6kl_cfg80211_set_txpower()
1426 ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx, dbm); ath6kl_cfg80211_set_txpower()
1435 struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy); ath6kl_cfg80211_get_txpower() local
1438 vif = ath6kl_vif_first(ar); ath6kl_cfg80211_get_txpower()
1446 ar->tx_pwr = 0; ath6kl_cfg80211_get_txpower()
1448 if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx) != 0) { ath6kl_cfg80211_get_txpower()
1453 wait_event_interruptible_timeout(ar->event_wq, ar->tx_pwr != 0, ath6kl_cfg80211_get_txpower()
1462 *dbm = ar->tx_pwr; ath6kl_cfg80211_get_txpower()
1470 struct ath6kl *ar = ath6kl_priv(dev); ath6kl_cfg80211_set_power_mgmt() local
1488 if (ath6kl_wmi_powermode_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_cfg80211_set_power_mgmt()
1504 struct ath6kl *ar = wiphy_priv(wiphy); ath6kl_cfg80211_add_iface() local
1508 if (ar->num_vif == ar->vif_max) { ath6kl_cfg80211_add_iface()
1513 if (!ath6kl_is_valid_iftype(ar, type, &if_idx, &nw_type)) { ath6kl_cfg80211_add_iface()
1518 wdev = ath6kl_interface_add(ar, name, name_assign_type, type, if_idx, nw_type); ath6kl_cfg80211_add_iface()
1522 ar->num_vif++; ath6kl_cfg80211_add_iface()
1530 struct ath6kl *ar = wiphy_priv(wiphy); ath6kl_cfg80211_del_iface() local
1533 spin_lock_bh(&ar->list_lock); ath6kl_cfg80211_del_iface()
1535 spin_unlock_bh(&ar->list_lock); ath6kl_cfg80211_del_iface()
1537 ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag)); ath6kl_cfg80211_del_iface()
1562 vif->ar->fw_capabilities) && ath6kl_cfg80211_change_iface()
1565 if (vif->ar->vif_max == 1) { ath6kl_cfg80211_change_iface()
1572 for (i = vif->ar->max_norm_iface; i < vif->ar->vif_max; i++) { ath6kl_cfg80211_change_iface()
1577 if (i == vif->ar->vif_max) { ath6kl_cfg80211_change_iface()
1613 struct ath6kl *ar = ath6kl_priv(dev); ath6kl_cfg80211_join_ibss() local
1666 status = ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type, ath6kl_cfg80211_join_ibss()
1673 ar->connect_ctrl_flags, SUBTYPE_NONE); ath6kl_cfg80211_join_ibss()
1767 struct ath6kl *ar = ath6kl_priv(dev); ath6kl_get_station() local
1778 if (down_interruptible(&ar->sem)) ath6kl_get_station()
1783 ret = ath6kl_wmi_get_stats_cmd(ar->wmi, vif->fw_vif_idx); ath6kl_get_station()
1786 up(&ar->sem); ath6kl_get_station()
1790 left = wait_event_interruptible_timeout(ar->event_wq, ath6kl_get_station()
1795 up(&ar->sem); ath6kl_get_station()
1846 ath6kl_debug_war(ar, ATH6KL_WAR_INVALID_RATE); ath6kl_get_station()
1867 struct ath6kl *ar = ath6kl_priv(netdev); ath6kl_set_pmksa() local
1870 return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, pmksa->bssid, ath6kl_set_pmksa()
1877 struct ath6kl *ar = ath6kl_priv(netdev); ath6kl_del_pmksa() local
1880 return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, pmksa->bssid, ath6kl_del_pmksa()
1886 struct ath6kl *ar = ath6kl_priv(netdev); ath6kl_flush_pmksa() local
1890 return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_flush_pmksa()
1895 static int ath6kl_wow_usr(struct ath6kl *ar, struct ath6kl_vif *vif, ath6kl_wow_usr() argument
1923 ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi, ath6kl_wow_usr()
1950 static int ath6kl_wow_ap(struct ath6kl *ar, struct ath6kl_vif *vif) ath6kl_wow_ap() argument
1983 ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi, ath6kl_wow_ap()
1993 ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi, ath6kl_wow_ap()
2006 ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi, ath6kl_wow_ap()
2016 ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi, ath6kl_wow_ap()
2028 static int ath6kl_wow_sta(struct ath6kl *ar, struct ath6kl_vif *vif) ath6kl_wow_sta() argument
2039 ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi, ath6kl_wow_sta()
2054 ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi, ath6kl_wow_sta()
2072 static bool is_ctrl_ep_empty(struct ath6kl *ar) is_ctrl_ep_empty() argument
2074 return !ar->tx_pending[ar->ctrl_ep]; is_ctrl_ep_empty()
2077 static int ath6kl_cfg80211_host_sleep(struct ath6kl *ar, struct ath6kl_vif *vif) ath6kl_cfg80211_host_sleep() argument
2083 ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_cfg80211_host_sleep()
2088 left = wait_event_interruptible_timeout(ar->event_wq, ath6kl_cfg80211_host_sleep()
2100 if (ar->tx_pending[ar->ctrl_ep]) { ath6kl_cfg80211_host_sleep()
2101 left = wait_event_interruptible_timeout(ar->event_wq, ath6kl_cfg80211_host_sleep()
2102 is_ctrl_ep_empty(ar), ath6kl_cfg80211_host_sleep()
2119 struct ath6kl *ar = vif->ar; ath6kl_wow_suspend_vif() local
2129 ar->fw_capabilities)) { ath6kl_wow_suspend_vif()
2130 ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi, ath6kl_wow_suspend_vif()
2138 ath6kl_wmi_del_wow_pattern_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_wow_suspend_vif()
2147 ret = ath6kl_wow_usr(ar, vif, wow, filter); ath6kl_wow_suspend_vif()
2149 ret = ath6kl_wow_ap(ar, vif); ath6kl_wow_suspend_vif()
2151 ret = ath6kl_wow_sta(ar, vif); ath6kl_wow_suspend_vif()
2159 ret = ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_wow_suspend_vif()
2170 ret = ath6kl_wmi_bmisstime_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_wow_suspend_vif()
2175 ret = ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_wow_suspend_vif()
2202 ret = ath6kl_wmi_set_ip_cmd(ar->wmi, vif->fw_vif_idx, ips[0], ips[1]); ath6kl_wow_suspend_vif()
2211 static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow) ath6kl_wow_suspend() argument
2219 first_vif = ath6kl_vif_first(ar); ath6kl_wow_suspend()
2228 spin_lock_bh(&ar->list_lock); ath6kl_wow_suspend()
2229 list_for_each_entry(vif, &ar->vif_list, list) { ath6kl_wow_suspend()
2239 spin_unlock_bh(&ar->list_lock); ath6kl_wow_suspend()
2246 ar->state = ATH6KL_STATE_SUSPENDING; ath6kl_wow_suspend()
2248 ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, first_vif->fw_vif_idx, ath6kl_wow_suspend()
2255 return ath6kl_cfg80211_host_sleep(ar, first_vif); ath6kl_wow_suspend()
2260 struct ath6kl *ar = vif->ar; ath6kl_wow_resume_vif() local
2264 ret = ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_wow_resume_vif()
2269 ret = ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_wow_resume_vif()
2274 ret = ath6kl_wmi_bmisstime_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_wow_resume_vif()
2282 ar->fw_capabilities)) { ath6kl_wow_resume_vif()
2283 ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi, ath6kl_wow_resume_vif()
2294 static int ath6kl_wow_resume(struct ath6kl *ar) ath6kl_wow_resume() argument
2299 vif = ath6kl_vif_first(ar); ath6kl_wow_resume()
2304 ar->state = ATH6KL_STATE_RESUMING; ath6kl_wow_resume()
2306 ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_wow_resume()
2314 spin_lock_bh(&ar->list_lock); ath6kl_wow_resume()
2315 list_for_each_entry(vif, &ar->vif_list, list) { ath6kl_wow_resume()
2323 spin_unlock_bh(&ar->list_lock); ath6kl_wow_resume()
2328 ar->state = ATH6KL_STATE_ON; ath6kl_wow_resume()
2332 ar->state = ATH6KL_STATE_WOW; ath6kl_wow_resume()
2336 static int ath6kl_cfg80211_deepsleep_suspend(struct ath6kl *ar) ath6kl_cfg80211_deepsleep_suspend() argument
2341 vif = ath6kl_vif_first(ar); ath6kl_cfg80211_deepsleep_suspend()
2345 if (!test_bit(WMI_READY, &ar->flag)) { ath6kl_cfg80211_deepsleep_suspend()
2350 ath6kl_cfg80211_stop_all(ar); ath6kl_cfg80211_deepsleep_suspend()
2353 ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode; ath6kl_cfg80211_deepsleep_suspend()
2355 ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER); ath6kl_cfg80211_deepsleep_suspend()
2360 ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_cfg80211_deepsleep_suspend()
2367 ath6kl_tx_data_cleanup(ar); ath6kl_cfg80211_deepsleep_suspend()
2369 ret = ath6kl_cfg80211_host_sleep(ar, vif); ath6kl_cfg80211_deepsleep_suspend()
2376 static int ath6kl_cfg80211_deepsleep_resume(struct ath6kl *ar) ath6kl_cfg80211_deepsleep_resume() argument
2381 vif = ath6kl_vif_first(ar); ath6kl_cfg80211_deepsleep_resume()
2386 if (ar->wmi->pwr_mode != ar->wmi->saved_pwr_mode) { ath6kl_cfg80211_deepsleep_resume()
2387 ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, ath6kl_cfg80211_deepsleep_resume()
2388 ar->wmi->saved_pwr_mode); ath6kl_cfg80211_deepsleep_resume()
2393 ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_cfg80211_deepsleep_resume()
2398 ar->state = ATH6KL_STATE_ON; ath6kl_cfg80211_deepsleep_resume()
2401 ret = ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_cfg80211_deepsleep_resume()
2409 int ath6kl_cfg80211_suspend(struct ath6kl *ar, ath6kl_cfg80211_suspend() argument
2423 ath6kl_tx_data_cleanup(ar); ath6kl_cfg80211_suspend()
2425 prev_state = ar->state; ath6kl_cfg80211_suspend()
2427 ret = ath6kl_wow_suspend(ar, wow); ath6kl_cfg80211_suspend()
2429 ar->state = prev_state; ath6kl_cfg80211_suspend()
2433 ar->state = ATH6KL_STATE_WOW; ath6kl_cfg80211_suspend()
2440 ret = ath6kl_cfg80211_deepsleep_suspend(ar); ath6kl_cfg80211_suspend()
2446 ar->state = ATH6KL_STATE_DEEPSLEEP; ath6kl_cfg80211_suspend()
2452 ath6kl_cfg80211_stop_all(ar); ath6kl_cfg80211_suspend()
2454 if (ar->state == ATH6KL_STATE_OFF) { ath6kl_cfg80211_suspend()
2462 ret = ath6kl_init_hw_stop(ar); ath6kl_cfg80211_suspend()
2468 ar->state = ATH6KL_STATE_CUTPOWER; ath6kl_cfg80211_suspend()
2476 list_for_each_entry(vif, &ar->vif_list, list) ath6kl_cfg80211_suspend()
2483 int ath6kl_cfg80211_resume(struct ath6kl *ar) ath6kl_cfg80211_resume() argument
2487 switch (ar->state) { ath6kl_cfg80211_resume()
2491 ret = ath6kl_wow_resume(ar); ath6kl_cfg80211_resume()
2502 ret = ath6kl_cfg80211_deepsleep_resume(ar); ath6kl_cfg80211_resume()
2512 ret = ath6kl_init_hw_start(ar); ath6kl_cfg80211_resume()
2533 struct ath6kl *ar = wiphy_priv(wiphy); __ath6kl_cfg80211_suspend() local
2535 ath6kl_recovery_suspend(ar); __ath6kl_cfg80211_suspend()
2537 return ath6kl_hif_suspend(ar, wow); __ath6kl_cfg80211_suspend()
2542 struct ath6kl *ar = wiphy_priv(wiphy); __ath6kl_cfg80211_resume() local
2545 err = ath6kl_hif_resume(ar); __ath6kl_cfg80211_resume()
2549 ath6kl_recovery_resume(ar); __ath6kl_cfg80211_resume()
2570 void ath6kl_check_wow_status(struct ath6kl *ar) ath6kl_check_wow_status() argument
2572 if (ar->state == ATH6KL_STATE_SUSPENDING) ath6kl_check_wow_status()
2575 if (ar->state == ATH6KL_STATE_WOW) ath6kl_check_wow_status()
2576 ath6kl_cfg80211_resume(ar); ath6kl_check_wow_status()
2581 void ath6kl_check_wow_status(struct ath6kl *ar) ath6kl_check_wow_status() argument
2603 return ath6kl_wmi_set_htcap_cmd(vif->ar->wmi, vif->fw_vif_idx, ath6kl_set_htcap()
2609 struct wiphy *wiphy = vif->ar->wiphy; ath6kl_restore_htcap()
2635 struct ath6kl *ar = vif->ar; ath6kl_set_ap_probe_resp_ies() local
2662 ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_set_ap_probe_resp_ies()
2671 struct ath6kl *ar = vif->ar; ath6kl_set_ies() local
2675 res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_set_ies()
2689 res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_set_ies()
2754 struct ath6kl *ar = ath6kl_priv(dev); ath6kl_start_ap() local
2776 ar->ap_mode_bkey.valid = false; ath6kl_start_ap()
2778 ret = ath6kl_wmi_ap_set_beacon_intvl_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_start_ap()
2784 ret = ath6kl_wmi_ap_set_dtim_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_start_ap()
2806 res = ath6kl_wmi_ap_hidden_ssid(ar->wmi, vif->fw_vif_idx, hidden); ath6kl_start_ap()
2889 res = ath6kl_wmi_ap_set_apsd(ar->wmi, vif->fw_vif_idx, true); ath6kl_start_ap()
2907 ar->fw_capabilities)) ath6kl_start_ap()
2911 res = ath6kl_wmi_set_inact_period(ar->wmi, vif->fw_vif_idx, ath6kl_start_ap()
2930 ar->fw_capabilities)) { ath6kl_start_ap()
2931 res = ath6kl_wmi_set_ie_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_start_ap()
2941 res = ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx, &p); ath6kl_start_ap()
2964 struct ath6kl *ar = ath6kl_priv(dev); ath6kl_stop_ap() local
2972 ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx); ath6kl_stop_ap()
2984 struct ath6kl *ar = ath6kl_priv(dev); ath6kl_del_station() local
2988 return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx, WMI_AP_DEAUTH, ath6kl_del_station()
2996 struct ath6kl *ar = ath6kl_priv(dev); ath6kl_change_station() local
3009 return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx, ath6kl_change_station()
3011 return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx, ath6kl_change_station()
3022 struct ath6kl *ar = ath6kl_priv(vif->ndev); ath6kl_remain_on_channel() local
3034 return ath6kl_wmi_remain_on_chnl_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_remain_on_channel()
3043 struct ath6kl *ar = ath6kl_priv(vif->ndev); ath6kl_cancel_remain_on_channel() local
3049 return ath6kl_wmi_cancel_remain_on_chnl_cmd(ar->wmi, vif->fw_vif_idx); ath6kl_cancel_remain_on_channel()
3056 struct ath6kl *ar = vif->ar; ath6kl_send_go_probe_resp() local
3083 ret = ath6kl_wmi_send_probe_response_cmd(ar->wmi, vif->fw_vif_idx, freq, ath6kl_send_go_probe_resp()
3103 struct ath6kl *ar = vif->ar; ath6kl_mgmt_powersave_ap() local
3141 ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_mgmt_powersave_ap()
3178 struct ath6kl *ar = ath6kl_priv(vif->ndev); ath6kl_mgmt_tx() local
3230 return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id, freq, ath6kl_mgmt_tx()
3256 struct ath6kl *ar = ath6kl_priv(dev); ath6kl_cfg80211_sscan_start() local
3270 if (ar->state != ATH6KL_STATE_ON) ath6kl_cfg80211_sscan_start()
3278 ret = ath6kl_set_probed_ssids(ar, vif, request->ssids, ath6kl_cfg80211_sscan_start()
3286 ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_cfg80211_sscan_start()
3291 ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_cfg80211_sscan_start()
3298 ar->fw_capabilities)) { ath6kl_cfg80211_sscan_start()
3306 ret = ath6kl_wmi_set_rssi_filter_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_cfg80211_sscan_start()
3317 ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_cfg80211_sscan_start()
3322 ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_cfg80211_sscan_start()
3331 ret = ath6kl_wmi_enable_sched_scan_cmd(ar->wmi, vif->fw_vif_idx, true); ath6kl_cfg80211_sscan_start()
3359 struct ath6kl *ar = ath6kl_priv(dev); ath6kl_cfg80211_set_bitrate() local
3362 return ath6kl_wmi_set_bitrate_mask(ar->wmi, vif->fw_vif_idx, ath6kl_cfg80211_set_bitrate()
3370 struct ath6kl *ar = ath6kl_priv(dev); ath6kl_cfg80211_set_txe_config() local
3374 !test_bit(ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY, ar->fw_capabilities)) ath6kl_cfg80211_set_txe_config()
3383 return ath6kl_wmi_set_txe_notify(ar->wmi, vif->fw_vif_idx, ath6kl_cfg80211_set_txe_config()
3474 if (vif->ar->state != ATH6KL_STATE_RECOVERY && ath6kl_cfg80211_stop()
3477 ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx); ath6kl_cfg80211_stop()
3488 if (vif->ar->state != ATH6KL_STATE_RECOVERY && ath6kl_cfg80211_stop()
3489 ath6kl_wmi_scanparams_cmd(vif->ar->wmi, vif->fw_vif_idx, 0xFFFF, ath6kl_cfg80211_stop()
3496 void ath6kl_cfg80211_stop_all(struct ath6kl *ar) ath6kl_cfg80211_stop_all() argument
3500 vif = ath6kl_vif_first(ar); ath6kl_cfg80211_stop_all()
3501 if (!vif && ar->state != ATH6KL_STATE_RECOVERY) { ath6kl_cfg80211_stop_all()
3503 ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode; ath6kl_cfg80211_stop_all()
3505 if (ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER) != 0) ath6kl_cfg80211_stop_all()
3511 * FIXME: we should take ar->list_lock to protect changes in the ath6kl_cfg80211_stop_all()
3515 list_for_each_entry(vif, &ar->vif_list, list) ath6kl_cfg80211_stop_all()
3522 struct ath6kl *ar = wiphy_priv(wiphy); ath6kl_cfg80211_reg_notify() local
3536 ret = ath6kl_wmi_set_regdomain_cmd(ar->wmi, request->alpha2); ath6kl_cfg80211_reg_notify()
3553 ret = ath6kl_wmi_beginscan_cmd(ar->wmi, 0, WMI_LONG_SCAN, false, ath6kl_cfg80211_reg_notify()
3617 struct ath6kl *ar = vif->ar; ath6kl_cfg80211_vif_cleanup() local
3622 ar->avail_idx_map |= BIT(vif->fw_vif_idx); ath6kl_cfg80211_vif_cleanup()
3625 ar->ibss_if_active = false; ath6kl_cfg80211_vif_cleanup()
3634 ar->num_vif--; ath6kl_cfg80211_vif_cleanup()
3637 struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name, ath6kl_interface_add() argument
3651 vif->wdev.wiphy = ar->wiphy; ath6kl_interface_add()
3652 vif->ar = ar; ath6kl_interface_add()
3666 memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN); ath6kl_interface_add()
3671 ar->fw_capabilities)) ath6kl_interface_add()
3685 ar->avail_idx_map &= ~BIT(fw_vif_idx); ath6kl_interface_add()
3688 ar->wlan_pwr_state = WLAN_POWER_STATE_ON; ath6kl_interface_add()
3691 ar->ibss_if_active = true; ath6kl_interface_add()
3693 spin_lock_bh(&ar->list_lock); ath6kl_interface_add()
3694 list_add_tail(&vif->list, &ar->vif_list); ath6kl_interface_add()
3695 spin_unlock_bh(&ar->list_lock); ath6kl_interface_add()
3719 int ath6kl_cfg80211_init(struct ath6kl *ar) ath6kl_cfg80211_init() argument
3721 struct wiphy *wiphy = ar->wiphy; ath6kl_cfg80211_init()
3730 set_wiphy_dev(wiphy, ar->dev); ath6kl_cfg80211_init()
3735 if (ar->p2p) { ath6kl_cfg80211_init()
3741 test_bit(ATH6KL_FW_CAPABILITY_REGDOMAIN, ar->fw_capabilities)) { ath6kl_cfg80211_init()
3743 ar->wiphy->features |= NL80211_FEATURE_CELL_BASE_REG_HINTS; ath6kl_cfg80211_init()
3751 ar->fw_capabilities)) ath6kl_cfg80211_init()
3755 switch (ar->hw.cap) { ath6kl_cfg80211_init()
3784 ar->fw_capabilities))) { ath6kl_cfg80211_init()
3792 ar->fw_capabilities)) { ath6kl_cfg80211_init()
3818 ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM | ath6kl_cfg80211_init()
3823 if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN_V2, ar->fw_capabilities)) ath6kl_cfg80211_init()
3824 ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; ath6kl_cfg80211_init()
3827 ar->fw_capabilities)) ath6kl_cfg80211_init()
3828 ar->wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER; ath6kl_cfg80211_init()
3830 ar->wiphy->probe_resp_offload = ath6kl_cfg80211_init()
3841 ar->wiphy_registered = true; ath6kl_cfg80211_init()
3846 void ath6kl_cfg80211_cleanup(struct ath6kl *ar) ath6kl_cfg80211_cleanup() argument
3848 wiphy_unregister(ar->wiphy); ath6kl_cfg80211_cleanup()
3850 ar->wiphy_registered = false; ath6kl_cfg80211_cleanup()
3855 struct ath6kl *ar; ath6kl_cfg80211_create() local
3866 ar = wiphy_priv(wiphy); ath6kl_cfg80211_create()
3867 ar->wiphy = wiphy; ath6kl_cfg80211_create()
3869 return ar; ath6kl_cfg80211_create()
3872 /* Note: ar variable must not be accessed after calling this! */ ath6kl_cfg80211_destroy()
3873 void ath6kl_cfg80211_destroy(struct ath6kl *ar) ath6kl_cfg80211_destroy() argument
3878 kfree(ar->sta_list[i].aggr_conn); ath6kl_cfg80211_destroy()
3880 wiphy_free(ar->wiphy); ath6kl_cfg80211_destroy()
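ath6kl_cfg80211_create() above pulls its per-device context out of wiphy_priv() and stores the back-pointer in ar->wiphy, and ath6kl_cfg80211_destroy() releases everything with a single wiphy_free(). That works because the private area lives in the same allocation, directly after the core object. A compilable sketch of the idiom under invented names (wrapper, wrapper_priv, driver_ctx); the real cfg80211 helpers also take care of alignment, which the long member only approximates here:

#include <stdlib.h>

struct wrapper {
        long core_state;        /* long keeps the trailing area aligned */
};

/* One allocation holds the core object plus priv_size driver bytes. */
static struct wrapper *wrapper_new(size_t priv_size)
{
        return calloc(1, sizeof(struct wrapper) + priv_size);
}

/* The driver context starts right after the core struct. */
static void *wrapper_priv(struct wrapper *w)
{
        return w + 1;
}

struct driver_ctx {
        struct wrapper *w;      /* back-pointer, like ar->wiphy = wiphy */
        int channel;
};

int main(void)
{
        struct wrapper *w = wrapper_new(sizeof(struct driver_ctx));
        struct driver_ctx *ctx;

        if (!w)
                return 1;
        ctx = wrapper_priv(w);
        ctx->w = w;
        ctx->channel = 6;
        free(w);        /* one free releases both, like wiphy_free() */
        return 0;
}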
H A Dtxrx.c
45 struct ath6kl *ar = ath6kl_priv(dev); ath6kl_ibss_map_epid() local
57 for (i = 0; i < ar->node_num; i++) { ath6kl_ibss_map_epid()
58 if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr, ath6kl_ibss_map_epid()
61 ar->node_map[i].tx_pend++; ath6kl_ibss_map_epid()
62 return ar->node_map[i].ep_id; ath6kl_ibss_map_epid()
65 if ((ep_map == -1) && !ar->node_map[i].tx_pend) ath6kl_ibss_map_epid()
70 ep_map = ar->node_num; ath6kl_ibss_map_epid()
71 ar->node_num++; ath6kl_ibss_map_epid()
72 if (ar->node_num > MAX_NODE_NUM) ath6kl_ibss_map_epid()
76 memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN); ath6kl_ibss_map_epid()
79 if (!ar->tx_pending[i]) { ath6kl_ibss_map_epid()
80 ar->node_map[ep_map].ep_id = i; ath6kl_ibss_map_epid()
89 ar->node_map[ep_map].ep_id = ar->next_ep_id; ath6kl_ibss_map_epid()
90 ar->next_ep_id++; ath6kl_ibss_map_epid()
91 if (ar->next_ep_id > ENDPOINT_5) ath6kl_ibss_map_epid()
92 ar->next_ep_id = ENDPOINT_2; ath6kl_ibss_map_epid()
97 ar->node_map[ep_map].tx_pend++; ath6kl_ibss_map_epid()
99 return ar->node_map[ep_map].ep_id; ath6kl_ibss_map_epid()
107 struct ath6kl *ar = vif->ar; ath6kl_process_uapsdq() local
166 ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi, ath6kl_process_uapsdq()
181 struct ath6kl *ar = vif->ar; ath6kl_process_psq() local
203 ath6kl_wmi_set_pvb_cmd(ar->wmi, ath6kl_process_psq()
215 struct ath6kl *ar = vif->ar; ath6kl_powersave_ap() local
222 if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) { ath6kl_powersave_ap()
236 spin_lock_bh(&ar->mcastpsq_lock); ath6kl_powersave_ap()
238 skb_queue_empty(&ar->mcastpsq); ath6kl_powersave_ap()
239 skb_queue_tail(&ar->mcastpsq, skb); ath6kl_powersave_ap()
240 spin_unlock_bh(&ar->mcastpsq_lock); ath6kl_powersave_ap()
248 ath6kl_wmi_set_pvb_cmd(ar->wmi, ath6kl_powersave_ap()
258 spin_lock_bh(&ar->mcastpsq_lock); ath6kl_powersave_ap()
259 if (!skb_queue_empty(&ar->mcastpsq)) ath6kl_powersave_ap()
261 spin_unlock_bh(&ar->mcastpsq_lock); ath6kl_powersave_ap()
289 struct ath6kl *ar = devt; ath6kl_control_tx() local
295 if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) { ath6kl_control_tx()
306 spin_lock_bh(&ar->lock); ath6kl_control_tx()
312 if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) { ath6kl_control_tx()
321 cookie = ath6kl_alloc_cookie(ar); ath6kl_control_tx()
325 spin_unlock_bh(&ar->lock); ath6kl_control_tx()
330 ar->tx_pending[eid]++; ath6kl_control_tx()
332 if (eid != ar->ctrl_ep) ath6kl_control_tx()
333 ar->total_tx_data_pend++; ath6kl_control_tx()
335 spin_unlock_bh(&ar->lock); ath6kl_control_tx()
347 ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt); ath6kl_control_tx()
358 struct ath6kl *ar = ath6kl_priv(dev); ath6kl_data_tx() local
381 if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON)) ath6kl_data_tx()
384 if (!test_bit(WMI_READY, &ar->flag)) ath6kl_data_tx()
393 if (test_bit(WMI_ENABLED, &ar->flag)) { ath6kl_data_tx()
413 if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) { ath6kl_data_tx()
432 ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb, ath6kl_data_tx()
444 ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags)) ath6kl_data_tx()
448 ret = ath6kl_wmi_implicit_create_pstream(ar->wmi, ath6kl_data_tx()
458 spin_lock_bh(&ar->lock); ath6kl_data_tx()
463 eid = ar->ac2ep_map[ac]; ath6kl_data_tx()
467 spin_unlock_bh(&ar->lock); ath6kl_data_tx()
472 cookie = ath6kl_alloc_cookie(ar); ath6kl_data_tx()
475 spin_unlock_bh(&ar->lock); ath6kl_data_tx()
480 ar->tx_pending[eid]++; ath6kl_data_tx()
481 ar->total_tx_data_pend++; ath6kl_data_tx()
483 spin_unlock_bh(&ar->lock); ath6kl_data_tx()
516 ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt); ath6kl_data_tx()
532 struct ath6kl *ar = devt; ath6kl_indicate_tx_activity() local
536 eid = ar->ac2ep_map[traffic_class]; ath6kl_indicate_tx_activity()
538 if (!test_bit(WMI_ENABLED, &ar->flag)) ath6kl_indicate_tx_activity()
541 spin_lock_bh(&ar->lock); ath6kl_indicate_tx_activity()
543 ar->ac_stream_active[traffic_class] = active; ath6kl_indicate_tx_activity()
550 if (ar->ac_stream_pri_map[traffic_class] > ath6kl_indicate_tx_activity()
551 ar->hiac_stream_active_pri) ath6kl_indicate_tx_activity()
553 ar->hiac_stream_active_pri = ath6kl_indicate_tx_activity()
554 ar->ac_stream_pri_map[traffic_class]; ath6kl_indicate_tx_activity()
561 if (ar->hiac_stream_active_pri == ath6kl_indicate_tx_activity()
562 ar->ac_stream_pri_map[traffic_class]) { ath6kl_indicate_tx_activity()
568 ar->hiac_stream_active_pri = 0; ath6kl_indicate_tx_activity()
571 if (ar->ac_stream_active[i] && ath6kl_indicate_tx_activity()
572 (ar->ac_stream_pri_map[i] > ath6kl_indicate_tx_activity()
573 ar->hiac_stream_active_pri)) ath6kl_indicate_tx_activity()
578 ar->hiac_stream_active_pri = ath6kl_indicate_tx_activity()
579 ar->ac_stream_pri_map[i]; ath6kl_indicate_tx_activity()
584 spin_unlock_bh(&ar->lock); ath6kl_indicate_tx_activity()
588 ath6kl_htc_activity_changed(ar->htc_target, eid, active); ath6kl_indicate_tx_activity()
594 struct ath6kl *ar = target->dev->ar; ath6kl_tx_queue_full() local
599 if (endpoint == ar->ctrl_ep) { ath6kl_tx_queue_full()
606 set_bit(WMI_CTRL_EP_FULL, &ar->flag); ath6kl_tx_queue_full()
608 ath6kl_recovery_err_notify(ar, ATH6KL_FW_EP_FULL); ath6kl_tx_queue_full()
619 if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] < ath6kl_tx_queue_full()
620 ar->hiac_stream_active_pri && ath6kl_tx_queue_full()
621 ar->cookie_count <= ath6kl_tx_queue_full()
630 spin_lock_bh(&ar->list_lock); ath6kl_tx_queue_full()
631 list_for_each_entry(vif, &ar->vif_list, list) { ath6kl_tx_queue_full()
634 spin_unlock_bh(&ar->list_lock); ath6kl_tx_queue_full()
642 spin_unlock_bh(&ar->list_lock); ath6kl_tx_queue_full()
651 struct ath6kl *ar = vif->ar; ath6kl_tx_clear_node_map() local
657 if (!ar->ibss_ps_enable) ath6kl_tx_clear_node_map()
660 if (eid == ar->ctrl_ep) ath6kl_tx_clear_node_map()
667 ar->node_map[map_no].tx_pend--; ath6kl_tx_clear_node_map()
669 if (ar->node_map[map_no].tx_pend) ath6kl_tx_clear_node_map()
672 if (map_no != (ar->node_num - 1)) ath6kl_tx_clear_node_map()
675 for (i = ar->node_num; i > 0; i--) { ath6kl_tx_clear_node_map()
676 if (ar->node_map[i - 1].tx_pend) ath6kl_tx_clear_node_map()
679 memset(&ar->node_map[i - 1], 0, ath6kl_tx_clear_node_map()
681 ar->node_num--; ath6kl_tx_clear_node_map()
688 struct ath6kl *ar = target->dev->ar; ath6kl_tx_complete() local
704 spin_lock_bh(&ar->lock); ath6kl_tx_complete()
727 ath6kl_free_cookie(ar, ath6kl_cookie); ath6kl_tx_complete()
734 ath6kl_free_cookie(ar, ath6kl_cookie); ath6kl_tx_complete()
738 ar->tx_pending[eid]--; ath6kl_tx_complete()
740 if (eid != ar->ctrl_ep) ath6kl_tx_complete()
741 ar->total_tx_data_pend--; ath6kl_tx_complete()
743 if (eid == ar->ctrl_ep) { ath6kl_tx_complete()
744 if (test_bit(WMI_CTRL_EP_FULL, &ar->flag)) ath6kl_tx_complete()
745 clear_bit(WMI_CTRL_EP_FULL, &ar->flag); ath6kl_tx_complete()
747 if (ar->tx_pending[eid] == 0) ath6kl_tx_complete()
751 if (eid == ar->ctrl_ep) { ath6kl_tx_complete()
759 vif = ath6kl_get_vif_by_index(ar, if_idx); ath6kl_tx_complete()
761 ath6kl_free_cookie(ar, ath6kl_cookie); ath6kl_tx_complete()
792 ath6kl_free_cookie(ar, ath6kl_cookie); ath6kl_tx_complete()
798 spin_unlock_bh(&ar->lock); ath6kl_tx_complete()
803 spin_lock_bh(&ar->list_lock); ath6kl_tx_complete()
804 list_for_each_entry(vif, &ar->vif_list, list) { ath6kl_tx_complete()
807 spin_unlock_bh(&ar->list_lock); ath6kl_tx_complete()
809 spin_lock_bh(&ar->list_lock); ath6kl_tx_complete()
812 spin_unlock_bh(&ar->list_lock); ath6kl_tx_complete()
815 wake_up(&ar->event_wq); ath6kl_tx_complete()
820 void ath6kl_tx_data_cleanup(struct ath6kl *ar) ath6kl_tx_data_cleanup() argument
826 ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i], ath6kl_tx_data_cleanup()
881 struct ath6kl *ar = target->dev->ar; ath6kl_rx_refill() local
889 ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint); ath6kl_rx_refill()
918 ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue); ath6kl_rx_refill()
921 void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count) ath6kl_refill_amsdu_rxbufs() argument
941 spin_lock_bh(&ar->lock); ath6kl_refill_amsdu_rxbufs()
942 list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue); ath6kl_refill_amsdu_rxbufs()
943 spin_unlock_bh(&ar->lock); ath6kl_refill_amsdu_rxbufs()
956 struct ath6kl *ar = target->dev->ar; ath6kl_alloc_amsdu_rxbuf() local
968 spin_lock_bh(&ar->lock); ath6kl_alloc_amsdu_rxbuf()
970 if (list_empty(&ar->amsdu_rx_buffer_queue)) { ath6kl_alloc_amsdu_rxbuf()
971 spin_unlock_bh(&ar->lock); ath6kl_alloc_amsdu_rxbuf()
976 packet = list_first_entry(&ar->amsdu_rx_buffer_queue, ath6kl_alloc_amsdu_rxbuf()
979 list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue) ath6kl_alloc_amsdu_rxbuf()
983 spin_unlock_bh(&ar->lock); ath6kl_alloc_amsdu_rxbuf()
990 ath6kl_refill_amsdu_rxbufs(ar, refill_cnt); ath6kl_alloc_amsdu_rxbuf()
1240 struct ath6kl *ar = vif->ar; ath6kl_uapsd_trigger_frame_rx() local
1298 ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi, ath6kl_uapsd_trigger_frame_rx()
1308 struct ath6kl *ar = target->dev->ar; ath6kl_rx() local
1328 "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d", ath6kl_rx()
1329 __func__, ar, ept, skb, packet->buf, ath6kl_rx()
1343 if (ept == ar->ctrl_ep) { ath6kl_rx()
1344 if (test_bit(WMI_ENABLED, &ar->flag)) { ath6kl_rx()
1345 ath6kl_check_wow_status(ar); ath6kl_rx()
1346 ath6kl_wmi_control_rx(ar->wmi, skb); ath6kl_rx()
1356 vif = ath6kl_get_vif_by_index(ar, if_idx); ath6kl_rx()
1375 if (!test_bit(WMI_ENABLED, &ar->flag)) { ath6kl_rx()
1382 ath6kl_check_wow_status(ar); ath6kl_rx()
1473 ath6kl_wmi_send_mgmt_cmd(ar->wmi, ath6kl_rx()
1502 ar->wmi, ath6kl_rx()
1507 ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_rx()
1551 status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb); ath6kl_rx()
1586 if (conn && ar->intra_bss) { ath6kl_rx()
1589 } else if (conn && !ar->intra_bss) { ath6kl_rx()
1714 sta = ath6kl_find_sta_by_aid(vif->ar, aid); aggr_recv_addba_req_evt()
1810 sta = ath6kl_find_sta_by_aid(vif->ar, aid); aggr_recv_delba_req_evt()
1847 void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar) ath6kl_cleanup_amsdu_rxbufs() argument
1851 spin_lock_bh(&ar->lock); ath6kl_cleanup_amsdu_rxbufs()
1852 if (list_empty(&ar->amsdu_rx_buffer_queue)) { ath6kl_cleanup_amsdu_rxbufs()
1853 spin_unlock_bh(&ar->lock); ath6kl_cleanup_amsdu_rxbufs()
1857 list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue, ath6kl_cleanup_amsdu_rxbufs()
1860 spin_unlock_bh(&ar->lock); ath6kl_cleanup_amsdu_rxbufs()
1862 spin_lock_bh(&ar->lock); ath6kl_cleanup_amsdu_rxbufs()
1865 spin_unlock_bh(&ar->lock); ath6kl_cleanup_amsdu_rxbufs()
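The txrx.c fragments above show the IBSS power-save bookkeeping: ath6kl_ibss_map_epid() binds each destination MAC to a node_map slot with a pending-tx count, and ath6kl_tx_clear_node_map() trims drained slots from the tail so node_num tracks the highest live entry. A simplified standalone model of that table follows; it deliberately omits the driver's reuse of empty interior slots and the ENDPOINT_2..ENDPOINT_5 round-robin, and the names are illustrative only.

#include <string.h>

#define MAX_NODES 8
#define ETH_ALEN 6

struct node {
        unsigned char mac[ETH_ALEN];
        int tx_pend;
};

static struct node node_map[MAX_NODES];
static int node_num;

/* Find (or create) the slot for a destination MAC and count one
 * in-flight frame against it, like ath6kl_ibss_map_epid(). */
static int node_get(const unsigned char *mac)
{
        int i;

        for (i = 0; i < node_num; i++) {
                if (!memcmp(mac, node_map[i].mac, ETH_ALEN)) {
                        node_map[i].tx_pend++;
                        return i;
                }
        }

        if (node_num == MAX_NODES)
                return -1;      /* the driver recycles slots instead */

        memcpy(node_map[node_num].mac, mac, ETH_ALEN);
        node_map[node_num].tx_pend = 1;
        return node_num++;
}

/* Drop one in-flight frame; trim fully drained slots from the tail,
 * like ath6kl_tx_clear_node_map(). */
static void node_put(int i)
{
        if (--node_map[i].tx_pend)
                return;

        while (node_num > 0 && node_map[node_num - 1].tx_pend == 0) {
                memset(&node_map[node_num - 1], 0, sizeof(struct node));
                node_num--;
        }
}

int main(void)
{
        unsigned char peer[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
        int slot = node_get(peer);

        if (slot >= 0)
                node_put(slot);
        return 0;
}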
H A Dsdio.c
45 struct ath6kl *ar; member in struct:ath6kl_sdio
78 static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar) ath6kl_sdio_priv() argument
80 return ar->hif_priv; ath6kl_sdio_priv()
94 static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar) ath6kl_sdio_set_mbox_info() argument
96 struct ath6kl_mbox_info *mbox_info = &ar->mbox_info; ath6kl_sdio_set_mbox_info()
337 scat_req->complete(ar_sdio->ar->htc_target, scat_req); ath6kl_sdio_scat_rw()
401 hif_scatter_req_add(ar_sdio->ar, s_req); ath6kl_sdio_alloc_prep_scat_req()
407 static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf, ath6kl_sdio_read_write_sync() argument
410 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); ath6kl_sdio_read_write_sync()
451 status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address, __ath6kl_sdio_write_async()
492 status = ath6kl_hif_intr_bh_handler(ar_sdio->ar); ath6kl_sdio_irq_handler()
501 static int ath6kl_sdio_power_on(struct ath6kl *ar) ath6kl_sdio_power_on() argument
503 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); ath6kl_sdio_power_on()
534 static int ath6kl_sdio_power_off(struct ath6kl *ar) ath6kl_sdio_power_off() argument
536 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); ath6kl_sdio_power_off()
557 static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer, ath6kl_sdio_write_async() argument
561 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); ath6kl_sdio_write_async()
578 queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work); ath6kl_sdio_write_async()
583 static void ath6kl_sdio_irq_enable(struct ath6kl *ar) ath6kl_sdio_irq_enable() argument
585 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); ath6kl_sdio_irq_enable()
598 static bool ath6kl_sdio_is_on_irq(struct ath6kl *ar) ath6kl_sdio_is_on_irq() argument
600 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); ath6kl_sdio_is_on_irq()
605 static void ath6kl_sdio_irq_disable(struct ath6kl *ar) ath6kl_sdio_irq_disable() argument
607 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); ath6kl_sdio_irq_disable()
616 ath6kl_sdio_is_on_irq(ar)); ath6kl_sdio_irq_disable()
630 static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar) ath6kl_sdio_scatter_req_get() argument
632 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); ath6kl_sdio_scatter_req_get()
650 static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar, ath6kl_sdio_scatter_req_add() argument
653 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); ath6kl_sdio_scatter_req_add()
663 static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar, ath6kl_sdio_async_rw_scatter() argument
666 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); ath6kl_sdio_async_rw_scatter()
683 queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work); ath6kl_sdio_async_rw_scatter()
690 static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar) ath6kl_sdio_cleanup_scatter() argument
692 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); ath6kl_sdio_cleanup_scatter()
718 static int ath6kl_sdio_enable_scatter(struct ath6kl *ar) ath6kl_sdio_enable_scatter() argument
720 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); ath6kl_sdio_enable_scatter()
721 struct htc_target *target = ar->htc_target; ath6kl_sdio_enable_scatter()
753 ath6kl_sdio_cleanup_scatter(ar); ath6kl_sdio_enable_scatter()
765 ath6kl_sdio_cleanup_scatter(ar); ath6kl_sdio_enable_scatter()
781 static int ath6kl_sdio_config(struct ath6kl *ar) ath6kl_sdio_config() argument
783 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); ath6kl_sdio_config()
820 static int ath6kl_set_sdio_pm_caps(struct ath6kl *ar) ath6kl_set_sdio_pm_caps() argument
822 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); ath6kl_set_sdio_pm_caps()
849 static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow) ath6kl_sdio_suspend() argument
851 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); ath6kl_sdio_suspend()
857 if (ar->suspend_mode == WLAN_POWER_STATE_WOW || ath6kl_sdio_suspend()
858 (!ar->suspend_mode && wow)) { ath6kl_sdio_suspend()
859 ret = ath6kl_set_sdio_pm_caps(ar); ath6kl_sdio_suspend()
863 ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow); ath6kl_sdio_suspend()
868 (!ar->wow_suspend_mode || ath6kl_sdio_suspend()
869 ar->wow_suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP)) ath6kl_sdio_suspend()
872 ar->wow_suspend_mode == WLAN_POWER_STATE_CUT_PWR) ath6kl_sdio_suspend()
878 if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP || ath6kl_sdio_suspend()
879 !ar->suspend_mode || try_deepsleep) { ath6kl_sdio_suspend()
901 ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP, ath6kl_sdio_suspend()
913 return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER, NULL); ath6kl_sdio_suspend()
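The suspend path above selects one of three power states. Restated as a helper (a sketch only: the real ath6kl_sdio_suspend() also programs SDIO PM capabilities and, when WoW is not usable, falls back through ar->wow_suspend_mode to deep sleep or cut power):

	static enum ath6kl_cfg_suspend_mode
	pick_suspend_mode_sketch(struct ath6kl *ar, struct cfg80211_wowlan *wow)
	{
		/* explicit WoW request, or no preference but a WoW config given */
		if (ar->suspend_mode == WLAN_POWER_STATE_WOW ||
		    (!ar->suspend_mode && wow))
			return ATH6KL_CFG_SUSPEND_WOW;

		/* deep sleep is the default when nothing else is requested */
		if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP ||
		    !ar->suspend_mode)
			return ATH6KL_CFG_SUSPEND_DEEPSLEEP;

		return ATH6KL_CFG_SUSPEND_CUTPOWER;
	}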
916 static int ath6kl_sdio_resume(struct ath6kl *ar) ath6kl_sdio_resume() argument
918 switch (ar->state) { ath6kl_sdio_resume()
925 ath6kl_sdio_config(ar); ath6kl_sdio_resume()
947 ath6kl_cfg80211_resume(ar); ath6kl_sdio_resume()
953 static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr) ath6kl_set_addrwin_reg() argument
976 status = ath6kl_sdio_read_write_sync(ar, reg_addr + i, addr_val, ath6kl_set_addrwin_reg()
994 status = ath6kl_sdio_read_write_sync(ar, reg_addr, (u8 *)(&addr), ath6kl_set_addrwin_reg()
1006 static int ath6kl_sdio_diag_read32(struct ath6kl *ar, u32 address, u32 *data) ath6kl_sdio_diag_read32() argument
1011 status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS, ath6kl_sdio_diag_read32()
1018 status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS, ath6kl_sdio_diag_read32()
1029 static int ath6kl_sdio_diag_write32(struct ath6kl *ar, u32 address, ath6kl_sdio_diag_write32() argument
1036 status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS, ath6kl_sdio_diag_write32()
1045 return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS, ath6kl_sdio_diag_write32()
1049 static int ath6kl_sdio_bmi_credits(struct ath6kl *ar) ath6kl_sdio_bmi_credits() argument
1055 ar->bmi.cmd_credits = 0; ath6kl_sdio_bmi_credits()
1061 while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) { ath6kl_sdio_bmi_credits()
1068 ret = ath6kl_sdio_read_write_sync(ar, addr, ath6kl_sdio_bmi_credits()
1069 (u8 *)&ar->bmi.cmd_credits, 4, ath6kl_sdio_bmi_credits()
1080 ar->bmi.cmd_credits &= 0xFF; ath6kl_sdio_bmi_credits()
1083 if (!ar->bmi.cmd_credits) { ath6kl_sdio_bmi_credits()
1091 static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar) ath6kl_bmi_get_rx_lkahd() argument
1099 ret = ath6kl_sdio_read_write_sync(ar, ath6kl_bmi_get_rx_lkahd()
1120 static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len) ath6kl_sdio_bmi_write() argument
1125 ret = ath6kl_sdio_bmi_credits(ar); ath6kl_sdio_bmi_write()
1129 addr = ar->mbox_info.htc_addr; ath6kl_sdio_bmi_write()
1131 ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len, ath6kl_sdio_bmi_write()
1141 static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len) ath6kl_sdio_bmi_read() argument
1193 ret = ath6kl_bmi_get_rx_lkahd(ar); ath6kl_sdio_bmi_read()
1198 addr = ar->mbox_info.htc_addr; ath6kl_sdio_bmi_read()
1199 ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len, ath6kl_sdio_bmi_read()
1210 static void ath6kl_sdio_stop(struct ath6kl *ar) ath6kl_sdio_stop() argument
1212 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); ath6kl_sdio_stop()
1228 req->scat_req->complete(ar_sdio->ar->htc_target, ath6kl_sdio_stop()
1299 struct ath6kl *ar; ath6kl_sdio_probe() local
1339 ar = ath6kl_core_create(&ar_sdio->func->dev); ath6kl_sdio_probe()
1340 if (!ar) { ath6kl_sdio_probe()
1346 ar_sdio->ar = ar; ath6kl_sdio_probe()
1347 ar->hif_type = ATH6KL_HIF_TYPE_SDIO; ath6kl_sdio_probe()
1348 ar->hif_priv = ar_sdio; ath6kl_sdio_probe()
1349 ar->hif_ops = &ath6kl_sdio_ops; ath6kl_sdio_probe()
1350 ar->bmi.max_data_size = 256; ath6kl_sdio_probe()
1352 ath6kl_sdio_set_mbox_info(ar); ath6kl_sdio_probe()
1354 ret = ath6kl_sdio_config(ar); ath6kl_sdio_probe()
1360 ret = ath6kl_core_init(ar, ATH6KL_HTC_TYPE_MBOX); ath6kl_sdio_probe()
1369 ath6kl_core_destroy(ar_sdio->ar); ath6kl_sdio_probe()
1388 ath6kl_stop_txrx(ar_sdio->ar); ath6kl_sdio_remove()
1391 ath6kl_core_cleanup(ar_sdio->ar); ath6kl_sdio_remove()
1392 ath6kl_core_destroy(ar_sdio->ar); ath6kl_sdio_remove()
H A D debug.h:73 void ath6kl_debug_fwlog_event(struct ath6kl *ar, const void *buf, size_t len);
74 void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war);
75 int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf,
77 void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive);
78 void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout);
79 void ath6kl_debug_init(struct ath6kl *ar);
80 int ath6kl_debug_init_fs(struct ath6kl *ar);
81 void ath6kl_debug_cleanup(struct ath6kl *ar);
105 static inline void ath6kl_debug_fwlog_event(struct ath6kl *ar, ath6kl_debug_fwlog_event() argument
110 static inline void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war) ath6kl_debug_war() argument
114 static inline int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, ath6kl_debug_roam_tbl_event() argument
120 static inline void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive) ath6kl_debug_set_keepalive() argument
124 static inline void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, ath6kl_debug_set_disconnect_timeout() argument
129 static inline void ath6kl_debug_init(struct ath6kl *ar) ath6kl_debug_init() argument
133 static inline int ath6kl_debug_init_fs(struct ath6kl *ar) ath6kl_debug_init_fs() argument
138 static inline void ath6kl_debug_cleanup(struct ath6kl *ar) ath6kl_debug_cleanup() argument
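The declarations above and the empty static inlines after them are the two halves of a common kernel header idiom: when CONFIG_ATH6KL_DEBUG is off, every debug hook compiles to nothing, so call sites never need #ifdef guards. Condensed to a single hook:

	#ifdef CONFIG_ATH6KL_DEBUG
	void ath6kl_debug_init(struct ath6kl *ar);
	#else
	static inline void ath6kl_debug_init(struct ath6kl *ar)
	{
	}
	#endif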
H A D hif.h:231 struct ath6kl *ar; member in struct:ath6kl_device
235 int (*read_write_sync)(struct ath6kl *ar, u32 addr, u8 *buf,
237 int (*write_async)(struct ath6kl *ar, u32 address, u8 *buffer,
240 void (*irq_enable)(struct ath6kl *ar);
241 void (*irq_disable)(struct ath6kl *ar);
243 struct hif_scatter_req *(*scatter_req_get)(struct ath6kl *ar);
244 void (*scatter_req_add)(struct ath6kl *ar,
246 int (*enable_scatter)(struct ath6kl *ar);
247 int (*scat_req_rw) (struct ath6kl *ar,
249 void (*cleanup_scatter)(struct ath6kl *ar);
250 int (*suspend)(struct ath6kl *ar, struct cfg80211_wowlan *wow);
251 int (*resume)(struct ath6kl *ar);
252 int (*diag_read32)(struct ath6kl *ar, u32 address, u32 *value);
253 int (*diag_write32)(struct ath6kl *ar, u32 address, __le32 value);
254 int (*bmi_read)(struct ath6kl *ar, u8 *buf, u32 len);
255 int (*bmi_write)(struct ath6kl *ar, u8 *buf, u32 len);
256 int (*power_on)(struct ath6kl *ar);
257 int (*power_off)(struct ath6kl *ar);
258 void (*stop)(struct ath6kl *ar);
259 int (*pipe_send)(struct ath6kl *ar, u8 pipe, struct sk_buff *hdr_buf,
261 void (*pipe_get_default)(struct ath6kl *ar, u8 *pipe_ul, u8 *pipe_dl);
262 int (*pipe_map_service)(struct ath6kl *ar, u16 service_id, u8 *pipe_ul,
264 u16 (*pipe_get_free_queue_number)(struct ath6kl *ar, u8 pipe);
276 int ath6kl_hif_intr_bh_handler(struct ath6kl *ar);
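This ops table is the entire bus abstraction: each back end fills it and the core only ever calls through ar->hif_ops (wired up in the probe functions elsewhere on this page). A partial sketch of the SDIO side, pairing the hooks with the ath6kl_sdio_* functions listed earlier; the mapping is inferred from the names, not quoted from the file:

	static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
		.read_write_sync	= ath6kl_sdio_read_write_sync,
		.write_async		= ath6kl_sdio_write_async,
		.irq_enable		= ath6kl_sdio_irq_enable,
		.irq_disable		= ath6kl_sdio_irq_disable,
		.scatter_req_get	= ath6kl_sdio_scatter_req_get,
		.scatter_req_add	= ath6kl_sdio_scatter_req_add,
		.enable_scatter		= ath6kl_sdio_enable_scatter,
		.scat_req_rw		= ath6kl_sdio_async_rw_scatter,
		.cleanup_scatter	= ath6kl_sdio_cleanup_scatter,
		.suspend		= ath6kl_sdio_suspend,
		.resume			= ath6kl_sdio_resume,
		.power_on		= ath6kl_sdio_power_on,
		.power_off		= ath6kl_sdio_power_off,
		.stop			= ath6kl_sdio_stop,
		/* diag/bmi hooks omitted in this sketch */
	};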
H A D cfg80211.h:27 struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
49 int ath6kl_cfg80211_suspend(struct ath6kl *ar,
53 int ath6kl_cfg80211_resume(struct ath6kl *ar);
58 void ath6kl_cfg80211_stop_all(struct ath6kl *ar);
60 int ath6kl_cfg80211_init(struct ath6kl *ar);
61 void ath6kl_cfg80211_cleanup(struct ath6kl *ar);
64 void ath6kl_cfg80211_destroy(struct ath6kl *ar);
H A D bmi.h:226 #define ath6kl_bmi_write_hi32(ar, item, val) \
231 addr = ath6kl_get_hi_item_addr(ar, HI_ITEM(item)); \
233 ath6kl_bmi_write(ar, addr, (u8 *) &v, sizeof(v)); \
236 #define ath6kl_bmi_read_hi32(ar, item, val) \
243 addr = ath6kl_get_hi_item_addr(ar, HI_ITEM(item)); \
244 ret = ath6kl_bmi_read(ar, addr, (u8 *) &tmp, 4); \
250 int ath6kl_bmi_init(struct ath6kl *ar);
251 void ath6kl_bmi_cleanup(struct ath6kl *ar);
252 void ath6kl_bmi_reset(struct ath6kl *ar);
254 int ath6kl_bmi_done(struct ath6kl *ar);
255 int ath6kl_bmi_get_target_info(struct ath6kl *ar,
257 int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len);
258 int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len);
259 int ath6kl_bmi_execute(struct ath6kl *ar,
261 int ath6kl_bmi_set_app_start(struct ath6kl *ar,
263 int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param);
264 int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param);
265 int ath6kl_bmi_lz_data(struct ath6kl *ar,
267 int ath6kl_bmi_lz_stream_start(struct ath6kl *ar,
269 int ath6kl_bmi_fast_download(struct ath6kl *ar,
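The write_hi32/read_hi32 macros above wrap BMI access to 32-bit host-interest items: resolve the item offset with ath6kl_get_hi_item_addr(), then issue a plain 4-byte BMI transfer with endian conversion. Hypothetical usage, with the item and flag names chosen for illustration rather than taken from this page:

	u32 flags;
	int ret;

	ret = ath6kl_bmi_read_hi32(ar, hi_option_flag, &flags); /* read item */
	if (ret)
		return ret;

	flags |= HI_OPTION_TIMER_WAR;		/* flag name illustrative */
	ret = ath6kl_bmi_write_hi32(ar, hi_option_flag, flags);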
H A D core.h:578 * and the runtime (current) limit must be checked from ar->vif_max.
603 struct ath6kl *ar; member in struct:ath6kl_vif
876 return ((struct ath6kl_vif *) netdev_priv(dev))->ar; ath6kl_priv()
879 static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar, ath6kl_get_hi_item_addr() argument
884 if (ar->target_type == TARGET_TYPE_AR6003) ath6kl_get_hi_item_addr()
886 else if (ar->target_type == TARGET_TYPE_AR6004) ath6kl_get_hi_item_addr()
892 int ath6kl_configure_target(struct ath6kl *ar);
896 void ath6kl_cookie_init(struct ath6kl *ar);
897 void ath6kl_cookie_cleanup(struct ath6kl *ar);
903 void ath6kl_stop_txrx(struct ath6kl *ar);
904 void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar);
905 int ath6kl_diag_write32(struct ath6kl *ar, u32 address, __le32 value);
906 int ath6kl_diag_write(struct ath6kl *ar, u32 address, void *data, u32 length);
907 int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value);
908 int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length);
909 int ath6kl_read_fwlogs(struct ath6kl *ar);
911 void ath6kl_tx_data_cleanup(struct ath6kl *ar);
913 struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar);
914 void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie);
922 void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count);
930 struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid);
965 struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar);
967 int ath6kl_init_hw_start(struct ath6kl *ar);
968 int ath6kl_init_hw_stop(struct ath6kl *ar);
969 int ath6kl_init_fetch_firmwares(struct ath6kl *ar);
970 int ath6kl_init_hw_params(struct ath6kl *ar);
972 void ath6kl_check_wow_status(struct ath6kl *ar);
974 void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb);
975 void ath6kl_core_rx_complete(struct ath6kl *ar, struct sk_buff *skb, u8 pipe);
978 int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type);
979 void ath6kl_core_cleanup(struct ath6kl *ar);
980 void ath6kl_core_destroy(struct ath6kl *ar);
983 void ath6kl_init_hw_restart(struct ath6kl *ar);
984 void ath6kl_recovery_err_notify(struct ath6kl *ar, enum ath6kl_fw_err reason);
985 void ath6kl_recovery_hb_event(struct ath6kl *ar, u32 cookie);
986 void ath6kl_recovery_init(struct ath6kl *ar);
987 void ath6kl_recovery_cleanup(struct ath6kl *ar);
988 void ath6kl_recovery_suspend(struct ath6kl *ar);
989 void ath6kl_recovery_resume(struct ath6kl *ar);
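ath6kl_priv() above relies on the driver's layering: the net_device private area is the per-interface ath6kl_vif, which carries the back-pointer to the shared struct ath6kl. Spelled out:

	static struct ath6kl *ar_from_netdev_sketch(struct net_device *dev)
	{
		struct ath6kl_vif *vif = netdev_priv(dev);

		return vif->ar;		/* the same unwrap ath6kl_priv() does */
	}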
H A D usb.c:73 struct ath6kl *ar; member in struct:ath6kl_usb
81 struct ath6kl *ar; member in struct:ath6kl_urb_context
168 static inline struct ath6kl_usb *ath6kl_usb_priv(struct ath6kl *ar) ath6kl_usb_priv() argument
170 return ar->hif_priv; ath6kl_usb_priv()
586 ath6kl_core_tx_complete(ar_usb->ar, skb); ath6kl_usb_io_comp_work()
590 ath6kl_core_rx_complete(ar_usb->ar, skb, ath6kl_usb_io_comp_work()
668 ath6kl_stop_txrx(ar_usb->ar); ath6kl_usb_device_detached()
672 ath6kl_core_cleanup(ar_usb->ar); ath6kl_usb_device_detached()
677 static void hif_start(struct ath6kl *ar) hif_start() argument
679 struct ath6kl_usb *device = ath6kl_usb_priv(ar); hif_start()
692 static int ath6kl_usb_send(struct ath6kl *ar, u8 PipeID, ath6kl_usb_send() argument
695 struct ath6kl_usb *device = ath6kl_usb_priv(ar); ath6kl_usb_send()
768 static void hif_stop(struct ath6kl *ar) hif_stop() argument
770 struct ath6kl_usb *device = ath6kl_usb_priv(ar); hif_stop()
775 static void ath6kl_usb_get_default_pipe(struct ath6kl *ar, ath6kl_usb_get_default_pipe() argument
782 static int ath6kl_usb_map_service_pipe(struct ath6kl *ar, u16 svc_id, ath6kl_usb_map_service_pipe() argument
806 ar->fw_capabilities)) ath6kl_usb_map_service_pipe()
819 ar->fw_capabilities)) ath6kl_usb_map_service_pipe()
837 static u16 ath6kl_usb_get_free_queue_number(struct ath6kl *ar, u8 pipe_id) ath6kl_usb_get_free_queue_number() argument
839 struct ath6kl_usb *device = ath6kl_usb_priv(ar); ath6kl_usb_get_free_queue_number()
844 static void hif_detach_htc(struct ath6kl *ar) hif_detach_htc() argument
846 struct ath6kl_usb *device = ath6kl_usb_priv(ar); hif_detach_htc()
942 static int ath6kl_usb_diag_read32(struct ath6kl *ar, u32 address, u32 *data) ath6kl_usb_diag_read32() argument
944 struct ath6kl_usb *ar_usb = ar->hif_priv; ath6kl_usb_diag_read32()
977 static int ath6kl_usb_diag_write32(struct ath6kl *ar, u32 address, __le32 data) ath6kl_usb_diag_write32() argument
979 struct ath6kl_usb *ar_usb = ar->hif_priv; ath6kl_usb_diag_write32()
1003 static int ath6kl_usb_bmi_read(struct ath6kl *ar, u8 *buf, u32 len) ath6kl_usb_bmi_read() argument
1005 struct ath6kl_usb *ar_usb = ar->hif_priv; ath6kl_usb_bmi_read()
1021 static int ath6kl_usb_bmi_write(struct ath6kl *ar, u8 *buf, u32 len) ath6kl_usb_bmi_write() argument
1023 struct ath6kl_usb *ar_usb = ar->hif_priv; ath6kl_usb_bmi_write()
1039 static int ath6kl_usb_power_on(struct ath6kl *ar) ath6kl_usb_power_on() argument
1041 hif_start(ar); ath6kl_usb_power_on()
1045 static int ath6kl_usb_power_off(struct ath6kl *ar) ath6kl_usb_power_off() argument
1047 hif_detach_htc(ar); ath6kl_usb_power_off()
1051 static void ath6kl_usb_stop(struct ath6kl *ar) ath6kl_usb_stop() argument
1053 hif_stop(ar); ath6kl_usb_stop()
1056 static void ath6kl_usb_cleanup_scatter(struct ath6kl *ar) ath6kl_usb_cleanup_scatter() argument
1064 static int ath6kl_usb_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow) ath6kl_usb_suspend() argument
1072 static int ath6kl_usb_resume(struct ath6kl *ar) ath6kl_usb_resume() argument
1102 struct ath6kl *ar; ath6kl_usb_probe() local
1132 ar = ath6kl_core_create(&ar_usb->udev->dev); ath6kl_usb_probe()
1133 if (ar == NULL) { ath6kl_usb_probe()
1139 ar->hif_priv = ar_usb; ath6kl_usb_probe()
1140 ar->hif_type = ATH6KL_HIF_TYPE_USB; ath6kl_usb_probe()
1141 ar->hif_ops = &ath6kl_usb_ops; ath6kl_usb_probe()
1142 ar->mbox_info.block_size = 16; ath6kl_usb_probe()
1143 ar->bmi.max_data_size = 252; ath6kl_usb_probe()
1145 ar_usb->ar = ar; ath6kl_usb_probe()
1147 ret = ath6kl_core_init(ar, ATH6KL_HTC_TYPE_PIPE); ath6kl_usb_probe()
1156 ath6kl_core_destroy(ar); ath6kl_usb_probe()
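Both bus back ends end their probe the same way; the USB fragments above reduce to: create the core, point it at the bus glue, then bring it up with the matching HTC flavour (ATH6KL_HTC_TYPE_PIPE here, ATH6KL_HTC_TYPE_MBOX for SDIO). Condensed:

	ar = ath6kl_core_create(&ar_usb->udev->dev);
	if (!ar)
		return -ENOMEM;			/* real code logs and unwinds */

	ar->hif_priv = ar_usb;			/* bus-private state */
	ar->hif_type = ATH6KL_HIF_TYPE_USB;
	ar->hif_ops = &ath6kl_usb_ops;		/* ops table consumed by core */
	ar_usb->ar = ar;

	ret = ath6kl_core_init(ar, ATH6KL_HTC_TYPE_PIPE);
	if (ret) {
		ath6kl_core_destroy(ar);
		return ret;
	}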
H A D testmode.c:46 void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, size_t buf_len) ath6kl_tm_rx_event() argument
53 skb = cfg80211_testmode_alloc_event_skb(ar->wiphy, buf_len, GFP_KERNEL); ath6kl_tm_rx_event()
72 struct ath6kl *ar = wiphy_priv(wiphy); ath6kl_tm_cmd() local
93 ath6kl_wmi_test_cmd(ar->wmi, buf, buf_len); ath6kl_tm_cmd()
H A D hif.c:70 static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar) ath6kl_hif_dump_fw_crash() argument
76 if (ar->target_type != TARGET_TYPE_AR6003) ath6kl_hif_dump_fw_crash()
80 address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state)); ath6kl_hif_dump_fw_crash()
81 address = TARG_VTOP(ar->target_type, address); ath6kl_hif_dump_fw_crash()
84 ret = ath6kl_diag_read32(ar, address, &regdump_addr); ath6kl_hif_dump_fw_crash()
94 regdump_addr = TARG_VTOP(ar->target_type, regdump_addr); ath6kl_hif_dump_fw_crash()
97 ret = ath6kl_diag_read(ar, regdump_addr, (u8 *)&regdump_val[0], ath6kl_hif_dump_fw_crash()
105 ath6kl_info("hw 0x%x fw %s\n", ar->wiphy->hw_version, ath6kl_hif_dump_fw_crash()
106 ar->wiphy->fw_version); ath6kl_hif_dump_fw_crash()
131 ret = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS, ath6kl_hif_proc_dbg_intr()
136 ath6kl_hif_dump_fw_crash(dev->ar); ath6kl_hif_proc_dbg_intr()
137 ath6kl_read_fwlogs(dev->ar); ath6kl_hif_proc_dbg_intr()
138 ath6kl_recovery_err_notify(dev->ar, ATH6KL_FW_ASSERT); ath6kl_hif_proc_dbg_intr()
153 status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS, ath6kl_hif_poll_mboxmsg_rx()
225 status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS, ath6kl_hif_rx_control()
240 scat_req->addr = dev->ar->mbox_info.htc_addr; ath6kl_hif_submit_scat_req()
246 dev->ar->mbox_info.htc_ext_addr : ath6kl_hif_submit_scat_req()
247 dev->ar->mbox_info.htc_addr; ath6kl_hif_submit_scat_req()
260 scat_req->complete(dev->ar->htc_target, scat_req); ath6kl_hif_submit_scat_req()
265 status = ath6kl_hif_scat_req_rw(dev->ar, scat_req); ath6kl_hif_submit_scat_req()
338 status = hif_read_write_sync(dev->ar, ERROR_INT_STATUS_ADDRESS, ath6kl_hif_proc_err_intr()
382 status = hif_read_write_sync(dev->ar, CPU_INT_STATUS_ADDRESS, ath6kl_hif_proc_cpu_intr()
429 status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS, proc_pending_irqs()
543 int ath6kl_hif_intr_bh_handler(struct ath6kl *ar) ath6kl_hif_intr_bh_handler() argument
545 struct ath6kl_device *dev = ar->htc_target->dev; ath6kl_hif_intr_bh_handler()
608 status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS, ath6kl_hif_enable_intrs()
632 return hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS, ath6kl_hif_disable_intrs()
653 ath6kl_hif_irq_enable(dev->ar); ath6kl_hif_unmask_intrs()
667 ath6kl_hif_irq_disable(dev->ar); ath6kl_hif_mask_intrs()
683 dev->htc_cnxt->block_sz = dev->ar->mbox_info.block_size; ath6kl_hif_setup()
696 dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr); ath6kl_hif_setup()
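ath6kl_hif_dump_fw_crash() above is a two-hop diagnostic read: look up the hi_failure_state host-interest item, translate the target virtual address with TARG_VTOP, read the pointer to the register dump, translate again, then bulk-read the dump. A condensed sketch; the dump size constant is assumed, and the real function also validates the pointer and prints every register:

	static int dump_fw_crash_sketch(struct ath6kl *ar)
	{
		u32 address, regdump_addr = 0;
		u32 regdump_val[REG_DUMP_COUNT_AR6003];	/* count assumed */
		int ret;

		if (ar->target_type != TARGET_TYPE_AR6003)
			return -EOPNOTSUPP;

		address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
		address = TARG_VTOP(ar->target_type, address);

		ret = ath6kl_diag_read32(ar, address, &regdump_addr);
		if (ret || !regdump_addr)
			return ret ? ret : -EINVAL;

		regdump_addr = TARG_VTOP(ar->target_type, regdump_addr);
		return ath6kl_diag_read(ar, regdump_addr, &regdump_val[0],
					sizeof(regdump_val));
	}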
H A D testmode.h:22 void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, size_t buf_len);
28 static inline void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, ath6kl_tm_rx_event() argument
H A D htc_pipe.c:255 status = ath6kl_hif_pipe_send(target->dev->ar, htc_issue_packets()
307 struct ath6kl *ar = target->dev->ar; htc_try_send() local
413 ath6kl_hif_pipe_get_free_queue_number(ar,
488 ath6kl_hif_pipe_get_free_queue_number(ar, pipeid);
743 static int ath6kl_htc_pipe_tx_complete(struct ath6kl *ar, struct sk_buff *skb) ath6kl_htc_pipe_tx_complete() argument
745 struct htc_target *target = ar->htc_target; ath6kl_htc_pipe_tx_complete()
952 static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb, ath6kl_htc_pipe_rx_complete() argument
955 struct htc_target *target = ar->htc_target; ath6kl_htc_pipe_rx_complete()
965 * ar->htc_target can be NULL due to a race condition that can occur ath6kl_htc_pipe_rx_complete()
967 * initializing 'ar->htc_target' via 'ath6kl_htc_create'). ath6kl_htc_pipe_rx_complete()
970 * Thus the possibility of ar->htc_target being NULL ath6kl_htc_pipe_rx_complete()
1232 struct ath6kl *ar = target->dev->ar; ath6kl_htc_pipe_conn_service() local
1391 status = ath6kl_hif_pipe_map_service(ar, ep->svc_id, ath6kl_htc_pipe_conn_service()
1416 static void *ath6kl_htc_pipe_create(struct ath6kl *ar) ath6kl_htc_pipe_create() argument
1450 target->dev->ar = ar; ath6kl_htc_pipe_create()
1456 ath6kl_hif_pipe_get_default(ar, &ep->pipe.pipeid_ul, ath6kl_htc_pipe_create()
1734 void ath6kl_htc_pipe_attach(struct ath6kl *ar) ath6kl_htc_pipe_attach() argument
1736 ar->htc_ops = &ath6kl_htc_pipe_ops; ath6kl_htc_pipe_attach()
H A D htc.h:549 void* (*create)(struct ath6kl *ar);
570 int (*tx_complete)(struct ath6kl *ar, struct sk_buff *skb);
571 int (*rx_complete)(struct ath6kl *ar, struct sk_buff *skb, u8 pipe);
674 void ath6kl_htc_pipe_attach(struct ath6kl *ar);
675 void ath6kl_htc_mbox_attach(struct ath6kl *ar);
H A D htc_mbox.c:501 hif_scatter_req_add(target->dev->ar, scat_req); htc_async_tx_scat_complete()
524 target->dev->ar->mbox_info.htc_addr, ath6kl_htc_tx_issue()
528 status = hif_read_write_sync(target->dev->ar, ath6kl_htc_tx_issue()
529 target->dev->ar->mbox_info.htc_addr, ath6kl_htc_tx_issue()
536 status = hif_write_async(target->dev->ar, ath6kl_htc_tx_issue()
537 target->dev->ar->mbox_info.htc_addr, ath6kl_htc_tx_issue()
769 ac = target->dev->ar->ep2ac_map[endpoint->eid]; ath6kl_htc_tx_bundle()
780 scat_req = hif_scatter_req_get(target->dev->ar); ath6kl_htc_tx_bundle()
825 hif_scatter_req_add(target->dev->ar, scat_req); ath6kl_htc_tx_bundle()
886 ac = target->dev->ar->ep2ac_map[endpoint->eid]; ath6kl_htc_tx_from_queue()
1267 return (eid == target->dev->ar->ctrl_ep) ? htc_valid_rx_frame_len()
1321 padded_len, dev->ar->mbox_info.htc_addr); ath6kl_htc_rx_packet()
1323 status = hif_read_write_sync(dev->ar, ath6kl_htc_rx_packet()
1324 dev->ar->mbox_info.htc_addr, ath6kl_htc_rx_packet()
1947 scat_req = hif_scatter_req_get(target->dev->ar); ath6kl_htc_rx_bundle()
1998 hif_scatter_req_add(target->dev->ar, scat_req); ath6kl_htc_rx_bundle()
2615 if (ath6kl_hif_enable_scatter(target->dev->ar)) { htc_setup_msg_bndl()
2728 ath6kl_hif_cleanup_scatter(target->dev->ar); ath6kl_htc_mbox_wait_target()
2793 block_size = target->dev->ar->mbox_info.block_size; ath6kl_htc_reset()
2845 static void *ath6kl_htc_mbox_create(struct ath6kl *ar) ath6kl_htc_mbox_create() argument
2871 target->dev->ar = ar; ath6kl_htc_mbox_create()
2896 ath6kl_hif_cleanup_scatter(target->dev->ar); ath6kl_htc_mbox_cleanup()
2932 void ath6kl_htc_mbox_attach(struct ath6kl *ar) ath6kl_htc_mbox_attach() argument
2934 ar->htc_ops = &ath6kl_htc_mbox_ops; ath6kl_htc_mbox_attach()
H A D wmi.c:136 struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx) ath6kl_get_vif_by_index() argument
140 if (WARN_ON(if_idx > (ar->vif_max - 1))) ath6kl_get_vif_by_index()
144 spin_lock_bh(&ar->list_lock); ath6kl_get_vif_by_index()
145 list_for_each_entry(vif, &ar->vif_list, list) { ath6kl_get_vif_by_index()
151 spin_unlock_bh(&ar->list_lock); ath6kl_get_vif_by_index()
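ath6kl_get_vif_by_index() above is a locked list walk guarded by a WARN_ON against ar->vif_max. The body is roughly the following; the match on fw_vif_idx is inferred from how vif->fw_vif_idx is used elsewhere on this page:

	struct ath6kl_vif *vif = NULL, *tmp;

	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(tmp, &ar->vif_list, list) {
		if (tmp->fw_vif_idx == if_idx) {	/* match fw index */
			vif = tmp;
			break;
		}
	}
	spin_unlock_bh(&ar->list_lock);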
510 struct ath6kl *ar = wmi->parent_dev; ath6kl_wmi_remain_on_chnl_event_rx() local
521 chan = ieee80211_get_channel(ar->wiphy, freq); ath6kl_wmi_remain_on_chnl_event_rx()
543 struct ath6kl *ar = wmi->parent_dev; ath6kl_wmi_cancel_remain_on_chnl_event_rx() local
555 chan = ieee80211_get_channel(ar->wiphy, freq); ath6kl_wmi_cancel_remain_on_chnl_event_rx()
1085 cfg80211_sched_scan_results(vif->ar->wiphy); ath6kl_wmi_sscan_timer()
1094 struct ath6kl *ar = wmi->parent_dev; ath6kl_wmi_bssinfo_event_rx() local
1117 ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_wmi_bssinfo_event_rx()
1121 channel = ieee80211_get_channel(ar->wiphy, le16_to_cpu(bih->ch)); ath6kl_wmi_bssinfo_event_rx()
1140 bss = cfg80211_inform_bss(ar->wiphy, channel, ath6kl_wmi_bssinfo_event_rx()
1151 cfg80211_put_bss(ar->wiphy, bss); ath6kl_wmi_bssinfo_event_rx()
2009 struct ath6kl *ar = wmi->parent_dev; ath6kl_wmi_beginscan_cmd() local
2014 ar->fw_capabilities)) { ath6kl_wmi_beginscan_cmd()
2047 sband = ar->wiphy->bands[band]; ath6kl_wmi_beginscan_cmd()
2862 struct ath6kl *ar = wmi->parent_dev; ath6kl_wmi_set_bitrate_mask() local
2865 ar->fw_capabilities)) ath6kl_wmi_set_bitrate_mask()
2907 struct ath6kl *ar = wmi->parent_dev; ath6kl_wmi_host_sleep_mode_cmd_prcd_evt_rx() local
2910 wake_up(&ar->event_wq); ath6kl_wmi_host_sleep_mode_cmd_prcd_evt_rx()
3308 struct ath6kl *ar = wmi->parent_dev; ath6kl_wmi_get_rate() local
3322 ar->fw_capabilities)) { ath6kl_wmi_get_rate()
3717 struct ath6kl *ar = wmi->parent_dev; ath6kl_wmi_send_mgmt_cmd() local
3720 ar->fw_capabilities)) { ath6kl_wmi_send_mgmt_cmd()
3727 status = __ath6kl_wmi_send_mgmt_cmd(ar->wmi, if_idx, id, freq, ath6kl_wmi_send_mgmt_cmd()
3731 status = ath6kl_wmi_send_action_cmd(ar->wmi, if_idx, id, freq, ath6kl_wmi_send_mgmt_cmd()
/linux-4.1.27/drivers/net/wireless/ath/ath10k/
H A D core.c:105 static void ath10k_send_suspend_complete(struct ath10k *ar) ath10k_send_suspend_complete() argument
107 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot suspend complete\n"); ath10k_send_suspend_complete()
109 complete(&ar->target_suspend); ath10k_send_suspend_complete()
112 static int ath10k_init_configure_target(struct ath10k *ar) ath10k_init_configure_target() argument
118 ret = ath10k_bmi_write32(ar, hi_app_host_interest, ath10k_init_configure_target()
121 ath10k_err(ar, "settings HTC version failed\n"); ath10k_init_configure_target()
126 ret = ath10k_bmi_read32(ar, hi_option_flag, &param_host); ath10k_init_configure_target()
128 ath10k_err(ar, "setting firmware mode (1/2) failed\n"); ath10k_init_configure_target()
145 ret = ath10k_bmi_write32(ar, hi_option_flag, param_host); ath10k_init_configure_target()
147 ath10k_err(ar, "setting firmware mode (2/2) failed\n"); ath10k_init_configure_target()
152 ret = ath10k_bmi_write32(ar, hi_be, 0); ath10k_init_configure_target()
154 ath10k_err(ar, "setting host CPU BE mode failed\n"); ath10k_init_configure_target()
159 ret = ath10k_bmi_write32(ar, hi_fw_swap, 0); ath10k_init_configure_target()
162 ath10k_err(ar, "setting FW data/desc swap flags failed\n"); ath10k_init_configure_target()
169 static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar, ath10k_fetch_fw_file() argument
184 ret = request_firmware(&fw, filename, ar->dev); ath10k_fetch_fw_file()
191 static int ath10k_push_board_ext_data(struct ath10k *ar, const void *data, ath10k_push_board_ext_data() argument
194 u32 board_data_size = ar->hw_params.fw.board_size; ath10k_push_board_ext_data()
195 u32 board_ext_data_size = ar->hw_params.fw.board_ext_size; ath10k_push_board_ext_data()
199 ret = ath10k_bmi_read32(ar, hi_board_ext_data, &board_ext_data_addr); ath10k_push_board_ext_data()
201 ath10k_err(ar, "could not read board ext data addr (%d)\n", ath10k_push_board_ext_data()
206 ath10k_dbg(ar, ATH10K_DBG_BOOT, ath10k_push_board_ext_data()
214 ath10k_err(ar, "invalid board (ext) data sizes %zu != %d+%d\n", ath10k_push_board_ext_data()
219 ret = ath10k_bmi_write_memory(ar, board_ext_data_addr, ath10k_push_board_ext_data()
223 ath10k_err(ar, "could not write board ext data (%d)\n", ret); ath10k_push_board_ext_data()
227 ret = ath10k_bmi_write32(ar, hi_board_ext_data_config, ath10k_push_board_ext_data()
230 ath10k_err(ar, "could not write board ext data bit (%d)\n", ath10k_push_board_ext_data()
238 static int ath10k_download_board_data(struct ath10k *ar, const void *data, ath10k_download_board_data() argument
241 u32 board_data_size = ar->hw_params.fw.board_size; ath10k_download_board_data()
245 ret = ath10k_push_board_ext_data(ar, data, data_len); ath10k_download_board_data()
247 ath10k_err(ar, "could not push board ext data (%d)\n", ret); ath10k_download_board_data()
251 ret = ath10k_bmi_read32(ar, hi_board_data, &address); ath10k_download_board_data()
253 ath10k_err(ar, "could not read board data addr (%d)\n", ret); ath10k_download_board_data()
257 ret = ath10k_bmi_write_memory(ar, address, data, ath10k_download_board_data()
261 ath10k_err(ar, "could not write board data (%d)\n", ret); ath10k_download_board_data()
265 ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1); ath10k_download_board_data()
267 ath10k_err(ar, "could not write board data bit (%d)\n", ret); ath10k_download_board_data()
275 static int ath10k_download_cal_file(struct ath10k *ar) ath10k_download_cal_file() argument
279 if (!ar->cal_file) ath10k_download_cal_file()
282 if (IS_ERR(ar->cal_file)) ath10k_download_cal_file()
283 return PTR_ERR(ar->cal_file); ath10k_download_cal_file()
285 ret = ath10k_download_board_data(ar, ar->cal_file->data, ath10k_download_cal_file()
286 ar->cal_file->size); ath10k_download_cal_file()
288 ath10k_err(ar, "failed to download cal_file data: %d\n", ret); ath10k_download_cal_file()
292 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cal file downloaded\n"); ath10k_download_cal_file()
297 static int ath10k_download_cal_dt(struct ath10k *ar) ath10k_download_cal_dt() argument
304 node = ar->dev->of_node; ath10k_download_cal_dt()
318 ath10k_warn(ar, "invalid calibration data length in DT: %d\n", ath10k_download_cal_dt()
333 ath10k_warn(ar, "failed to read calibration data from DT: %d\n", ath10k_download_cal_dt()
338 ret = ath10k_download_board_data(ar, data, data_len); ath10k_download_cal_dt()
340 ath10k_warn(ar, "failed to download calibration data from Device Tree: %d\n", ath10k_download_cal_dt()
354 static int ath10k_download_and_run_otp(struct ath10k *ar) ath10k_download_and_run_otp() argument
356 u32 result, address = ar->hw_params.patch_load_addr; ath10k_download_and_run_otp()
359 ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len); ath10k_download_and_run_otp()
361 ath10k_err(ar, "failed to download board data: %d\n", ret); ath10k_download_and_run_otp()
367 if (!ar->otp_data || !ar->otp_len) { ath10k_download_and_run_otp()
368 ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n", ath10k_download_and_run_otp()
369 ar->otp_data, ar->otp_len); ath10k_download_and_run_otp()
373 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n", ath10k_download_and_run_otp()
374 address, ar->otp_len); ath10k_download_and_run_otp()
376 ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len); ath10k_download_and_run_otp()
378 ath10k_err(ar, "could not write otp (%d)\n", ret); ath10k_download_and_run_otp()
382 ret = ath10k_bmi_execute(ar, address, 0, &result); ath10k_download_and_run_otp()
384 ath10k_err(ar, "could not execute otp (%d)\n", ret); ath10k_download_and_run_otp()
388 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result); ath10k_download_and_run_otp()
391 ath10k_err(ar, "otp calibration failed: %d", result); ath10k_download_and_run_otp()
398 static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode) ath10k_download_fw() argument
405 address = ar->hw_params.patch_load_addr; ath10k_download_fw()
409 data = ar->firmware_data; ath10k_download_fw()
410 data_len = ar->firmware_len; ath10k_download_fw()
414 data = ar->testmode.utf->data; ath10k_download_fw()
415 data_len = ar->testmode.utf->size; ath10k_download_fw()
419 ath10k_err(ar, "unknown firmware mode: %d\n", mode); ath10k_download_fw()
423 ath10k_dbg(ar, ATH10K_DBG_BOOT, ath10k_download_fw()
427 ret = ath10k_bmi_fast_download(ar, address, data, data_len); ath10k_download_fw()
429 ath10k_err(ar, "failed to download %s firmware: %d\n", ath10k_download_fw()
437 static void ath10k_core_free_firmware_files(struct ath10k *ar) ath10k_core_free_firmware_files() argument
439 if (!IS_ERR(ar->board)) ath10k_core_free_firmware_files()
440 release_firmware(ar->board); ath10k_core_free_firmware_files()
442 if (!IS_ERR(ar->otp)) ath10k_core_free_firmware_files()
443 release_firmware(ar->otp); ath10k_core_free_firmware_files()
445 if (!IS_ERR(ar->firmware)) ath10k_core_free_firmware_files()
446 release_firmware(ar->firmware); ath10k_core_free_firmware_files()
448 if (!IS_ERR(ar->cal_file)) ath10k_core_free_firmware_files()
449 release_firmware(ar->cal_file); ath10k_core_free_firmware_files()
451 ar->board = NULL; ath10k_core_free_firmware_files()
452 ar->board_data = NULL; ath10k_core_free_firmware_files()
453 ar->board_len = 0; ath10k_core_free_firmware_files()
455 ar->otp = NULL; ath10k_core_free_firmware_files()
456 ar->otp_data = NULL; ath10k_core_free_firmware_files()
457 ar->otp_len = 0; ath10k_core_free_firmware_files()
459 ar->firmware = NULL; ath10k_core_free_firmware_files()
460 ar->firmware_data = NULL; ath10k_core_free_firmware_files()
461 ar->firmware_len = 0; ath10k_core_free_firmware_files()
463 ar->cal_file = NULL; ath10k_core_free_firmware_files()
466 static int ath10k_fetch_cal_file(struct ath10k *ar) ath10k_fetch_cal_file() argument
472 ath10k_bus_str(ar->hif.bus), dev_name(ar->dev)); ath10k_fetch_cal_file()
474 ar->cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename); ath10k_fetch_cal_file()
475 if (IS_ERR(ar->cal_file)) ath10k_fetch_cal_file()
477 return PTR_ERR(ar->cal_file); ath10k_fetch_cal_file()
479 ath10k_dbg(ar, ATH10K_DBG_BOOT, "found calibration file %s/%s\n", ath10k_fetch_cal_file()
485 static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar) ath10k_core_fetch_firmware_api_1() argument
489 if (ar->hw_params.fw.fw == NULL) { ath10k_core_fetch_firmware_api_1()
490 ath10k_err(ar, "firmware file not defined\n"); ath10k_core_fetch_firmware_api_1()
494 if (ar->hw_params.fw.board == NULL) { ath10k_core_fetch_firmware_api_1()
495 ath10k_err(ar, "board data file not defined"); ath10k_core_fetch_firmware_api_1()
499 ar->board = ath10k_fetch_fw_file(ar, ath10k_core_fetch_firmware_api_1()
500 ar->hw_params.fw.dir, ath10k_core_fetch_firmware_api_1()
501 ar->hw_params.fw.board); ath10k_core_fetch_firmware_api_1()
502 if (IS_ERR(ar->board)) { ath10k_core_fetch_firmware_api_1()
503 ret = PTR_ERR(ar->board); ath10k_core_fetch_firmware_api_1()
504 ath10k_err(ar, "could not fetch board data (%d)\n", ret); ath10k_core_fetch_firmware_api_1()
508 ar->board_data = ar->board->data; ath10k_core_fetch_firmware_api_1()
509 ar->board_len = ar->board->size; ath10k_core_fetch_firmware_api_1()
511 ar->firmware = ath10k_fetch_fw_file(ar, ath10k_core_fetch_firmware_api_1()
512 ar->hw_params.fw.dir, ath10k_core_fetch_firmware_api_1()
513 ar->hw_params.fw.fw); ath10k_core_fetch_firmware_api_1()
514 if (IS_ERR(ar->firmware)) { ath10k_core_fetch_firmware_api_1()
515 ret = PTR_ERR(ar->firmware); ath10k_core_fetch_firmware_api_1()
516 ath10k_err(ar, "could not fetch firmware (%d)\n", ret); ath10k_core_fetch_firmware_api_1()
520 ar->firmware_data = ar->firmware->data; ath10k_core_fetch_firmware_api_1()
521 ar->firmware_len = ar->firmware->size; ath10k_core_fetch_firmware_api_1()
524 if (ar->hw_params.fw.otp == NULL) ath10k_core_fetch_firmware_api_1()
527 ar->otp = ath10k_fetch_fw_file(ar, ath10k_core_fetch_firmware_api_1()
528 ar->hw_params.fw.dir, ath10k_core_fetch_firmware_api_1()
529 ar->hw_params.fw.otp); ath10k_core_fetch_firmware_api_1()
530 if (IS_ERR(ar->otp)) { ath10k_core_fetch_firmware_api_1()
531 ret = PTR_ERR(ar->otp); ath10k_core_fetch_firmware_api_1()
532 ath10k_err(ar, "could not fetch otp (%d)\n", ret); ath10k_core_fetch_firmware_api_1()
536 ar->otp_data = ar->otp->data; ath10k_core_fetch_firmware_api_1()
537 ar->otp_len = ar->otp->size; ath10k_core_fetch_firmware_api_1()
542 ath10k_core_free_firmware_files(ar); ath10k_core_fetch_firmware_api_1()
546 static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) ath10k_core_fetch_firmware_api_n() argument
555 ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name); ath10k_core_fetch_firmware_api_n()
556 if (IS_ERR(ar->firmware)) { ath10k_core_fetch_firmware_api_n()
557 ath10k_err(ar, "could not fetch firmware file '%s/%s': %ld\n", ath10k_core_fetch_firmware_api_n()
558 ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware)); ath10k_core_fetch_firmware_api_n()
559 return PTR_ERR(ar->firmware); ath10k_core_fetch_firmware_api_n()
562 data = ar->firmware->data; ath10k_core_fetch_firmware_api_n()
563 len = ar->firmware->size; ath10k_core_fetch_firmware_api_n()
569 ath10k_err(ar, "firmware file '%s/%s' too small to contain magic: %zu\n", ath10k_core_fetch_firmware_api_n()
570 ar->hw_params.fw.dir, name, len); ath10k_core_fetch_firmware_api_n()
576 ath10k_err(ar, "invalid firmware magic\n"); ath10k_core_fetch_firmware_api_n()
598 ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n", ath10k_core_fetch_firmware_api_n()
606 if (ie_len > sizeof(ar->hw->wiphy->fw_version) - 1) ath10k_core_fetch_firmware_api_n()
609 memcpy(ar->hw->wiphy->fw_version, data, ie_len); ath10k_core_fetch_firmware_api_n()
610 ar->hw->wiphy->fw_version[ie_len] = '\0'; ath10k_core_fetch_firmware_api_n()
612 ath10k_dbg(ar, ATH10K_DBG_BOOT, ath10k_core_fetch_firmware_api_n()
614 ar->hw->wiphy->fw_version); ath10k_core_fetch_firmware_api_n()
622 ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw timestamp %d\n", ath10k_core_fetch_firmware_api_n()
626 ath10k_dbg(ar, ATH10K_DBG_BOOT, ath10k_core_fetch_firmware_api_n()
638 ath10k_dbg(ar, ATH10K_DBG_BOOT, ath10k_core_fetch_firmware_api_n()
641 __set_bit(i, ar->fw_features); ath10k_core_fetch_firmware_api_n()
645 ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "", ath10k_core_fetch_firmware_api_n()
646 ar->fw_features, ath10k_core_fetch_firmware_api_n()
647 sizeof(ar->fw_features)); ath10k_core_fetch_firmware_api_n()
650 ath10k_dbg(ar, ATH10K_DBG_BOOT, ath10k_core_fetch_firmware_api_n()
654 ar->firmware_data = data; ath10k_core_fetch_firmware_api_n()
655 ar->firmware_len = ie_len; ath10k_core_fetch_firmware_api_n()
659 ath10k_dbg(ar, ATH10K_DBG_BOOT, ath10k_core_fetch_firmware_api_n()
663 ar->otp_data = data; ath10k_core_fetch_firmware_api_n()
664 ar->otp_len = ie_len; ath10k_core_fetch_firmware_api_n()
673 ar->wmi.op_version = le32_to_cpup(version); ath10k_core_fetch_firmware_api_n()
675 ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n", ath10k_core_fetch_firmware_api_n()
676 ar->wmi.op_version); ath10k_core_fetch_firmware_api_n()
679 ath10k_warn(ar, "Unknown FW IE: %u\n", ath10k_core_fetch_firmware_api_n()
691 if (!ar->firmware_data || !ar->firmware_len) { ath10k_core_fetch_firmware_api_n()
692 ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n", ath10k_core_fetch_firmware_api_n()
693 ar->hw_params.fw.dir, name); ath10k_core_fetch_firmware_api_n()
699 if (ar->hw_params.fw.board == NULL) { ath10k_core_fetch_firmware_api_n()
700 ath10k_err(ar, "board data file not defined"); ath10k_core_fetch_firmware_api_n()
705 ar->board = ath10k_fetch_fw_file(ar, ath10k_core_fetch_firmware_api_n()
706 ar->hw_params.fw.dir, ath10k_core_fetch_firmware_api_n()
707 ar->hw_params.fw.board); ath10k_core_fetch_firmware_api_n()
708 if (IS_ERR(ar->board)) { ath10k_core_fetch_firmware_api_n()
709 ret = PTR_ERR(ar->board); ath10k_core_fetch_firmware_api_n()
710 ath10k_err(ar, "could not fetch board data '%s/%s' (%d)\n", ath10k_core_fetch_firmware_api_n()
711 ar->hw_params.fw.dir, ar->hw_params.fw.board, ath10k_core_fetch_firmware_api_n()
716 ar->board_data = ar->board->data; ath10k_core_fetch_firmware_api_n()
717 ar->board_len = ar->board->size; ath10k_core_fetch_firmware_api_n()
722 ath10k_core_free_firmware_files(ar); ath10k_core_fetch_firmware_api_n()
726 static int ath10k_core_fetch_firmware_files(struct ath10k *ar) ath10k_core_fetch_firmware_files() argument
731 ath10k_fetch_cal_file(ar); ath10k_core_fetch_firmware_files()
733 ar->fw_api = 4; ath10k_core_fetch_firmware_files()
734 ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); ath10k_core_fetch_firmware_files()
736 ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE); ath10k_core_fetch_firmware_files()
740 ar->fw_api = 3; ath10k_core_fetch_firmware_files()
741 ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); ath10k_core_fetch_firmware_files()
743 ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE); ath10k_core_fetch_firmware_files()
747 ar->fw_api = 2; ath10k_core_fetch_firmware_files()
748 ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); ath10k_core_fetch_firmware_files()
750 ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE); ath10k_core_fetch_firmware_files()
754 ar->fw_api = 1; ath10k_core_fetch_firmware_files()
755 ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); ath10k_core_fetch_firmware_files()
757 ret = ath10k_core_fetch_firmware_api_1(ar); ath10k_core_fetch_firmware_files()
762 ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api); ath10k_core_fetch_firmware_files()
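The fetch sequence above walks the firmware API containers from newest to oldest and only falls back to the pre-IE API 1 layout last. The same flow, table-driven (a sketch using the driver's ATH10K_FW_API*_FILE names; the real code logs each attempt):

	static int fetch_firmware_sketch(struct ath10k *ar)
	{
		static const struct {
			int api;
			const char *file;
		} fw_files[] = {
			{ 4, ATH10K_FW_API4_FILE },
			{ 3, ATH10K_FW_API3_FILE },
			{ 2, ATH10K_FW_API2_FILE },
		};
		int i, ret;

		ath10k_fetch_cal_file(ar);	/* cal file is optional */

		for (i = 0; i < ARRAY_SIZE(fw_files); i++) {
			ar->fw_api = fw_files[i].api;
			ret = ath10k_core_fetch_firmware_api_n(ar,
							       fw_files[i].file);
			if (!ret)
				goto out;
		}

		ar->fw_api = 1;		/* last resort: legacy API 1 files */
		ret = ath10k_core_fetch_firmware_api_1(ar);
	out:
		return ret;
	}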
767 static int ath10k_download_cal_data(struct ath10k *ar) ath10k_download_cal_data() argument
771 ret = ath10k_download_cal_file(ar); ath10k_download_cal_data()
773 ar->cal_mode = ATH10K_CAL_MODE_FILE; ath10k_download_cal_data()
777 ath10k_dbg(ar, ATH10K_DBG_BOOT, ath10k_download_cal_data()
781 ret = ath10k_download_cal_dt(ar); ath10k_download_cal_data()
783 ar->cal_mode = ATH10K_CAL_MODE_DT; ath10k_download_cal_data()
787 ath10k_dbg(ar, ATH10K_DBG_BOOT, ath10k_download_cal_data()
791 ret = ath10k_download_and_run_otp(ar); ath10k_download_cal_data()
793 ath10k_err(ar, "failed to run otp: %d\n", ret); ath10k_download_cal_data()
797 ar->cal_mode = ATH10K_CAL_MODE_OTP; ath10k_download_cal_data()
800 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n", ath10k_download_cal_data()
801 ath10k_cal_mode_str(ar->cal_mode)); ath10k_download_cal_data()
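Calibration data above resolves with a fixed priority: pre-extracted cal file first, Device Tree blob second, and the board-data-plus-OTP run as the catch-all. Sketched, with the per-step logging stripped:

	static int download_cal_sketch(struct ath10k *ar)
	{
		if (!ath10k_download_cal_file(ar)) {
			ar->cal_mode = ATH10K_CAL_MODE_FILE;
			return 0;
		}

		if (!ath10k_download_cal_dt(ar)) {
			ar->cal_mode = ATH10K_CAL_MODE_DT;
			return 0;
		}

		/* board data + OTP run is the fallback that always executes */
		if (ath10k_download_and_run_otp(ar))
			return -EINVAL;	/* real code propagates the error */

		ar->cal_mode = ATH10K_CAL_MODE_OTP;
		return 0;
	}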
805 static int ath10k_init_uart(struct ath10k *ar) ath10k_init_uart() argument
813 ret = ath10k_bmi_write32(ar, hi_serial_enable, 0); ath10k_init_uart()
815 ath10k_warn(ar, "could not disable UART prints (%d)\n", ret); ath10k_init_uart()
822 ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, ar->hw_params.uart_pin); ath10k_init_uart()
824 ath10k_warn(ar, "could not enable UART prints (%d)\n", ret); ath10k_init_uart()
828 ret = ath10k_bmi_write32(ar, hi_serial_enable, 1); ath10k_init_uart()
830 ath10k_warn(ar, "could not enable UART prints (%d)\n", ret); ath10k_init_uart()
835 ret = ath10k_bmi_write32(ar, hi_desired_baud_rate, 19200); ath10k_init_uart()
837 ath10k_warn(ar, "could not set the baud rate (%d)\n", ret); ath10k_init_uart()
841 ath10k_info(ar, "UART prints enabled\n"); ath10k_init_uart()
845 static int ath10k_init_hw_params(struct ath10k *ar) ath10k_init_hw_params() argument
853 if (hw_params->id == ar->target_version) ath10k_init_hw_params()
858 ath10k_err(ar, "Unsupported hardware version: 0x%x\n", ath10k_init_hw_params()
859 ar->target_version); ath10k_init_hw_params()
863 ar->hw_params = *hw_params; ath10k_init_hw_params()
865 ath10k_dbg(ar, ATH10K_DBG_BOOT, "Hardware name %s version 0x%x\n", ath10k_init_hw_params()
866 ar->hw_params.name, ar->target_version); ath10k_init_hw_params()
873 struct ath10k *ar = container_of(work, struct ath10k, restart_work); ath10k_core_restart() local
875 set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags); ath10k_core_restart()
882 ieee80211_stop_queues(ar->hw); ath10k_core_restart()
883 ath10k_drain_tx(ar); ath10k_core_restart()
884 complete_all(&ar->scan.started); ath10k_core_restart()
885 complete_all(&ar->scan.completed); ath10k_core_restart()
886 complete_all(&ar->scan.on_channel); ath10k_core_restart()
887 complete_all(&ar->offchan_tx_completed); ath10k_core_restart()
888 complete_all(&ar->install_key_done); ath10k_core_restart()
889 complete_all(&ar->vdev_setup_done); ath10k_core_restart()
890 complete_all(&ar->thermal.wmi_sync); ath10k_core_restart()
891 wake_up(&ar->htt.empty_tx_wq); ath10k_core_restart()
892 wake_up(&ar->wmi.tx_credits_wq); ath10k_core_restart()
893 wake_up(&ar->peer_mapping_wq); ath10k_core_restart()
895 mutex_lock(&ar->conf_mutex); ath10k_core_restart()
897 switch (ar->state) { ath10k_core_restart()
899 ar->state = ATH10K_STATE_RESTARTING; ath10k_core_restart()
900 ath10k_hif_stop(ar); ath10k_core_restart()
901 ath10k_scan_finish(ar); ath10k_core_restart()
902 ieee80211_restart_hw(ar->hw); ath10k_core_restart()
907 ath10k_warn(ar, "cannot restart a device that hasn't been started\n"); ath10k_core_restart()
913 ar->state = ATH10K_STATE_WEDGED; ath10k_core_restart()
916 ath10k_warn(ar, "device is wedged, will not restart\n"); ath10k_core_restart()
919 ath10k_warn(ar, "firmware restart in UTF mode not supported\n"); ath10k_core_restart()
923 mutex_unlock(&ar->conf_mutex); ath10k_core_restart()
926 static int ath10k_core_init_firmware_features(struct ath10k *ar) ath10k_core_init_firmware_features() argument
928 if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features) && ath10k_core_init_firmware_features()
929 !test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { ath10k_core_init_firmware_features()
930 ath10k_err(ar, "feature bits corrupted: 10.2 feature requires 10.x feature to be set as well"); ath10k_core_init_firmware_features()
934 if (ar->wmi.op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) { ath10k_core_init_firmware_features()
935 ath10k_err(ar, "unsupported WMI OP version (max %d): %d\n", ath10k_core_init_firmware_features()
936 ATH10K_FW_WMI_OP_VERSION_MAX, ar->wmi.op_version); ath10k_core_init_firmware_features()
943 if (ar->wmi.op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) { ath10k_core_init_firmware_features()
944 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { ath10k_core_init_firmware_features()
946 ar->fw_features)) ath10k_core_init_firmware_features()
947 ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_2; ath10k_core_init_firmware_features()
949 ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_1; ath10k_core_init_firmware_features()
951 ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_MAIN; ath10k_core_init_firmware_features()
955 switch (ar->wmi.op_version) { ath10k_core_init_firmware_features()
957 ar->max_num_peers = TARGET_NUM_PEERS; ath10k_core_init_firmware_features()
958 ar->max_num_stations = TARGET_NUM_STATIONS; ath10k_core_init_firmware_features()
959 ar->max_num_vdevs = TARGET_NUM_VDEVS; ath10k_core_init_firmware_features()
960 ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC; ath10k_core_init_firmware_features()
965 ar->max_num_peers = TARGET_10X_NUM_PEERS; ath10k_core_init_firmware_features()
966 ar->max_num_stations = TARGET_10X_NUM_STATIONS; ath10k_core_init_firmware_features()
967 ar->max_num_vdevs = TARGET_10X_NUM_VDEVS; ath10k_core_init_firmware_features()
968 ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC; ath10k_core_init_firmware_features()
971 ar->max_num_peers = TARGET_TLV_NUM_PEERS; ath10k_core_init_firmware_features()
972 ar->max_num_stations = TARGET_TLV_NUM_STATIONS; ath10k_core_init_firmware_features()
973 ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS; ath10k_core_init_firmware_features()
974 ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC; ath10k_core_init_firmware_features()
985 int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) ath10k_core_start() argument
989 lockdep_assert_held(&ar->conf_mutex); ath10k_core_start()
991 clear_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags); ath10k_core_start()
993 ath10k_bmi_start(ar); ath10k_core_start()
995 if (ath10k_init_configure_target(ar)) { ath10k_core_start()
1000 status = ath10k_download_cal_data(ar); ath10k_core_start()
1004 status = ath10k_download_fw(ar, mode); ath10k_core_start()
1008 status = ath10k_init_uart(ar); ath10k_core_start()
1012 ar->htc.htc_ops.target_send_suspend_complete = ath10k_core_start()
1015 status = ath10k_htc_init(ar); ath10k_core_start()
1017 ath10k_err(ar, "could not init HTC (%d)\n", status); ath10k_core_start()
1021 status = ath10k_bmi_done(ar); ath10k_core_start()
1025 status = ath10k_wmi_attach(ar); ath10k_core_start()
1027 ath10k_err(ar, "WMI attach failed: %d\n", status); ath10k_core_start()
1031 status = ath10k_htt_init(ar); ath10k_core_start()
1033 ath10k_err(ar, "failed to init htt: %d\n", status); ath10k_core_start()
1037 status = ath10k_htt_tx_alloc(&ar->htt); ath10k_core_start()
1039 ath10k_err(ar, "failed to alloc htt tx: %d\n", status); ath10k_core_start()
1043 status = ath10k_htt_rx_alloc(&ar->htt); ath10k_core_start()
1045 ath10k_err(ar, "failed to alloc htt rx: %d\n", status); ath10k_core_start()
1049 status = ath10k_hif_start(ar); ath10k_core_start()
1051 ath10k_err(ar, "could not start HIF: %d\n", status); ath10k_core_start()
1055 status = ath10k_htc_wait_target(&ar->htc); ath10k_core_start()
1057 ath10k_err(ar, "failed to connect to HTC: %d\n", status); ath10k_core_start()
1062 status = ath10k_htt_connect(&ar->htt); ath10k_core_start()
1064 ath10k_err(ar, "failed to connect htt (%d)\n", status); ath10k_core_start()
1069 status = ath10k_wmi_connect(ar); ath10k_core_start()
1071 ath10k_err(ar, "could not connect wmi: %d\n", status); ath10k_core_start()
1075 status = ath10k_htc_start(&ar->htc); ath10k_core_start()
1077 ath10k_err(ar, "failed to start htc: %d\n", status); ath10k_core_start()
1082 status = ath10k_wmi_wait_for_service_ready(ar); ath10k_core_start()
1084 ath10k_warn(ar, "wmi service ready event not received"); ath10k_core_start()
1090 ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n", ath10k_core_start()
1091 ar->hw->wiphy->fw_version); ath10k_core_start()
1093 status = ath10k_wmi_cmd_init(ar); ath10k_core_start()
1095 ath10k_err(ar, "could not send WMI init command (%d)\n", ath10k_core_start()
1100 status = ath10k_wmi_wait_for_unified_ready(ar); ath10k_core_start()
1102 ath10k_err(ar, "wmi unified ready event not received\n"); ath10k_core_start()
1110 ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER, ath10k_core_start()
1111 ar->wmi.svc_map)); ath10k_core_start()
1113 status = ath10k_htt_rx_ring_refill(ar); ath10k_core_start()
1115 ath10k_err(ar, "failed to refill htt rx ring: %d\n", status); ath10k_core_start()
1121 status = ath10k_htt_setup(&ar->htt); ath10k_core_start()
1123 ath10k_err(ar, "failed to setup htt: %d\n", status); ath10k_core_start()
1128 status = ath10k_debug_start(ar); ath10k_core_start()
1132 ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1; ath10k_core_start()
1134 INIT_LIST_HEAD(&ar->arvifs); ath10k_core_start()
1139 ath10k_hif_stop(ar); ath10k_core_start()
1141 ath10k_htt_rx_free(&ar->htt); ath10k_core_start()
1143 ath10k_htt_tx_free(&ar->htt); ath10k_core_start()
1145 ath10k_wmi_detach(ar); ath10k_core_start()
1151 int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt) ath10k_wait_for_suspend() argument
1155 reinit_completion(&ar->target_suspend); ath10k_wait_for_suspend()
1157 ret = ath10k_wmi_pdev_suspend_target(ar, suspend_opt); ath10k_wait_for_suspend()
1159 ath10k_warn(ar, "could not suspend target (%d)\n", ret); ath10k_wait_for_suspend()
1163 ret = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ); ath10k_wait_for_suspend()
1166 ath10k_warn(ar, "suspend timed out - target pause event never came\n"); ath10k_wait_for_suspend()
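ath10k_wait_for_suspend() above is the stock completion handshake: re-arm ar->target_suspend, issue the WMI suspend command, then block for up to a second until ath10k_send_suspend_complete() (the first hit in this file) fires it. In isolation:

	reinit_completion(&ar->target_suspend);

	ret = ath10k_wmi_pdev_suspend_target(ar, suspend_opt);
	if (ret) {
		ath10k_warn(ar, "could not suspend target (%d)\n", ret);
		return ret;
	}

	/* wait_for_completion_timeout() returns 0 on timeout */
	if (!wait_for_completion_timeout(&ar->target_suspend, 1 * HZ))
		return -ETIMEDOUT;

	return 0;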
1173 void ath10k_core_stop(struct ath10k *ar) ath10k_core_stop() argument
1175 lockdep_assert_held(&ar->conf_mutex); ath10k_core_stop()
1178 if (ar->state != ATH10K_STATE_RESTARTING && ath10k_core_stop()
1179 ar->state != ATH10K_STATE_UTF) ath10k_core_stop()
1180 ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR); ath10k_core_stop()
1182 ath10k_debug_stop(ar); ath10k_core_stop()
1183 ath10k_hif_stop(ar); ath10k_core_stop()
1184 ath10k_htt_tx_free(&ar->htt); ath10k_core_stop()
1185 ath10k_htt_rx_free(&ar->htt); ath10k_core_stop()
1186 ath10k_wmi_detach(ar); ath10k_core_stop()
1194 static int ath10k_core_probe_fw(struct ath10k *ar) ath10k_core_probe_fw() argument
1199 ret = ath10k_hif_power_up(ar); ath10k_core_probe_fw()
1201 ath10k_err(ar, "could not start pci hif (%d)\n", ret); ath10k_core_probe_fw()
1206 ret = ath10k_bmi_get_target_info(ar, &target_info); ath10k_core_probe_fw()
1208 ath10k_err(ar, "could not get target info (%d)\n", ret); ath10k_core_probe_fw()
1212 ar->target_version = target_info.version; ath10k_core_probe_fw()
1213 ar->hw->wiphy->hw_version = target_info.version; ath10k_core_probe_fw()
1215 ret = ath10k_init_hw_params(ar); ath10k_core_probe_fw()
1217 ath10k_err(ar, "could not get hw params (%d)\n", ret); ath10k_core_probe_fw()
1221 ret = ath10k_core_fetch_firmware_files(ar); ath10k_core_probe_fw()
1223 ath10k_err(ar, "could not fetch firmware files (%d)\n", ret); ath10k_core_probe_fw()
1227 ret = ath10k_core_init_firmware_features(ar); ath10k_core_probe_fw()
1229 ath10k_err(ar, "fatal problem with firmware features: %d\n", ath10k_core_probe_fw()
1234 mutex_lock(&ar->conf_mutex); ath10k_core_probe_fw()
1236 ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL); ath10k_core_probe_fw()
1238 ath10k_err(ar, "could not init core (%d)\n", ret); ath10k_core_probe_fw()
1242 ath10k_print_driver_info(ar); ath10k_core_probe_fw()
1243 ath10k_core_stop(ar); ath10k_core_probe_fw()
1245 mutex_unlock(&ar->conf_mutex); ath10k_core_probe_fw()
1247 ath10k_hif_power_down(ar); ath10k_core_probe_fw()
1251 mutex_unlock(&ar->conf_mutex); ath10k_core_probe_fw()
1254 ath10k_core_free_firmware_files(ar); ath10k_core_probe_fw()
1257 ath10k_hif_power_down(ar); ath10k_core_probe_fw()
1264 struct ath10k *ar = container_of(work, struct ath10k, register_work); ath10k_core_register_work() local
1267 status = ath10k_core_probe_fw(ar); ath10k_core_register_work()
1269 ath10k_err(ar, "could not probe fw (%d)\n", status); ath10k_core_register_work()
1273 status = ath10k_mac_register(ar); ath10k_core_register_work()
1275 ath10k_err(ar, "could not register to mac80211 (%d)\n", status); ath10k_core_register_work()
1279 status = ath10k_debug_register(ar); ath10k_core_register_work()
1281 ath10k_err(ar, "unable to initialize debugfs\n"); ath10k_core_register_work()
1285 status = ath10k_spectral_create(ar); ath10k_core_register_work()
1287 ath10k_err(ar, "failed to initialize spectral\n"); ath10k_core_register_work()
1291 status = ath10k_thermal_register(ar); ath10k_core_register_work()
1293 ath10k_err(ar, "could not register thermal device: %d\n", ath10k_core_register_work()
1298 set_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags); ath10k_core_register_work()
1302 ath10k_spectral_destroy(ar); ath10k_core_register_work()
1304 ath10k_debug_destroy(ar); ath10k_core_register_work()
1306 ath10k_mac_unregister(ar); ath10k_core_register_work()
1308 ath10k_core_free_firmware_files(ar); ath10k_core_register_work()
1316 int ath10k_core_register(struct ath10k *ar, u32 chip_id) ath10k_core_register() argument
1318 ar->chip_id = chip_id; ath10k_core_register()
1319 queue_work(ar->workqueue, &ar->register_work); ath10k_core_register()
1325 void ath10k_core_unregister(struct ath10k *ar) ath10k_core_unregister() argument
1327 cancel_work_sync(&ar->register_work); ath10k_core_unregister()
1329 if (!test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) ath10k_core_unregister()
1332 ath10k_thermal_unregister(ar); ath10k_core_unregister()
1337 ath10k_spectral_destroy(ar); ath10k_core_unregister()
1342 ath10k_mac_unregister(ar); ath10k_core_unregister()
1344 ath10k_testmode_destroy(ar); ath10k_core_unregister()
1346 ath10k_core_free_firmware_files(ar); ath10k_core_unregister()
1348 ath10k_debug_unregister(ar); ath10k_core_unregister()
1357 struct ath10k *ar; ath10k_core_create() local
1360 ar = ath10k_mac_create(priv_size); ath10k_core_create()
1361 if (!ar) ath10k_core_create()
1364 ar->ath_common.priv = ar; ath10k_core_create()
1365 ar->ath_common.hw = ar->hw; ath10k_core_create()
1366 ar->dev = dev; ath10k_core_create()
1367 ar->hw_rev = hw_rev; ath10k_core_create()
1368 ar->hif.ops = hif_ops; ath10k_core_create()
1369 ar->hif.bus = bus; ath10k_core_create()
1373 ar->regs = &qca988x_regs; ath10k_core_create()
1376 ar->regs = &qca6174_regs; ath10k_core_create()
1379 ath10k_err(ar, "unsupported core hardware revision %d\n", ath10k_core_create()
1385 init_completion(&ar->scan.started); ath10k_core_create()
1386 init_completion(&ar->scan.completed); ath10k_core_create()
1387 init_completion(&ar->scan.on_channel); ath10k_core_create()
1388 init_completion(&ar->target_suspend); ath10k_core_create()
1390 init_completion(&ar->install_key_done); ath10k_core_create()
1391 init_completion(&ar->vdev_setup_done); ath10k_core_create()
1392 init_completion(&ar->thermal.wmi_sync); ath10k_core_create()
1394 INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work); ath10k_core_create()
1396 ar->workqueue = create_singlethread_workqueue("ath10k_wq"); ath10k_core_create()
1397 if (!ar->workqueue) ath10k_core_create()
1400 mutex_init(&ar->conf_mutex); ath10k_core_create()
1401 spin_lock_init(&ar->data_lock); ath10k_core_create()
1403 INIT_LIST_HEAD(&ar->peers); ath10k_core_create()
1404 init_waitqueue_head(&ar->peer_mapping_wq); ath10k_core_create()
1405 init_waitqueue_head(&ar->htt.empty_tx_wq); ath10k_core_create()
1406 init_waitqueue_head(&ar->wmi.tx_credits_wq); ath10k_core_create()
1408 init_completion(&ar->offchan_tx_completed); ath10k_core_create()
1409 INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work); ath10k_core_create()
1410 skb_queue_head_init(&ar->offchan_tx_queue); ath10k_core_create()
1412 INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work); ath10k_core_create()
1413 skb_queue_head_init(&ar->wmi_mgmt_tx_queue); ath10k_core_create()
1415 INIT_WORK(&ar->register_work, ath10k_core_register_work); ath10k_core_create()
1416 INIT_WORK(&ar->restart_work, ath10k_core_restart); ath10k_core_create()
1418 ret = ath10k_debug_create(ar); ath10k_core_create()
1422 return ar; ath10k_core_create()
1425 destroy_workqueue(ar->workqueue); ath10k_core_create()
1428 ath10k_mac_destroy(ar); ath10k_core_create()
1434 void ath10k_core_destroy(struct ath10k *ar) ath10k_core_destroy() argument
1436 flush_workqueue(ar->workqueue); ath10k_core_destroy()
1437 destroy_workqueue(ar->workqueue); ath10k_core_destroy()
1439 ath10k_debug_destroy(ar); ath10k_core_destroy()
1440 ath10k_mac_destroy(ar); ath10k_core_destroy()
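The ath10k_core_create() lines above follow the kernel's goto-based unwind idiom: each resource acquired on the way down adds a teardown step in the error path (destroy_workqueue, ath10k_mac_destroy), and a failure jumps to the label that releases exactly what has been set up so far, in reverse order. A minimal userspace sketch of the idiom, with hypothetical names rather than the driver's actual code:

#include <stdlib.h>

struct widget { int *a; int *b; };

struct widget *widget_create(void)
{
        struct widget *w;

        w = calloc(1, sizeof(*w));
        if (!w)
                goto err;

        w->a = malloc(64);
        if (!w->a)
                goto err_free_w;

        w->b = malloc(64);
        if (!w->b)
                goto err_free_a;        /* unwind in reverse acquisition order */

        return w;

err_free_a:
        free(w->a);
err_free_w:
        free(w);
err:
        return NULL;
}

Each label undoes one acquisition, so adding a new resource means adding one allocation, one check, and one label, without touching the existing error handling.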
H A Dwmi-ops.h25 void (*rx)(struct ath10k *ar, struct sk_buff *skb);
28 int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
30 int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
32 int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
34 int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
36 int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
38 int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
40 int (*pull_phyerr)(struct ath10k *ar, struct sk_buff *skb,
42 int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
44 int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
46 int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
49 struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
50 struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
51 struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
54 struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
56 struct sk_buff *(*gen_init)(struct ath10k *ar);
57 struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
59 struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
61 struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
65 struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
66 struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
69 struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
70 struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
72 struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
73 struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
75 struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
77 struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
79 struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
81 struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
83 struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
85 struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
87 struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
90 struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
94 struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
96 struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
98 struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
101 struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
105 struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
107 struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
111 struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
113 struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
114 struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
117 struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
118 struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
120 struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
121 struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
122 struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
126 struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
127 struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
129 struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
131 struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
134 struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
137 struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
141 struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
143 struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
145 struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
149 struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
153 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
156 ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_rx() argument
158 if (WARN_ON_ONCE(!ar->wmi.ops->rx)) ath10k_wmi_rx()
161 ar->wmi.ops->rx(ar, skb); ath10k_wmi_rx()
166 ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out, ath10k_wmi_map_svc() argument
169 if (!ar->wmi.ops->map_svc) ath10k_wmi_map_svc()
172 ar->wmi.ops->map_svc(in, out, len); ath10k_wmi_map_svc()
177 ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_pull_scan() argument
180 if (!ar->wmi.ops->pull_scan) ath10k_wmi_pull_scan()
183 return ar->wmi.ops->pull_scan(ar, skb, arg); ath10k_wmi_pull_scan()
187 ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_pull_mgmt_rx() argument
190 if (!ar->wmi.ops->pull_mgmt_rx) ath10k_wmi_pull_mgmt_rx()
193 return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg); ath10k_wmi_pull_mgmt_rx()
197 ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_pull_ch_info() argument
200 if (!ar->wmi.ops->pull_ch_info) ath10k_wmi_pull_ch_info()
203 return ar->wmi.ops->pull_ch_info(ar, skb, arg); ath10k_wmi_pull_ch_info()
207 ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_pull_vdev_start() argument
210 if (!ar->wmi.ops->pull_vdev_start) ath10k_wmi_pull_vdev_start()
213 return ar->wmi.ops->pull_vdev_start(ar, skb, arg); ath10k_wmi_pull_vdev_start()
217 ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_pull_peer_kick() argument
220 if (!ar->wmi.ops->pull_peer_kick) ath10k_wmi_pull_peer_kick()
223 return ar->wmi.ops->pull_peer_kick(ar, skb, arg); ath10k_wmi_pull_peer_kick()
227 ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_pull_swba() argument
230 if (!ar->wmi.ops->pull_swba) ath10k_wmi_pull_swba()
233 return ar->wmi.ops->pull_swba(ar, skb, arg); ath10k_wmi_pull_swba()
237 ath10k_wmi_pull_phyerr(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_pull_phyerr() argument
240 if (!ar->wmi.ops->pull_phyerr) ath10k_wmi_pull_phyerr()
243 return ar->wmi.ops->pull_phyerr(ar, skb, arg); ath10k_wmi_pull_phyerr()
247 ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_pull_svc_rdy() argument
250 if (!ar->wmi.ops->pull_svc_rdy) ath10k_wmi_pull_svc_rdy()
253 return ar->wmi.ops->pull_svc_rdy(ar, skb, arg); ath10k_wmi_pull_svc_rdy()
257 ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_pull_rdy() argument
260 if (!ar->wmi.ops->pull_rdy) ath10k_wmi_pull_rdy()
263 return ar->wmi.ops->pull_rdy(ar, skb, arg); ath10k_wmi_pull_rdy()
267 ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_pull_fw_stats() argument
270 if (!ar->wmi.ops->pull_fw_stats) ath10k_wmi_pull_fw_stats()
273 return ar->wmi.ops->pull_fw_stats(ar, skb, stats); ath10k_wmi_pull_fw_stats()
277 ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu) ath10k_wmi_mgmt_tx() argument
283 if (!ar->wmi.ops->gen_mgmt_tx) ath10k_wmi_mgmt_tx()
286 skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu); ath10k_wmi_mgmt_tx()
290 ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid); ath10k_wmi_mgmt_tx()
297 ieee80211_tx_status_irqsafe(ar->hw, msdu); ath10k_wmi_mgmt_tx()
303 ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g, ath10k_wmi_pdev_set_regdomain() argument
309 if (!ar->wmi.ops->gen_pdev_set_rd) ath10k_wmi_pdev_set_regdomain()
312 skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g, ath10k_wmi_pdev_set_regdomain()
317 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_pdev_set_regdomain()
318 ar->wmi.cmd->pdev_set_regdomain_cmdid); ath10k_wmi_pdev_set_regdomain()
322 ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt) ath10k_wmi_pdev_suspend_target() argument
326 if (!ar->wmi.ops->gen_pdev_suspend) ath10k_wmi_pdev_suspend_target()
329 skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt); ath10k_wmi_pdev_suspend_target()
333 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid); ath10k_wmi_pdev_suspend_target()
337 ath10k_wmi_pdev_resume_target(struct ath10k *ar) ath10k_wmi_pdev_resume_target() argument
341 if (!ar->wmi.ops->gen_pdev_resume) ath10k_wmi_pdev_resume_target()
344 skb = ar->wmi.ops->gen_pdev_resume(ar); ath10k_wmi_pdev_resume_target()
348 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid); ath10k_wmi_pdev_resume_target()
352 ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value) ath10k_wmi_pdev_set_param() argument
356 if (!ar->wmi.ops->gen_pdev_set_param) ath10k_wmi_pdev_set_param()
359 skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value); ath10k_wmi_pdev_set_param()
363 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid); ath10k_wmi_pdev_set_param()
367 ath10k_wmi_cmd_init(struct ath10k *ar) ath10k_wmi_cmd_init() argument
371 if (!ar->wmi.ops->gen_init) ath10k_wmi_cmd_init()
374 skb = ar->wmi.ops->gen_init(ar); ath10k_wmi_cmd_init()
378 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid); ath10k_wmi_cmd_init()
382 ath10k_wmi_start_scan(struct ath10k *ar, ath10k_wmi_start_scan() argument
387 if (!ar->wmi.ops->gen_start_scan) ath10k_wmi_start_scan()
390 skb = ar->wmi.ops->gen_start_scan(ar, arg); ath10k_wmi_start_scan()
394 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid); ath10k_wmi_start_scan()
398 ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg) ath10k_wmi_stop_scan() argument
402 if (!ar->wmi.ops->gen_stop_scan) ath10k_wmi_stop_scan()
405 skb = ar->wmi.ops->gen_stop_scan(ar, arg); ath10k_wmi_stop_scan()
409 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid); ath10k_wmi_stop_scan()
413 ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id, ath10k_wmi_vdev_create() argument
420 if (!ar->wmi.ops->gen_vdev_create) ath10k_wmi_vdev_create()
423 skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr); ath10k_wmi_vdev_create()
427 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid); ath10k_wmi_vdev_create()
431 ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id) ath10k_wmi_vdev_delete() argument
435 if (!ar->wmi.ops->gen_vdev_delete) ath10k_wmi_vdev_delete()
438 skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id); ath10k_wmi_vdev_delete()
442 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid); ath10k_wmi_vdev_delete()
446 ath10k_wmi_vdev_start(struct ath10k *ar, ath10k_wmi_vdev_start() argument
451 if (!ar->wmi.ops->gen_vdev_start) ath10k_wmi_vdev_start()
454 skb = ar->wmi.ops->gen_vdev_start(ar, arg, false); ath10k_wmi_vdev_start()
458 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_vdev_start()
459 ar->wmi.cmd->vdev_start_request_cmdid); ath10k_wmi_vdev_start()
463 ath10k_wmi_vdev_restart(struct ath10k *ar, ath10k_wmi_vdev_restart() argument
468 if (!ar->wmi.ops->gen_vdev_start) ath10k_wmi_vdev_restart()
471 skb = ar->wmi.ops->gen_vdev_start(ar, arg, true); ath10k_wmi_vdev_restart()
475 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_vdev_restart()
476 ar->wmi.cmd->vdev_restart_request_cmdid); ath10k_wmi_vdev_restart()
480 ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id) ath10k_wmi_vdev_stop() argument
484 if (!ar->wmi.ops->gen_vdev_stop) ath10k_wmi_vdev_stop()
487 skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id); ath10k_wmi_vdev_stop()
491 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid); ath10k_wmi_vdev_stop()
495 ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid) ath10k_wmi_vdev_up() argument
499 if (!ar->wmi.ops->gen_vdev_up) ath10k_wmi_vdev_up()
502 skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid); ath10k_wmi_vdev_up()
506 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid); ath10k_wmi_vdev_up()
510 ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id) ath10k_wmi_vdev_down() argument
514 if (!ar->wmi.ops->gen_vdev_down) ath10k_wmi_vdev_down()
517 skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id); ath10k_wmi_vdev_down()
521 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid); ath10k_wmi_vdev_down()
525 ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id, ath10k_wmi_vdev_set_param() argument
530 if (!ar->wmi.ops->gen_vdev_set_param) ath10k_wmi_vdev_set_param()
533 skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id, ath10k_wmi_vdev_set_param()
538 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid); ath10k_wmi_vdev_set_param()
542 ath10k_wmi_vdev_install_key(struct ath10k *ar, ath10k_wmi_vdev_install_key() argument
547 if (!ar->wmi.ops->gen_vdev_install_key) ath10k_wmi_vdev_install_key()
550 skb = ar->wmi.ops->gen_vdev_install_key(ar, arg); ath10k_wmi_vdev_install_key()
554 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_vdev_install_key()
555 ar->wmi.cmd->vdev_install_key_cmdid); ath10k_wmi_vdev_install_key()
559 ath10k_wmi_vdev_spectral_conf(struct ath10k *ar, ath10k_wmi_vdev_spectral_conf() argument
565 skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg); ath10k_wmi_vdev_spectral_conf()
569 cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid; ath10k_wmi_vdev_spectral_conf()
570 return ath10k_wmi_cmd_send(ar, skb, cmd_id); ath10k_wmi_vdev_spectral_conf()
574 ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger, ath10k_wmi_vdev_spectral_enable() argument
580 skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger, ath10k_wmi_vdev_spectral_enable()
585 cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid; ath10k_wmi_vdev_spectral_enable()
586 return ath10k_wmi_cmd_send(ar, skb, cmd_id); ath10k_wmi_vdev_spectral_enable()
590 ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id, ath10k_wmi_vdev_sta_uapsd() argument
598 if (!ar->wmi.ops->gen_vdev_sta_uapsd) ath10k_wmi_vdev_sta_uapsd()
601 skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args, ath10k_wmi_vdev_sta_uapsd()
606 cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid; ath10k_wmi_vdev_sta_uapsd()
607 return ath10k_wmi_cmd_send(ar, skb, cmd_id); ath10k_wmi_vdev_sta_uapsd()
611 ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id, ath10k_wmi_vdev_wmm_conf() argument
617 skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg); ath10k_wmi_vdev_wmm_conf()
621 cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid; ath10k_wmi_vdev_wmm_conf()
622 return ath10k_wmi_cmd_send(ar, skb, cmd_id); ath10k_wmi_vdev_wmm_conf()
626 ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, ath10k_wmi_peer_create() argument
631 if (!ar->wmi.ops->gen_peer_create) ath10k_wmi_peer_create()
634 skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr); ath10k_wmi_peer_create()
638 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid); ath10k_wmi_peer_create()
642 ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id, ath10k_wmi_peer_delete() argument
647 if (!ar->wmi.ops->gen_peer_delete) ath10k_wmi_peer_delete()
650 skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr); ath10k_wmi_peer_delete()
654 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid); ath10k_wmi_peer_delete()
658 ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id, ath10k_wmi_peer_flush() argument
663 if (!ar->wmi.ops->gen_peer_flush) ath10k_wmi_peer_flush()
666 skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap); ath10k_wmi_peer_flush()
670 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid); ath10k_wmi_peer_flush()
674 ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr, ath10k_wmi_peer_set_param() argument
679 if (!ar->wmi.ops->gen_peer_set_param) ath10k_wmi_peer_set_param()
682 skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id, ath10k_wmi_peer_set_param()
687 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid); ath10k_wmi_peer_set_param()
691 ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id, ath10k_wmi_set_psmode() argument
696 if (!ar->wmi.ops->gen_set_psmode) ath10k_wmi_set_psmode()
699 skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode); ath10k_wmi_set_psmode()
703 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_set_psmode()
704 ar->wmi.cmd->sta_powersave_mode_cmdid); ath10k_wmi_set_psmode()
708 ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id, ath10k_wmi_set_sta_ps_param() argument
713 if (!ar->wmi.ops->gen_set_sta_ps) ath10k_wmi_set_sta_ps_param()
716 skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value); ath10k_wmi_set_sta_ps_param()
720 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_set_sta_ps_param()
721 ar->wmi.cmd->sta_powersave_param_cmdid); ath10k_wmi_set_sta_ps_param()
725 ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac, ath10k_wmi_set_ap_ps_param() argument
730 if (!ar->wmi.ops->gen_set_ap_ps) ath10k_wmi_set_ap_ps_param()
733 skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value); ath10k_wmi_set_ap_ps_param()
737 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_set_ap_ps_param()
738 ar->wmi.cmd->ap_ps_peer_param_cmdid); ath10k_wmi_set_ap_ps_param()
742 ath10k_wmi_scan_chan_list(struct ath10k *ar, ath10k_wmi_scan_chan_list() argument
747 if (!ar->wmi.ops->gen_scan_chan_list) ath10k_wmi_scan_chan_list()
750 skb = ar->wmi.ops->gen_scan_chan_list(ar, arg); ath10k_wmi_scan_chan_list()
754 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid); ath10k_wmi_scan_chan_list()
758 ath10k_wmi_peer_assoc(struct ath10k *ar, ath10k_wmi_peer_assoc() argument
763 if (!ar->wmi.ops->gen_peer_assoc) ath10k_wmi_peer_assoc()
766 skb = ar->wmi.ops->gen_peer_assoc(ar, arg); ath10k_wmi_peer_assoc()
770 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid); ath10k_wmi_peer_assoc()
774 ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id, ath10k_wmi_beacon_send_ref_nowait() argument
782 if (!ar->wmi.ops->gen_beacon_dma) ath10k_wmi_beacon_send_ref_nowait()
785 skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr, ath10k_wmi_beacon_send_ref_nowait()
790 ret = ath10k_wmi_cmd_send_nowait(ar, skb, ath10k_wmi_beacon_send_ref_nowait()
791 ar->wmi.cmd->pdev_send_bcn_cmdid); ath10k_wmi_beacon_send_ref_nowait()
801 ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar, ath10k_wmi_pdev_set_wmm_params() argument
806 if (!ar->wmi.ops->gen_pdev_set_wmm) ath10k_wmi_pdev_set_wmm_params()
809 skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg); ath10k_wmi_pdev_set_wmm_params()
813 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_pdev_set_wmm_params()
814 ar->wmi.cmd->pdev_set_wmm_params_cmdid); ath10k_wmi_pdev_set_wmm_params()
818 ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask) ath10k_wmi_request_stats() argument
822 if (!ar->wmi.ops->gen_request_stats) ath10k_wmi_request_stats()
825 skb = ar->wmi.ops->gen_request_stats(ar, stats_mask); ath10k_wmi_request_stats()
829 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid); ath10k_wmi_request_stats()
833 ath10k_wmi_force_fw_hang(struct ath10k *ar, ath10k_wmi_force_fw_hang() argument
838 if (!ar->wmi.ops->gen_force_fw_hang) ath10k_wmi_force_fw_hang()
841 skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms); ath10k_wmi_force_fw_hang()
845 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid); ath10k_wmi_force_fw_hang()
849 ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level) ath10k_wmi_dbglog_cfg() argument
853 if (!ar->wmi.ops->gen_dbglog_cfg) ath10k_wmi_dbglog_cfg()
856 skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level); ath10k_wmi_dbglog_cfg()
860 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid); ath10k_wmi_dbglog_cfg()
864 ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter) ath10k_wmi_pdev_pktlog_enable() argument
868 if (!ar->wmi.ops->gen_pktlog_enable) ath10k_wmi_pdev_pktlog_enable()
871 skb = ar->wmi.ops->gen_pktlog_enable(ar, filter); ath10k_wmi_pdev_pktlog_enable()
875 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid); ath10k_wmi_pdev_pktlog_enable()
879 ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar) ath10k_wmi_pdev_pktlog_disable() argument
883 if (!ar->wmi.ops->gen_pktlog_disable) ath10k_wmi_pdev_pktlog_disable()
886 skb = ar->wmi.ops->gen_pktlog_disable(ar); ath10k_wmi_pdev_pktlog_disable()
890 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_pdev_pktlog_disable()
891 ar->wmi.cmd->pdev_pktlog_disable_cmdid); ath10k_wmi_pdev_pktlog_disable()
895 ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration, ath10k_wmi_pdev_set_quiet_mode() argument
900 if (!ar->wmi.ops->gen_pdev_set_quiet_mode) ath10k_wmi_pdev_set_quiet_mode()
903 skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration, ath10k_wmi_pdev_set_quiet_mode()
908 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_pdev_set_quiet_mode()
909 ar->wmi.cmd->pdev_set_quiet_mode_cmdid); ath10k_wmi_pdev_set_quiet_mode()
913 ath10k_wmi_pdev_get_temperature(struct ath10k *ar) ath10k_wmi_pdev_get_temperature() argument
917 if (!ar->wmi.ops->gen_pdev_get_temperature) ath10k_wmi_pdev_get_temperature()
920 skb = ar->wmi.ops->gen_pdev_get_temperature(ar); ath10k_wmi_pdev_get_temperature()
924 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_pdev_get_temperature()
925 ar->wmi.cmd->pdev_get_temperature_cmdid); ath10k_wmi_pdev_get_temperature()
929 ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac) ath10k_wmi_addba_clear_resp() argument
933 if (!ar->wmi.ops->gen_addba_clear_resp) ath10k_wmi_addba_clear_resp()
936 skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac); ath10k_wmi_addba_clear_resp()
940 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_addba_clear_resp()
941 ar->wmi.cmd->addba_clear_resp_cmdid); ath10k_wmi_addba_clear_resp()
945 ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac, ath10k_wmi_addba_send() argument
950 if (!ar->wmi.ops->gen_addba_send) ath10k_wmi_addba_send()
953 skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size); ath10k_wmi_addba_send()
957 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_addba_send()
958 ar->wmi.cmd->addba_send_cmdid); ath10k_wmi_addba_send()
962 ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac, ath10k_wmi_addba_set_resp() argument
967 if (!ar->wmi.ops->gen_addba_set_resp) ath10k_wmi_addba_set_resp()
970 skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status); ath10k_wmi_addba_set_resp()
974 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_addba_set_resp()
975 ar->wmi.cmd->addba_set_resp_cmdid); ath10k_wmi_addba_set_resp()
979 ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac, ath10k_wmi_delba_send() argument
984 if (!ar->wmi.ops->gen_delba_send) ath10k_wmi_delba_send()
987 skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator, ath10k_wmi_delba_send()
992 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_delba_send()
993 ar->wmi.cmd->delba_send_cmdid); ath10k_wmi_delba_send()
997 ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset, ath10k_wmi_bcn_tmpl() argument
1003 if (!ar->wmi.ops->gen_bcn_tmpl) ath10k_wmi_bcn_tmpl()
1006 skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn, ath10k_wmi_bcn_tmpl()
1012 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid); ath10k_wmi_bcn_tmpl()
1016 ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb) ath10k_wmi_prb_tmpl() argument
1020 if (!ar->wmi.ops->gen_prb_tmpl) ath10k_wmi_prb_tmpl()
1023 skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb); ath10k_wmi_prb_tmpl()
1027 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid); ath10k_wmi_prb_tmpl()
1031 ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie) ath10k_wmi_p2p_go_bcn_ie() argument
1035 if (!ar->wmi.ops->gen_p2p_go_bcn_ie) ath10k_wmi_p2p_go_bcn_ie()
1038 skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie); ath10k_wmi_p2p_go_bcn_ie()
1042 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie); ath10k_wmi_p2p_go_bcn_ie()
1046 ath10k_wmi_sta_keepalive(struct ath10k *ar, ath10k_wmi_sta_keepalive() argument
1052 if (!ar->wmi.ops->gen_sta_keepalive) ath10k_wmi_sta_keepalive()
1055 skb = ar->wmi.ops->gen_sta_keepalive(ar, arg); ath10k_wmi_sta_keepalive()
1059 cmd_id = ar->wmi.cmd->sta_keepalive_cmd; ath10k_wmi_sta_keepalive()
1060 return ath10k_wmi_cmd_send(ar, skb, cmd_id); ath10k_wmi_sta_keepalive()
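Every wrapper in the wmi-ops.h excerpt follows the same three-step dispatch: check that the per-firmware ops table implements the gen_*() hook (returning an error when it does not), build the command buffer through that hook, and hand it to ath10k_wmi_cmd_send() with the command id looked up in the per-firmware command map. A self-contained userspace sketch of that pattern follows; every name in it is illustrative, not the driver's real API, and buffer errors are simplified to NULL checks:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct buf { size_t len; };                       /* stand-in for an skb */

struct wmi_ops_sketch {
        /* may be NULL when a firmware branch lacks the command */
        struct buf *(*gen_vdev_up)(int vdev_id);
};

struct wmi_cmd_map_sketch { unsigned int vdev_up_cmdid; };

struct dev_sketch {
        const struct wmi_ops_sketch *ops;
        const struct wmi_cmd_map_sketch *cmd;
};

static int cmd_send(struct dev_sketch *d, struct buf *b, unsigned int cmd_id)
{
        printf("send cmd %u, %zu bytes\n", cmd_id, b->len);
        free(b);
        return 0;
}

/* The wrapper pattern: guard the hook, generate, send with the mapped id. */
static int wmi_vdev_up(struct dev_sketch *d, int vdev_id)
{
        struct buf *b;

        if (!d->ops->gen_vdev_up)
                return -EOPNOTSUPP;

        b = d->ops->gen_vdev_up(vdev_id);
        if (!b)
                return -ENOMEM;

        return cmd_send(d, b, d->cmd->vdev_up_cmdid);
}

static struct buf *gen_vdev_up_v1(int vdev_id)
{
        struct buf *b = malloc(sizeof(*b));

        if (b)
                b->len = 16 + (size_t)vdev_id;    /* pretend-encoded command */
        return b;
}

int main(void)
{
        const struct wmi_ops_sketch ops = { .gen_vdev_up = gen_vdev_up_v1 };
        const struct wmi_cmd_map_sketch map = { .vdev_up_cmdid = 21 };
        struct dev_sketch d = { .ops = &ops, .cmd = &map };

        return wmi_vdev_up(&d, 0);
}

The indirection lets one driver core drive several firmware ABIs: each branch fills in the ops and command-id tables once, and callers never branch on firmware version.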
H A Dtestmode.c41 bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb) ath10k_tm_event_wmi() argument
47 ath10k_dbg(ar, ATH10K_DBG_TESTMODE, ath10k_tm_event_wmi()
51 ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len); ath10k_tm_event_wmi()
53 spin_lock_bh(&ar->data_lock); ath10k_tm_event_wmi()
55 if (!ar->testmode.utf_monitor) { ath10k_tm_event_wmi()
66 nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy, ath10k_tm_event_wmi()
70 ath10k_warn(ar, ath10k_tm_event_wmi()
77 ath10k_warn(ar, ath10k_tm_event_wmi()
86 ath10k_warn(ar, ath10k_tm_event_wmi()
95 ath10k_warn(ar, ath10k_tm_event_wmi()
105 spin_unlock_bh(&ar->data_lock); ath10k_tm_event_wmi()
110 static int ath10k_tm_cmd_get_version(struct ath10k *ar, struct nlattr *tb[]) ath10k_tm_cmd_get_version() argument
115 ath10k_dbg(ar, ATH10K_DBG_TESTMODE, ath10k_tm_cmd_get_version()
120 skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy, ath10k_tm_cmd_get_version()
142 static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[]) ath10k_tm_cmd_utf_start() argument
147 ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode cmd utf start\n"); ath10k_tm_cmd_utf_start()
149 mutex_lock(&ar->conf_mutex); ath10k_tm_cmd_utf_start()
151 if (ar->state == ATH10K_STATE_UTF) { ath10k_tm_cmd_utf_start()
157 if (ar->state != ATH10K_STATE_OFF) { ath10k_tm_cmd_utf_start()
162 if (WARN_ON(ar->testmode.utf != NULL)) { ath10k_tm_cmd_utf_start()
169 ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE); ath10k_tm_cmd_utf_start()
172 ret = request_firmware(&ar->testmode.utf, filename, ar->dev); ath10k_tm_cmd_utf_start()
174 ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n", ath10k_tm_cmd_utf_start()
179 spin_lock_bh(&ar->data_lock); ath10k_tm_cmd_utf_start()
181 ar->testmode.utf_monitor = true; ath10k_tm_cmd_utf_start()
183 spin_unlock_bh(&ar->data_lock); ath10k_tm_cmd_utf_start()
185 BUILD_BUG_ON(sizeof(ar->fw_features) != ath10k_tm_cmd_utf_start()
186 sizeof(ar->testmode.orig_fw_features)); ath10k_tm_cmd_utf_start()
188 memcpy(ar->testmode.orig_fw_features, ar->fw_features, ath10k_tm_cmd_utf_start()
189 sizeof(ar->fw_features)); ath10k_tm_cmd_utf_start()
190 ar->testmode.orig_wmi_op_version = ar->wmi.op_version; ath10k_tm_cmd_utf_start()
196 memset(ar->fw_features, 0, sizeof(ar->fw_features)); ath10k_tm_cmd_utf_start()
197 ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_1; ath10k_tm_cmd_utf_start()
199 ret = ath10k_hif_power_up(ar); ath10k_tm_cmd_utf_start()
201 ath10k_err(ar, "failed to power up hif (testmode): %d\n", ret); ath10k_tm_cmd_utf_start()
202 ar->state = ATH10K_STATE_OFF; ath10k_tm_cmd_utf_start()
206 ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF); ath10k_tm_cmd_utf_start()
208 ath10k_err(ar, "failed to start core (testmode): %d\n", ret); ath10k_tm_cmd_utf_start()
209 ar->state = ATH10K_STATE_OFF; ath10k_tm_cmd_utf_start()
213 ar->state = ATH10K_STATE_UTF; ath10k_tm_cmd_utf_start()
215 ath10k_info(ar, "UTF firmware started\n"); ath10k_tm_cmd_utf_start()
217 mutex_unlock(&ar->conf_mutex); ath10k_tm_cmd_utf_start()
222 ath10k_hif_power_down(ar); ath10k_tm_cmd_utf_start()
226 memcpy(ar->fw_features, ar->testmode.orig_fw_features, ath10k_tm_cmd_utf_start()
227 sizeof(ar->fw_features)); ath10k_tm_cmd_utf_start()
228 ar->wmi.op_version = ar->testmode.orig_wmi_op_version; ath10k_tm_cmd_utf_start()
230 release_firmware(ar->testmode.utf); ath10k_tm_cmd_utf_start()
231 ar->testmode.utf = NULL; ath10k_tm_cmd_utf_start()
234 mutex_unlock(&ar->conf_mutex); ath10k_tm_cmd_utf_start()
239 static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar) __ath10k_tm_cmd_utf_stop() argument
241 lockdep_assert_held(&ar->conf_mutex); __ath10k_tm_cmd_utf_stop()
243 ath10k_core_stop(ar); __ath10k_tm_cmd_utf_stop()
244 ath10k_hif_power_down(ar); __ath10k_tm_cmd_utf_stop()
246 spin_lock_bh(&ar->data_lock); __ath10k_tm_cmd_utf_stop()
248 ar->testmode.utf_monitor = false; __ath10k_tm_cmd_utf_stop()
250 spin_unlock_bh(&ar->data_lock); __ath10k_tm_cmd_utf_stop()
253 memcpy(ar->fw_features, ar->testmode.orig_fw_features, __ath10k_tm_cmd_utf_stop()
254 sizeof(ar->fw_features)); __ath10k_tm_cmd_utf_stop()
255 ar->wmi.op_version = ar->testmode.orig_wmi_op_version; __ath10k_tm_cmd_utf_stop()
257 release_firmware(ar->testmode.utf); __ath10k_tm_cmd_utf_stop()
258 ar->testmode.utf = NULL; __ath10k_tm_cmd_utf_stop()
260 ar->state = ATH10K_STATE_OFF; __ath10k_tm_cmd_utf_stop()
263 static int ath10k_tm_cmd_utf_stop(struct ath10k *ar, struct nlattr *tb[]) ath10k_tm_cmd_utf_stop() argument
267 ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode cmd utf stop\n"); ath10k_tm_cmd_utf_stop()
269 mutex_lock(&ar->conf_mutex); ath10k_tm_cmd_utf_stop()
271 if (ar->state != ATH10K_STATE_UTF) { ath10k_tm_cmd_utf_stop()
276 __ath10k_tm_cmd_utf_stop(ar); ath10k_tm_cmd_utf_stop()
280 ath10k_info(ar, "UTF firmware stopped\n"); ath10k_tm_cmd_utf_stop()
283 mutex_unlock(&ar->conf_mutex); ath10k_tm_cmd_utf_stop()
287 static int ath10k_tm_cmd_wmi(struct ath10k *ar, struct nlattr *tb[]) ath10k_tm_cmd_wmi() argument
294 mutex_lock(&ar->conf_mutex); ath10k_tm_cmd_wmi()
296 if (ar->state != ATH10K_STATE_UTF) { ath10k_tm_cmd_wmi()
315 ath10k_dbg(ar, ATH10K_DBG_TESTMODE, ath10k_tm_cmd_wmi()
319 ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len); ath10k_tm_cmd_wmi()
321 skb = ath10k_wmi_alloc_skb(ar, buf_len); ath10k_tm_cmd_wmi()
329 ret = ath10k_wmi_cmd_send(ar, skb, cmd_id); ath10k_tm_cmd_wmi()
331 ath10k_warn(ar, "failed to transmit wmi command (testmode): %d\n", ath10k_tm_cmd_wmi()
339 mutex_unlock(&ar->conf_mutex); ath10k_tm_cmd_wmi()
346 struct ath10k *ar = hw->priv; ath10k_tm_cmd() local
360 return ath10k_tm_cmd_get_version(ar, tb); ath10k_tm_cmd()
362 return ath10k_tm_cmd_utf_start(ar, tb); ath10k_tm_cmd()
364 return ath10k_tm_cmd_utf_stop(ar, tb); ath10k_tm_cmd()
366 return ath10k_tm_cmd_wmi(ar, tb); ath10k_tm_cmd()
372 void ath10k_testmode_destroy(struct ath10k *ar) ath10k_testmode_destroy() argument
374 mutex_lock(&ar->conf_mutex); ath10k_testmode_destroy()
376 if (ar->state != ATH10K_STATE_UTF) { ath10k_testmode_destroy()
381 __ath10k_tm_cmd_utf_stop(ar); ath10k_testmode_destroy()
384 mutex_unlock(&ar->conf_mutex); ath10k_testmode_destroy()
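ath10k_tm_cmd_utf_start() and __ath10k_tm_cmd_utf_stop() above bracket a save-swap-restore sequence: the normal firmware's feature bits and WMI op version are saved, test-firmware values are swapped in, and both the stop path and every start-side error path restore the originals. A hedged sketch of that shape; all names and the constant 101 are hypothetical:

#include <errno.h>

struct fw_ctx {
        unsigned long features;
        int op_version;
        /* saved originals, valid only while test mode is active */
        unsigned long orig_features;
        int orig_op_version;
        int in_test_mode;
};

static int core_start_utf(void) { return -EIO; }  /* stand-in; fails here */

static int testmode_start(struct fw_ctx *c)
{
        int ret;

        if (c->in_test_mode)
                return -EALREADY;

        /* save, then swap in the test firmware's settings */
        c->orig_features = c->features;
        c->orig_op_version = c->op_version;
        c->features = 0;
        c->op_version = 101;

        ret = core_start_utf();
        if (ret) {
                /* every error path restores the saved state */
                c->features = c->orig_features;
                c->op_version = c->orig_op_version;
                return ret;
        }

        c->in_test_mode = 1;
        return 0;
}

int main(void)
{
        struct fw_ctx c = { .features = 0xff, .op_version = 3 };

        testmode_start(&c);          /* fails; originals must be restored */
        return c.op_version == 3 ? 0 : 1;
}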
H A Dhif.h34 int (*tx_completion)(struct ath10k *ar,
36 int (*rx_completion)(struct ath10k *ar,
42 int (*tx_sg)(struct ath10k *ar, u8 pipe_id,
46 int (*diag_read)(struct ath10k *ar, u32 address, void *buf,
49 int (*diag_write)(struct ath10k *ar, u32 address, const void *data,
56 int (*exchange_bmi_msg)(struct ath10k *ar,
61 int (*start)(struct ath10k *ar);
65 void (*stop)(struct ath10k *ar);
67 int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
71 void (*get_default_pipe)(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe);
81 void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force);
83 void (*set_callbacks)(struct ath10k *ar,
86 u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);
88 u32 (*read32)(struct ath10k *ar, u32 address);
90 void (*write32)(struct ath10k *ar, u32 address, u32 value);
93 int (*power_up)(struct ath10k *ar);
97 void (*power_down)(struct ath10k *ar);
99 int (*suspend)(struct ath10k *ar);
100 int (*resume)(struct ath10k *ar);
103 static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id, ath10k_hif_tx_sg() argument
107 return ar->hif.ops->tx_sg(ar, pipe_id, items, n_items); ath10k_hif_tx_sg()
110 static inline int ath10k_hif_diag_read(struct ath10k *ar, u32 address, void *buf, ath10k_hif_diag_read() argument
113 return ar->hif.ops->diag_read(ar, address, buf, buf_len); ath10k_hif_diag_read()
116 static inline int ath10k_hif_diag_write(struct ath10k *ar, u32 address, ath10k_hif_diag_write() argument
119 if (!ar->hif.ops->diag_write) ath10k_hif_diag_write()
122 return ar->hif.ops->diag_write(ar, address, data, nbytes); ath10k_hif_diag_write()
125 static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar, ath10k_hif_exchange_bmi_msg() argument
129 return ar->hif.ops->exchange_bmi_msg(ar, request, request_len, ath10k_hif_exchange_bmi_msg()
133 static inline int ath10k_hif_start(struct ath10k *ar) ath10k_hif_start() argument
135 return ar->hif.ops->start(ar); ath10k_hif_start()
138 static inline void ath10k_hif_stop(struct ath10k *ar) ath10k_hif_stop() argument
140 return ar->hif.ops->stop(ar); ath10k_hif_stop()
143 static inline int ath10k_hif_map_service_to_pipe(struct ath10k *ar, ath10k_hif_map_service_to_pipe() argument
149 return ar->hif.ops->map_service_to_pipe(ar, service_id, ath10k_hif_map_service_to_pipe()
154 static inline void ath10k_hif_get_default_pipe(struct ath10k *ar, ath10k_hif_get_default_pipe() argument
157 ar->hif.ops->get_default_pipe(ar, ul_pipe, dl_pipe); ath10k_hif_get_default_pipe()
160 static inline void ath10k_hif_send_complete_check(struct ath10k *ar, ath10k_hif_send_complete_check() argument
163 ar->hif.ops->send_complete_check(ar, pipe_id, force); ath10k_hif_send_complete_check()
166 static inline void ath10k_hif_set_callbacks(struct ath10k *ar, ath10k_hif_set_callbacks() argument
169 ar->hif.ops->set_callbacks(ar, callbacks); ath10k_hif_set_callbacks()
172 static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar, ath10k_hif_get_free_queue_number() argument
175 return ar->hif.ops->get_free_queue_number(ar, pipe_id); ath10k_hif_get_free_queue_number()
178 static inline int ath10k_hif_power_up(struct ath10k *ar) ath10k_hif_power_up() argument
180 return ar->hif.ops->power_up(ar); ath10k_hif_power_up()
183 static inline void ath10k_hif_power_down(struct ath10k *ar) ath10k_hif_power_down() argument
185 ar->hif.ops->power_down(ar); ath10k_hif_power_down()
188 static inline int ath10k_hif_suspend(struct ath10k *ar) ath10k_hif_suspend() argument
190 if (!ar->hif.ops->suspend) ath10k_hif_suspend()
193 return ar->hif.ops->suspend(ar); ath10k_hif_suspend()
196 static inline int ath10k_hif_resume(struct ath10k *ar) ath10k_hif_resume() argument
198 if (!ar->hif.ops->resume) ath10k_hif_resume()
201 return ar->hif.ops->resume(ar); ath10k_hif_resume()
204 static inline u32 ath10k_hif_read32(struct ath10k *ar, u32 address) ath10k_hif_read32() argument
206 if (!ar->hif.ops->read32) { ath10k_hif_read32()
207 ath10k_warn(ar, "hif read32 not supported\n"); ath10k_hif_read32()
211 return ar->hif.ops->read32(ar, address); ath10k_hif_read32()
214 static inline void ath10k_hif_write32(struct ath10k *ar, ath10k_hif_write32() argument
217 if (!ar->hif.ops->write32) { ath10k_hif_write32()
218 ath10k_warn(ar, "hif write32 not supported\n"); ath10k_hif_write32()
222 ar->hif.ops->write32(ar, address, data); ath10k_hif_write32()
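hif.h wraps the bus backend the same way, but with two wrapper flavors: mandatory ops (start, power_up, tx_sg) are called unconditionally, while optional ops are NULL-checked first, either failing with -EOPNOTSUPP (diag_write, suspend, resume) or warning and falling back (read32/write32). A sketch of the two flavors, with invented names and a 0 fallback standing in for whatever the real read32 wrapper returns:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct hif_ops_sketch {
        int (*power_up)(void);                  /* mandatory */
        int (*suspend)(void);                   /* optional  */
        uint32_t (*read32)(uint32_t address);   /* optional  */
};

struct hif_dev_sketch { const struct hif_ops_sketch *ops; };

/* Mandatory op: no guard; a NULL pointer here is a backend bug. */
static int hif_power_up(struct hif_dev_sketch *d)
{
        return d->ops->power_up();
}

/* Optional op: absence is a legitimate backend configuration. */
static int hif_suspend(struct hif_dev_sketch *d)
{
        if (!d->ops->suspend)
                return -EOPNOTSUPP;
        return d->ops->suspend();
}

/* Optional op with a warning and a fallback value. */
static uint32_t hif_read32(struct hif_dev_sketch *d, uint32_t address)
{
        if (!d->ops->read32) {
                fprintf(stderr, "hif read32 not supported\n");
                return 0;
        }
        return d->ops->read32(address);
}

static int pci_power_up(void) { return 0; }

int main(void)
{
        /* suspend and read32 left NULL: this backend lacks them */
        const struct hif_ops_sketch ops = { .power_up = pci_power_up };
        struct hif_dev_sketch d = { .ops = &ops };

        hif_power_up(&d);
        printf("suspend -> %d\n", hif_suspend(&d));
        printf("read32  -> %u\n", (unsigned)hif_read32(&d, 0x40));
        return 0;
}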
H A Dmac.c42 struct ath10k *ar = arvif->ar; ath10k_send_key() local
51 lockdep_assert_held(&arvif->ar->conf_mutex); ath10k_send_key()
83 ath10k_warn(ar, "cipher %d is not supported\n", key->cipher); ath10k_send_key()
92 return ath10k_wmi_vdev_install_key(arvif->ar, &arg); ath10k_send_key()
100 struct ath10k *ar = arvif->ar; ath10k_install_key() local
103 lockdep_assert_held(&ar->conf_mutex); ath10k_install_key()
105 reinit_completion(&ar->install_key_done); ath10k_install_key()
111 ret = wait_for_completion_timeout(&ar->install_key_done, 3*HZ); ath10k_install_key()
121 struct ath10k *ar = arvif->ar; ath10k_install_peer_wep_keys() local
127 lockdep_assert_held(&ar->conf_mutex); ath10k_install_peer_wep_keys()
129 spin_lock_bh(&ar->data_lock); ath10k_install_peer_wep_keys()
130 peer = ath10k_peer_find(ar, arvif->vdev_id, addr); ath10k_install_peer_wep_keys()
131 spin_unlock_bh(&ar->data_lock); ath10k_install_peer_wep_keys()
150 spin_lock_bh(&ar->data_lock); ath10k_install_peer_wep_keys()
152 spin_unlock_bh(&ar->data_lock); ath10k_install_peer_wep_keys()
161 struct ath10k *ar = arvif->ar; ath10k_clear_peer_keys() local
167 lockdep_assert_held(&ar->conf_mutex); ath10k_clear_peer_keys()
169 spin_lock_bh(&ar->data_lock); ath10k_clear_peer_keys()
170 peer = ath10k_peer_find(ar, arvif->vdev_id, addr); ath10k_clear_peer_keys()
171 spin_unlock_bh(&ar->data_lock); ath10k_clear_peer_keys()
187 ath10k_warn(ar, "failed to remove peer wep key %d: %d\n", ath10k_clear_peer_keys()
190 spin_lock_bh(&ar->data_lock); ath10k_clear_peer_keys()
192 spin_unlock_bh(&ar->data_lock); ath10k_clear_peer_keys()
198 bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr, ath10k_mac_is_peer_wep_key_set() argument
204 lockdep_assert_held(&ar->data_lock); ath10k_mac_is_peer_wep_key_set()
211 peer = ath10k_peer_find(ar, 0, addr); ath10k_mac_is_peer_wep_key_set()
226 struct ath10k *ar = arvif->ar; ath10k_clear_vdev_key() local
233 lockdep_assert_held(&ar->conf_mutex); ath10k_clear_vdev_key()
238 spin_lock_bh(&ar->data_lock); ath10k_clear_vdev_key()
240 list_for_each_entry(peer, &ar->peers, list) { ath10k_clear_vdev_key()
252 spin_unlock_bh(&ar->data_lock); ath10k_clear_vdev_key()
262 ath10k_warn(ar, "failed to remove key for %pM: %d\n", ath10k_clear_vdev_key()
367 static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr) ath10k_peer_create() argument
371 lockdep_assert_held(&ar->conf_mutex); ath10k_peer_create()
373 if (ar->num_peers >= ar->max_num_peers) ath10k_peer_create()
376 ret = ath10k_wmi_peer_create(ar, vdev_id, addr); ath10k_peer_create()
378 ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n", ath10k_peer_create()
383 ret = ath10k_wait_for_peer_created(ar, vdev_id, addr); ath10k_peer_create()
385 ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n", ath10k_peer_create()
390 ar->num_peers++; ath10k_peer_create()
397 struct ath10k *ar = arvif->ar; ath10k_mac_set_kickout() local
401 param = ar->wmi.pdev_param->sta_kickout_th; ath10k_mac_set_kickout()
402 ret = ath10k_wmi_pdev_set_param(ar, param, ath10k_mac_set_kickout()
405 ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n", ath10k_mac_set_kickout()
410 param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs; ath10k_mac_set_kickout()
411 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, ath10k_mac_set_kickout()
414 ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n", ath10k_mac_set_kickout()
419 param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs; ath10k_mac_set_kickout()
420 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, ath10k_mac_set_kickout()
423 ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n", ath10k_mac_set_kickout()
428 param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs; ath10k_mac_set_kickout()
429 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, ath10k_mac_set_kickout()
432 ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n", ath10k_mac_set_kickout()
442 struct ath10k *ar = arvif->ar; ath10k_mac_set_rts() local
445 vdev_param = ar->wmi.vdev_param->rts_threshold; ath10k_mac_set_rts()
446 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value); ath10k_mac_set_rts()
451 struct ath10k *ar = arvif->ar; ath10k_mac_set_frag() local
455 value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold, ath10k_mac_set_frag()
459 vdev_param = ar->wmi.vdev_param->fragmentation_threshold; ath10k_mac_set_frag()
460 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value); ath10k_mac_set_frag()
463 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) ath10k_peer_delete() argument
467 lockdep_assert_held(&ar->conf_mutex); ath10k_peer_delete()
469 ret = ath10k_wmi_peer_delete(ar, vdev_id, addr); ath10k_peer_delete()
473 ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr); ath10k_peer_delete()
477 ar->num_peers--; ath10k_peer_delete()
482 static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) ath10k_peer_cleanup() argument
486 lockdep_assert_held(&ar->conf_mutex); ath10k_peer_cleanup()
488 spin_lock_bh(&ar->data_lock); ath10k_peer_cleanup()
489 list_for_each_entry_safe(peer, tmp, &ar->peers, list) { ath10k_peer_cleanup()
493 ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n", ath10k_peer_cleanup()
498 ar->num_peers--; ath10k_peer_cleanup()
500 spin_unlock_bh(&ar->data_lock); ath10k_peer_cleanup()
503 static void ath10k_peer_cleanup_all(struct ath10k *ar) ath10k_peer_cleanup_all() argument
507 lockdep_assert_held(&ar->conf_mutex); ath10k_peer_cleanup_all()
509 spin_lock_bh(&ar->data_lock); ath10k_peer_cleanup_all()
510 list_for_each_entry_safe(peer, tmp, &ar->peers, list) { ath10k_peer_cleanup_all()
514 spin_unlock_bh(&ar->data_lock); ath10k_peer_cleanup_all()
516 ar->num_peers = 0; ath10k_peer_cleanup_all()
517 ar->num_stations = 0; ath10k_peer_cleanup_all()
526 struct ath10k *ar = arvif->ar; ath10k_mac_vif_beacon_free() local
528 lockdep_assert_held(&ar->data_lock); ath10k_mac_vif_beacon_free()
534 dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr, ath10k_mac_vif_beacon_free()
549 struct ath10k *ar = arvif->ar; ath10k_mac_vif_beacon_cleanup() local
551 lockdep_assert_held(&ar->data_lock); ath10k_mac_vif_beacon_cleanup()
556 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, ath10k_mac_vif_beacon_cleanup()
562 static inline int ath10k_vdev_setup_sync(struct ath10k *ar) ath10k_vdev_setup_sync() argument
566 lockdep_assert_held(&ar->conf_mutex); ath10k_vdev_setup_sync()
568 if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) ath10k_vdev_setup_sync()
571 ret = wait_for_completion_timeout(&ar->vdev_setup_done, ath10k_vdev_setup_sync()
579 static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id) ath10k_monitor_vdev_start() argument
581 struct cfg80211_chan_def *chandef = &ar->chandef; ath10k_monitor_vdev_start()
586 lockdep_assert_held(&ar->conf_mutex); ath10k_monitor_vdev_start()
603 reinit_completion(&ar->vdev_setup_done); ath10k_monitor_vdev_start()
605 ret = ath10k_wmi_vdev_start(ar, &arg); ath10k_monitor_vdev_start()
607 ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n", ath10k_monitor_vdev_start()
612 ret = ath10k_vdev_setup_sync(ar); ath10k_monitor_vdev_start()
614 ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n", ath10k_monitor_vdev_start()
619 ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr); ath10k_monitor_vdev_start()
621 ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n", ath10k_monitor_vdev_start()
626 ar->monitor_vdev_id = vdev_id; ath10k_monitor_vdev_start()
628 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n", ath10k_monitor_vdev_start()
629 ar->monitor_vdev_id); ath10k_monitor_vdev_start()
633 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); ath10k_monitor_vdev_start()
635 ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n", ath10k_monitor_vdev_start()
636 ar->monitor_vdev_id, ret); ath10k_monitor_vdev_start()
641 static int ath10k_monitor_vdev_stop(struct ath10k *ar) ath10k_monitor_vdev_stop() argument
645 lockdep_assert_held(&ar->conf_mutex); ath10k_monitor_vdev_stop()
647 ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id); ath10k_monitor_vdev_stop()
649 ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n", ath10k_monitor_vdev_stop()
650 ar->monitor_vdev_id, ret); ath10k_monitor_vdev_stop()
652 reinit_completion(&ar->vdev_setup_done); ath10k_monitor_vdev_stop()
654 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); ath10k_monitor_vdev_stop()
656 ath10k_warn(ar, "failed to request monitor vdev %i stop: %d\n", ath10k_monitor_vdev_stop()
657 ar->monitor_vdev_id, ret); ath10k_monitor_vdev_stop()
659 ret = ath10k_vdev_setup_sync(ar); ath10k_monitor_vdev_stop()
661 ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n", ath10k_monitor_vdev_stop()
662 ar->monitor_vdev_id, ret); ath10k_monitor_vdev_stop()
664 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n", ath10k_monitor_vdev_stop()
665 ar->monitor_vdev_id); ath10k_monitor_vdev_stop()
669 static int ath10k_monitor_vdev_create(struct ath10k *ar) ath10k_monitor_vdev_create() argument
673 lockdep_assert_held(&ar->conf_mutex); ath10k_monitor_vdev_create()
675 if (ar->free_vdev_map == 0) { ath10k_monitor_vdev_create()
676 ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n"); ath10k_monitor_vdev_create()
680 bit = __ffs64(ar->free_vdev_map); ath10k_monitor_vdev_create()
682 ar->monitor_vdev_id = bit; ath10k_monitor_vdev_create()
684 ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id, ath10k_monitor_vdev_create()
686 0, ar->mac_addr); ath10k_monitor_vdev_create()
688 ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n", ath10k_monitor_vdev_create()
689 ar->monitor_vdev_id, ret); ath10k_monitor_vdev_create()
693 ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id); ath10k_monitor_vdev_create()
694 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n", ath10k_monitor_vdev_create()
695 ar->monitor_vdev_id); ath10k_monitor_vdev_create()
700 static int ath10k_monitor_vdev_delete(struct ath10k *ar) ath10k_monitor_vdev_delete() argument
704 lockdep_assert_held(&ar->conf_mutex); ath10k_monitor_vdev_delete()
706 ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id); ath10k_monitor_vdev_delete()
708 ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n", ath10k_monitor_vdev_delete()
709 ar->monitor_vdev_id, ret); ath10k_monitor_vdev_delete()
713 ar->free_vdev_map |= 1LL << ar->monitor_vdev_id; ath10k_monitor_vdev_delete()
715 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n", ath10k_monitor_vdev_delete()
716 ar->monitor_vdev_id); ath10k_monitor_vdev_delete()
720 static int ath10k_monitor_start(struct ath10k *ar) ath10k_monitor_start() argument
724 lockdep_assert_held(&ar->conf_mutex); ath10k_monitor_start()
726 ret = ath10k_monitor_vdev_create(ar); ath10k_monitor_start()
728 ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret); ath10k_monitor_start()
732 ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id); ath10k_monitor_start()
734 ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret); ath10k_monitor_start()
735 ath10k_monitor_vdev_delete(ar); ath10k_monitor_start()
739 ar->monitor_started = true; ath10k_monitor_start()
740 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n"); ath10k_monitor_start()
745 static int ath10k_monitor_stop(struct ath10k *ar) ath10k_monitor_stop() argument
749 lockdep_assert_held(&ar->conf_mutex); ath10k_monitor_stop()
751 ret = ath10k_monitor_vdev_stop(ar); ath10k_monitor_stop()
753 ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret); ath10k_monitor_stop()
757 ret = ath10k_monitor_vdev_delete(ar); ath10k_monitor_stop()
759 ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret); ath10k_monitor_stop()
763 ar->monitor_started = false; ath10k_monitor_stop()
764 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n"); ath10k_monitor_stop()
769 static int ath10k_monitor_recalc(struct ath10k *ar) ath10k_monitor_recalc() argument
773 lockdep_assert_held(&ar->conf_mutex); ath10k_monitor_recalc()
775 should_start = ar->monitor || ath10k_monitor_recalc()
776 ar->filter_flags & FIF_PROMISC_IN_BSS || ath10k_monitor_recalc()
777 test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); ath10k_monitor_recalc()
779 ath10k_dbg(ar, ATH10K_DBG_MAC, ath10k_monitor_recalc()
781 ar->monitor_started, should_start); ath10k_monitor_recalc()
783 if (should_start == ar->monitor_started) ath10k_monitor_recalc()
787 return ath10k_monitor_start(ar); ath10k_monitor_recalc()
789 return ath10k_monitor_stop(ar); ath10k_monitor_recalc()
794 struct ath10k *ar = arvif->ar; ath10k_recalc_rtscts_prot() local
797 lockdep_assert_held(&ar->conf_mutex); ath10k_recalc_rtscts_prot()
799 vdev_param = ar->wmi.vdev_param->enable_rtscts; ath10k_recalc_rtscts_prot()
808 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ath10k_recalc_rtscts_prot()
812 static int ath10k_start_cac(struct ath10k *ar) ath10k_start_cac() argument
816 lockdep_assert_held(&ar->conf_mutex); ath10k_start_cac()
818 set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); ath10k_start_cac()
820 ret = ath10k_monitor_recalc(ar); ath10k_start_cac()
822 ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret); ath10k_start_cac()
823 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); ath10k_start_cac()
827 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n", ath10k_start_cac()
828 ar->monitor_vdev_id); ath10k_start_cac()
833 static int ath10k_stop_cac(struct ath10k *ar) ath10k_stop_cac() argument
835 lockdep_assert_held(&ar->conf_mutex); ath10k_stop_cac()
838 if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) ath10k_stop_cac()
841 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); ath10k_stop_cac()
842 ath10k_monitor_stop(ar); ath10k_stop_cac()
844 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n"); ath10k_stop_cac()
849 static void ath10k_recalc_radar_detection(struct ath10k *ar) ath10k_recalc_radar_detection() argument
853 lockdep_assert_held(&ar->conf_mutex); ath10k_recalc_radar_detection()
855 ath10k_stop_cac(ar); ath10k_recalc_radar_detection()
857 if (!ar->radar_enabled) ath10k_recalc_radar_detection()
860 if (ar->num_started_vdevs > 0) ath10k_recalc_radar_detection()
863 ret = ath10k_start_cac(ar); ath10k_recalc_radar_detection()
870 ath10k_warn(ar, "failed to start CAC: %d\n", ret); ath10k_recalc_radar_detection()
871 ieee80211_radar_detected(ar->hw); ath10k_recalc_radar_detection()
877 struct ath10k *ar = arvif->ar; ath10k_vdev_start_restart() local
878 struct cfg80211_chan_def *chandef = &ar->chandef; ath10k_vdev_start_restart()
882 lockdep_assert_held(&ar->conf_mutex); ath10k_vdev_start_restart()
884 reinit_completion(&ar->vdev_setup_done); ath10k_vdev_start_restart()
912 ath10k_dbg(ar, ATH10K_DBG_MAC, ath10k_vdev_start_restart()
918 ret = ath10k_wmi_vdev_restart(ar, &arg); ath10k_vdev_start_restart()
920 ret = ath10k_wmi_vdev_start(ar, &arg); ath10k_vdev_start_restart()
923 ath10k_warn(ar, "failed to start WMI vdev %i: %d\n", ath10k_vdev_start_restart()
928 ret = ath10k_vdev_setup_sync(ar); ath10k_vdev_start_restart()
930 ath10k_warn(ar, ath10k_vdev_start_restart()
936 ar->num_started_vdevs++; ath10k_vdev_start_restart()
937 ath10k_recalc_radar_detection(ar); ath10k_vdev_start_restart()
954 struct ath10k *ar = arvif->ar; ath10k_vdev_stop() local
957 lockdep_assert_held(&ar->conf_mutex); ath10k_vdev_stop()
959 reinit_completion(&ar->vdev_setup_done); ath10k_vdev_stop()
961 ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id); ath10k_vdev_stop()
963 ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n", ath10k_vdev_stop()
968 ret = ath10k_vdev_setup_sync(ar); ath10k_vdev_stop()
970 ath10k_warn(ar, "failed to synchronize setup for vdev %i stop: %d\n", ath10k_vdev_stop()
975 WARN_ON(ar->num_started_vdevs == 0); ath10k_vdev_stop()
977 if (ar->num_started_vdevs != 0) { ath10k_vdev_stop()
978 ar->num_started_vdevs--; ath10k_vdev_stop()
979 ath10k_recalc_radar_detection(ar); ath10k_vdev_stop()
988 struct ath10k *ar = arvif->ar; ath10k_mac_setup_bcn_p2p_ie() local
1007 ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie); ath10k_mac_setup_bcn_p2p_ie()
1009 ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n", ath10k_mac_setup_bcn_p2p_ie()
1049 struct ath10k *ar = arvif->ar; ath10k_mac_setup_bcn_tmpl() local
1050 struct ieee80211_hw *hw = ar->hw; ath10k_mac_setup_bcn_tmpl()
1056 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) ath10k_mac_setup_bcn_tmpl()
1061 ath10k_warn(ar, "failed to get beacon template from mac80211\n"); ath10k_mac_setup_bcn_tmpl()
1067 ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret); ath10k_mac_setup_bcn_tmpl()
1080 ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0, ath10k_mac_setup_bcn_tmpl()
1085 ath10k_warn(ar, "failed to submit beacon template command: %d\n", ath10k_mac_setup_bcn_tmpl()
1095 struct ath10k *ar = arvif->ar; ath10k_mac_setup_prb_tmpl() local
1096 struct ieee80211_hw *hw = ar->hw; ath10k_mac_setup_prb_tmpl()
1101 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) ath10k_mac_setup_prb_tmpl()
1106 ath10k_warn(ar, "failed to get probe resp template from mac80211\n"); ath10k_mac_setup_prb_tmpl()
1110 ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb); ath10k_mac_setup_prb_tmpl()
1114 ath10k_warn(ar, "failed to submit probe resp template command: %d\n", ath10k_mac_setup_prb_tmpl()
1125 struct ath10k *ar = arvif->ar; ath10k_control_beaconing() local
1128 lockdep_assert_held(&arvif->ar->conf_mutex); ath10k_control_beaconing()
1136 spin_lock_bh(&arvif->ar->data_lock); ath10k_control_beaconing()
1138 spin_unlock_bh(&arvif->ar->data_lock); ath10k_control_beaconing()
1152 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, ath10k_control_beaconing()
1155 ath10k_warn(ar, "failed to bring up vdev %d: %i\n", ath10k_control_beaconing()
1164 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id); ath10k_control_beaconing()
1171 struct ath10k *ar = arvif->ar; ath10k_control_ibss() local
1175 lockdep_assert_held(&arvif->ar->conf_mutex); ath10k_control_ibss()
1178 ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer); ath10k_control_ibss()
1180 ath10k_warn(ar, "failed to delete IBSS self peer %pM for vdev %d: %d\n", ath10k_control_ibss()
1191 ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer); ath10k_control_ibss()
1193 ath10k_warn(ar, "failed to create IBSS self peer %pM for vdev %d: %d\n", ath10k_control_ibss()
1198 vdev_param = arvif->ar->wmi.vdev_param->atim_window; ath10k_control_ibss()
1199 ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param, ath10k_control_ibss()
1202 ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n", ath10k_control_ibss()
1208 struct ath10k *ar = arvif->ar; ath10k_mac_vif_recalc_ps_wake_threshold() local
1213 lockdep_assert_held(&arvif->ar->conf_mutex); ath10k_mac_vif_recalc_ps_wake_threshold()
1221 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value); ath10k_mac_vif_recalc_ps_wake_threshold()
1223 ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n", ath10k_mac_vif_recalc_ps_wake_threshold()
1233 struct ath10k *ar = arvif->ar; ath10k_mac_vif_recalc_ps_poll_count() local
1238 lockdep_assert_held(&arvif->ar->conf_mutex); ath10k_mac_vif_recalc_ps_poll_count()
1246 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, ath10k_mac_vif_recalc_ps_poll_count()
1249 ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n", ath10k_mac_vif_recalc_ps_poll_count()
1257 static int ath10k_mac_ps_vif_count(struct ath10k *ar) ath10k_mac_ps_vif_count() argument
1262 lockdep_assert_held(&ar->conf_mutex); ath10k_mac_ps_vif_count()
1264 list_for_each_entry(arvif, &ar->arvifs, list) ath10k_mac_ps_vif_count()
1273 struct ath10k *ar = arvif->ar; ath10k_mac_vif_setup_ps() local
1275 struct ieee80211_conf *conf = &ar->hw->conf; ath10k_mac_vif_setup_ps()
1282 lockdep_assert_held(&arvif->ar->conf_mutex); ath10k_mac_vif_setup_ps()
1289 if (enable_ps && ath10k_mac_ps_vif_count(ar) > 1 && ath10k_mac_vif_setup_ps()
1291 ar->fw_features)) { ath10k_mac_vif_setup_ps()
1292 ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n", ath10k_mac_vif_setup_ps()
1308 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, ath10k_mac_vif_setup_ps()
1311 ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n", ath10k_mac_vif_setup_ps()
1319 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n", ath10k_mac_vif_setup_ps()
1322 ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode); ath10k_mac_vif_setup_ps()
1324 ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n", ath10k_mac_vif_setup_ps()
1334 struct ath10k *ar = arvif->ar; ath10k_mac_vif_disable_keepalive() local
1338 lockdep_assert_held(&arvif->ar->conf_mutex); ath10k_mac_vif_disable_keepalive()
1343 if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map)) ath10k_mac_vif_disable_keepalive()
1354 ret = ath10k_wmi_sta_keepalive(ar, &arg); ath10k_mac_vif_disable_keepalive()
1356 ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n", ath10k_mac_vif_disable_keepalive()
1368 static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar, ath10k_peer_assoc_h_listen_intval() argument
1382 return ar->hw->conf.listen_interval; ath10k_peer_assoc_h_listen_intval()
1385 static void ath10k_peer_assoc_h_basic(struct ath10k *ar, ath10k_peer_assoc_h_basic() argument
1392 lockdep_assert_held(&ar->conf_mutex); ath10k_peer_assoc_h_basic()
1398 arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif); ath10k_peer_assoc_h_basic()
1403 static void ath10k_peer_assoc_h_crypto(struct ath10k *ar, ath10k_peer_assoc_h_crypto() argument
1412 lockdep_assert_held(&ar->conf_mutex); ath10k_peer_assoc_h_crypto()
1414 bss = cfg80211_get_bss(ar->hw->wiphy, ar->hw->conf.chandef.chan, ath10k_peer_assoc_h_crypto()
1430 cfg80211_put_bss(ar->hw->wiphy, bss); ath10k_peer_assoc_h_crypto()
1435 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__); ath10k_peer_assoc_h_crypto()
1440 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__); ath10k_peer_assoc_h_crypto()
1445 static void ath10k_peer_assoc_h_rates(struct ath10k *ar, ath10k_peer_assoc_h_rates() argument
1455 lockdep_assert_held(&ar->conf_mutex); ath10k_peer_assoc_h_rates()
1457 sband = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band]; ath10k_peer_assoc_h_rates()
1458 ratemask = sta->supp_rates[ar->hw->conf.chandef.chan->band]; ath10k_peer_assoc_h_rates()
1472 static void ath10k_peer_assoc_h_ht(struct ath10k *ar, ath10k_peer_assoc_h_ht() argument
1480 lockdep_assert_held(&ar->conf_mutex); ath10k_peer_assoc_h_ht()
1549 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", ath10k_peer_assoc_h_ht()
1555 static int ath10k_peer_assoc_qos_ap(struct ath10k *ar, ath10k_peer_assoc_qos_ap() argument
1563 lockdep_assert_held(&ar->conf_mutex); ath10k_peer_assoc_qos_ap()
1566 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n", ath10k_peer_assoc_qos_ap()
1585 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, ath10k_peer_assoc_qos_ap()
1590 ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n", ath10k_peer_assoc_qos_ap()
1595 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, ath10k_peer_assoc_qos_ap()
1600 ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n", ath10k_peer_assoc_qos_ap()
1609 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr, ath10k_peer_assoc_qos_ap()
1613 ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n", ath10k_peer_assoc_qos_ap()
1622 static void ath10k_peer_assoc_h_vht(struct ath10k *ar, ath10k_peer_assoc_h_vht() argument
1634 if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) ath10k_peer_assoc_h_vht()
1663 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n", ath10k_peer_assoc_h_vht()
1667 static void ath10k_peer_assoc_h_qos(struct ath10k *ar, ath10k_peer_assoc_h_qos() argument
1696 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n", ath10k_peer_assoc_h_qos()
1706 static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, ath10k_peer_assoc_h_phymode() argument
1713 switch (ar->hw->conf.chandef.chan->band) { ath10k_peer_assoc_h_phymode()
1757 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n", ath10k_peer_assoc_h_phymode()
1764 static int ath10k_peer_assoc_prepare(struct ath10k *ar, ath10k_peer_assoc_prepare() argument
1769 lockdep_assert_held(&ar->conf_mutex); ath10k_peer_assoc_prepare()
1773 ath10k_peer_assoc_h_basic(ar, vif, sta, arg); ath10k_peer_assoc_prepare()
1774 ath10k_peer_assoc_h_crypto(ar, vif, arg); ath10k_peer_assoc_prepare()
1775 ath10k_peer_assoc_h_rates(ar, sta, arg); ath10k_peer_assoc_prepare()
1776 ath10k_peer_assoc_h_ht(ar, sta, arg); ath10k_peer_assoc_prepare()
1777 ath10k_peer_assoc_h_vht(ar, sta, arg); ath10k_peer_assoc_prepare()
1778 ath10k_peer_assoc_h_qos(ar, vif, sta, arg); ath10k_peer_assoc_prepare()
1779 ath10k_peer_assoc_h_phymode(ar, vif, sta, arg); ath10k_peer_assoc_prepare()
1791 static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif, ath10k_setup_peer_smps() argument
1806 return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr, ath10k_setup_peer_smps()
1811 static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar, ath10k_mac_vif_recalc_txbf() argument
1820 if (!(ar->vht_cap_info & ath10k_mac_vif_recalc_txbf()
1827 param = ar->wmi.vdev_param->txbf; ath10k_mac_vif_recalc_txbf()
1837 if (ar->vht_cap_info & ath10k_mac_vif_recalc_txbf()
1847 if (ar->vht_cap_info & ath10k_mac_vif_recalc_txbf()
1863 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value); ath10k_mac_vif_recalc_txbf()
1865 ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n", ath10k_mac_vif_recalc_txbf()
1878 struct ath10k *ar = hw->priv; ath10k_bss_assoc() local
1886 lockdep_assert_held(&ar->conf_mutex); ath10k_bss_assoc()
1888 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n", ath10k_bss_assoc()
1895 ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n", ath10k_bss_assoc()
1906 ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg); ath10k_bss_assoc()
1908 ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n", ath10k_bss_assoc()
1916 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); ath10k_bss_assoc()
1918 ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n", ath10k_bss_assoc()
1923 ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap); ath10k_bss_assoc()
1925 ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n", ath10k_bss_assoc()
1930 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); ath10k_bss_assoc()
1932 ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n", ath10k_bss_assoc()
1937 ath10k_dbg(ar, ATH10K_DBG_MAC, ath10k_bss_assoc()
1946 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid); ath10k_bss_assoc()
1948 ath10k_warn(ar, "failed to set vdev %d up: %d\n", ath10k_bss_assoc()
1959 ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid, ath10k_bss_assoc()
1962 ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n", ath10k_bss_assoc()
1971 struct ath10k *ar = hw->priv; ath10k_bss_disassoc() local
1976 lockdep_assert_held(&ar->conf_mutex); ath10k_bss_disassoc()
1978 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n", ath10k_bss_disassoc()
1981 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); ath10k_bss_disassoc()
1983 ath10k_warn(ar, "faield to down vdev %i: %d\n", ath10k_bss_disassoc()
1988 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); ath10k_bss_disassoc()
1990 ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n", ath10k_bss_disassoc()
1998 static int ath10k_station_assoc(struct ath10k *ar, ath10k_station_assoc() argument
2007 lockdep_assert_held(&ar->conf_mutex); ath10k_station_assoc()
2009 ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg); ath10k_station_assoc()
2011 ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n", ath10k_station_assoc()
2017 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); ath10k_station_assoc()
2019 ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n", ath10k_station_assoc()
2028 ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, ath10k_station_assoc()
2031 ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n", ath10k_station_assoc()
2036 ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta); ath10k_station_assoc()
2038 ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n", ath10k_station_assoc()
2047 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", ath10k_station_assoc()
2057 ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n", ath10k_station_assoc()
2067 static int ath10k_station_disassoc(struct ath10k *ar, ath10k_station_disassoc() argument
2074 lockdep_assert_held(&ar->conf_mutex); ath10k_station_disassoc()
2080 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", ath10k_station_disassoc()
2088 ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n", ath10k_station_disassoc()
2100 static int ath10k_update_channel_list(struct ath10k *ar) ath10k_update_channel_list() argument
2102 struct ieee80211_hw *hw = ar->hw; ath10k_update_channel_list()
2113 lockdep_assert_held(&ar->conf_mutex); ath10k_update_channel_list()
2181 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_update_channel_list()
2191 ret = ath10k_wmi_scan_chan_list(ar, &arg); ath10k_update_channel_list()
2213 static void ath10k_regd_update(struct ath10k *ar) ath10k_regd_update() argument
2220 lockdep_assert_held(&ar->conf_mutex); ath10k_regd_update()
2222 ret = ath10k_update_channel_list(ar); ath10k_regd_update()
2224 ath10k_warn(ar, "failed to update channel list: %d\n", ret); ath10k_regd_update()
2226 regpair = ar->ath_common.regulatory.regpair; ath10k_regd_update()
2228 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { ath10k_regd_update()
2229 nl_dfs_reg = ar->dfs_detector->region; ath10k_regd_update()
2237 ret = ath10k_wmi_pdev_set_regdomain(ar, ath10k_regd_update()
2245 ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret); ath10k_regd_update()
2252 struct ath10k *ar = hw->priv; ath10k_reg_notifier() local
2255 ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); ath10k_reg_notifier()
2257 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { ath10k_reg_notifier()
2258 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n", ath10k_reg_notifier()
2260 result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector, ath10k_reg_notifier()
2263 ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n", ath10k_reg_notifier()
2267 mutex_lock(&ar->conf_mutex); ath10k_reg_notifier()
2268 if (ar->state == ATH10K_STATE_ON) ath10k_reg_notifier()
2269 ath10k_regd_update(ar); ath10k_reg_notifier()
2270 mutex_unlock(&ar->conf_mutex); ath10k_reg_notifier()
2291 static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar, struct ieee80211_vif *vif) ath10k_tx_h_get_vdev_id() argument
2296 if (ar->monitor_started) ath10k_tx_h_get_vdev_id()
2297 return ar->monitor_vdev_id; ath10k_tx_h_get_vdev_id()
2299 ath10k_warn(ar, "failed to resolve vdev id\n"); ath10k_tx_h_get_vdev_id()
2332 static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, ath10k_tx_h_add_p2p_noa_ie() argument
2345 spin_lock_bh(&ar->data_lock); ath10k_tx_h_add_p2p_noa_ie()
2352 spin_unlock_bh(&ar->data_lock); ath10k_tx_h_add_p2p_noa_ie()
2356 static bool ath10k_mac_need_offchan_tx_work(struct ath10k *ar) ath10k_mac_need_offchan_tx_work() argument
2364 return !(ar->htt.target_version_major >= 3 && ath10k_mac_need_offchan_tx_work()
2365 ar->htt.target_version_minor >= 4); ath10k_mac_need_offchan_tx_work()
2368 static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb) ath10k_tx_htt() argument
2373 if (ar->htt.target_version_major >= 3) { ath10k_tx_htt()
2375 ret = ath10k_htt_tx(&ar->htt, skb); ath10k_tx_htt()
2381 ar->fw_features)) { ath10k_tx_htt()
2382 if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >= ath10k_tx_htt()
2384 ath10k_warn(ar, "reached WMI management transmit queue limit\n"); ath10k_tx_htt()
2389 skb_queue_tail(&ar->wmi_mgmt_tx_queue, skb); ath10k_tx_htt()
2390 ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work); ath10k_tx_htt()
2392 ret = ath10k_htt_mgmt_tx(&ar->htt, skb); ath10k_tx_htt()
2395 ar->fw_features) && ath10k_tx_htt()
2401 ret = ath10k_htt_mgmt_tx(&ar->htt, skb); ath10k_tx_htt()
2403 ret = ath10k_htt_tx(&ar->htt, skb); ath10k_tx_htt()
2408 ath10k_warn(ar, "failed to transmit packet, dropping: %d\n", ath10k_tx_htt()
2410 ieee80211_free_txskb(ar->hw, skb); ath10k_tx_htt()
2414 void ath10k_offchan_tx_purge(struct ath10k *ar) ath10k_offchan_tx_purge() argument
2419 skb = skb_dequeue(&ar->offchan_tx_queue); ath10k_offchan_tx_purge()
2423 ieee80211_free_txskb(ar->hw, skb); ath10k_offchan_tx_purge()
2429 struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work); ath10k_offchan_tx_work() local
2445 skb = skb_dequeue(&ar->offchan_tx_queue); ath10k_offchan_tx_work()
2449 mutex_lock(&ar->conf_mutex); ath10k_offchan_tx_work()
2451 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p\n", ath10k_offchan_tx_work()
2458 spin_lock_bh(&ar->data_lock); ath10k_offchan_tx_work()
2459 peer = ath10k_peer_find(ar, vdev_id, peer_addr); ath10k_offchan_tx_work()
2460 spin_unlock_bh(&ar->data_lock); ath10k_offchan_tx_work()
2464 ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n", ath10k_offchan_tx_work()
2468 ret = ath10k_peer_create(ar, vdev_id, peer_addr); ath10k_offchan_tx_work()
2470 ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n", ath10k_offchan_tx_work()
2474 spin_lock_bh(&ar->data_lock); ath10k_offchan_tx_work()
2475 reinit_completion(&ar->offchan_tx_completed); ath10k_offchan_tx_work()
2476 ar->offchan_tx_skb = skb; ath10k_offchan_tx_work()
2477 spin_unlock_bh(&ar->data_lock); ath10k_offchan_tx_work()
2479 ath10k_tx_htt(ar, skb); ath10k_offchan_tx_work()
2481 ret = wait_for_completion_timeout(&ar->offchan_tx_completed, ath10k_offchan_tx_work()
2484 ath10k_warn(ar, "timed out waiting for offchannel skb %p\n", ath10k_offchan_tx_work()
2488 ret = ath10k_peer_delete(ar, vdev_id, peer_addr); ath10k_offchan_tx_work()
2490 ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n", ath10k_offchan_tx_work()
2494 mutex_unlock(&ar->conf_mutex); ath10k_offchan_tx_work()
2498 void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar) ath10k_mgmt_over_wmi_tx_purge() argument
2503 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); ath10k_mgmt_over_wmi_tx_purge()
2507 ieee80211_free_txskb(ar->hw, skb); ath10k_mgmt_over_wmi_tx_purge()
2513 struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work); ath10k_mgmt_over_wmi_tx_work() local
2518 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); ath10k_mgmt_over_wmi_tx_work()
2522 ret = ath10k_wmi_mgmt_tx(ar, skb); ath10k_mgmt_over_wmi_tx_work()
2524 ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n", ath10k_mgmt_over_wmi_tx_work()
2526 ieee80211_free_txskb(ar->hw, skb); ath10k_mgmt_over_wmi_tx_work()
2535 void __ath10k_scan_finish(struct ath10k *ar) __ath10k_scan_finish() argument
2537 lockdep_assert_held(&ar->data_lock); __ath10k_scan_finish()
2539 switch (ar->scan.state) { __ath10k_scan_finish()
2543 if (ar->scan.is_roc) __ath10k_scan_finish()
2544 ieee80211_remain_on_channel_expired(ar->hw); __ath10k_scan_finish()
2547 if (!ar->scan.is_roc) __ath10k_scan_finish()
2548 ieee80211_scan_completed(ar->hw, __ath10k_scan_finish()
2549 (ar->scan.state == __ath10k_scan_finish()
2553 ar->scan.state = ATH10K_SCAN_IDLE; __ath10k_scan_finish()
2554 ar->scan_channel = NULL; __ath10k_scan_finish()
2555 ath10k_offchan_tx_purge(ar); __ath10k_scan_finish()
2556 cancel_delayed_work(&ar->scan.timeout); __ath10k_scan_finish()
2557 complete_all(&ar->scan.completed); __ath10k_scan_finish()
2562 void ath10k_scan_finish(struct ath10k *ar) ath10k_scan_finish() argument
2564 spin_lock_bh(&ar->data_lock); ath10k_scan_finish()
2565 __ath10k_scan_finish(ar); ath10k_scan_finish()
2566 spin_unlock_bh(&ar->data_lock); ath10k_scan_finish()
2569 static int ath10k_scan_stop(struct ath10k *ar) ath10k_scan_stop() argument
2578 lockdep_assert_held(&ar->conf_mutex); ath10k_scan_stop()
2580 ret = ath10k_wmi_stop_scan(ar, &arg); ath10k_scan_stop()
2582 ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret); ath10k_scan_stop()
2586 ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ); ath10k_scan_stop()
2588 ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n"); ath10k_scan_stop()
2602 spin_lock_bh(&ar->data_lock); ath10k_scan_stop()
2603 if (ar->scan.state != ATH10K_SCAN_IDLE) ath10k_scan_stop()
2604 __ath10k_scan_finish(ar); ath10k_scan_stop()
2605 spin_unlock_bh(&ar->data_lock); ath10k_scan_stop()
2610 static void ath10k_scan_abort(struct ath10k *ar) ath10k_scan_abort() argument
2614 lockdep_assert_held(&ar->conf_mutex); ath10k_scan_abort()
2616 spin_lock_bh(&ar->data_lock); ath10k_scan_abort()
2618 switch (ar->scan.state) { ath10k_scan_abort()
2626 ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n", ath10k_scan_abort()
2627 ath10k_scan_state_str(ar->scan.state), ath10k_scan_abort()
2628 ar->scan.state); ath10k_scan_abort()
2631 ar->scan.state = ATH10K_SCAN_ABORTING; ath10k_scan_abort()
2632 spin_unlock_bh(&ar->data_lock); ath10k_scan_abort()
2634 ret = ath10k_scan_stop(ar); ath10k_scan_abort()
2636 ath10k_warn(ar, "failed to abort scan: %d\n", ret); ath10k_scan_abort()
2638 spin_lock_bh(&ar->data_lock); ath10k_scan_abort()
2642 spin_unlock_bh(&ar->data_lock); ath10k_scan_abort()
2647 struct ath10k *ar = container_of(work, struct ath10k, ath10k_scan_timeout_work() local
2650 mutex_lock(&ar->conf_mutex); ath10k_scan_timeout_work()
2651 ath10k_scan_abort(ar); ath10k_scan_timeout_work()
2652 mutex_unlock(&ar->conf_mutex); ath10k_scan_timeout_work()
2655 static int ath10k_start_scan(struct ath10k *ar, ath10k_start_scan() argument
2660 lockdep_assert_held(&ar->conf_mutex); ath10k_start_scan()
2662 ret = ath10k_wmi_start_scan(ar, arg); ath10k_start_scan()
2666 ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ); ath10k_start_scan()
2668 ret = ath10k_scan_stop(ar); ath10k_start_scan()
2670 ath10k_warn(ar, "failed to stop scan: %d\n", ret); ath10k_start_scan()
2679 spin_lock_bh(&ar->data_lock); ath10k_start_scan()
2680 if (ar->scan.state == ATH10K_SCAN_IDLE) { ath10k_start_scan()
2681 spin_unlock_bh(&ar->data_lock); ath10k_start_scan()
2684 spin_unlock_bh(&ar->data_lock); ath10k_start_scan()
2687 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, ath10k_start_scan()
2700 struct ath10k *ar = hw->priv; ath10k_tx() local
2707 ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n"); ath10k_tx()
2711 ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif); ath10k_tx()
2716 ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb); ath10k_tx()
2721 spin_lock_bh(&ar->data_lock); ath10k_tx()
2722 ATH10K_SKB_CB(skb)->htt.freq = ar->scan.roc_freq; ath10k_tx()
2723 ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id; ath10k_tx()
2724 spin_unlock_bh(&ar->data_lock); ath10k_tx()
2726 if (ath10k_mac_need_offchan_tx_work(ar)) { ath10k_tx()
2730 ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n", ath10k_tx()
2733 skb_queue_tail(&ar->offchan_tx_queue, skb); ath10k_tx()
2734 ieee80211_queue_work(hw, &ar->offchan_tx_work); ath10k_tx()
2739 ath10k_tx_htt(ar, skb); ath10k_tx()
2743 void ath10k_drain_tx(struct ath10k *ar) ath10k_drain_tx() argument
2748 ath10k_offchan_tx_purge(ar); ath10k_drain_tx()
2749 ath10k_mgmt_over_wmi_tx_purge(ar); ath10k_drain_tx()
2751 cancel_work_sync(&ar->offchan_tx_work); ath10k_drain_tx()
2752 cancel_work_sync(&ar->wmi_mgmt_tx_work); ath10k_drain_tx()
2755 void ath10k_halt(struct ath10k *ar) ath10k_halt() argument
2759 lockdep_assert_held(&ar->conf_mutex); ath10k_halt()
2761 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); ath10k_halt()
2762 ar->filter_flags = 0; ath10k_halt()
2763 ar->monitor = false; ath10k_halt()
2765 if (ar->monitor_started) ath10k_halt()
2766 ath10k_monitor_stop(ar); ath10k_halt()
2768 ar->monitor_started = false; ath10k_halt()
2770 ath10k_scan_finish(ar); ath10k_halt()
2771 ath10k_peer_cleanup_all(ar); ath10k_halt()
2772 ath10k_core_stop(ar); ath10k_halt()
2773 ath10k_hif_power_down(ar); ath10k_halt()
2775 spin_lock_bh(&ar->data_lock); ath10k_halt()
2776 list_for_each_entry(arvif, &ar->arvifs, list) ath10k_halt()
2778 spin_unlock_bh(&ar->data_lock); ath10k_halt()
2783 struct ath10k *ar = hw->priv; ath10k_get_antenna() local
2785 mutex_lock(&ar->conf_mutex); ath10k_get_antenna()
2787 if (ar->cfg_tx_chainmask) { ath10k_get_antenna()
2788 *tx_ant = ar->cfg_tx_chainmask; ath10k_get_antenna()
2789 *rx_ant = ar->cfg_rx_chainmask; ath10k_get_antenna()
2791 *tx_ant = ar->supp_tx_chainmask; ath10k_get_antenna()
2792 *rx_ant = ar->supp_rx_chainmask; ath10k_get_antenna()
2795 mutex_unlock(&ar->conf_mutex); ath10k_get_antenna()
2800 static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg) ath10k_check_chain_mask() argument
2809 ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n", ath10k_check_chain_mask()
2813 static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant) __ath10k_set_antenna() argument
2817 lockdep_assert_held(&ar->conf_mutex); __ath10k_set_antenna()
2819 ath10k_check_chain_mask(ar, tx_ant, "tx"); __ath10k_set_antenna()
2820 ath10k_check_chain_mask(ar, rx_ant, "rx"); __ath10k_set_antenna()
2822 ar->cfg_tx_chainmask = tx_ant; __ath10k_set_antenna()
2823 ar->cfg_rx_chainmask = rx_ant; __ath10k_set_antenna()
2825 if ((ar->state != ATH10K_STATE_ON) && __ath10k_set_antenna()
2826 (ar->state != ATH10K_STATE_RESTARTED)) __ath10k_set_antenna()
2829 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask, __ath10k_set_antenna()
2832 ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n", __ath10k_set_antenna()
2837 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask, __ath10k_set_antenna()
2840 ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n", __ath10k_set_antenna()
2850 struct ath10k *ar = hw->priv; ath10k_set_antenna() local
2853 mutex_lock(&ar->conf_mutex); ath10k_set_antenna()
2854 ret = __ath10k_set_antenna(ar, tx_ant, rx_ant); ath10k_set_antenna()
2855 mutex_unlock(&ar->conf_mutex); ath10k_set_antenna()
2861 struct ath10k *ar = hw->priv; ath10k_start() local
2869 ath10k_drain_tx(ar); ath10k_start()
2871 mutex_lock(&ar->conf_mutex); ath10k_start()
2873 switch (ar->state) { ath10k_start()
2875 ar->state = ATH10K_STATE_ON; ath10k_start()
2878 ath10k_halt(ar); ath10k_start()
2879 ar->state = ATH10K_STATE_RESTARTED; ath10k_start()
2892 ret = ath10k_hif_power_up(ar); ath10k_start()
2894 ath10k_err(ar, "Could not init hif: %d\n", ret); ath10k_start()
2898 ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL); ath10k_start()
2900 ath10k_err(ar, "Could not init core: %d\n", ret); ath10k_start()
2904 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1); ath10k_start()
2906 ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret); ath10k_start()
2910 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1); ath10k_start()
2912 ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret); ath10k_start()
2916 if (ar->cfg_tx_chainmask) ath10k_start()
2917 __ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ath10k_start()
2918 ar->cfg_rx_chainmask); ath10k_start()
2929 ret = ath10k_wmi_pdev_set_param(ar, ath10k_start()
2930 ar->wmi.pdev_param->arp_ac_override, 0); ath10k_start()
2932 ath10k_warn(ar, "failed to set arp ac override parameter: %d\n", ath10k_start()
2937 ar->num_started_vdevs = 0; ath10k_start()
2938 ath10k_regd_update(ar); ath10k_start()
2940 ath10k_spectral_start(ar); ath10k_start()
2942 mutex_unlock(&ar->conf_mutex); ath10k_start()
2946 ath10k_core_stop(ar); ath10k_start()
2949 ath10k_hif_power_down(ar); ath10k_start()
2952 ar->state = ATH10K_STATE_OFF; ath10k_start()
2955 mutex_unlock(&ar->conf_mutex); ath10k_start()
2961 struct ath10k *ar = hw->priv; ath10k_stop() local
2963 ath10k_drain_tx(ar); ath10k_stop()
2965 mutex_lock(&ar->conf_mutex); ath10k_stop()
2966 if (ar->state != ATH10K_STATE_OFF) { ath10k_stop()
2967 ath10k_halt(ar); ath10k_stop()
2968 ar->state = ATH10K_STATE_OFF; ath10k_stop()
2970 mutex_unlock(&ar->conf_mutex); ath10k_stop()
2972 cancel_delayed_work_sync(&ar->scan.timeout); ath10k_stop()
2973 cancel_work_sync(&ar->restart_work); ath10k_stop()
2976 static int ath10k_config_ps(struct ath10k *ar) ath10k_config_ps() argument
2981 lockdep_assert_held(&ar->conf_mutex); ath10k_config_ps()
2983 list_for_each_entry(arvif, &ar->arvifs, list) { ath10k_config_ps()
2986 ath10k_warn(ar, "failed to setup powersave: %d\n", ret); ath10k_config_ps()
3017 static void ath10k_config_chan(struct ath10k *ar) ath10k_config_chan() argument
3022 lockdep_assert_held(&ar->conf_mutex); ath10k_config_chan()
3024 ath10k_dbg(ar, ATH10K_DBG_MAC, ath10k_config_chan()
3026 ar->chandef.chan->center_freq, ath10k_config_chan()
3027 ar->chandef.center_freq1, ath10k_config_chan()
3028 ar->chandef.center_freq2, ath10k_config_chan()
3029 chandef_get_width(ar->chandef.width)); ath10k_config_chan()
3033 if (ar->monitor_started) ath10k_config_chan()
3034 ath10k_monitor_stop(ar); ath10k_config_chan()
3036 list_for_each_entry(arvif, &ar->arvifs, list) { ath10k_config_chan()
3046 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); ath10k_config_chan()
3048 ath10k_warn(ar, "failed to down vdev %d: %d\n", ath10k_config_chan()
3056 list_for_each_entry(arvif, &ar->arvifs, list) { ath10k_config_chan()
3065 ath10k_warn(ar, "failed to restart vdev %d: %d\n", ath10k_config_chan()
3073 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, ath10k_config_chan()
3076 ath10k_warn(ar, "failed to bring vdev up %d: %d\n", ath10k_config_chan()
3082 ath10k_monitor_recalc(ar); ath10k_config_chan()
3085 static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower) ath10k_mac_txpower_setup() argument
3090 lockdep_assert_held(&ar->conf_mutex); ath10k_mac_txpower_setup()
3092 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower); ath10k_mac_txpower_setup()
3094 param = ar->wmi.pdev_param->txpower_limit2g; ath10k_mac_txpower_setup()
3095 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); ath10k_mac_txpower_setup()
3097 ath10k_warn(ar, "failed to set 2g txpower %d: %d\n", ath10k_mac_txpower_setup()
3102 param = ar->wmi.pdev_param->txpower_limit5g; ath10k_mac_txpower_setup()
3103 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); ath10k_mac_txpower_setup()
3105 ath10k_warn(ar, "failed to set 5g txpower %d: %d\n", ath10k_mac_txpower_setup()
3113 static int ath10k_mac_txpower_recalc(struct ath10k *ar) ath10k_mac_txpower_recalc() argument
3118 lockdep_assert_held(&ar->conf_mutex); ath10k_mac_txpower_recalc()
3120 list_for_each_entry(arvif, &ar->arvifs, list) { ath10k_mac_txpower_recalc()
3132 ret = ath10k_mac_txpower_setup(ar, txpower); ath10k_mac_txpower_recalc()
3134 ath10k_warn(ar, "failed to setup tx power %d: %d\n", ath10k_mac_txpower_recalc()
3144 struct ath10k *ar = hw->priv; ath10k_config() local
3148 mutex_lock(&ar->conf_mutex); ath10k_config()
3151 ath10k_dbg(ar, ATH10K_DBG_MAC, ath10k_config()
3157 spin_lock_bh(&ar->data_lock); ath10k_config()
3158 ar->rx_channel = conf->chandef.chan; ath10k_config()
3159 spin_unlock_bh(&ar->data_lock); ath10k_config()
3161 ar->radar_enabled = conf->radar_enabled; ath10k_config()
3162 ath10k_recalc_radar_detection(ar); ath10k_config()
3164 if (!cfg80211_chandef_identical(&ar->chandef, &conf->chandef)) { ath10k_config()
3165 ar->chandef = conf->chandef; ath10k_config()
3166 ath10k_config_chan(ar); ath10k_config()
3171 ath10k_config_ps(ar); ath10k_config()
3174 ar->monitor = conf->flags & IEEE80211_CONF_MONITOR; ath10k_config()
3175 ret = ath10k_monitor_recalc(ar); ath10k_config()
3177 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); ath10k_config()
3180 mutex_unlock(&ar->conf_mutex); ath10k_config()
3205 struct ath10k *ar = hw->priv; ath10k_add_interface() local
3215 mutex_lock(&ar->conf_mutex); ath10k_add_interface()
3219 arvif->ar = ar; ath10k_add_interface()
3224 if (ar->free_vdev_map == 0) { ath10k_add_interface()
3225 ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n"); ath10k_add_interface()
3229 bit = __ffs64(ar->free_vdev_map); ath10k_add_interface()
3231 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n", ath10k_add_interface()
3232 bit, ar->free_vdev_map); ath10k_add_interface()
3283 arvif->beacon_buf = dma_zalloc_coherent(ar->dev, ath10k_add_interface()
3289 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n", ath10k_add_interface()
3295 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n", ath10k_add_interface()
3299 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type, ath10k_add_interface()
3302 ath10k_warn(ar, "failed to create WMI vdev %i: %d\n", ath10k_add_interface()
3307 ar->free_vdev_map &= ~(1LL << arvif->vdev_id); ath10k_add_interface()
3308 list_add(&arvif->list, &ar->arvifs); ath10k_add_interface()
3315 ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n", ath10k_add_interface()
3322 vdev_param = ar->wmi.vdev_param->tx_encap_type; ath10k_add_interface()
3323 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ath10k_add_interface()
3327 ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n", ath10k_add_interface()
3335 if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) { ath10k_add_interface()
3336 u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask); ath10k_add_interface()
3338 vdev_param = ar->wmi.vdev_param->nss; ath10k_add_interface()
3339 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ath10k_add_interface()
3342 ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n", ath10k_add_interface()
3343 arvif->vdev_id, ar->cfg_tx_chainmask, nss, ath10k_add_interface()
3350 ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr); ath10k_add_interface()
3352 ath10k_warn(ar, "failed to create vdev %i peer for AP: %d\n", ath10k_add_interface()
3359 ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n", ath10k_add_interface()
3368 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, ath10k_add_interface()
3371 ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n", ath10k_add_interface()
3378 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", ath10k_add_interface()
3385 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", ath10k_add_interface()
3391 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold); ath10k_add_interface()
3393 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", ath10k_add_interface()
3398 ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold); ath10k_add_interface()
3400 ath10k_warn(ar, "failed to set frag threshold for vdev %d: %d\n", ath10k_add_interface()
3406 ret = ath10k_mac_txpower_recalc(ar); ath10k_add_interface()
3408 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); ath10k_add_interface()
3412 mutex_unlock(&ar->conf_mutex); ath10k_add_interface()
3417 ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr); ath10k_add_interface()
3420 ath10k_wmi_vdev_delete(ar, arvif->vdev_id); ath10k_add_interface()
3421 ar->free_vdev_map |= 1LL << arvif->vdev_id; ath10k_add_interface()
3426 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, ath10k_add_interface()
3431 mutex_unlock(&ar->conf_mutex); ath10k_add_interface()
3439 struct ath10k *ar = hw->priv; ath10k_remove_interface() local
3443 mutex_lock(&ar->conf_mutex); ath10k_remove_interface()
3445 spin_lock_bh(&ar->data_lock); ath10k_remove_interface()
3447 spin_unlock_bh(&ar->data_lock); ath10k_remove_interface()
3451 ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n", ath10k_remove_interface()
3454 ar->free_vdev_map |= 1LL << arvif->vdev_id; ath10k_remove_interface()
3458 ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id, ath10k_remove_interface()
3461 ath10k_warn(ar, "failed to submit AP self-peer removal on vdev %i: %d\n", ath10k_remove_interface()
3467 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n", ath10k_remove_interface()
3470 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id); ath10k_remove_interface()
3472 ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n", ath10k_remove_interface()
3479 ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id, ath10k_remove_interface()
3482 ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n", ath10k_remove_interface()
3485 spin_lock_bh(&ar->data_lock); ath10k_remove_interface()
3486 ar->num_peers--; ath10k_remove_interface()
3487 spin_unlock_bh(&ar->data_lock); ath10k_remove_interface()
3490 ath10k_peer_cleanup(ar, arvif->vdev_id); ath10k_remove_interface()
3492 mutex_unlock(&ar->conf_mutex); ath10k_remove_interface()
3513 struct ath10k *ar = hw->priv; ath10k_configure_filter() local
3516 mutex_lock(&ar->conf_mutex); ath10k_configure_filter()
3520 ar->filter_flags = *total_flags; ath10k_configure_filter()
3522 ret = ath10k_monitor_recalc(ar); ath10k_configure_filter()
3524 ath10k_warn(ar, "failed to recalc montior: %d\n", ret); ath10k_configure_filter()
3526 mutex_unlock(&ar->conf_mutex); ath10k_configure_filter()
3534 struct ath10k *ar = hw->priv; ath10k_bss_info_changed() local
3539 mutex_lock(&ar->conf_mutex); ath10k_bss_info_changed()
3546 vdev_param = ar->wmi.vdev_param->beacon_interval; ath10k_bss_info_changed()
3547 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ath10k_bss_info_changed()
3549 ath10k_dbg(ar, ATH10K_DBG_MAC, ath10k_bss_info_changed()
3554 ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n", ath10k_bss_info_changed()
3559 ath10k_dbg(ar, ATH10K_DBG_MAC, ath10k_bss_info_changed()
3563 pdev_param = ar->wmi.pdev_param->beacon_tx_mode; ath10k_bss_info_changed()
3564 ret = ath10k_wmi_pdev_set_param(ar, pdev_param, ath10k_bss_info_changed()
3567 ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n", ath10k_bss_info_changed()
3572 ath10k_warn(ar, "failed to update beacon template: %d\n", ath10k_bss_info_changed()
3579 ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n", ath10k_bss_info_changed()
3586 ath10k_dbg(ar, ATH10K_DBG_MAC, ath10k_bss_info_changed()
3590 vdev_param = ar->wmi.vdev_param->dtim_period; ath10k_bss_info_changed()
3591 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ath10k_bss_info_changed()
3594 ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n", ath10k_bss_info_changed()
3614 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n", ath10k_bss_info_changed()
3619 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", ath10k_bss_info_changed()
3630 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n", ath10k_bss_info_changed()
3633 vdev_param = ar->wmi.vdev_param->slot_time; ath10k_bss_info_changed()
3634 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ath10k_bss_info_changed()
3637 ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n", ath10k_bss_info_changed()
3647 ath10k_dbg(ar, ATH10K_DBG_MAC, ath10k_bss_info_changed()
3651 vdev_param = ar->wmi.vdev_param->preamble; ath10k_bss_info_changed()
3652 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ath10k_bss_info_changed()
3655 ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n", ath10k_bss_info_changed()
3665 if (ar->monitor_started) ath10k_bss_info_changed()
3666 ath10k_monitor_stop(ar); ath10k_bss_info_changed()
3668 ath10k_monitor_recalc(ar); ath10k_bss_info_changed()
3675 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n", ath10k_bss_info_changed()
3679 ret = ath10k_mac_txpower_recalc(ar); ath10k_bss_info_changed()
3681 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); ath10k_bss_info_changed()
3687 ret = ath10k_config_ps(ar); ath10k_bss_info_changed()
3689 ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n", ath10k_bss_info_changed()
3693 mutex_unlock(&ar->conf_mutex); ath10k_bss_info_changed()
3700 struct ath10k *ar = hw->priv; ath10k_hw_scan() local
3707 mutex_lock(&ar->conf_mutex); ath10k_hw_scan()
3709 spin_lock_bh(&ar->data_lock); ath10k_hw_scan()
3710 switch (ar->scan.state) { ath10k_hw_scan()
3712 reinit_completion(&ar->scan.started); ath10k_hw_scan()
3713 reinit_completion(&ar->scan.completed); ath10k_hw_scan()
3714 ar->scan.state = ATH10K_SCAN_STARTING; ath10k_hw_scan()
3715 ar->scan.is_roc = false; ath10k_hw_scan()
3716 ar->scan.vdev_id = arvif->vdev_id; ath10k_hw_scan()
3725 spin_unlock_bh(&ar->data_lock); ath10k_hw_scan()
3731 ath10k_wmi_start_scan_init(ar, &arg); ath10k_hw_scan()
3759 ret = ath10k_start_scan(ar, &arg); ath10k_hw_scan()
3761 ath10k_warn(ar, "failed to start hw scan: %d\n", ret); ath10k_hw_scan()
3762 spin_lock_bh(&ar->data_lock); ath10k_hw_scan()
3763 ar->scan.state = ATH10K_SCAN_IDLE; ath10k_hw_scan()
3764 spin_unlock_bh(&ar->data_lock); ath10k_hw_scan()
3768 mutex_unlock(&ar->conf_mutex); ath10k_hw_scan()
3775 struct ath10k *ar = hw->priv; ath10k_cancel_hw_scan() local
3777 mutex_lock(&ar->conf_mutex); ath10k_cancel_hw_scan()
3778 ath10k_scan_abort(ar); ath10k_cancel_hw_scan()
3779 mutex_unlock(&ar->conf_mutex); ath10k_cancel_hw_scan()
3781 cancel_delayed_work_sync(&ar->scan.timeout); ath10k_cancel_hw_scan()
3784 static void ath10k_set_key_h_def_keyidx(struct ath10k *ar, ath10k_set_key_h_def_keyidx() argument
3789 u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid; ath10k_set_key_h_def_keyidx()
3815 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ath10k_set_key_h_def_keyidx()
3818 ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n", ath10k_set_key_h_def_keyidx()
3826 struct ath10k *ar = hw->priv; ath10k_set_key() local
3838 mutex_lock(&ar->conf_mutex); ath10k_set_key()
3851 spin_lock_bh(&ar->data_lock); ath10k_set_key()
3852 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); ath10k_set_key()
3853 spin_unlock_bh(&ar->data_lock); ath10k_set_key()
3857 ath10k_warn(ar, "failed to install key for non-existent peer %pM\n", ath10k_set_key()
3887 ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n", ath10k_set_key()
3892 ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key); ath10k_set_key()
3894 spin_lock_bh(&ar->data_lock); ath10k_set_key()
3895 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); ath10k_set_key()
3902 ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr); ath10k_set_key()
3903 spin_unlock_bh(&ar->data_lock); ath10k_set_key()
3906 mutex_unlock(&ar->conf_mutex); ath10k_set_key()
3914 struct ath10k *ar = hw->priv; ath10k_set_default_unicast_key() local
3918 mutex_lock(&arvif->ar->conf_mutex); ath10k_set_default_unicast_key()
3920 if (arvif->ar->state != ATH10K_STATE_ON) ath10k_set_default_unicast_key()
3923 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n", ath10k_set_default_unicast_key()
3926 ret = ath10k_wmi_vdev_set_param(arvif->ar, ath10k_set_default_unicast_key()
3928 arvif->ar->wmi.vdev_param->def_keyid, ath10k_set_default_unicast_key()
3932 ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n", ath10k_set_default_unicast_key()
3940 mutex_unlock(&arvif->ar->conf_mutex); ath10k_set_default_unicast_key()
3945 struct ath10k *ar; ath10k_sta_rc_update_wk() local
3955 ar = arvif->ar; ath10k_sta_rc_update_wk()
3957 spin_lock_bh(&ar->data_lock); ath10k_sta_rc_update_wk()
3966 spin_unlock_bh(&ar->data_lock); ath10k_sta_rc_update_wk()
3968 mutex_lock(&ar->conf_mutex); ath10k_sta_rc_update_wk()
3971 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n", ath10k_sta_rc_update_wk()
3974 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, ath10k_sta_rc_update_wk()
3977 ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n", ath10k_sta_rc_update_wk()
3982 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n", ath10k_sta_rc_update_wk()
3985 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, ath10k_sta_rc_update_wk()
3988 ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n", ath10k_sta_rc_update_wk()
3993 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n", ath10k_sta_rc_update_wk()
3996 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, ath10k_sta_rc_update_wk()
3999 ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n", ath10k_sta_rc_update_wk()
4005 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n", ath10k_sta_rc_update_wk()
4008 err = ath10k_station_assoc(ar, arvif->vif, sta, true); ath10k_sta_rc_update_wk()
4010 ath10k_warn(ar, "failed to reassociate station: %pM\n", ath10k_sta_rc_update_wk()
4014 mutex_unlock(&ar->conf_mutex); ath10k_sta_rc_update_wk()
4019 struct ath10k *ar = arvif->ar; ath10k_mac_inc_num_stations() local
4021 lockdep_assert_held(&ar->conf_mutex); ath10k_mac_inc_num_stations()
4027 if (ar->num_stations >= ar->max_num_stations) ath10k_mac_inc_num_stations()
4030 ar->num_stations++; ath10k_mac_inc_num_stations()
4037 struct ath10k *ar = arvif->ar; ath10k_mac_dec_num_stations() local
4039 lockdep_assert_held(&ar->conf_mutex); ath10k_mac_dec_num_stations()
4045 ar->num_stations--; ath10k_mac_dec_num_stations()
4054 struct ath10k *ar = hw->priv; ath10k_sta_state() local
4071 mutex_lock(&ar->conf_mutex); ath10k_sta_state()
4078 ath10k_dbg(ar, ATH10K_DBG_MAC, ath10k_sta_state()
4081 ar->num_stations + 1, ar->max_num_stations, ath10k_sta_state()
4082 ar->num_peers + 1, ar->max_num_peers); ath10k_sta_state()
4086 ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n", ath10k_sta_state()
4087 ar->max_num_stations); ath10k_sta_state()
4091 ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr); ath10k_sta_state()
4093 ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n", ath10k_sta_state()
4104 ath10k_warn(ar, "failed to start vdev %i: %d\n", ath10k_sta_state()
4106 WARN_ON(ath10k_peer_delete(ar, arvif->vdev_id, ath10k_sta_state()
4119 ath10k_dbg(ar, ATH10K_DBG_MAC, ath10k_sta_state()
4128 ath10k_warn(ar, "failed to stop vdev %i: %d\n", ath10k_sta_state()
4134 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); ath10k_sta_state()
4136 ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n", ath10k_sta_state()
4147 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n", ath10k_sta_state()
4150 ret = ath10k_station_assoc(ar, vif, sta, false); ath10k_sta_state()
4152 ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n", ath10k_sta_state()
4161 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n", ath10k_sta_state()
4164 ret = ath10k_station_disassoc(ar, vif, sta); ath10k_sta_state()
4166 ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n", ath10k_sta_state()
4170 mutex_unlock(&ar->conf_mutex); ath10k_sta_state()
4174 static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, ath10k_conf_tx_uapsd() argument
4183 lockdep_assert_held(&ar->conf_mutex); ath10k_conf_tx_uapsd()
4220 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, ath10k_conf_tx_uapsd()
4224 ath10k_warn(ar, "failed to set uapsd params: %d\n", ret); ath10k_conf_tx_uapsd()
4233 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, ath10k_conf_tx_uapsd()
4237 ath10k_warn(ar, "failed to set rx wake param: %d\n", ret); ath10k_conf_tx_uapsd()
4241 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", ath10k_conf_tx_uapsd()
4248 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", ath10k_conf_tx_uapsd()
4253 if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) || ath10k_conf_tx_uapsd()
4254 test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) { ath10k_conf_tx_uapsd()
4267 ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id, ath10k_conf_tx_uapsd()
4270 ath10k_warn(ar, "failed to set uapsd auto trigger %d\n", ath10k_conf_tx_uapsd()
4284 struct ath10k *ar = hw->priv; ath10k_conf_tx() local
4289 mutex_lock(&ar->conf_mutex); ath10k_conf_tx()
4322 if (ar->wmi.ops->gen_vdev_wmm_conf) { ath10k_conf_tx()
4323 ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id, ath10k_conf_tx()
4326 ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n", ath10k_conf_tx()
4334 ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params); ath10k_conf_tx()
4336 ath10k_warn(ar, "failed to set wmm params: %d\n", ret); ath10k_conf_tx()
4341 ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd); ath10k_conf_tx()
4343 ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret); ath10k_conf_tx()
4346 mutex_unlock(&ar->conf_mutex); ath10k_conf_tx()
4358 struct ath10k *ar = hw->priv; ath10k_remain_on_channel() local
4363 mutex_lock(&ar->conf_mutex); ath10k_remain_on_channel()
4365 spin_lock_bh(&ar->data_lock); ath10k_remain_on_channel()
4366 switch (ar->scan.state) { ath10k_remain_on_channel()
4368 reinit_completion(&ar->scan.started); ath10k_remain_on_channel()
4369 reinit_completion(&ar->scan.completed); ath10k_remain_on_channel()
4370 reinit_completion(&ar->scan.on_channel); ath10k_remain_on_channel()
4371 ar->scan.state = ATH10K_SCAN_STARTING; ath10k_remain_on_channel()
4372 ar->scan.is_roc = true; ath10k_remain_on_channel()
4373 ar->scan.vdev_id = arvif->vdev_id; ath10k_remain_on_channel()
4374 ar->scan.roc_freq = chan->center_freq; ath10k_remain_on_channel()
4383 spin_unlock_bh(&ar->data_lock); ath10k_remain_on_channel()
4391 ath10k_wmi_start_scan_init(ar, &arg); ath10k_remain_on_channel()
4402 ret = ath10k_start_scan(ar, &arg); ath10k_remain_on_channel()
4404 ath10k_warn(ar, "failed to start roc scan: %d\n", ret); ath10k_remain_on_channel()
4405 spin_lock_bh(&ar->data_lock); ath10k_remain_on_channel()
4406 ar->scan.state = ATH10K_SCAN_IDLE; ath10k_remain_on_channel()
4407 spin_unlock_bh(&ar->data_lock); ath10k_remain_on_channel()
4411 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ); ath10k_remain_on_channel()
4413 ath10k_warn(ar, "failed to switch to channel for roc scan\n"); ath10k_remain_on_channel()
4415 ret = ath10k_scan_stop(ar); ath10k_remain_on_channel()
4417 ath10k_warn(ar, "failed to stop scan: %d\n", ret); ath10k_remain_on_channel()
4425 mutex_unlock(&ar->conf_mutex); ath10k_remain_on_channel()
4431 struct ath10k *ar = hw->priv; ath10k_cancel_remain_on_channel() local
4433 mutex_lock(&ar->conf_mutex); ath10k_cancel_remain_on_channel()
4434 ath10k_scan_abort(ar); ath10k_cancel_remain_on_channel()
4435 mutex_unlock(&ar->conf_mutex); ath10k_cancel_remain_on_channel()
4437 cancel_delayed_work_sync(&ar->scan.timeout); ath10k_cancel_remain_on_channel()
4449 struct ath10k *ar = hw->priv; ath10k_set_rts_threshold() local
4453 mutex_lock(&ar->conf_mutex); ath10k_set_rts_threshold()
4454 list_for_each_entry(arvif, &ar->arvifs, list) { ath10k_set_rts_threshold()
4455 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n", ath10k_set_rts_threshold()
4460 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", ath10k_set_rts_threshold()
4465 mutex_unlock(&ar->conf_mutex); ath10k_set_rts_threshold()
4488 struct ath10k *ar = hw->priv; ath10k_flush() local
4497 mutex_lock(&ar->conf_mutex); ath10k_flush()
4499 if (ar->state == ATH10K_STATE_WEDGED) ath10k_flush()
4502 ret = wait_event_timeout(ar->htt.empty_tx_wq, ({ ath10k_flush()
4505 spin_lock_bh(&ar->htt.tx_lock); ath10k_flush()
4506 empty = (ar->htt.num_pending_tx == 0); ath10k_flush()
4507 spin_unlock_bh(&ar->htt.tx_lock); ath10k_flush()
4509 skip = (ar->state == ATH10K_STATE_WEDGED) || ath10k_flush()
4511 &ar->dev_flags); ath10k_flush()
4517 ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %i\n", ath10k_flush()
4518 skip, ar->state, ret); ath10k_flush()
4521 mutex_unlock(&ar->conf_mutex); ath10k_flush()
4537 struct ath10k *ar = hw->priv; ath10k_suspend() local
4540 mutex_lock(&ar->conf_mutex); ath10k_suspend()
4542 ret = ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND); ath10k_suspend()
4550 ret = ath10k_hif_suspend(ar); ath10k_suspend()
4552 ath10k_warn(ar, "failed to suspend hif: %d\n", ret); ath10k_suspend()
4559 ret = ath10k_wmi_pdev_resume_target(ar); ath10k_suspend()
4561 ath10k_warn(ar, "failed to resume target: %d\n", ret); ath10k_suspend()
4565 mutex_unlock(&ar->conf_mutex); ath10k_suspend()
4571 struct ath10k *ar = hw->priv; ath10k_resume() local
4574 mutex_lock(&ar->conf_mutex); ath10k_resume()
4576 ret = ath10k_hif_resume(ar); ath10k_resume()
4578 ath10k_warn(ar, "failed to resume hif: %d\n", ret); ath10k_resume()
4583 ret = ath10k_wmi_pdev_resume_target(ar); ath10k_resume()
4585 ath10k_warn(ar, "failed to resume target: %d\n", ret); ath10k_resume()
4592 mutex_unlock(&ar->conf_mutex); ath10k_resume()
4600 struct ath10k *ar = hw->priv; ath10k_reconfig_complete() local
4605 mutex_lock(&ar->conf_mutex); ath10k_reconfig_complete()
4609 if (ar->state == ATH10K_STATE_RESTARTED) { ath10k_reconfig_complete()
4610 ath10k_info(ar, "device successfully recovered\n"); ath10k_reconfig_complete()
4611 ar->state = ATH10K_STATE_ON; ath10k_reconfig_complete()
4612 ieee80211_wake_queues(ar->hw); ath10k_reconfig_complete()
4615 mutex_unlock(&ar->conf_mutex); ath10k_reconfig_complete()
4621 struct ath10k *ar = hw->priv; ath10k_get_survey() local
4623 struct survey_info *ar_survey = &ar->survey[idx]; ath10k_get_survey()
4626 mutex_lock(&ar->conf_mutex); ath10k_get_survey()
4642 spin_lock_bh(&ar->data_lock); ath10k_get_survey()
4644 spin_unlock_bh(&ar->data_lock); ath10k_get_survey()
4648 if (ar->rx_channel == survey->channel) ath10k_get_survey()
4652 mutex_unlock(&ar->conf_mutex); ath10k_get_survey()
4691 ath10k_default_bitrate_mask(struct ath10k *ar, ath10k_default_bitrate_mask() argument
4698 u16 nrf = ar->num_rf_chains; ath10k_default_bitrate_mask()
4700 if (ar->cfg_tx_chainmask) ath10k_default_bitrate_mask()
4701 nrf = get_nss_from_chainmask(ar->cfg_tx_chainmask); ath10k_default_bitrate_mask()
4815 ath10k_bitrate_mask_rate(struct ath10k *ar, ath10k_bitrate_mask_rate() argument
4874 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac fixed rate pream 0x%02x nss 0x%02x rate 0x%02x\n", ath10k_bitrate_mask_rate()
4882 static bool ath10k_get_fixed_rate_nss(struct ath10k *ar, ath10k_get_fixed_rate_nss() argument
4893 return ath10k_bitrate_mask_rate(ar, mask, band, fixed_rate, fixed_nss); ath10k_get_fixed_rate_nss()
4901 struct ath10k *ar = arvif->ar; ath10k_set_fixed_rate_param() local
4905 mutex_lock(&ar->conf_mutex); ath10k_set_fixed_rate_param()
4913 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n"); ath10k_set_fixed_rate_param()
4916 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac force sgi\n"); ath10k_set_fixed_rate_param()
4918 vdev_param = ar->wmi.vdev_param->fixed_rate; ath10k_set_fixed_rate_param()
4919 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, ath10k_set_fixed_rate_param()
4922 ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n", ath10k_set_fixed_rate_param()
4930 vdev_param = ar->wmi.vdev_param->nss; ath10k_set_fixed_rate_param()
4931 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, ath10k_set_fixed_rate_param()
4935 ath10k_warn(ar, "failed to set fixed nss param %d: %d\n", ath10k_set_fixed_rate_param()
4943 vdev_param = ar->wmi.vdev_param->sgi; ath10k_set_fixed_rate_param()
4944 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ath10k_set_fixed_rate_param()
4948 ath10k_warn(ar, "failed to set sgi param %d: %d\n", ath10k_set_fixed_rate_param()
4957 mutex_unlock(&ar->conf_mutex); ath10k_set_fixed_rate_param()
4966 struct ath10k *ar = arvif->ar; ath10k_set_bitrate_mask() local
4967 enum ieee80211_band band = ar->hw->conf.chandef.chan->band; ath10k_set_bitrate_mask()
4969 u8 fixed_nss = ar->num_rf_chains; ath10k_set_bitrate_mask()
4972 if (ar->cfg_tx_chainmask) ath10k_set_bitrate_mask()
4973 fixed_nss = get_nss_from_chainmask(ar->cfg_tx_chainmask); ath10k_set_bitrate_mask()
4979 if (!ath10k_default_bitrate_mask(ar, band, mask)) { ath10k_set_bitrate_mask()
4980 if (!ath10k_get_fixed_rate_nss(ar, mask, band, ath10k_set_bitrate_mask()
4987 ath10k_warn(ar, "failed to force SGI usage for default rate settings\n"); ath10k_set_bitrate_mask()
5000 struct ath10k *ar = hw->priv; ath10k_sta_rc_update() local
5004 spin_lock_bh(&ar->data_lock); ath10k_sta_rc_update()
5006 ath10k_dbg(ar, ATH10K_DBG_MAC, ath10k_sta_rc_update()
5025 ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n", ath10k_sta_rc_update()
5052 ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n", ath10k_sta_rc_update()
5063 spin_unlock_bh(&ar->data_lock); ath10k_sta_rc_update()
5084 struct ath10k *ar = hw->priv; ath10k_ampdu_action() local
5087 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n", ath10k_ampdu_action()
5251 struct ath10k *ar; ath10k_mac_create() local
5257 ar = hw->priv; ath10k_mac_create()
5258 ar->hw = hw; ath10k_mac_create()
5260 return ar; ath10k_mac_create()
5263 void ath10k_mac_destroy(struct ath10k *ar) ath10k_mac_destroy() argument
5265 ieee80211_free_hw(ar->hw); ath10k_mac_destroy()
5321 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar) ath10k_create_vht_cap() argument
5328 vht_cap.cap = ar->vht_cap_info; ath10k_create_vht_cap()
5332 if (i < ar->num_rf_chains) ath10k_create_vht_cap()
5344 static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar) ath10k_get_ht_cap() argument
5349 if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED)) ath10k_get_ht_cap()
5359 if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI) ath10k_get_ht_cap()
5362 if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI) ath10k_get_ht_cap()
5365 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) { ath10k_get_ht_cap()
5374 if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC) ath10k_get_ht_cap()
5377 if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) { ath10k_get_ht_cap()
5380 stbc = ar->ht_cap_info; ath10k_get_ht_cap()
5389 if (ar->ht_cap_info & WMI_HT_CAP_LDPC) ath10k_get_ht_cap()
5392 if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT) ath10k_get_ht_cap()
5396 if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK) ath10k_get_ht_cap()
5399 for (i = 0; i < ar->num_rf_chains; i++) ath10k_get_ht_cap()
5417 struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id) ath10k_get_arvif() argument
5426 ieee80211_iterate_active_interfaces_atomic(ar->hw, ath10k_get_arvif()
5431 ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id); ath10k_get_arvif()
5438 int ath10k_mac_register(struct ath10k *ar) ath10k_mac_register() argument
5453 SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr); ath10k_mac_register()
5455 SET_IEEE80211_DEV(ar->hw, ar->dev); ath10k_mac_register()
5457 ht_cap = ath10k_get_ht_cap(ar); ath10k_mac_register()
5458 vht_cap = ath10k_create_vht_cap(ar); ath10k_mac_register()
5460 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { ath10k_mac_register()
5469 band = &ar->mac.sbands[IEEE80211_BAND_2GHZ]; ath10k_mac_register()
5479 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band; ath10k_mac_register()
5482 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { ath10k_mac_register()
5491 band = &ar->mac.sbands[IEEE80211_BAND_5GHZ]; ath10k_mac_register()
5498 ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band; ath10k_mac_register()
5501 ar->hw->wiphy->interface_modes = ath10k_mac_register()
5505 ar->hw->wiphy->available_antennas_rx = ar->supp_rx_chainmask; ath10k_mac_register()
5506 ar->hw->wiphy->available_antennas_tx = ar->supp_tx_chainmask; ath10k_mac_register()
5508 if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features)) ath10k_mac_register()
5509 ar->hw->wiphy->interface_modes |= ath10k_mac_register()
5514 ar->hw->flags = IEEE80211_HW_SIGNAL_DBM | ath10k_mac_register()
5524 ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS; ath10k_mac_register()
5526 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) ath10k_mac_register()
5527 ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS; ath10k_mac_register()
5529 if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) { ath10k_mac_register()
5530 ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; ath10k_mac_register()
5531 ar->hw->flags |= IEEE80211_HW_TX_AMPDU_SETUP_IN_HW; ath10k_mac_register()
5534 ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID; ath10k_mac_register()
5535 ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN; ath10k_mac_register()
5537 ar->hw->vif_data_size = sizeof(struct ath10k_vif); ath10k_mac_register()
5538 ar->hw->sta_data_size = sizeof(struct ath10k_sta); ath10k_mac_register()
5540 ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL; ath10k_mac_register()
5542 if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) { ath10k_mac_register()
5543 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; ath10k_mac_register()
5549 ar->hw->wiphy->probe_resp_offload |= ath10k_mac_register()
5555 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; ath10k_mac_register()
5556 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; ath10k_mac_register()
5557 ar->hw->wiphy->max_remain_on_channel_duration = 5000; ath10k_mac_register()
5559 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; ath10k_mac_register()
5560 ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE; ath10k_mac_register()
5566 ar->hw->queues = 4; ath10k_mac_register()
5568 switch (ar->wmi.op_version) { ath10k_mac_register()
5571 ar->hw->wiphy->iface_combinations = ath10k_if_comb; ath10k_mac_register()
5572 ar->hw->wiphy->n_iface_combinations = ath10k_mac_register()
5574 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); ath10k_mac_register()
5579 ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb; ath10k_mac_register()
5580 ar->hw->wiphy->n_iface_combinations = ath10k_mac_register()
5590 ar->hw->netdev_features = NETIF_F_HW_CSUM; ath10k_mac_register()
5594 ar->ath_common.debug_mask = ATH_DBG_DFS; ath10k_mac_register()
5595 ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common, ath10k_mac_register()
5598 if (!ar->dfs_detector) ath10k_mac_register()
5599 ath10k_warn(ar, "failed to initialise DFS pattern detector\n"); ath10k_mac_register()
5602 ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy, ath10k_mac_register()
5605 ath10k_err(ar, "failed to initialise regulatory: %i\n", ret); ath10k_mac_register()
5609 ar->hw->wiphy->cipher_suites = cipher_suites; ath10k_mac_register()
5610 ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); ath10k_mac_register()
5612 ret = ieee80211_register_hw(ar->hw); ath10k_mac_register()
5614 ath10k_err(ar, "failed to register ieee80211: %d\n", ret); ath10k_mac_register()
5618 if (!ath_is_world_regd(&ar->ath_common.regulatory)) { ath10k_mac_register()
5619 ret = regulatory_hint(ar->hw->wiphy, ath10k_mac_register()
5620 ar->ath_common.regulatory.alpha2); ath10k_mac_register()
5628 ieee80211_unregister_hw(ar->hw); ath10k_mac_register()
5630 kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels); ath10k_mac_register()
5631 kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels); ath10k_mac_register()
5636 void ath10k_mac_unregister(struct ath10k *ar) ath10k_mac_unregister() argument
5638 ieee80211_unregister_hw(ar->hw); ath10k_mac_unregister()
5640 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) ath10k_mac_unregister()
5641 ar->dfs_detector->exit(ar->dfs_detector); ath10k_mac_unregister()
5643 kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels); ath10k_mac_unregister()
5644 kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels); ath10k_mac_unregister()
5646 SET_IEEE80211_DEV(ar->hw, NULL); ath10k_mac_unregister()
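
The mac.c hits above trace the usual mac80211 bring-up order: fill in wiphy capabilities, register the hw, then issue a regulatory hint, unwinding in reverse on failure. A minimal sketch of that pattern with a hypothetical driver name (only the mac80211/cfg80211 calls themselves are real):

#include <net/mac80211.h>

/* Hypothetical mydrv_: shows the register/unwind order used by
 * ath10k_mac_register() above, nothing more. */
static int mydrv_mac_register(struct ieee80211_hw *hw)
{
	int ret;

	hw->wiphy->max_scan_ssids = 16;		/* capabilities first */
	hw->queues = 4;

	ret = ieee80211_register_hw(hw);	/* publish to mac80211 */
	if (ret)
		return ret;

	ret = regulatory_hint(hw->wiphy, "US");	/* illustrative alpha2 */
	if (ret)
		goto err_unregister;

	return 0;

err_unregister:
	ieee80211_unregister_hw(hw);		/* undo in reverse order */
	return ret;
}
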
H A Dpci.c82 static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
83 static int ath10k_pci_cold_reset(struct ath10k *ar);
84 static int ath10k_pci_warm_reset(struct ath10k *ar);
85 static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
86 static int ath10k_pci_init_irq(struct ath10k *ar);
87 static int ath10k_pci_deinit_irq(struct ath10k *ar);
88 static int ath10k_pci_request_irq(struct ath10k *ar);
89 static void ath10k_pci_free_irq(struct ath10k *ar);
333 static bool ath10k_pci_irq_pending(struct ath10k *ar) ath10k_pci_irq_pending() argument
338 cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + ath10k_pci_irq_pending()
346 static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar) ath10k_pci_disable_and_clear_legacy_irq() argument
351 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, ath10k_pci_disable_and_clear_legacy_irq()
353 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS, ath10k_pci_disable_and_clear_legacy_irq()
358 (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + ath10k_pci_disable_and_clear_legacy_irq()
362 static void ath10k_pci_enable_legacy_irq(struct ath10k *ar) ath10k_pci_enable_legacy_irq() argument
364 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + ath10k_pci_enable_legacy_irq()
370 (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + ath10k_pci_enable_legacy_irq()
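
Both legacy-IRQ helpers above end with a discarded register read. A short sketch of the idiom, reusing the same register names:

/* PCIe memory writes are posted: they can still be in flight when the
 * CPU moves on. Reading the register back forces the write to reach
 * the device first, which is what the "(void)read32" above does. */
static void example_irq_mask(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS, 0);

	/* flush the posted write before returning */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}
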
374 static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar) ath10k_pci_get_irq_method() argument
376 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_get_irq_method()
389 struct ath10k *ar = pipe->hif_ce_state; __ath10k_pci_rx_post_buf() local
390 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); __ath10k_pci_rx_post_buf()
404 paddr = dma_map_single(ar->dev, skb->data, __ath10k_pci_rx_post_buf()
407 if (unlikely(dma_mapping_error(ar->dev, paddr))) { __ath10k_pci_rx_post_buf()
408 ath10k_warn(ar, "failed to dma map pci rx buf\n"); __ath10k_pci_rx_post_buf()
417 ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret); __ath10k_pci_rx_post_buf()
418 dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb), __ath10k_pci_rx_post_buf()
429 struct ath10k *ar = pipe->hif_ce_state; __ath10k_pci_rx_post_pipe() local
430 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); __ath10k_pci_rx_post_pipe()
446 ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret); __ath10k_pci_rx_post_pipe()
456 struct ath10k *ar = pipe->hif_ce_state; ath10k_pci_rx_post_pipe() local
457 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_rx_post_pipe()
464 static void ath10k_pci_rx_post(struct ath10k *ar) ath10k_pci_rx_post() argument
466 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_rx_post()
477 struct ath10k *ar = (void *)ptr; ath10k_pci_rx_replenish_retry() local
479 ath10k_pci_rx_post(ar); ath10k_pci_rx_replenish_retry()
487 static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, ath10k_pci_diag_read_mem() argument
490 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_diag_read_mem()
514 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, ath10k_pci_diag_read_mem()
544 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, ath10k_pci_diag_read_mem()
604 ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n", ath10k_pci_diag_read_mem()
608 dma_free_coherent(ar->dev, orig_nbytes, data_buf, ath10k_pci_diag_read_mem()
616 static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value) ath10k_pci_diag_read32() argument
621 ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val)); ath10k_pci_diag_read32()
627 static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest, __ath10k_pci_diag_read_hi() argument
635 ret = ath10k_pci_diag_read32(ar, host_addr, &addr); __ath10k_pci_diag_read_hi()
637 ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n", __ath10k_pci_diag_read_hi()
642 ret = ath10k_pci_diag_read_mem(ar, addr, dest, len); __ath10k_pci_diag_read_hi()
644 ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n", __ath10k_pci_diag_read_hi()
652 #define ath10k_pci_diag_read_hi(ar, dest, src, len) \
653 __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
655 static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, ath10k_pci_diag_write_mem() argument
658 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_diag_write_mem()
681 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, ath10k_pci_diag_write_mem()
703 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address); ath10k_pci_diag_write_mem()
776 dma_free_coherent(ar->dev, orig_nbytes, data_buf, ath10k_pci_diag_write_mem()
781 ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n", ath10k_pci_diag_write_mem()
789 static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value) ath10k_pci_diag_write32() argument
793 return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val)); ath10k_pci_diag_write32()
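
A small usage sketch for the word-sized diagnostic-window helpers above; the target address and bit mask are placeholders:

/* Read-modify-write one word of target memory, built only from the
 * ath10k_pci_diag_read32()/write32() helpers shown above. */
static int example_diag_set_bits(struct ath10k *ar, u32 addr, u32 bits)
{
	u32 val;
	int ret;

	ret = ath10k_pci_diag_read32(ar, addr, &val);
	if (ret)
		return ret;

	return ath10k_pci_diag_write32(ar, addr, val | bits);
}
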
796 static bool ath10k_pci_is_awake(struct ath10k *ar) ath10k_pci_is_awake() argument
798 u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS); ath10k_pci_is_awake()
803 static int ath10k_pci_wake_wait(struct ath10k *ar) ath10k_pci_wake_wait() argument
809 if (ath10k_pci_is_awake(ar)) ath10k_pci_wake_wait()
822 static int ath10k_pci_wake(struct ath10k *ar) ath10k_pci_wake() argument
824 ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, ath10k_pci_wake()
826 return ath10k_pci_wake_wait(ar); ath10k_pci_wake()
829 static void ath10k_pci_sleep(struct ath10k *ar) ath10k_pci_sleep() argument
831 ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, ath10k_pci_sleep()
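
The wake/sleep pair above is a request-then-poll handshake. A sketch, assuming the PCIE_SOC_WAKE_V_MASK wake value and a 10 ms budget (the driver's real timeout may differ):

/* Needs <linux/jiffies.h> and <linux/delay.h>. Mirrors the loop in
 * ath10k_pci_wake_wait() above. */
static int example_wake(struct ath10k *ar)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(10);

	/* ask the SoC to wake up ... */
	ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);

	/* ... then poll RTC_STATE until it reports awake */
	do {
		if (ath10k_pci_is_awake(ar))
			return 0;
		udelay(50);	/* brief busy-wait between polls */
	} while (time_before(jiffies, timeout));

	return -ETIMEDOUT;
}
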
838 struct ath10k *ar = ce_state->ar; ath10k_pci_ce_send_done() local
839 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_ce_send_done()
858 cb->tx_completion(ar, skb); ath10k_pci_ce_send_done()
864 struct ath10k *ar = ce_state->ar; ath10k_pci_ce_recv_data() local
865 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_ce_recv_data()
882 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, ath10k_pci_ce_recv_data()
886 ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)", ath10k_pci_ce_recv_data()
897 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n", ath10k_pci_ce_recv_data()
899 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ", ath10k_pci_ce_recv_data()
902 cb->rx_completion(ar, skb); ath10k_pci_ce_recv_data()
908 static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, ath10k_pci_hif_tx_sg() argument
911 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_hif_tx_sg()
933 ath10k_dbg(ar, ATH10K_DBG_PCI, ath10k_pci_hif_tx_sg()
936 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", ath10k_pci_hif_tx_sg()
951 ath10k_dbg(ar, ATH10K_DBG_PCI, ath10k_pci_hif_tx_sg()
954 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", ath10k_pci_hif_tx_sg()
977 static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf, ath10k_pci_hif_diag_read() argument
980 return ath10k_pci_diag_read_mem(ar, address, buf, buf_len); ath10k_pci_hif_diag_read()
983 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) ath10k_pci_hif_get_free_queue_number() argument
985 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_hif_get_free_queue_number()
987 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n"); ath10k_pci_hif_get_free_queue_number()
992 static void ath10k_pci_dump_registers(struct ath10k *ar, ath10k_pci_dump_registers() argument
998 lockdep_assert_held(&ar->data_lock); ath10k_pci_dump_registers()
1000 ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0], ath10k_pci_dump_registers()
1004 ath10k_err(ar, "failed to read firmware dump area: %d\n", ret); ath10k_pci_dump_registers()
1010 ath10k_err(ar, "firmware register dump:\n"); ath10k_pci_dump_registers()
1012 ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n", ath10k_pci_dump_registers()
1026 static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) ath10k_pci_fw_crashed_dump() argument
1031 spin_lock_bh(&ar->data_lock); ath10k_pci_fw_crashed_dump()
1033 ar->stats.fw_crash_counter++; ath10k_pci_fw_crashed_dump()
1035 crash_data = ath10k_debug_get_new_fw_crash_data(ar); ath10k_pci_fw_crashed_dump()
1042 ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid); ath10k_pci_fw_crashed_dump()
1043 ath10k_print_driver_info(ar); ath10k_pci_fw_crashed_dump()
1044 ath10k_pci_dump_registers(ar, crash_data); ath10k_pci_fw_crashed_dump()
1046 spin_unlock_bh(&ar->data_lock); ath10k_pci_fw_crashed_dump()
1048 queue_work(ar->workqueue, &ar->restart_work); ath10k_pci_fw_crashed_dump()
1051 static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, ath10k_pci_hif_send_complete_check() argument
1054 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n"); ath10k_pci_hif_send_complete_check()
1065 resources = ath10k_pci_hif_get_free_queue_number(ar, pipe); ath10k_pci_hif_send_complete_check()
1074 ath10k_ce_per_engine_service(ar, pipe); ath10k_pci_hif_send_complete_check()
1077 static void ath10k_pci_hif_set_callbacks(struct ath10k *ar, ath10k_pci_hif_set_callbacks() argument
1080 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_hif_set_callbacks()
1082 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n"); ath10k_pci_hif_set_callbacks()
1088 static void ath10k_pci_kill_tasklet(struct ath10k *ar) ath10k_pci_kill_tasklet() argument
1090 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_kill_tasklet()
1102 static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, ath10k_pci_hif_map_service_to_pipe() argument
1111 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n"); ath10k_pci_hif_map_service_to_pipe()
1155 static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, ath10k_pci_hif_get_default_pipe() argument
1160 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n"); ath10k_pci_hif_get_default_pipe()
1162 (void)ath10k_pci_hif_map_service_to_pipe(ar, ath10k_pci_hif_get_default_pipe()
1170 static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) ath10k_pci_irq_msi_fw_mask() argument
1174 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS); ath10k_pci_irq_msi_fw_mask()
1177 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val); ath10k_pci_irq_msi_fw_mask()
1180 static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar) ath10k_pci_irq_msi_fw_unmask() argument
1184 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS); ath10k_pci_irq_msi_fw_unmask()
1187 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val); ath10k_pci_irq_msi_fw_unmask()
1190 static void ath10k_pci_irq_disable(struct ath10k *ar) ath10k_pci_irq_disable() argument
1192 ath10k_ce_disable_interrupts(ar); ath10k_pci_irq_disable()
1193 ath10k_pci_disable_and_clear_legacy_irq(ar); ath10k_pci_irq_disable()
1194 ath10k_pci_irq_msi_fw_mask(ar); ath10k_pci_irq_disable()
1197 static void ath10k_pci_irq_sync(struct ath10k *ar) ath10k_pci_irq_sync() argument
1199 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_irq_sync()
1206 static void ath10k_pci_irq_enable(struct ath10k *ar) ath10k_pci_irq_enable() argument
1208 ath10k_ce_enable_interrupts(ar); ath10k_pci_irq_enable()
1209 ath10k_pci_enable_legacy_irq(ar); ath10k_pci_irq_enable()
1210 ath10k_pci_irq_msi_fw_unmask(ar); ath10k_pci_irq_enable()
1213 static int ath10k_pci_hif_start(struct ath10k *ar) ath10k_pci_hif_start() argument
1215 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n"); ath10k_pci_hif_start()
1217 ath10k_pci_irq_enable(ar); ath10k_pci_hif_start()
1218 ath10k_pci_rx_post(ar); ath10k_pci_hif_start()
1225 struct ath10k *ar; ath10k_pci_rx_pipe_cleanup() local
1231 ar = pci_pipe->hif_ce_state; ath10k_pci_rx_pipe_cleanup()
1248 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, ath10k_pci_rx_pipe_cleanup()
1257 struct ath10k *ar; ath10k_pci_tx_pipe_cleanup() local
1266 ar = pci_pipe->hif_ce_state; ath10k_pci_tx_pipe_cleanup()
1267 ar_pci = ath10k_pci_priv(ar); ath10k_pci_tx_pipe_cleanup()
1290 ar_pci->msg_callbacks_current.tx_completion(ar, skb); ath10k_pci_tx_pipe_cleanup()
1302 static void ath10k_pci_buffer_cleanup(struct ath10k *ar) ath10k_pci_buffer_cleanup() argument
1304 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_buffer_cleanup()
1316 static void ath10k_pci_ce_deinit(struct ath10k *ar) ath10k_pci_ce_deinit() argument
1321 ath10k_ce_deinit_pipe(ar, i); ath10k_pci_ce_deinit()
1324 static void ath10k_pci_flush(struct ath10k *ar) ath10k_pci_flush() argument
1326 ath10k_pci_kill_tasklet(ar); ath10k_pci_flush()
1327 ath10k_pci_buffer_cleanup(ar); ath10k_pci_flush()
1330 static void ath10k_pci_hif_stop(struct ath10k *ar) ath10k_pci_hif_stop() argument
1332 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n"); ath10k_pci_hif_stop()
1345 ath10k_pci_warm_reset(ar); ath10k_pci_hif_stop()
1347 ath10k_pci_irq_disable(ar); ath10k_pci_hif_stop()
1348 ath10k_pci_irq_sync(ar); ath10k_pci_hif_stop()
1349 ath10k_pci_flush(ar); ath10k_pci_hif_stop()
1352 static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, ath10k_pci_hif_exchange_bmi_msg() argument
1356 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_hif_exchange_bmi_msg()
1379 req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE); ath10k_pci_hif_exchange_bmi_msg()
1380 ret = dma_mapping_error(ar->dev, req_paddr); ath10k_pci_hif_exchange_bmi_msg()
1393 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len, ath10k_pci_hif_exchange_bmi_msg()
1395 ret = dma_mapping_error(ar->dev, resp_paddr); ath10k_pci_hif_exchange_bmi_msg()
1429 dma_unmap_single(ar->dev, resp_paddr, ath10k_pci_hif_exchange_bmi_msg()
1433 dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE); ath10k_pci_hif_exchange_bmi_msg()
1462 struct ath10k *ar = ce_state->ar; ath10k_pci_bmi_recv_data() local
1477 ath10k_warn(ar, "unexpected: BMI data received; ignoring\n"); ath10k_pci_bmi_recv_data()
1508 static int ath10k_pci_wake_target_cpu(struct ath10k *ar) ath10k_pci_wake_target_cpu() argument
1513 val = ath10k_pci_read32(ar, addr); ath10k_pci_wake_target_cpu()
1515 ath10k_pci_write32(ar, addr, val); ath10k_pci_wake_target_cpu()
1520 static int ath10k_pci_get_num_banks(struct ath10k *ar) ath10k_pci_get_num_banks() argument
1522 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_get_num_banks()
1528 switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) { ath10k_pci_get_num_banks()
1544 ath10k_warn(ar, "unknown number of banks, assuming 1\n"); ath10k_pci_get_num_banks()
1548 static int ath10k_pci_init_config(struct ath10k *ar) ath10k_pci_init_config() argument
1566 ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr, ath10k_pci_init_config()
1569 ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret); ath10k_pci_init_config()
1575 ath10k_err(ar, "Invalid pcie state addr\n"); ath10k_pci_init_config()
1579 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + ath10k_pci_init_config()
1584 ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret); ath10k_pci_init_config()
1590 ath10k_err(ar, "Invalid pipe cfg addr\n"); ath10k_pci_init_config()
1594 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr, ath10k_pci_init_config()
1599 ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret); ath10k_pci_init_config()
1603 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + ath10k_pci_init_config()
1608 ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret); ath10k_pci_init_config()
1614 ath10k_err(ar, "Invalid svc_to_pipe map\n"); ath10k_pci_init_config()
1618 ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map, ath10k_pci_init_config()
1622 ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret); ath10k_pci_init_config()
1626 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + ath10k_pci_init_config()
1631 ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret); ath10k_pci_init_config()
1637 ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr + ath10k_pci_init_config()
1642 ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret); ath10k_pci_init_config()
1649 ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value); ath10k_pci_init_config()
1651 ath10k_err(ar, "Failed to get early alloc val: %d\n", ret); ath10k_pci_init_config()
1658 ealloc_value |= ((ath10k_pci_get_num_banks(ar) << ath10k_pci_init_config()
1662 ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value); ath10k_pci_init_config()
1664 ath10k_err(ar, "Failed to set early alloc val: %d\n", ret); ath10k_pci_init_config()
1671 ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value); ath10k_pci_init_config()
1673 ath10k_err(ar, "Failed to get option val: %d\n", ret); ath10k_pci_init_config()
1679 ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value); ath10k_pci_init_config()
1681 ath10k_err(ar, "Failed to set option val: %d\n", ret); ath10k_pci_init_config()
1688 static int ath10k_pci_alloc_pipes(struct ath10k *ar) ath10k_pci_alloc_pipes() argument
1690 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_alloc_pipes()
1698 pipe->hif_ce_state = ar; ath10k_pci_alloc_pipes()
1700 ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i], ath10k_pci_alloc_pipes()
1704 ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n", ath10k_pci_alloc_pipes()
1721 static void ath10k_pci_free_pipes(struct ath10k *ar) ath10k_pci_free_pipes() argument
1726 ath10k_ce_free_pipe(ar, i); ath10k_pci_free_pipes()
1729 static int ath10k_pci_init_pipes(struct ath10k *ar) ath10k_pci_init_pipes() argument
1734 ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]); ath10k_pci_init_pipes()
1736 ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n", ath10k_pci_init_pipes()
1745 static bool ath10k_pci_has_fw_crashed(struct ath10k *ar) ath10k_pci_has_fw_crashed() argument
1747 return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) & ath10k_pci_has_fw_crashed()
1751 static void ath10k_pci_fw_crashed_clear(struct ath10k *ar) ath10k_pci_fw_crashed_clear() argument
1755 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); ath10k_pci_fw_crashed_clear()
1757 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val); ath10k_pci_fw_crashed_clear()
1761 static void ath10k_pci_warm_reset_si0(struct ath10k *ar) ath10k_pci_warm_reset_si0() argument
1765 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); ath10k_pci_warm_reset_si0()
1766 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, ath10k_pci_warm_reset_si0()
1768 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); ath10k_pci_warm_reset_si0()
1772 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); ath10k_pci_warm_reset_si0()
1773 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, ath10k_pci_warm_reset_si0()
1775 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); ath10k_pci_warm_reset_si0()
1780 static void ath10k_pci_warm_reset_cpu(struct ath10k *ar) ath10k_pci_warm_reset_cpu() argument
1784 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0); ath10k_pci_warm_reset_cpu()
1786 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + ath10k_pci_warm_reset_cpu()
1788 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, ath10k_pci_warm_reset_cpu()
1792 static void ath10k_pci_warm_reset_ce(struct ath10k *ar) ath10k_pci_warm_reset_ce() argument
1796 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + ath10k_pci_warm_reset_ce()
1799 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, ath10k_pci_warm_reset_ce()
1802 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, ath10k_pci_warm_reset_ce()
1806 static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar) ath10k_pci_warm_reset_clear_lf() argument
1810 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + ath10k_pci_warm_reset_clear_lf()
1812 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + ath10k_pci_warm_reset_clear_lf()
1817 static int ath10k_pci_warm_reset(struct ath10k *ar) ath10k_pci_warm_reset() argument
1821 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n"); ath10k_pci_warm_reset()
1823 spin_lock_bh(&ar->data_lock); ath10k_pci_warm_reset()
1824 ar->stats.fw_warm_reset_counter++; ath10k_pci_warm_reset()
1825 spin_unlock_bh(&ar->data_lock); ath10k_pci_warm_reset()
1827 ath10k_pci_irq_disable(ar); ath10k_pci_warm_reset()
1834 ath10k_pci_warm_reset_si0(ar); ath10k_pci_warm_reset()
1835 ath10k_pci_warm_reset_cpu(ar); ath10k_pci_warm_reset()
1836 ath10k_pci_init_pipes(ar); ath10k_pci_warm_reset()
1837 ath10k_pci_wait_for_target_init(ar); ath10k_pci_warm_reset()
1839 ath10k_pci_warm_reset_clear_lf(ar); ath10k_pci_warm_reset()
1840 ath10k_pci_warm_reset_ce(ar); ath10k_pci_warm_reset()
1841 ath10k_pci_warm_reset_cpu(ar); ath10k_pci_warm_reset()
1842 ath10k_pci_init_pipes(ar); ath10k_pci_warm_reset()
1844 ret = ath10k_pci_wait_for_target_init(ar); ath10k_pci_warm_reset()
1846 ath10k_warn(ar, "failed to wait for target init: %d\n", ret); ath10k_pci_warm_reset()
1850 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n"); ath10k_pci_warm_reset()
1855 static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar) ath10k_pci_qca988x_chip_reset() argument
1860 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n"); ath10k_pci_qca988x_chip_reset()
1870 ret = ath10k_pci_warm_reset(ar); ath10k_pci_qca988x_chip_reset()
1872 ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n", ath10k_pci_qca988x_chip_reset()
1887 ret = ath10k_pci_init_pipes(ar); ath10k_pci_qca988x_chip_reset()
1889 ath10k_warn(ar, "failed to init copy engine: %d\n", ath10k_pci_qca988x_chip_reset()
1894 ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS, ath10k_pci_qca988x_chip_reset()
1897 ath10k_warn(ar, "failed to poke copy engine: %d\n", ath10k_pci_qca988x_chip_reset()
1902 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n"); ath10k_pci_qca988x_chip_reset()
1907 ath10k_warn(ar, "refusing cold reset as requested\n"); ath10k_pci_qca988x_chip_reset()
1911 ret = ath10k_pci_cold_reset(ar); ath10k_pci_qca988x_chip_reset()
1913 ath10k_warn(ar, "failed to cold reset: %d\n", ret); ath10k_pci_qca988x_chip_reset()
1917 ret = ath10k_pci_wait_for_target_init(ar); ath10k_pci_qca988x_chip_reset()
1919 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", ath10k_pci_qca988x_chip_reset()
1924 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n"); ath10k_pci_qca988x_chip_reset()
1929 static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar) ath10k_pci_qca6174_chip_reset() argument
1933 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n"); ath10k_pci_qca6174_chip_reset()
1937 ret = ath10k_pci_cold_reset(ar); ath10k_pci_qca6174_chip_reset()
1939 ath10k_warn(ar, "failed to cold reset: %d\n", ret); ath10k_pci_qca6174_chip_reset()
1943 ret = ath10k_pci_wait_for_target_init(ar); ath10k_pci_qca6174_chip_reset()
1945 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", ath10k_pci_qca6174_chip_reset()
1950 ret = ath10k_pci_warm_reset(ar); ath10k_pci_qca6174_chip_reset()
1952 ath10k_warn(ar, "failed to warm reset: %d\n", ret); ath10k_pci_qca6174_chip_reset()
1956 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n"); ath10k_pci_qca6174_chip_reset()
1961 static int ath10k_pci_chip_reset(struct ath10k *ar) ath10k_pci_chip_reset() argument
1963 if (QCA_REV_988X(ar)) ath10k_pci_chip_reset()
1964 return ath10k_pci_qca988x_chip_reset(ar); ath10k_pci_chip_reset()
1965 else if (QCA_REV_6174(ar)) ath10k_pci_chip_reset()
1966 return ath10k_pci_qca6174_chip_reset(ar); ath10k_pci_chip_reset()
1971 static int ath10k_pci_hif_power_up(struct ath10k *ar) ath10k_pci_hif_power_up() argument
1975 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n"); ath10k_pci_hif_power_up()
1977 ret = ath10k_pci_wake(ar); ath10k_pci_hif_power_up()
1979 ath10k_err(ar, "failed to wake up target: %d\n", ret); ath10k_pci_hif_power_up()
1993 ret = ath10k_pci_chip_reset(ar); ath10k_pci_hif_power_up()
1995 if (ath10k_pci_has_fw_crashed(ar)) { ath10k_pci_hif_power_up()
1996 ath10k_warn(ar, "firmware crashed during chip reset\n"); ath10k_pci_hif_power_up()
1997 ath10k_pci_fw_crashed_clear(ar); ath10k_pci_hif_power_up()
1998 ath10k_pci_fw_crashed_dump(ar); ath10k_pci_hif_power_up()
2001 ath10k_err(ar, "failed to reset chip: %d\n", ret); ath10k_pci_hif_power_up()
2005 ret = ath10k_pci_init_pipes(ar); ath10k_pci_hif_power_up()
2007 ath10k_err(ar, "failed to initialize CE: %d\n", ret); ath10k_pci_hif_power_up()
2011 ret = ath10k_pci_init_config(ar); ath10k_pci_hif_power_up()
2013 ath10k_err(ar, "failed to setup init config: %d\n", ret); ath10k_pci_hif_power_up()
2017 ret = ath10k_pci_wake_target_cpu(ar); ath10k_pci_hif_power_up()
2019 ath10k_err(ar, "could not wake up target CPU: %d\n", ret); ath10k_pci_hif_power_up()
2026 ath10k_pci_ce_deinit(ar); ath10k_pci_hif_power_up()
2029 ath10k_pci_sleep(ar); ath10k_pci_hif_power_up()
2033 static void ath10k_pci_hif_power_down(struct ath10k *ar) ath10k_pci_hif_power_down() argument
2035 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n"); ath10k_pci_hif_power_down()
2041 ath10k_pci_sleep(ar); ath10k_pci_hif_power_down()
2048 static int ath10k_pci_hif_suspend(struct ath10k *ar) ath10k_pci_hif_suspend() argument
2050 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_hif_suspend()
2066 static int ath10k_pci_hif_resume(struct ath10k *ar) ath10k_pci_hif_resume() argument
2068 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_hif_resume()
2120 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num); ath10k_pci_ce_tasklet()
2125 struct ath10k *ar = (struct ath10k *)data; ath10k_msi_err_tasklet() local
2127 if (!ath10k_pci_has_fw_crashed(ar)) { ath10k_msi_err_tasklet()
2128 ath10k_warn(ar, "received unsolicited fw crash interrupt\n"); ath10k_msi_err_tasklet()
2132 ath10k_pci_irq_disable(ar); ath10k_msi_err_tasklet()
2133 ath10k_pci_fw_crashed_clear(ar); ath10k_msi_err_tasklet()
2134 ath10k_pci_fw_crashed_dump(ar); ath10k_msi_err_tasklet()
2143 struct ath10k *ar = arg; ath10k_pci_per_engine_handler() local
2144 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_per_engine_handler()
2148 ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq, ath10k_pci_per_engine_handler()
2167 struct ath10k *ar = arg; ath10k_pci_msi_fw_handler() local
2168 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_msi_fw_handler()
2181 struct ath10k *ar = arg; ath10k_pci_interrupt_handler() local
2182 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_interrupt_handler()
2185 if (!ath10k_pci_irq_pending(ar)) ath10k_pci_interrupt_handler()
2188 ath10k_pci_disable_and_clear_legacy_irq(ar); ath10k_pci_interrupt_handler()
2198 struct ath10k *ar = (struct ath10k *)data; ath10k_pci_tasklet() local
2199 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_tasklet()
2201 if (ath10k_pci_has_fw_crashed(ar)) { ath10k_pci_tasklet()
2202 ath10k_pci_irq_disable(ar); ath10k_pci_tasklet()
2203 ath10k_pci_fw_crashed_clear(ar); ath10k_pci_tasklet()
2204 ath10k_pci_fw_crashed_dump(ar); ath10k_pci_tasklet()
2208 ath10k_ce_per_engine_service_any(ar); ath10k_pci_tasklet()
2212 ath10k_pci_enable_legacy_irq(ar); ath10k_pci_tasklet()
2215 static int ath10k_pci_request_irq_msix(struct ath10k *ar) ath10k_pci_request_irq_msix() argument
2217 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_request_irq_msix()
2222 IRQF_SHARED, "ath10k_pci", ar); ath10k_pci_request_irq_msix()
2224 ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n", ath10k_pci_request_irq_msix()
2232 IRQF_SHARED, "ath10k_pci", ar); ath10k_pci_request_irq_msix()
2234 ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n", ath10k_pci_request_irq_msix()
2238 free_irq(ar_pci->pdev->irq + i, ar); ath10k_pci_request_irq_msix()
2240 free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar); ath10k_pci_request_irq_msix()
2248 static int ath10k_pci_request_irq_msi(struct ath10k *ar) ath10k_pci_request_irq_msi() argument
2250 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_request_irq_msi()
2255 IRQF_SHARED, "ath10k_pci", ar); ath10k_pci_request_irq_msi()
2257 ath10k_warn(ar, "failed to request MSI irq %d: %d\n", ath10k_pci_request_irq_msi()
2265 static int ath10k_pci_request_irq_legacy(struct ath10k *ar) ath10k_pci_request_irq_legacy() argument
2267 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_request_irq_legacy()
2272 IRQF_SHARED, "ath10k_pci", ar); ath10k_pci_request_irq_legacy()
2274 ath10k_warn(ar, "failed to request legacy irq %d: %d\n", ath10k_pci_request_irq_legacy()
2282 static int ath10k_pci_request_irq(struct ath10k *ar) ath10k_pci_request_irq() argument
2284 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_request_irq()
2288 return ath10k_pci_request_irq_legacy(ar); ath10k_pci_request_irq()
2290 return ath10k_pci_request_irq_msi(ar); ath10k_pci_request_irq()
2292 return ath10k_pci_request_irq_msix(ar); ath10k_pci_request_irq()
2295 ath10k_warn(ar, "unknown irq configuration upon request\n"); ath10k_pci_request_irq()
2299 static void ath10k_pci_free_irq(struct ath10k *ar) ath10k_pci_free_irq() argument
2301 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_free_irq()
2307 free_irq(ar_pci->pdev->irq + i, ar); ath10k_pci_free_irq()
2310 static void ath10k_pci_init_irq_tasklets(struct ath10k *ar) ath10k_pci_init_irq_tasklets() argument
2312 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_init_irq_tasklets()
2315 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar); ath10k_pci_init_irq_tasklets()
2317 (unsigned long)ar); ath10k_pci_init_irq_tasklets()
2326 static int ath10k_pci_init_irq(struct ath10k *ar) ath10k_pci_init_irq() argument
2328 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_init_irq()
2331 ath10k_pci_init_irq_tasklets(ar); ath10k_pci_init_irq()
2334 ath10k_info(ar, "limiting irq mode to: %d\n", ath10k_pci_init_irq()
2368 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, ath10k_pci_init_irq()
2374 static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar) ath10k_pci_deinit_irq_legacy() argument
2376 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, ath10k_pci_deinit_irq_legacy()
2380 static int ath10k_pci_deinit_irq(struct ath10k *ar) ath10k_pci_deinit_irq() argument
2382 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_deinit_irq()
2386 ath10k_pci_deinit_irq_legacy(ar); ath10k_pci_deinit_irq()
2397 ath10k_warn(ar, "unknown irq configuration upon deinit\n"); ath10k_pci_deinit_irq()
2401 static int ath10k_pci_wait_for_target_init(struct ath10k *ar) ath10k_pci_wait_for_target_init() argument
2403 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_wait_for_target_init()
2407 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting for target to initialise\n"); ath10k_pci_wait_for_target_init()
2412 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); ath10k_pci_wait_for_target_init()
2414 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n", ath10k_pci_wait_for_target_init()
2430 ath10k_pci_enable_legacy_irq(ar); ath10k_pci_wait_for_target_init()
2435 ath10k_pci_disable_and_clear_legacy_irq(ar); ath10k_pci_wait_for_target_init()
2436 ath10k_pci_irq_msi_fw_mask(ar); ath10k_pci_wait_for_target_init()
2439 ath10k_err(ar, "failed to read device register, device is gone\n"); ath10k_pci_wait_for_target_init()
2444 ath10k_warn(ar, "device has crashed during init\n"); ath10k_pci_wait_for_target_init()
2449 ath10k_err(ar, "failed to receive initialized event from target: %08x\n", ath10k_pci_wait_for_target_init()
2454 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n"); ath10k_pci_wait_for_target_init()
2458 static int ath10k_pci_cold_reset(struct ath10k *ar) ath10k_pci_cold_reset() argument
2463 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n"); ath10k_pci_cold_reset()
2465 spin_lock_bh(&ar->data_lock); ath10k_pci_cold_reset()
2467 ar->stats.fw_cold_reset_counter++; ath10k_pci_cold_reset()
2469 spin_unlock_bh(&ar->data_lock); ath10k_pci_cold_reset()
2472 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS); ath10k_pci_cold_reset()
2474 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val); ath10k_pci_cold_reset()
2477 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) & ath10k_pci_cold_reset()
2485 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val); ath10k_pci_cold_reset()
2488 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) & ath10k_pci_cold_reset()
2494 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n"); ath10k_pci_cold_reset()
2499 static int ath10k_pci_claim(struct ath10k *ar) ath10k_pci_claim() argument
2501 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_claim()
2506 pci_set_drvdata(pdev, ar); ath10k_pci_claim()
2510 ath10k_err(ar, "failed to enable pci device: %d\n", ret); ath10k_pci_claim()
2516 ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM, ath10k_pci_claim()
2524 ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret); ath10k_pci_claim()
2530 ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n", ath10k_pci_claim()
2544 ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM); ath10k_pci_claim()
2549 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem); ath10k_pci_claim()
2564 static void ath10k_pci_release(struct ath10k *ar) ath10k_pci_release() argument
2566 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_release()
2596 struct ath10k *ar; ath10k_pci_probe() local
2613 ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI, ath10k_pci_probe()
2615 if (!ar) { ath10k_pci_probe()
2620 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n"); ath10k_pci_probe()
2622 ar_pci = ath10k_pci_priv(ar); ath10k_pci_probe()
2625 ar_pci->ar = ar; ath10k_pci_probe()
2629 (unsigned long)ar); ath10k_pci_probe()
2631 ret = ath10k_pci_claim(ar); ath10k_pci_probe()
2633 ath10k_err(ar, "failed to claim device: %d\n", ret); ath10k_pci_probe()
2637 ret = ath10k_pci_wake(ar); ath10k_pci_probe()
2639 ath10k_err(ar, "failed to wake up: %d\n", ret); ath10k_pci_probe()
2643 ret = ath10k_pci_alloc_pipes(ar); ath10k_pci_probe()
2645 ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", ath10k_pci_probe()
2650 ath10k_pci_ce_deinit(ar); ath10k_pci_probe()
2651 ath10k_pci_irq_disable(ar); ath10k_pci_probe()
2653 ret = ath10k_pci_init_irq(ar); ath10k_pci_probe()
2655 ath10k_err(ar, "failed to init irqs: %d\n", ret); ath10k_pci_probe()
2659 ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n", ath10k_pci_probe()
2660 ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs, ath10k_pci_probe()
2663 ret = ath10k_pci_request_irq(ar); ath10k_pci_probe()
2665 ath10k_warn(ar, "failed to request irqs: %d\n", ret); ath10k_pci_probe()
2669 ret = ath10k_pci_chip_reset(ar); ath10k_pci_probe()
2671 ath10k_err(ar, "failed to reset chip: %d\n", ret); ath10k_pci_probe()
2675 chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); ath10k_pci_probe()
2677 ath10k_err(ar, "failed to get chip id\n"); ath10k_pci_probe()
2682 ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n", ath10k_pci_probe()
2687 ath10k_pci_sleep(ar); ath10k_pci_probe()
2689 ret = ath10k_core_register(ar, chip_id); ath10k_pci_probe()
2691 ath10k_err(ar, "failed to register driver core: %d\n", ret); ath10k_pci_probe()
2698 ath10k_pci_free_irq(ar); ath10k_pci_probe()
2699 ath10k_pci_kill_tasklet(ar); ath10k_pci_probe()
2702 ath10k_pci_deinit_irq(ar); ath10k_pci_probe()
2705 ath10k_pci_free_pipes(ar); ath10k_pci_probe()
2708 ath10k_pci_sleep(ar); ath10k_pci_probe()
2711 ath10k_pci_release(ar); ath10k_pci_probe()
2714 ath10k_core_destroy(ar); ath10k_pci_probe()
2721 struct ath10k *ar = pci_get_drvdata(pdev); ath10k_pci_remove() local
2724 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n"); ath10k_pci_remove()
2726 if (!ar) ath10k_pci_remove()
2729 ar_pci = ath10k_pci_priv(ar); ath10k_pci_remove()
2734 ath10k_core_unregister(ar); ath10k_pci_remove()
2735 ath10k_pci_free_irq(ar); ath10k_pci_remove()
2736 ath10k_pci_kill_tasklet(ar); ath10k_pci_remove()
2737 ath10k_pci_deinit_irq(ar); ath10k_pci_remove()
2738 ath10k_pci_ce_deinit(ar); ath10k_pci_remove()
2739 ath10k_pci_free_pipes(ar); ath10k_pci_remove()
2740 ath10k_pci_release(ar); ath10k_pci_remove()
2741 ath10k_core_destroy(ar); ath10k_pci_remove()
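
The error handling in ath10k_pci_probe() above is the standard goto ladder: each label releases exactly what was acquired before the failure, in reverse order. A condensed sketch (the subset of steps and the label names are illustrative):

static int example_probe(struct ath10k *ar)
{
	int ret;

	ret = ath10k_pci_claim(ar);		/* BAR + iomap */
	if (ret)
		return ret;

	ret = ath10k_pci_init_irq(ar);		/* legacy/MSI/MSI-X */
	if (ret)
		goto err_release;

	ret = ath10k_pci_request_irq(ar);
	if (ret)
		goto err_deinit_irq;

	return 0;

err_deinit_irq:
	ath10k_pci_deinit_irq(ar);
err_release:
	ath10k_pci_release(ar);
	return ret;
}
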
H A Dtrace.h52 TP_PROTO(struct ath10k *ar, struct va_format *vaf),
53 TP_ARGS(ar, vaf),
55 __string(device, dev_name(ar->dev))
56 __string(driver, dev_driver_string(ar->dev))
60 __assign_str(device, dev_name(ar->dev));
61 __assign_str(driver, dev_driver_string(ar->dev));
76 TP_PROTO(struct ath10k *ar, struct va_format *vaf),
77 TP_ARGS(ar, vaf)
81 TP_PROTO(struct ath10k *ar, struct va_format *vaf),
82 TP_ARGS(ar, vaf)
86 TP_PROTO(struct ath10k *ar, struct va_format *vaf),
87 TP_ARGS(ar, vaf)
91 TP_PROTO(struct ath10k *ar, unsigned int level, struct va_format *vaf),
92 TP_ARGS(ar, level, vaf),
94 __string(device, dev_name(ar->dev))
95 __string(driver, dev_driver_string(ar->dev))
100 __assign_str(device, dev_name(ar->dev));
101 __assign_str(driver, dev_driver_string(ar->dev));
117 TP_PROTO(struct ath10k *ar, const char *msg, const char *prefix,
120 TP_ARGS(ar, msg, prefix, buf, buf_len),
123 __string(device, dev_name(ar->dev))
124 __string(driver, dev_driver_string(ar->dev))
132 __assign_str(device, dev_name(ar->dev));
133 __assign_str(driver, dev_driver_string(ar->dev));
150 TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len,
153 TP_ARGS(ar, id, buf, buf_len, ret),
156 __string(device, dev_name(ar->dev))
157 __string(driver, dev_driver_string(ar->dev))
165 __assign_str(device, dev_name(ar->dev));
166 __assign_str(driver, dev_driver_string(ar->dev));
184 TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len),
186 TP_ARGS(ar, id, buf, buf_len),
189 __string(device, dev_name(ar->dev))
190 __string(driver, dev_driver_string(ar->dev))
197 __assign_str(device, dev_name(ar->dev));
198 __assign_str(driver, dev_driver_string(ar->dev));
214 TP_PROTO(struct ath10k *ar, const void *buf, size_t buf_len),
216 TP_ARGS(ar, buf, buf_len),
219 __string(device, dev_name(ar->dev))
220 __string(driver, dev_driver_string(ar->dev))
226 __assign_str(device, dev_name(ar->dev));
227 __assign_str(driver, dev_driver_string(ar->dev));
241 TP_PROTO(struct ath10k *ar, const void *buf, size_t buf_len),
243 TP_ARGS(ar, buf, buf_len),
246 __string(device, dev_name(ar->dev))
247 __string(driver, dev_driver_string(ar->dev))
253 __assign_str(device, dev_name(ar->dev));
254 __assign_str(driver, dev_driver_string(ar->dev));
268 TP_PROTO(struct ath10k *ar, const void *buf, u16 buf_len),
270 TP_ARGS(ar, buf, buf_len),
273 __string(device, dev_name(ar->dev))
274 __string(driver, dev_driver_string(ar->dev))
280 __assign_str(device, dev_name(ar->dev));
281 __assign_str(driver, dev_driver_string(ar->dev));
295 TP_PROTO(struct ath10k *ar, u16 msdu_id, u16 msdu_len,
298 TP_ARGS(ar, msdu_id, msdu_len, vdev_id, tid),
301 __string(device, dev_name(ar->dev))
302 __string(driver, dev_driver_string(ar->dev))
310 __assign_str(device, dev_name(ar->dev));
311 __assign_str(driver, dev_driver_string(ar->dev));
330 TP_PROTO(struct ath10k *ar, u16 msdu_id),
332 TP_ARGS(ar, msdu_id),
335 __string(device, dev_name(ar->dev))
336 __string(driver, dev_driver_string(ar->dev))
341 __assign_str(device, dev_name(ar->dev));
342 __assign_str(driver, dev_driver_string(ar->dev));
355 TP_PROTO(struct ath10k *ar, const void *data, size_t len),
357 TP_ARGS(ar, data, len),
360 __string(device, dev_name(ar->dev))
361 __string(driver, dev_driver_string(ar->dev))
367 __assign_str(device, dev_name(ar->dev));
368 __assign_str(driver, dev_driver_string(ar->dev));
382 TP_PROTO(struct ath10k *ar, const void *data, size_t len),
384 TP_ARGS(ar, data, len),
387 __string(device, dev_name(ar->dev))
388 __string(driver, dev_driver_string(ar->dev))
394 __assign_str(device, dev_name(ar->dev));
395 __assign_str(driver, dev_driver_string(ar->dev));
410 TP_PROTO(struct ath10k *ar, const void *data, size_t len),
411 TP_ARGS(ar, data, len)
415 TP_PROTO(struct ath10k *ar, const void *data, size_t len),
416 TP_ARGS(ar, data, len)
420 TP_PROTO(struct ath10k *ar, const void *data, size_t len),
421 TP_ARGS(ar, data, len)
425 TP_PROTO(struct ath10k *ar, const void *data, size_t len),
426 TP_ARGS(ar, data, len)
430 TP_PROTO(struct ath10k *ar, const void *data, size_t len),
432 TP_ARGS(ar, data, len),
435 __string(device, dev_name(ar->dev))
436 __string(driver, dev_driver_string(ar->dev))
442 __assign_str(device, dev_name(ar->dev));
443 __assign_str(driver, dev_driver_string(ar->dev));
457 TP_PROTO(struct ath10k *ar,
464 TP_ARGS(ar, type, timestamp, code, len, data),
467 __string(device, dev_name(ar->dev))
468 __string(driver, dev_driver_string(ar->dev))
477 __assign_str(device, dev_name(ar->dev));
478 __assign_str(driver, dev_driver_string(ar->dev));
498 TP_PROTO(struct ath10k *ar, const void *data, size_t len),
500 TP_ARGS(ar, data, len),
503 __string(device, dev_name(ar->dev))
504 __string(driver, dev_driver_string(ar->dev))
510 __assign_str(device, dev_name(ar->dev));
511 __assign_str(driver, dev_driver_string(ar->dev));
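
Every trace.h hit above comes from a TRACE_EVENT() block of the same shape. A minimal sketch with a hypothetical event name; the macros and the __string/__assign_str idiom are the standard kernel tracepoint API:

TRACE_EVENT(ath10k_example,
	TP_PROTO(struct ath10k *ar, size_t len),
	TP_ARGS(ar, len),

	TP_STRUCT__entry(
		__string(device, dev_name(ar->dev))	/* variable-size string */
		__field(size_t, len)
	),

	TP_fast_assign(
		__assign_str(device, dev_name(ar->dev));
		__entry->len = len;
	),

	TP_printk("%s len %zu", __get_str(device), __entry->len)
);
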
H A Dbmi.c23 void ath10k_bmi_start(struct ath10k *ar) ath10k_bmi_start() argument
25 ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n"); ath10k_bmi_start()
27 ar->bmi.done_sent = false; ath10k_bmi_start()
30 int ath10k_bmi_done(struct ath10k *ar) ath10k_bmi_done() argument
36 ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n"); ath10k_bmi_done()
38 if (ar->bmi.done_sent) { ath10k_bmi_done()
39 ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n"); ath10k_bmi_done()
43 ar->bmi.done_sent = true; ath10k_bmi_done()
46 ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL); ath10k_bmi_done()
48 ath10k_warn(ar, "unable to write to the device: %d\n", ret); ath10k_bmi_done()
55 int ath10k_bmi_get_target_info(struct ath10k *ar, ath10k_bmi_get_target_info() argument
64 ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n"); ath10k_bmi_get_target_info()
66 if (ar->bmi.done_sent) { ath10k_bmi_get_target_info()
67 ath10k_warn(ar, "BMI Get Target Info Command disallowed\n"); ath10k_bmi_get_target_info()
73 ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen); ath10k_bmi_get_target_info()
75 ath10k_warn(ar, "unable to get target info from device\n"); ath10k_bmi_get_target_info()
80 ath10k_warn(ar, "invalid get_target_info response length (%d)\n", ath10k_bmi_get_target_info()
91 int ath10k_bmi_read_memory(struct ath10k *ar, ath10k_bmi_read_memory() argument
100 ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n", ath10k_bmi_read_memory()
103 if (ar->bmi.done_sent) { ath10k_bmi_read_memory()
104 ath10k_warn(ar, "command disallowed\n"); ath10k_bmi_read_memory()
115 ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, ath10k_bmi_read_memory()
118 ath10k_warn(ar, "unable to read from the device (%d)\n", ath10k_bmi_read_memory()
132 int ath10k_bmi_write_memory(struct ath10k *ar, ath10k_bmi_write_memory() argument
140 ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n", ath10k_bmi_write_memory()
143 if (ar->bmi.done_sent) { ath10k_bmi_write_memory()
144 ath10k_warn(ar, "command disallowed\n"); ath10k_bmi_write_memory()
159 ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen, ath10k_bmi_write_memory()
162 ath10k_warn(ar, "unable to write to the device (%d)\n", ath10k_bmi_write_memory()
178 int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result) ath10k_bmi_execute() argument
186 ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n", ath10k_bmi_execute()
189 if (ar->bmi.done_sent) { ath10k_bmi_execute()
190 ath10k_warn(ar, "command disallowed\n"); ath10k_bmi_execute()
198 ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen); ath10k_bmi_execute()
200 ath10k_warn(ar, "unable to read from the device\n"); ath10k_bmi_execute()
205 ath10k_warn(ar, "invalid execute response length (%d)\n", ath10k_bmi_execute()
212 ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result); ath10k_bmi_execute()
217 int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length) ath10k_bmi_lz_data() argument
224 ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n", ath10k_bmi_lz_data()
227 if (ar->bmi.done_sent) { ath10k_bmi_lz_data()
228 ath10k_warn(ar, "command disallowed\n"); ath10k_bmi_lz_data()
241 ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen, ath10k_bmi_lz_data()
244 ath10k_warn(ar, "unable to write to the device\n"); ath10k_bmi_lz_data()
255 int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address) ath10k_bmi_lz_stream_start() argument
261 ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n", ath10k_bmi_lz_stream_start()
264 if (ar->bmi.done_sent) { ath10k_bmi_lz_stream_start()
265 ath10k_warn(ar, "command disallowed\n"); ath10k_bmi_lz_stream_start()
272 ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL); ath10k_bmi_lz_stream_start()
274 ath10k_warn(ar, "unable to start LZ stream to the device\n"); ath10k_bmi_lz_stream_start()
281 int ath10k_bmi_fast_download(struct ath10k *ar, ath10k_bmi_fast_download() argument
289 ath10k_dbg(ar, ATH10K_DBG_BMI, ath10k_bmi_fast_download()
293 ret = ath10k_bmi_lz_stream_start(ar, address); ath10k_bmi_fast_download()
301 ret = ath10k_bmi_lz_data(ar, buffer, head_len); ath10k_bmi_fast_download()
306 ret = ath10k_bmi_lz_data(ar, trailer, 4); ath10k_bmi_fast_download()
315 ret = ath10k_bmi_lz_stream_start(ar, 0x00); ath10k_bmi_fast_download()
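
Every bmi.c helper above follows the same round trip: guard on done_sent, build a little-endian command, exchange it over HIF, and length-check the response. A sketch with a placeholder opcode and layout (only ath10k_hif_exchange_bmi_msg() and the done_sent guard come from the hits above):

/* Hypothetical command layout; the real ones live in bmi.h. */
struct example_bmi_cmd {
	__le32 id;
	__le32 param;
} __packed;

static int example_bmi_roundtrip(struct ath10k *ar, u32 param, u32 *out)
{
	struct example_bmi_cmd cmd;
	__le32 resp;
	u32 resplen = sizeof(resp);
	int ret;

	if (ar->bmi.done_sent)		/* BMI closes after the done cmd */
		return -EBUSY;

	cmd.id = __cpu_to_le32(1);	/* placeholder opcode */
	cmd.param = __cpu_to_le32(param);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, sizeof(cmd),
					  &resp, &resplen);
	if (ret)
		return ret;

	if (resplen < sizeof(resp))	/* short response: malformed */
		return -EIO;

	*out = __le32_to_cpu(resp);
	return 0;
}
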
H A Ddebug.c110 void ath10k_info(struct ath10k *ar, const char *fmt, ...) ath10k_info() argument
119 dev_info(ar->dev, "%pV", &vaf); ath10k_info()
120 trace_ath10k_log_info(ar, &vaf); ath10k_info()
125 void ath10k_print_driver_info(struct ath10k *ar) ath10k_print_driver_info() argument
127 ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n", ath10k_print_driver_info()
128 ar->hw_params.name, ath10k_print_driver_info()
129 ar->target_version, ath10k_print_driver_info()
130 ar->chip_id, ath10k_print_driver_info()
131 ar->hw->wiphy->fw_version, ath10k_print_driver_info()
132 ar->fw_api, ath10k_print_driver_info()
133 ar->htt.target_version_major, ath10k_print_driver_info()
134 ar->htt.target_version_minor, ath10k_print_driver_info()
135 ar->wmi.op_version, ath10k_print_driver_info()
136 ath10k_cal_mode_str(ar->cal_mode), ath10k_print_driver_info()
137 ar->max_num_stations); ath10k_print_driver_info()
138 ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n", ath10k_print_driver_info()
147 void ath10k_err(struct ath10k *ar, const char *fmt, ...) ath10k_err() argument
156 dev_err(ar->dev, "%pV", &vaf); ath10k_err()
157 trace_ath10k_log_err(ar, &vaf); ath10k_err()
162 void ath10k_warn(struct ath10k *ar, const char *fmt, ...) ath10k_warn() argument
171 dev_warn_ratelimited(ar->dev, "%pV", &vaf); ath10k_warn()
172 trace_ath10k_log_warn(ar, &vaf); ath10k_warn()
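
ath10k_info(), ath10k_warn() and ath10k_err() above share one vararg-wrapping trick: the arguments are captured once in a struct va_format and handed to both printk (via %pV) and the tracepoint. A sketch of that shape:

void example_log_info(struct ath10k *ar, const char *fmt, ...)
{
	struct va_format vaf = {
		.fmt = fmt,
	};
	va_list args;

	va_start(args, fmt);
	vaf.va = &args;

	dev_info(ar->dev, "%pV", &vaf);		/* print once */
	trace_ath10k_log_info(ar, &vaf);	/* trace the same vaf */

	va_end(args);
}
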
184 struct ath10k *ar = file->private_data; ath10k_read_wmi_services() local
196 mutex_lock(&ar->conf_mutex); ath10k_read_wmi_services()
201 spin_lock_bh(&ar->data_lock); ath10k_read_wmi_services()
203 enabled = test_bit(i, ar->wmi.svc_map); ath10k_read_wmi_services()
219 spin_unlock_bh(&ar->data_lock); ath10k_read_wmi_services()
223 mutex_unlock(&ar->conf_mutex); ath10k_read_wmi_services()
266 static void ath10k_debug_fw_stats_reset(struct ath10k *ar) ath10k_debug_fw_stats_reset() argument
268 spin_lock_bh(&ar->data_lock); ath10k_debug_fw_stats_reset()
269 ar->debug.fw_stats_done = false; ath10k_debug_fw_stats_reset()
270 ath10k_debug_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs); ath10k_debug_fw_stats_reset()
271 ath10k_debug_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs); ath10k_debug_fw_stats_reset()
272 ath10k_debug_fw_stats_peers_free(&ar->debug.fw_stats.peers); ath10k_debug_fw_stats_reset()
273 spin_unlock_bh(&ar->data_lock); ath10k_debug_fw_stats_reset()
298 void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) ath10k_debug_fw_stats_process() argument
310 spin_lock_bh(&ar->data_lock); ath10k_debug_fw_stats_process()
311 ret = ath10k_wmi_pull_fw_stats(ar, skb, &stats); ath10k_debug_fw_stats_process()
313 ath10k_warn(ar, "failed to pull fw stats: %d\n", ret); ath10k_debug_fw_stats_process()
329 if (ar->debug.fw_stats_done) { ath10k_debug_fw_stats_process()
330 ath10k_warn(ar, "received unsolicited stats update event\n"); ath10k_debug_fw_stats_process()
334 num_peers = ath10k_debug_fw_stats_num_peers(&ar->debug.fw_stats.peers); ath10k_debug_fw_stats_process()
335 num_vdevs = ath10k_debug_fw_stats_num_vdevs(&ar->debug.fw_stats.vdevs); ath10k_debug_fw_stats_process()
336 is_start = (list_empty(&ar->debug.fw_stats.pdevs) && ath10k_debug_fw_stats_process()
338 is_end = (!list_empty(&ar->debug.fw_stats.pdevs) && ath10k_debug_fw_stats_process()
342 list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs); ath10k_debug_fw_stats_process()
345 ar->debug.fw_stats_done = true; ath10k_debug_fw_stats_process()
347 is_started = !list_empty(&ar->debug.fw_stats.pdevs); ath10k_debug_fw_stats_process()
354 ath10k_warn(ar, "dropping fw peer stats\n"); ath10k_debug_fw_stats_process()
359 ath10k_warn(ar, "dropping fw vdev stats\n"); ath10k_debug_fw_stats_process()
363 list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers); ath10k_debug_fw_stats_process()
364 list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs); ath10k_debug_fw_stats_process()
367 complete(&ar->debug.fw_stats_complete); ath10k_debug_fw_stats_process()
378 spin_unlock_bh(&ar->data_lock); ath10k_debug_fw_stats_process()
381 static int ath10k_debug_fw_stats_request(struct ath10k *ar) ath10k_debug_fw_stats_request() argument
386 lockdep_assert_held(&ar->conf_mutex); ath10k_debug_fw_stats_request()
390 ath10k_debug_fw_stats_reset(ar); ath10k_debug_fw_stats_request()
396 reinit_completion(&ar->debug.fw_stats_complete); ath10k_debug_fw_stats_request()
398 ret = ath10k_wmi_request_stats(ar, ath10k_debug_fw_stats_request()
403 ath10k_warn(ar, "could not request stats (%d)\n", ret); ath10k_debug_fw_stats_request()
407 ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete, ath10k_debug_fw_stats_request()
412 spin_lock_bh(&ar->data_lock); ath10k_debug_fw_stats_request()
413 if (ar->debug.fw_stats_done) { ath10k_debug_fw_stats_request()
414 spin_unlock_bh(&ar->data_lock); ath10k_debug_fw_stats_request()
417 spin_unlock_bh(&ar->data_lock); ath10k_debug_fw_stats_request()
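
The request path above pairs with complete() in ath10k_debug_fw_stats_process() through a completion. A sketch of just that handshake; the stats-mask argument and the 1 s timeout are assumptions:

static int example_request_fw_stats(struct ath10k *ar)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	reinit_completion(&ar->debug.fw_stats_complete);

	ret = ath10k_wmi_request_stats(ar, WMI_STAT_PDEV); /* assumed mask */
	if (ret)
		return ret;

	/* woken by complete() in the WMI stats event handler */
	if (!wait_for_completion_timeout(&ar->debug.fw_stats_complete,
					 msecs_to_jiffies(1000)))
		return -ETIMEDOUT;

	return 0;
}
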
426 static void ath10k_fw_stats_fill(struct ath10k *ar, ath10k_fw_stats_fill() argument
439 spin_lock_bh(&ar->data_lock); ath10k_fw_stats_fill()
444 ath10k_warn(ar, "failed to get pdev stats\n"); ath10k_fw_stats_fill()
649 spin_unlock_bh(&ar->data_lock); ath10k_fw_stats_fill()
659 struct ath10k *ar = inode->i_private; ath10k_fw_stats_open() local
663 mutex_lock(&ar->conf_mutex); ath10k_fw_stats_open()
665 if (ar->state != ATH10K_STATE_ON) { ath10k_fw_stats_open()
676 ret = ath10k_debug_fw_stats_request(ar); ath10k_fw_stats_open()
678 ath10k_warn(ar, "failed to request fw stats: %d\n", ret); ath10k_fw_stats_open()
682 ath10k_fw_stats_fill(ar, &ar->debug.fw_stats, buf); ath10k_fw_stats_open()
685 mutex_unlock(&ar->conf_mutex); ath10k_fw_stats_open()
692 mutex_unlock(&ar->conf_mutex); ath10k_fw_stats_open()
724 struct ath10k *ar = file->private_data; ath10k_debug_fw_reset_stats_read() local
733 spin_lock_bh(&ar->data_lock); ath10k_debug_fw_reset_stats_read()
737 "fw_crash_counter\t\t%d\n", ar->stats.fw_crash_counter); ath10k_debug_fw_reset_stats_read()
740 ar->stats.fw_warm_reset_counter); ath10k_debug_fw_reset_stats_read()
743 ar->stats.fw_cold_reset_counter); ath10k_debug_fw_reset_stats_read()
745 spin_unlock_bh(&ar->data_lock); ath10k_debug_fw_reset_stats_read()
762 static int ath10k_debug_fw_assert(struct ath10k *ar) ath10k_debug_fw_assert() argument
767 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + 16); ath10k_debug_fw_assert()
777 return ath10k_wmi_cmd_send(ar, skb, ath10k_debug_fw_assert()
778 ar->wmi.cmd->vdev_install_key_cmdid); ath10k_debug_fw_assert()
806 struct ath10k *ar = file->private_data; ath10k_write_simulate_fw_crash() local
810 mutex_lock(&ar->conf_mutex); ath10k_write_simulate_fw_crash()
817 if (ar->state != ATH10K_STATE_ON && ath10k_write_simulate_fw_crash()
818 ar->state != ATH10K_STATE_RESTARTED) { ath10k_write_simulate_fw_crash()
830 ath10k_info(ar, "simulating soft firmware crash\n"); ath10k_write_simulate_fw_crash()
831 ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0); ath10k_write_simulate_fw_crash()
833 ath10k_info(ar, "simulating hard firmware crash\n"); ath10k_write_simulate_fw_crash()
837 ret = ath10k_wmi_vdev_set_param(ar, 0x7fff, ath10k_write_simulate_fw_crash()
838 ar->wmi.vdev_param->rts_threshold, ath10k_write_simulate_fw_crash()
841 ath10k_info(ar, "simulating firmware assert crash\n"); ath10k_write_simulate_fw_crash()
842 ret = ath10k_debug_fw_assert(ar); ath10k_write_simulate_fw_crash()
844 ath10k_info(ar, "user requested hw restart\n"); ath10k_write_simulate_fw_crash()
845 queue_work(ar->workqueue, &ar->restart_work); ath10k_write_simulate_fw_crash()
853 ath10k_warn(ar, "failed to simulate firmware crash: %d\n", ret); ath10k_write_simulate_fw_crash()
860 mutex_unlock(&ar->conf_mutex); ath10k_write_simulate_fw_crash()
875 struct ath10k *ar = file->private_data; ath10k_read_chip_id() local
879 len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id); ath10k_read_chip_id()
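
The chip_id file above uses the smallest debugfs read idiom: format into a stack buffer, then let the VFS helper do the offset and count bookkeeping. A sketch that mirrors it almost line for line:

static ssize_t example_read_chip_id(struct file *file,
				    char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct ath10k *ar = file->private_data;
	char buf[32];
	unsigned int len;

	len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id);

	/* handles partial reads and EOF via *ppos */
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
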
892 ath10k_debug_get_new_fw_crash_data(struct ath10k *ar) ath10k_debug_get_new_fw_crash_data() argument
894 struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data; ath10k_debug_get_new_fw_crash_data()
896 lockdep_assert_held(&ar->data_lock); ath10k_debug_get_new_fw_crash_data()
906 static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar) ath10k_build_dump_file() argument
908 struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data; ath10k_build_dump_file()
927 spin_lock_bh(&ar->data_lock); ath10k_build_dump_file()
930 spin_unlock_bh(&ar->data_lock); ath10k_build_dump_file()
943 dump_data->chip_id = cpu_to_le32(ar->chip_id); ath10k_build_dump_file()
945 dump_data->target_version = cpu_to_le32(ar->target_version); ath10k_build_dump_file()
946 dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major); ath10k_build_dump_file()
947 dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor); ath10k_build_dump_file()
948 dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release); ath10k_build_dump_file()
949 dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build); ath10k_build_dump_file()
950 dump_data->phy_capability = cpu_to_le32(ar->phy_capability); ath10k_build_dump_file()
951 dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power); ath10k_build_dump_file()
952 dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power); ath10k_build_dump_file()
953 dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info); ath10k_build_dump_file()
954 dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info); ath10k_build_dump_file()
955 dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains); ath10k_build_dump_file()
957 strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version, ath10k_build_dump_file()
975 ar->debug.fw_crash_data->crashed_since_read = false; ath10k_build_dump_file()
977 spin_unlock_bh(&ar->data_lock); ath10k_build_dump_file()
984 struct ath10k *ar = inode->i_private; ath10k_fw_crash_dump_open() local
987 dump = ath10k_build_dump_file(ar); ath10k_fw_crash_dump_open()
1027 struct ath10k *ar = file->private_data; ath10k_reg_addr_read() local
1032 mutex_lock(&ar->conf_mutex); ath10k_reg_addr_read()
1033 reg_addr = ar->debug.reg_addr; ath10k_reg_addr_read()
1034 mutex_unlock(&ar->conf_mutex); ath10k_reg_addr_read()
1045 struct ath10k *ar = file->private_data; ath10k_reg_addr_write() local
1056 mutex_lock(&ar->conf_mutex); ath10k_reg_addr_write()
1057 ar->debug.reg_addr = reg_addr; ath10k_reg_addr_write()
1058 mutex_unlock(&ar->conf_mutex); ath10k_reg_addr_write()
1075 struct ath10k *ar = file->private_data; ath10k_reg_value_read() local
1081 mutex_lock(&ar->conf_mutex); ath10k_reg_value_read()
1083 if (ar->state != ATH10K_STATE_ON && ath10k_reg_value_read()
1084 ar->state != ATH10K_STATE_UTF) { ath10k_reg_value_read()
1089 reg_addr = ar->debug.reg_addr; ath10k_reg_value_read()
1091 reg_val = ath10k_hif_read32(ar, reg_addr); ath10k_reg_value_read()
1097 mutex_unlock(&ar->conf_mutex); ath10k_reg_value_read()
1106 struct ath10k *ar = file->private_data; ath10k_reg_value_write() local
1110 mutex_lock(&ar->conf_mutex); ath10k_reg_value_write()
1112 if (ar->state != ATH10K_STATE_ON && ath10k_reg_value_write()
1113 ar->state != ATH10K_STATE_UTF) { ath10k_reg_value_write()
1118 reg_addr = ar->debug.reg_addr; ath10k_reg_value_write()
1124 ath10k_hif_write32(ar, reg_addr, reg_val); ath10k_reg_value_write()
1129 mutex_unlock(&ar->conf_mutex); ath10k_reg_value_write()
1146 struct ath10k *ar = file->private_data; ath10k_mem_value_read() local
1156 mutex_lock(&ar->conf_mutex); ath10k_mem_value_read()
1164 if (ar->state != ATH10K_STATE_ON && ath10k_mem_value_read()
1165 ar->state != ATH10K_STATE_UTF) { ath10k_mem_value_read()
1170 ret = ath10k_hif_diag_read(ar, *ppos, buf, count); ath10k_mem_value_read()
1172 ath10k_warn(ar, "failed to read address 0x%08x via diagnose window fnrom debugfs: %d\n", ath10k_mem_value_read()
1189 mutex_unlock(&ar->conf_mutex); ath10k_mem_value_read()
1198 struct ath10k *ar = file->private_data; ath10k_mem_value_write() local
1208 mutex_lock(&ar->conf_mutex); ath10k_mem_value_write()
1216 if (ar->state != ATH10K_STATE_ON && ath10k_mem_value_write()
1217 ar->state != ATH10K_STATE_UTF) { ath10k_mem_value_write()
1228 ret = ath10k_hif_diag_write(ar, *ppos, buf, count); ath10k_mem_value_write()
1230 ath10k_warn(ar, "failed to write address 0x%08x via diagnose window from debugfs: %d\n", ath10k_mem_value_write()
1240 mutex_unlock(&ar->conf_mutex); ath10k_mem_value_write()
1253 static int ath10k_debug_htt_stats_req(struct ath10k *ar) ath10k_debug_htt_stats_req() argument
1258 lockdep_assert_held(&ar->conf_mutex); ath10k_debug_htt_stats_req()
1260 if (ar->debug.htt_stats_mask == 0) ath10k_debug_htt_stats_req()
1264 if (ar->state != ATH10K_STATE_ON) ath10k_debug_htt_stats_req()
1269 ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask, ath10k_debug_htt_stats_req()
1272 ath10k_warn(ar, "failed to send htt stats request: %d\n", ret); ath10k_debug_htt_stats_req()
1276 queue_delayed_work(ar->workqueue, &ar->debug.htt_stats_dwork, ath10k_debug_htt_stats_req()
1284 struct ath10k *ar = container_of(work, struct ath10k, ath10k_debug_htt_stats_dwork() local
1287 mutex_lock(&ar->conf_mutex); ath10k_debug_htt_stats_dwork()
1289 ath10k_debug_htt_stats_req(ar); ath10k_debug_htt_stats_dwork()
1291 mutex_unlock(&ar->conf_mutex); ath10k_debug_htt_stats_dwork()
1298 struct ath10k *ar = file->private_data; ath10k_read_htt_stats_mask() local
1302 len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask); ath10k_read_htt_stats_mask()
1311 struct ath10k *ar = file->private_data; ath10k_write_htt_stats_mask() local
1323 mutex_lock(&ar->conf_mutex); ath10k_write_htt_stats_mask()
1325 ar->debug.htt_stats_mask = mask; ath10k_write_htt_stats_mask()
1327 ret = ath10k_debug_htt_stats_req(ar); ath10k_write_htt_stats_mask()
1334 mutex_unlock(&ar->conf_mutex); ath10k_write_htt_stats_mask()
1351 struct ath10k *ar = file->private_data; ath10k_read_htt_max_amsdu_ampdu() local
1356 mutex_lock(&ar->conf_mutex); ath10k_read_htt_max_amsdu_ampdu()
1358 if (ar->debug.htt_max_amsdu) ath10k_read_htt_max_amsdu_ampdu()
1359 amsdu = ar->debug.htt_max_amsdu; ath10k_read_htt_max_amsdu_ampdu()
1361 if (ar->debug.htt_max_ampdu) ath10k_read_htt_max_amsdu_ampdu()
1362 ampdu = ar->debug.htt_max_ampdu; ath10k_read_htt_max_amsdu_ampdu()
1364 mutex_unlock(&ar->conf_mutex); ath10k_read_htt_max_amsdu_ampdu()
1375 struct ath10k *ar = file->private_data; ath10k_write_htt_max_amsdu_ampdu() local
1390 mutex_lock(&ar->conf_mutex); ath10k_write_htt_max_amsdu_ampdu()
1392 res = ath10k_htt_h2t_aggr_cfg_msg(&ar->htt, ampdu, amsdu); ath10k_write_htt_max_amsdu_ampdu()
1397 ar->debug.htt_max_amsdu = amsdu; ath10k_write_htt_max_amsdu_ampdu()
1398 ar->debug.htt_max_ampdu = ampdu; ath10k_write_htt_max_amsdu_ampdu()
1401 mutex_unlock(&ar->conf_mutex); ath10k_write_htt_max_amsdu_ampdu()
1417 struct ath10k *ar = file->private_data; ath10k_read_fw_dbglog() local
1422 ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level); ath10k_read_fw_dbglog()
1431 struct ath10k *ar = file->private_data; ath10k_write_fw_dbglog() local
1450 mutex_lock(&ar->conf_mutex); ath10k_write_fw_dbglog()
1452 ar->debug.fw_dbglog_mask = mask; ath10k_write_fw_dbglog()
1453 ar->debug.fw_dbglog_level = log_level; ath10k_write_fw_dbglog()
1455 if (ar->state == ATH10K_STATE_ON) { ath10k_write_fw_dbglog()
1456 ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask, ath10k_write_fw_dbglog()
1457 ar->debug.fw_dbglog_level); ath10k_write_fw_dbglog()
1459 ath10k_warn(ar, "dbglog cfg failed from debugfs: %d\n", ath10k_write_fw_dbglog()
1468 mutex_unlock(&ar->conf_mutex); ath10k_write_fw_dbglog()
1552 struct ath10k *ar = hw->priv; ath10k_debug_get_et_stats() local
1557 mutex_lock(&ar->conf_mutex); ath10k_debug_get_et_stats()
1559 if (ar->state == ATH10K_STATE_ON) { ath10k_debug_get_et_stats()
1560 ret = ath10k_debug_fw_stats_request(ar); ath10k_debug_get_et_stats()
1563 ath10k_warn(ar, ath10k_debug_get_et_stats()
1569 pdev_stats = list_first_entry_or_null(&ar->debug.fw_stats.pdevs, ath10k_debug_get_et_stats()
1577 spin_lock_bh(&ar->data_lock); ath10k_debug_get_et_stats()
1622 data[i++] = ar->stats.fw_crash_counter; ath10k_debug_get_et_stats()
1623 data[i++] = ar->stats.fw_warm_reset_counter; ath10k_debug_get_et_stats()
1624 data[i++] = ar->stats.fw_cold_reset_counter; ath10k_debug_get_et_stats()
1626 spin_unlock_bh(&ar->data_lock); ath10k_debug_get_et_stats()
1628 mutex_unlock(&ar->conf_mutex); ath10k_debug_get_et_stats()
1643 struct ath10k *ar = inode->i_private; ath10k_debug_cal_data_open() local
1649 mutex_lock(&ar->conf_mutex); ath10k_debug_cal_data_open()
1651 if (ar->state != ATH10K_STATE_ON && ath10k_debug_cal_data_open()
1652 ar->state != ATH10K_STATE_UTF) { ath10k_debug_cal_data_open()
1665 ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr)); ath10k_debug_cal_data_open()
1667 ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret); ath10k_debug_cal_data_open()
1671 ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf, ath10k_debug_cal_data_open()
1674 ath10k_warn(ar, "failed to read calibration data: %d\n", ret); ath10k_debug_cal_data_open()
1680 mutex_unlock(&ar->conf_mutex); ath10k_debug_cal_data_open()
1688 mutex_unlock(&ar->conf_mutex); ath10k_debug_cal_data_open()
1723 struct ath10k *ar = file->private_data; ath10k_read_nf_cal_period() local
1728 ar->debug.nf_cal_period); ath10k_read_nf_cal_period()
1737 struct ath10k *ar = file->private_data; ath10k_write_nf_cal_period() local
1752 mutex_lock(&ar->conf_mutex); ath10k_write_nf_cal_period()
1754 ar->debug.nf_cal_period = period; ath10k_write_nf_cal_period()
1756 if (ar->state != ATH10K_STATE_ON) { ath10k_write_nf_cal_period()
1762 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->cal_period, ath10k_write_nf_cal_period()
1763 ar->debug.nf_cal_period); ath10k_write_nf_cal_period()
1765 ath10k_warn(ar, "cal period cfg failed from debugfs: %d\n", ath10k_write_nf_cal_period()
1773 mutex_unlock(&ar->conf_mutex); ath10k_write_nf_cal_period()
1786 int ath10k_debug_start(struct ath10k *ar) ath10k_debug_start() argument
1790 lockdep_assert_held(&ar->conf_mutex); ath10k_debug_start()
1792 ret = ath10k_debug_htt_stats_req(ar); ath10k_debug_start()
1795 ath10k_warn(ar, "failed to start htt stats workqueue: %d\n", ath10k_debug_start()
1798 if (ar->debug.fw_dbglog_mask) { ath10k_debug_start()
1799 ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask, ath10k_debug_start()
1803 ath10k_warn(ar, "failed to enable dbglog during start: %d", ath10k_debug_start()
1807 if (ar->debug.pktlog_filter) { ath10k_debug_start()
1808 ret = ath10k_wmi_pdev_pktlog_enable(ar, ath10k_debug_start()
1809 ar->debug.pktlog_filter); ath10k_debug_start()
1812 ath10k_warn(ar, ath10k_debug_start()
1814 ar->debug.pktlog_filter, ret); ath10k_debug_start()
1816 ret = ath10k_wmi_pdev_pktlog_disable(ar); ath10k_debug_start()
1819 ath10k_warn(ar, "failed to disable pktlog: %d\n", ret); ath10k_debug_start()
1822 if (ar->debug.nf_cal_period) { ath10k_debug_start()
1823 ret = ath10k_wmi_pdev_set_param(ar, ath10k_debug_start()
1824 ar->wmi.pdev_param->cal_period, ath10k_debug_start()
1825 ar->debug.nf_cal_period); ath10k_debug_start()
1828 ath10k_warn(ar, "cal period cfg failed from debug start: %d\n", ath10k_debug_start()
1835 void ath10k_debug_stop(struct ath10k *ar) ath10k_debug_stop() argument
1837 lockdep_assert_held(&ar->conf_mutex); ath10k_debug_stop()
1842 if (ar->debug.htt_stats_mask != 0) ath10k_debug_stop()
1843 cancel_delayed_work(&ar->debug.htt_stats_dwork); ath10k_debug_stop()
1845 ar->debug.htt_max_amsdu = 0; ath10k_debug_stop()
1846 ar->debug.htt_max_ampdu = 0; ath10k_debug_stop()
1848 ath10k_wmi_pdev_pktlog_disable(ar); ath10k_debug_stop()
1855 struct ath10k *ar = file->private_data; ath10k_write_simulate_radar() local
1857 ieee80211_radar_detected(ar->hw); ath10k_write_simulate_radar()
1871 ar->debug.dfs_stats.p))
1875 ar->debug.dfs_pool_stats.p))
1882 struct ath10k *ar = file->private_data; ath10k_read_dfs_stats() local
1889 if (!ar->dfs_detector) { ath10k_read_dfs_stats()
1894 ar->debug.dfs_pool_stats = ath10k_read_dfs_stats()
1895 ar->dfs_detector->get_stats(ar->dfs_detector); ath10k_read_dfs_stats()
1935 struct ath10k *ar = file->private_data; ath10k_write_pktlog_filter() local
1942 mutex_lock(&ar->conf_mutex); ath10k_write_pktlog_filter()
1944 if (ar->state != ATH10K_STATE_ON) { ath10k_write_pktlog_filter()
1945 ar->debug.pktlog_filter = filter; ath10k_write_pktlog_filter()
1950 if (filter == ar->debug.pktlog_filter) { ath10k_write_pktlog_filter()
1956 ret = ath10k_wmi_pdev_pktlog_enable(ar, filter); ath10k_write_pktlog_filter()
1958 ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n", ath10k_write_pktlog_filter()
1959 ar->debug.pktlog_filter, ret); ath10k_write_pktlog_filter()
1963 ret = ath10k_wmi_pdev_pktlog_disable(ar); ath10k_write_pktlog_filter()
1965 ath10k_warn(ar, "failed to disable pktlog: %d\n", ret); ath10k_write_pktlog_filter()
1970 ar->debug.pktlog_filter = filter; ath10k_write_pktlog_filter()
1974 mutex_unlock(&ar->conf_mutex); ath10k_write_pktlog_filter()
1982 struct ath10k *ar = file->private_data; ath10k_read_pktlog_filter() local
1985 mutex_lock(&ar->conf_mutex); ath10k_read_pktlog_filter()
1987 ar->debug.pktlog_filter); ath10k_read_pktlog_filter()
1988 mutex_unlock(&ar->conf_mutex); ath10k_read_pktlog_filter()
1999 int ath10k_debug_create(struct ath10k *ar) ath10k_debug_create() argument
2001 ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data)); ath10k_debug_create()
2002 if (!ar->debug.fw_crash_data) ath10k_debug_create()
2005 INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs); ath10k_debug_create()
2006 INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs); ath10k_debug_create()
2007 INIT_LIST_HEAD(&ar->debug.fw_stats.peers); ath10k_debug_create()
2012 void ath10k_debug_destroy(struct ath10k *ar) ath10k_debug_destroy() argument
2014 vfree(ar->debug.fw_crash_data); ath10k_debug_destroy()
2015 ar->debug.fw_crash_data = NULL; ath10k_debug_destroy()
2017 ath10k_debug_fw_stats_reset(ar); ath10k_debug_destroy()
2020 int ath10k_debug_register(struct ath10k *ar) ath10k_debug_register() argument
2022 ar->debug.debugfs_phy = debugfs_create_dir("ath10k", ath10k_debug_register()
2023 ar->hw->wiphy->debugfsdir); ath10k_debug_register()
2024 if (IS_ERR_OR_NULL(ar->debug.debugfs_phy)) { ath10k_debug_register()
2025 if (IS_ERR(ar->debug.debugfs_phy)) ath10k_debug_register()
2026 return PTR_ERR(ar->debug.debugfs_phy); ath10k_debug_register()
2031 INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork, ath10k_debug_register()
2034 init_completion(&ar->debug.fw_stats_complete); ath10k_debug_register()
2036 debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar, ath10k_debug_register()
2039 debugfs_create_file("fw_reset_stats", S_IRUSR, ar->debug.debugfs_phy, ath10k_debug_register()
2040 ar, &fops_fw_reset_stats); ath10k_debug_register()
2042 debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar, ath10k_debug_register()
2045 debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy, ath10k_debug_register()
2046 ar, &fops_simulate_fw_crash); ath10k_debug_register()
2048 debugfs_create_file("fw_crash_dump", S_IRUSR, ar->debug.debugfs_phy, ath10k_debug_register()
2049 ar, &fops_fw_crash_dump); ath10k_debug_register()
2052 ar->debug.debugfs_phy, ar, &fops_reg_addr); ath10k_debug_register()
2055 ar->debug.debugfs_phy, ar, &fops_reg_value); ath10k_debug_register()
2058 ar->debug.debugfs_phy, ar, &fops_mem_value); ath10k_debug_register()
2060 debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy, ath10k_debug_register()
2061 ar, &fops_chip_id); ath10k_debug_register()
2063 debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy, ath10k_debug_register()
2064 ar, &fops_htt_stats_mask); ath10k_debug_register()
2067 ar->debug.debugfs_phy, ar, ath10k_debug_register()
2070 debugfs_create_file("fw_dbglog", S_IRUSR, ar->debug.debugfs_phy, ath10k_debug_register()
2071 ar, &fops_fw_dbglog); ath10k_debug_register()
2073 debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy, ath10k_debug_register()
2074 ar, &fops_cal_data); ath10k_debug_register()
2077 ar->debug.debugfs_phy, ar, &fops_nf_cal_period); ath10k_debug_register()
2081 ar->debug.debugfs_phy, ar, ath10k_debug_register()
2085 ar->debug.debugfs_phy, ath10k_debug_register()
2086 &ar->dfs_block_radar_events); ath10k_debug_register()
2089 ar->debug.debugfs_phy, ar, ath10k_debug_register()
2094 ar->debug.debugfs_phy, ar, &fops_pktlog_filter); ath10k_debug_register()
2099 void ath10k_debug_unregister(struct ath10k *ar) ath10k_debug_unregister() argument
2101 cancel_delayed_work_sync(&ar->debug.htt_stats_dwork); ath10k_debug_unregister()
2107 void ath10k_dbg(struct ath10k *ar, enum ath10k_debug_mask mask, ath10k_dbg() argument
2119 dev_printk(KERN_DEBUG, ar->dev, "%pV", &vaf); ath10k_dbg()
2121 trace_ath10k_log_dbg(ar, mask, &vaf); ath10k_dbg()
2127 void ath10k_dbg_dump(struct ath10k *ar, ath10k_dbg_dump() argument
2138 ath10k_dbg(ar, mask, "%s\n", msg); ath10k_dbg_dump()
2150 dev_printk(KERN_DEBUG, ar->dev, "%s\n", linebuf); ath10k_dbg_dump()
2155 trace_ath10k_log_dbg_dump(ar, msg ? msg : "", prefix ? prefix : "", ath10k_dbg_dump()
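
Nearly every debugfs hook in this file follows one pattern: the device instance arrives via file->private_data, shared state is sampled under conf_mutex, and the result is formatted with scnprintf() and handed to simple_read_from_buffer(). A minimal sketch of that pattern, with a hypothetical my_dev standing in for the device structure (illustrative names only):

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct my_dev {
	struct mutex conf_mutex;	/* guards configuration state */
	u32 chip_id;
};

static ssize_t my_chip_id_read(struct file *file, char __user *ubuf,
			       size_t count, loff_t *ppos)
{
	struct my_dev *md = file->private_data;
	char buf[32];
	size_t len;

	mutex_lock(&md->conf_mutex);
	len = scnprintf(buf, sizeof(buf), "0x%08x\n", md->chip_id);
	mutex_unlock(&md->conf_mutex);

	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
}

static const struct file_operations fops_my_chip_id = {
	.read = my_chip_id_read,
	.open = simple_open,	/* copies inode->i_private into file->private_data */
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};

/* registration would look like:
 * debugfs_create_file("chip_id", 0400, parent, md, &fops_my_chip_id);
 */
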
H A Dtxrx.c24 static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb) ath10k_report_offchan_tx() argument
33 spin_lock_bh(&ar->data_lock); ath10k_report_offchan_tx()
34 if (ar->offchan_tx_skb != skb) { ath10k_report_offchan_tx()
35 ath10k_warn(ar, "completed old offchannel frame\n"); ath10k_report_offchan_tx()
39 complete(&ar->offchan_tx_completed); ath10k_report_offchan_tx()
40 ar->offchan_tx_skb = NULL; /* just for sanity */ ath10k_report_offchan_tx()
42 ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb); ath10k_report_offchan_tx()
44 spin_unlock_bh(&ar->data_lock); ath10k_report_offchan_tx()
50 struct ath10k *ar = htt->ar; ath10k_txrx_tx_unref() local
51 struct device *dev = ar->dev; ath10k_txrx_tx_unref()
58 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n", ath10k_txrx_tx_unref()
62 ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n", ath10k_txrx_tx_unref()
69 ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n", ath10k_txrx_tx_unref()
83 ath10k_report_offchan_tx(htt->ar, msdu); ath10k_txrx_tx_unref()
87 trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id); ath10k_txrx_tx_unref()
90 ieee80211_free_txskb(htt->ar->hw, msdu); ath10k_txrx_tx_unref()
100 ieee80211_tx_status(htt->ar->hw, msdu); ath10k_txrx_tx_unref()
110 struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, ath10k_peer_find() argument
115 lockdep_assert_held(&ar->data_lock); ath10k_peer_find()
117 list_for_each_entry(peer, &ar->peers, list) { ath10k_peer_find()
129 struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id) ath10k_peer_find_by_id() argument
133 lockdep_assert_held(&ar->data_lock); ath10k_peer_find_by_id()
135 list_for_each_entry(peer, &ar->peers, list) ath10k_peer_find_by_id()
142 static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id, ath10k_wait_for_peer_common() argument
147 ret = wait_event_timeout(ar->peer_mapping_wq, ({ ath10k_wait_for_peer_common()
150 spin_lock_bh(&ar->data_lock); ath10k_wait_for_peer_common()
151 mapped = !!ath10k_peer_find(ar, vdev_id, addr); ath10k_wait_for_peer_common()
152 spin_unlock_bh(&ar->data_lock); ath10k_wait_for_peer_common()
155 test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)); ath10k_wait_for_peer_common()
164 int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, const u8 *addr) ath10k_wait_for_peer_created() argument
166 return ath10k_wait_for_peer_common(ar, vdev_id, addr, true); ath10k_wait_for_peer_created()
169 int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, const u8 *addr) ath10k_wait_for_peer_deleted() argument
171 return ath10k_wait_for_peer_common(ar, vdev_id, addr, false); ath10k_wait_for_peer_deleted()
177 struct ath10k *ar = htt->ar; ath10k_peer_map_event() local
180 spin_lock_bh(&ar->data_lock); ath10k_peer_map_event()
181 peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr); ath10k_peer_map_event()
189 list_add(&peer->list, &ar->peers); ath10k_peer_map_event()
190 wake_up(&ar->peer_mapping_wq); ath10k_peer_map_event()
193 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n", ath10k_peer_map_event()
198 spin_unlock_bh(&ar->data_lock); ath10k_peer_map_event()
204 struct ath10k *ar = htt->ar; ath10k_peer_unmap_event() local
207 spin_lock_bh(&ar->data_lock); ath10k_peer_unmap_event()
208 peer = ath10k_peer_find_by_id(ar, ev->peer_id); ath10k_peer_unmap_event()
210 ath10k_warn(ar, "peer-unmap-event: unknown peer id %d\n", ath10k_peer_unmap_event()
215 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n", ath10k_peer_unmap_event()
223 wake_up(&ar->peer_mapping_wq); ath10k_peer_unmap_event()
227 spin_unlock_bh(&ar->data_lock); ath10k_peer_unmap_event()
H A Dthermal.c26 static int ath10k_thermal_get_active_vifs(struct ath10k *ar, ath10k_thermal_get_active_vifs() argument
32 lockdep_assert_held(&ar->conf_mutex); ath10k_thermal_get_active_vifs()
34 list_for_each_entry(arvif, &ar->arvifs, list) { ath10k_thermal_get_active_vifs()
60 struct ath10k *ar = cdev->devdata; ath10k_thermal_get_cur_dutycycle() local
62 mutex_lock(&ar->conf_mutex); ath10k_thermal_get_cur_dutycycle()
63 *state = ar->thermal.duty_cycle; ath10k_thermal_get_cur_dutycycle()
64 mutex_unlock(&ar->conf_mutex); ath10k_thermal_get_cur_dutycycle()
72 struct ath10k *ar = cdev->devdata; ath10k_thermal_set_cur_dutycycle() local
76 mutex_lock(&ar->conf_mutex); ath10k_thermal_set_cur_dutycycle()
77 if (ar->state != ATH10K_STATE_ON) { ath10k_thermal_set_cur_dutycycle()
83 ath10k_warn(ar, "duty cycle %ld is exceeding the limit %d\n", ath10k_thermal_set_cur_dutycycle()
93 num_bss = ath10k_thermal_get_active_vifs(ar, WMI_VDEV_TYPE_AP); ath10k_thermal_set_cur_dutycycle()
95 ath10k_warn(ar, "no active AP interfaces\n"); ath10k_thermal_set_cur_dutycycle()
104 ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration, ath10k_thermal_set_cur_dutycycle()
108 ath10k_warn(ar, "failed to set quiet mode period %u duarion %u enabled %u ret %d\n", ath10k_thermal_set_cur_dutycycle()
112 ar->thermal.duty_cycle = duty_cycle; ath10k_thermal_set_cur_dutycycle()
114 mutex_unlock(&ar->conf_mutex); ath10k_thermal_set_cur_dutycycle()
128 struct ath10k *ar = dev_get_drvdata(dev); ath10k_thermal_show_temp() local
131 mutex_lock(&ar->conf_mutex); ath10k_thermal_show_temp()
134 if (ar->state != ATH10K_STATE_ON) { ath10k_thermal_show_temp()
139 reinit_completion(&ar->thermal.wmi_sync); ath10k_thermal_show_temp()
140 ret = ath10k_wmi_pdev_get_temperature(ar); ath10k_thermal_show_temp()
142 ath10k_warn(ar, "failed to read temperature %d\n", ret); ath10k_thermal_show_temp()
146 if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) { ath10k_thermal_show_temp()
151 ret = wait_for_completion_timeout(&ar->thermal.wmi_sync, ath10k_thermal_show_temp()
154 ath10k_warn(ar, "failed to synchronize thermal read\n"); ath10k_thermal_show_temp()
159 spin_lock_bh(&ar->data_lock); ath10k_thermal_show_temp()
160 temperature = ar->thermal.temperature; ath10k_thermal_show_temp()
161 spin_unlock_bh(&ar->data_lock); ath10k_thermal_show_temp()
166 mutex_unlock(&ar->conf_mutex); ath10k_thermal_show_temp()
170 void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature) ath10k_thermal_event_temperature() argument
172 spin_lock_bh(&ar->data_lock); ath10k_thermal_event_temperature()
173 ar->thermal.temperature = temperature; ath10k_thermal_event_temperature()
174 spin_unlock_bh(&ar->data_lock); ath10k_thermal_event_temperature()
175 complete(&ar->thermal.wmi_sync); ath10k_thermal_event_temperature()
187 int ath10k_thermal_register(struct ath10k *ar) ath10k_thermal_register() argument
193 cdev = thermal_cooling_device_register("ath10k_thermal", ar, ath10k_thermal_register()
197 ath10k_err(ar, "failed to setup thermal device result: %ld\n", ath10k_thermal_register()
202 ret = sysfs_create_link(&ar->dev->kobj, &cdev->device.kobj, ath10k_thermal_register()
205 ath10k_err(ar, "failed to create thermal symlink\n"); ath10k_thermal_register()
209 ar->thermal.cdev = cdev; ath10k_thermal_register()
214 if (ar->wmi.op_version != ATH10K_FW_WMI_OP_VERSION_10_2_4) ath10k_thermal_register()
222 hwmon_dev = devm_hwmon_device_register_with_groups(ar->dev, ath10k_thermal_register()
223 "ath10k_hwmon", ar, ath10k_thermal_register()
226 ath10k_err(ar, "failed to register hwmon device: %ld\n", ath10k_thermal_register()
234 sysfs_remove_link(&ar->dev->kobj, "thermal_sensor"); ath10k_thermal_register()
240 void ath10k_thermal_unregister(struct ath10k *ar) ath10k_thermal_unregister() argument
242 thermal_cooling_device_unregister(ar->thermal.cdev); ath10k_thermal_unregister()
243 sysfs_remove_link(&ar->dev->kobj, "cooling_device"); ath10k_thermal_unregister()
H A Ddebug.h59 __printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...);
60 __printf(2, 3) void ath10k_err(struct ath10k *ar, const char *fmt, ...);
61 __printf(2, 3) void ath10k_warn(struct ath10k *ar, const char *fmt, ...);
62 void ath10k_print_driver_info(struct ath10k *ar);
65 int ath10k_debug_start(struct ath10k *ar);
66 void ath10k_debug_stop(struct ath10k *ar);
67 int ath10k_debug_create(struct ath10k *ar);
68 void ath10k_debug_destroy(struct ath10k *ar);
69 int ath10k_debug_register(struct ath10k *ar);
70 void ath10k_debug_unregister(struct ath10k *ar);
71 void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb);
73 ath10k_debug_get_new_fw_crash_data(struct ath10k *ar);
75 void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len);
76 #define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
87 static inline int ath10k_debug_start(struct ath10k *ar) ath10k_debug_start() argument
92 static inline void ath10k_debug_stop(struct ath10k *ar) ath10k_debug_stop() argument
96 static inline int ath10k_debug_create(struct ath10k *ar) ath10k_debug_create() argument
101 static inline void ath10k_debug_destroy(struct ath10k *ar) ath10k_debug_destroy() argument
105 static inline int ath10k_debug_register(struct ath10k *ar) ath10k_debug_register() argument
110 static inline void ath10k_debug_unregister(struct ath10k *ar) ath10k_debug_unregister() argument
114 static inline void ath10k_debug_fw_stats_process(struct ath10k *ar, ath10k_debug_fw_stats_process() argument
119 static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, ath10k_debug_dbglog_add() argument
125 ath10k_debug_get_new_fw_crash_data(struct ath10k *ar) ath10k_debug_get_new_fw_crash_data() argument
130 #define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
143 __printf(3, 4) void ath10k_dbg(struct ath10k *ar,
146 void ath10k_dbg_dump(struct ath10k *ar,
152 static inline int ath10k_dbg(struct ath10k *ar, ath10k_dbg() argument
159 static inline void ath10k_dbg_dump(struct ath10k *ar, ath10k_dbg_dump() argument
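
The inline definitions above give every debug entry point a no-op twin, so callers never need #ifdefs of their own; the config option simply swaps implementations. The convention in miniature, using made-up MY_CONFIG_DEBUG and my_debug_* names:

/* my_debug.h -- real-or-stub header convention, sketched */
#ifndef MY_DEBUG_H
#define MY_DEBUG_H

struct my_dev;

#ifdef MY_CONFIG_DEBUG
/* real implementations live in my_debug.c */
int my_debug_start(struct my_dev *md);
void my_debug_stop(struct my_dev *md);
#else
/* stubs: empty inlines the compiler optimizes away entirely */
static inline int my_debug_start(struct my_dev *md)
{
	return 0;
}

static inline void my_debug_stop(struct my_dev *md)
{
}
#endif /* MY_CONFIG_DEBUG */

#endif /* MY_DEBUG_H */
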
H A Dspectral.c22 static void send_fft_sample(struct ath10k *ar, send_fft_sample() argument
27 if (!ar->spectral.rfs_chan_spec_scan) send_fft_sample()
32 relay_write(ar->spectral.rfs_chan_spec_scan, fft_sample_tlv, length); send_fft_sample()
59 int ath10k_spectral_process_fft(struct ath10k *ar, ath10k_spectral_process_fft() argument
151 send_fft_sample(ar, &fft_sample->tlv); ath10k_spectral_process_fft()
156 static struct ath10k_vif *ath10k_get_spectral_vdev(struct ath10k *ar) ath10k_get_spectral_vdev() argument
160 lockdep_assert_held(&ar->conf_mutex); ath10k_get_spectral_vdev()
162 if (list_empty(&ar->arvifs)) ath10k_get_spectral_vdev()
166 list_for_each_entry(arvif, &ar->arvifs, list) ath10k_get_spectral_vdev()
171 return list_first_entry(&ar->arvifs, typeof(*arvif), list); ath10k_get_spectral_vdev()
174 static int ath10k_spectral_scan_trigger(struct ath10k *ar) ath10k_spectral_scan_trigger() argument
180 lockdep_assert_held(&ar->conf_mutex); ath10k_spectral_scan_trigger()
182 arvif = ath10k_get_spectral_vdev(ar); ath10k_spectral_scan_trigger()
187 if (ar->spectral.mode == SPECTRAL_DISABLED) ath10k_spectral_scan_trigger()
190 res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id, ath10k_spectral_scan_trigger()
196 res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id, ath10k_spectral_scan_trigger()
205 static int ath10k_spectral_scan_config(struct ath10k *ar, ath10k_spectral_scan_config() argument
212 lockdep_assert_held(&ar->conf_mutex); ath10k_spectral_scan_config()
214 arvif = ath10k_get_spectral_vdev(ar); ath10k_spectral_scan_config()
221 ar->spectral.mode = mode; ath10k_spectral_scan_config()
223 res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id, ath10k_spectral_scan_config()
227 ath10k_warn(ar, "failed to enable spectral scan: %d\n", res); ath10k_spectral_scan_config()
237 count = max_t(u8, 1, ar->spectral.config.count); ath10k_spectral_scan_config()
243 arg.scan_fft_size = ar->spectral.config.fft_size; ath10k_spectral_scan_config()
259 res = ath10k_wmi_vdev_spectral_conf(ar, &arg); ath10k_spectral_scan_config()
261 ath10k_warn(ar, "failed to configure spectral scan: %d\n", res); ath10k_spectral_scan_config()
271 struct ath10k *ar = file->private_data; read_file_spec_scan_ctl() local
276 mutex_lock(&ar->conf_mutex); read_file_spec_scan_ctl()
277 spectral_mode = ar->spectral.mode; read_file_spec_scan_ctl()
278 mutex_unlock(&ar->conf_mutex); read_file_spec_scan_ctl()
300 struct ath10k *ar = file->private_data; write_file_spec_scan_ctl() local
311 mutex_lock(&ar->conf_mutex); write_file_spec_scan_ctl()
314 if (ar->spectral.mode == SPECTRAL_MANUAL || write_file_spec_scan_ctl()
315 ar->spectral.mode == SPECTRAL_BACKGROUND) { write_file_spec_scan_ctl()
319 res = ath10k_spectral_scan_config(ar, write_file_spec_scan_ctl()
320 ar->spectral.mode); write_file_spec_scan_ctl()
322 ath10k_warn(ar, "failed to reconfigure spectral scan: %d\n", write_file_spec_scan_ctl()
325 res = ath10k_spectral_scan_trigger(ar); write_file_spec_scan_ctl()
327 ath10k_warn(ar, "failed to trigger spectral scan: %d\n", write_file_spec_scan_ctl()
334 res = ath10k_spectral_scan_config(ar, SPECTRAL_BACKGROUND); write_file_spec_scan_ctl()
336 res = ath10k_spectral_scan_config(ar, SPECTRAL_MANUAL); write_file_spec_scan_ctl()
338 res = ath10k_spectral_scan_config(ar, SPECTRAL_DISABLED); write_file_spec_scan_ctl()
343 mutex_unlock(&ar->conf_mutex); write_file_spec_scan_ctl()
363 struct ath10k *ar = file->private_data; read_file_spectral_count() local
368 mutex_lock(&ar->conf_mutex); read_file_spectral_count()
369 spectral_count = ar->spectral.config.count; read_file_spectral_count()
370 mutex_unlock(&ar->conf_mutex); read_file_spectral_count()
380 struct ath10k *ar = file->private_data; write_file_spectral_count() local
396 mutex_lock(&ar->conf_mutex); write_file_spectral_count()
397 ar->spectral.config.count = val; write_file_spectral_count()
398 mutex_unlock(&ar->conf_mutex); write_file_spectral_count()
415 struct ath10k *ar = file->private_data; read_file_spectral_bins() local
419 mutex_lock(&ar->conf_mutex); read_file_spectral_bins()
421 fft_size = ar->spectral.config.fft_size; read_file_spectral_bins()
425 mutex_unlock(&ar->conf_mutex); read_file_spectral_bins()
435 struct ath10k *ar = file->private_data; write_file_spectral_bins() local
454 mutex_lock(&ar->conf_mutex); write_file_spectral_bins()
455 ar->spectral.config.fft_size = ilog2(val); write_file_spectral_bins()
456 ar->spectral.config.fft_size += WMI_SPECTRAL_BIN_SCALE_DEFAULT; write_file_spectral_bins()
457 mutex_unlock(&ar->conf_mutex); write_file_spectral_bins()
496 int ath10k_spectral_start(struct ath10k *ar) ath10k_spectral_start() argument
500 lockdep_assert_held(&ar->conf_mutex); ath10k_spectral_start()
502 list_for_each_entry(arvif, &ar->arvifs, list) ath10k_spectral_start()
505 ar->spectral.mode = SPECTRAL_DISABLED; ath10k_spectral_start()
506 ar->spectral.config.count = WMI_SPECTRAL_COUNT_DEFAULT; ath10k_spectral_start()
507 ar->spectral.config.fft_size = WMI_SPECTRAL_FFT_SIZE_DEFAULT; ath10k_spectral_start()
517 return ath10k_spectral_scan_config(arvif->ar, SPECTRAL_DISABLED); ath10k_spectral_vif_stop()
520 int ath10k_spectral_create(struct ath10k *ar) ath10k_spectral_create() argument
522 ar->spectral.rfs_chan_spec_scan = relay_open("spectral_scan", ath10k_spectral_create()
523 ar->debug.debugfs_phy, ath10k_spectral_create()
528 ar->debug.debugfs_phy, ar, ath10k_spectral_create()
532 ar->debug.debugfs_phy, ar, ath10k_spectral_create()
536 ar->debug.debugfs_phy, ar, ath10k_spectral_create()
542 void ath10k_spectral_destroy(struct ath10k *ar) ath10k_spectral_destroy() argument
544 if (ar->spectral.rfs_chan_spec_scan) { ath10k_spectral_destroy()
545 relay_close(ar->spectral.rfs_chan_spec_scan); ath10k_spectral_destroy()
546 ar->spectral.rfs_chan_spec_scan = NULL; ath10k_spectral_destroy()
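
Spectral samples reach userspace through relayfs: relay_open() builds per-CPU ring buffers under debugfs, relay_write() (seen in send_fft_sample() above) appends records, and relay_close() tears the channel down. A bare-bones sketch of that plumbing; the two callbacks are required boilerplate, and the sub-buffer geometry here is illustrative, not the driver's:

#include <linux/relay.h>
#include <linux/debugfs.h>

static struct dentry *my_create_buf_file(const char *filename,
					 struct dentry *parent, umode_t mode,
					 struct rchan_buf *buf, int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static int my_remove_buf_file(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

static struct rchan_callbacks my_relay_cb = {
	.create_buf_file = my_create_buf_file,
	.remove_buf_file = my_remove_buf_file,
};

/* 256 sub-buffers of 1140 bytes each -- sizes chosen for illustration */
static struct rchan *my_open_stream(struct dentry *parent)
{
	return relay_open("spectral_scan", parent, 1140, 256,
			  &my_relay_cb, NULL);
}

/* producer: copy one sample into the ring, if the channel exists */
static void my_push_sample(struct rchan *rc, const void *sample, size_t len)
{
	if (rc)
		relay_write(rc, sample, len);
}
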
H A Dce.c62 static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar, ath10k_ce_dest_ring_write_index_set() argument
66 ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n); ath10k_ce_dest_ring_write_index_set()
69 static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar, ath10k_ce_dest_ring_write_index_get() argument
72 return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS); ath10k_ce_dest_ring_write_index_get()
75 static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar, ath10k_ce_src_ring_write_index_set() argument
79 ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n); ath10k_ce_src_ring_write_index_set()
82 static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar, ath10k_ce_src_ring_write_index_get() argument
85 return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS); ath10k_ce_src_ring_write_index_get()
88 static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar, ath10k_ce_src_ring_read_index_get() argument
91 return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS); ath10k_ce_src_ring_read_index_get()
94 static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar, ath10k_ce_src_ring_base_addr_set() argument
98 ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr); ath10k_ce_src_ring_base_addr_set()
101 static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar, ath10k_ce_src_ring_size_set() argument
105 ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n); ath10k_ce_src_ring_size_set()
108 static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar, ath10k_ce_src_ring_dmax_set() argument
112 u32 ctrl1_addr = ath10k_pci_read32((ar), ath10k_ce_src_ring_dmax_set()
115 ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS, ath10k_ce_src_ring_dmax_set()
120 static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar, ath10k_ce_src_ring_byte_swap_set() argument
124 u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS); ath10k_ce_src_ring_byte_swap_set()
126 ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS, ath10k_ce_src_ring_byte_swap_set()
131 static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar, ath10k_ce_dest_ring_byte_swap_set() argument
135 u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS); ath10k_ce_dest_ring_byte_swap_set()
137 ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS, ath10k_ce_dest_ring_byte_swap_set()
142 static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar, ath10k_ce_dest_ring_read_index_get() argument
145 return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS); ath10k_ce_dest_ring_read_index_get()
148 static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar, ath10k_ce_dest_ring_base_addr_set() argument
152 ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr); ath10k_ce_dest_ring_base_addr_set()
155 static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar, ath10k_ce_dest_ring_size_set() argument
159 ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n); ath10k_ce_dest_ring_size_set()
162 static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar, ath10k_ce_src_ring_highmark_set() argument
166 u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS); ath10k_ce_src_ring_highmark_set()
168 ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS, ath10k_ce_src_ring_highmark_set()
173 static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar, ath10k_ce_src_ring_lowmark_set() argument
177 u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS); ath10k_ce_src_ring_lowmark_set()
179 ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS, ath10k_ce_src_ring_lowmark_set()
184 static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar, ath10k_ce_dest_ring_highmark_set() argument
188 u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS); ath10k_ce_dest_ring_highmark_set()
190 ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS, ath10k_ce_dest_ring_highmark_set()
195 static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar, ath10k_ce_dest_ring_lowmark_set() argument
199 u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS); ath10k_ce_dest_ring_lowmark_set()
201 ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS, ath10k_ce_dest_ring_lowmark_set()
206 static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar, ath10k_ce_copy_complete_inter_enable() argument
209 u32 host_ie_addr = ath10k_pci_read32(ar, ath10k_ce_copy_complete_inter_enable()
212 ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS, ath10k_ce_copy_complete_inter_enable()
216 static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar, ath10k_ce_copy_complete_intr_disable() argument
219 u32 host_ie_addr = ath10k_pci_read32(ar, ath10k_ce_copy_complete_intr_disable()
222 ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS, ath10k_ce_copy_complete_intr_disable()
226 static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar, ath10k_ce_watermark_intr_disable() argument
229 u32 host_ie_addr = ath10k_pci_read32(ar, ath10k_ce_watermark_intr_disable()
232 ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS, ath10k_ce_watermark_intr_disable()
236 static inline void ath10k_ce_error_intr_enable(struct ath10k *ar, ath10k_ce_error_intr_enable() argument
239 u32 misc_ie_addr = ath10k_pci_read32(ar, ath10k_ce_error_intr_enable()
242 ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS, ath10k_ce_error_intr_enable()
246 static inline void ath10k_ce_error_intr_disable(struct ath10k *ar, ath10k_ce_error_intr_disable() argument
249 u32 misc_ie_addr = ath10k_pci_read32(ar, ath10k_ce_error_intr_disable()
252 ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS, ath10k_ce_error_intr_disable()
256 static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar, ath10k_ce_engine_int_status_clear() argument
260 ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask); ath10k_ce_engine_int_status_clear()
275 struct ath10k *ar = ce_state->ar; ath10k_ce_send_nolock() local
286 ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n", ath10k_ce_send_nolock()
319 ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index); ath10k_ce_send_nolock()
328 struct ath10k *ar = pipe->ar; __ath10k_ce_send_revert() local
329 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); __ath10k_ce_send_revert()
344 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr))) __ath10k_ce_send_revert()
360 struct ath10k *ar = ce_state->ar; ath10k_ce_send() local
361 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_ce_send()
374 struct ath10k *ar = pipe->ar; ath10k_ce_num_free_src_entries() local
375 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_ce_num_free_src_entries()
389 struct ath10k *ar = pipe->ar; __ath10k_ce_rx_num_free_bufs() local
390 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); __ath10k_ce_rx_num_free_bufs()
403 struct ath10k *ar = pipe->ar; __ath10k_ce_rx_post_buf() local
404 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); __ath10k_ce_rx_post_buf()
423 ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index); __ath10k_ce_rx_post_buf()
431 struct ath10k *ar = pipe->ar; ath10k_ce_rx_post_buf() local
432 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_ce_rx_post_buf()
509 struct ath10k *ar = ce_state->ar; ath10k_ce_completed_recv_next() local
510 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_ce_completed_recv_next()
532 struct ath10k *ar; ath10k_ce_revoke_recv_next() local
540 ar = ce_state->ar; ath10k_ce_revoke_recv_next()
541 ar_pci = ath10k_pci_priv(ar); ath10k_ce_revoke_recv_next()
588 struct ath10k *ar = ce_state->ar; ath10k_ce_completed_send_next_nolock() local
603 read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); ath10k_ce_completed_send_next_nolock()
651 struct ath10k *ar; ath10k_ce_cancel_send_next() local
659 ar = ce_state->ar; ath10k_ce_cancel_send_next()
660 ar_pci = ath10k_pci_priv(ar); ath10k_ce_cancel_send_next()
704 struct ath10k *ar = ce_state->ar; ath10k_ce_completed_send_next() local
705 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_ce_completed_send_next()
724 void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) ath10k_ce_per_engine_service() argument
726 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_ce_per_engine_service()
733 ath10k_ce_engine_int_status_clear(ar, ctrl_addr, ath10k_ce_per_engine_service()
750 ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK); ath10k_ce_per_engine_service()
761 void ath10k_ce_per_engine_service_any(struct ath10k *ar) ath10k_ce_per_engine_service_any() argument
766 intr_summary = CE_INTERRUPT_SUMMARY(ar); ath10k_ce_per_engine_service_any()
775 ath10k_ce_per_engine_service(ar, ce_id); ath10k_ce_per_engine_service_any()
789 struct ath10k *ar = ce_state->ar; ath10k_ce_per_engine_handler_adjust() local
794 ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr); ath10k_ce_per_engine_handler_adjust()
796 ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr); ath10k_ce_per_engine_handler_adjust()
798 ath10k_ce_watermark_intr_disable(ar, ctrl_addr); ath10k_ce_per_engine_handler_adjust()
801 int ath10k_ce_disable_interrupts(struct ath10k *ar) ath10k_ce_disable_interrupts() argument
806 u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id); ath10k_ce_disable_interrupts()
808 ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr); ath10k_ce_disable_interrupts()
809 ath10k_ce_error_intr_disable(ar, ctrl_addr); ath10k_ce_disable_interrupts()
810 ath10k_ce_watermark_intr_disable(ar, ctrl_addr); ath10k_ce_disable_interrupts()
816 void ath10k_ce_enable_interrupts(struct ath10k *ar) ath10k_ce_enable_interrupts() argument
818 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_ce_enable_interrupts()
828 static int ath10k_ce_init_src_ring(struct ath10k *ar, ath10k_ce_init_src_ring() argument
832 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_ce_init_src_ring()
835 u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id); ath10k_ce_init_src_ring()
842 src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); ath10k_ce_init_src_ring()
847 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr); ath10k_ce_init_src_ring()
850 ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, ath10k_ce_init_src_ring()
852 ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries); ath10k_ce_init_src_ring()
853 ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max); ath10k_ce_init_src_ring()
854 ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0); ath10k_ce_init_src_ring()
855 ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0); ath10k_ce_init_src_ring()
856 ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries); ath10k_ce_init_src_ring()
858 ath10k_dbg(ar, ATH10K_DBG_BOOT, ath10k_ce_init_src_ring()
865 static int ath10k_ce_init_dest_ring(struct ath10k *ar, ath10k_ce_init_dest_ring() argument
869 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_ce_init_dest_ring()
872 u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id); ath10k_ce_init_dest_ring()
879 dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr); ath10k_ce_init_dest_ring()
882 ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr); ath10k_ce_init_dest_ring()
885 ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, ath10k_ce_init_dest_ring()
887 ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries); ath10k_ce_init_dest_ring()
888 ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0); ath10k_ce_init_dest_ring()
889 ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0); ath10k_ce_init_dest_ring()
890 ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries); ath10k_ce_init_dest_ring()
892 ath10k_dbg(ar, ATH10K_DBG_BOOT, ath10k_ce_init_dest_ring()
900 ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id, ath10k_ce_alloc_src_ring() argument
924 dma_alloc_coherent(ar->dev, ath10k_ce_alloc_src_ring()
950 dma_free_coherent(ar->dev, ath10k_ce_alloc_src_ring()
967 ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id, ath10k_ce_alloc_dest_ring() argument
991 dma_alloc_coherent(ar->dev, ath10k_ce_alloc_dest_ring()
1026 int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id, ath10k_ce_init_pipe() argument
1032 ret = ath10k_ce_init_src_ring(ar, ce_id, attr); ath10k_ce_init_pipe()
1034 ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n", ath10k_ce_init_pipe()
1041 ret = ath10k_ce_init_dest_ring(ar, ce_id, attr); ath10k_ce_init_pipe()
1043 ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n", ath10k_ce_init_pipe()
1052 static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id) ath10k_ce_deinit_src_ring() argument
1054 u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id); ath10k_ce_deinit_src_ring()
1056 ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0); ath10k_ce_deinit_src_ring()
1057 ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0); ath10k_ce_deinit_src_ring()
1058 ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0); ath10k_ce_deinit_src_ring()
1059 ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0); ath10k_ce_deinit_src_ring()
1062 static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id) ath10k_ce_deinit_dest_ring() argument
1064 u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id); ath10k_ce_deinit_dest_ring()
1066 ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0); ath10k_ce_deinit_dest_ring()
1067 ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0); ath10k_ce_deinit_dest_ring()
1068 ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0); ath10k_ce_deinit_dest_ring()
1071 void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id) ath10k_ce_deinit_pipe() argument
1073 ath10k_ce_deinit_src_ring(ar, ce_id); ath10k_ce_deinit_pipe()
1074 ath10k_ce_deinit_dest_ring(ar, ce_id); ath10k_ce_deinit_pipe()
1077 int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, ath10k_ce_alloc_pipe() argument
1082 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_ce_alloc_pipe()
1099 ce_state->ar = ar; ath10k_ce_alloc_pipe()
1101 ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id); ath10k_ce_alloc_pipe()
1112 ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr); ath10k_ce_alloc_pipe()
1115 ath10k_err(ar, "failed to allocate copy engine source ring %d: %d\n", ath10k_ce_alloc_pipe()
1123 ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id, ath10k_ce_alloc_pipe()
1127 ath10k_err(ar, "failed to allocate copy engine destination ring %d: %d\n", ath10k_ce_alloc_pipe()
1137 void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id) ath10k_ce_free_pipe() argument
1139 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_ce_free_pipe()
1144 dma_free_coherent(ar->dev, ath10k_ce_free_pipe()
1154 dma_free_coherent(ar->dev, ath10k_ce_free_pipe()
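
All of the source/destination rings above are power-of-two producer/consumer rings: fill level and free space fall out of masked index subtraction on write_index (producer) and sw_index (consumer), which is what the num_free helpers compute. The arithmetic in isolation, with illustrative names:

/* nentries must be a power of two for the mask to work */
#define MY_RING_DELTA(nentries_mask, fromidx, toidx) \
	(((int)(toidx) - (int)(fromidx)) & (nentries_mask))

struct my_ring {
	unsigned int nentries;		/* power of two */
	unsigned int nentries_mask;	/* nentries - 1 */
	unsigned int write_index;	/* producer position */
	unsigned int sw_index;		/* consumer position */
};

/* slots the producer may still fill; one slot is kept empty so that
 * "full" and "empty" remain distinguishable
 */
static unsigned int my_ring_free(const struct my_ring *r)
{
	return MY_RING_DELTA(r->nentries_mask,
			     r->write_index, r->sw_index - 1);
}

/* entries waiting for the consumer */
static unsigned int my_ring_filled(const struct my_ring *r)
{
	return MY_RING_DELTA(r->nentries_mask,
			     r->sw_index, r->write_index);
}
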
H A Dwmi.c885 int ath10k_wmi_wait_for_service_ready(struct ath10k *ar) ath10k_wmi_wait_for_service_ready() argument
889 ret = wait_for_completion_timeout(&ar->wmi.service_ready, ath10k_wmi_wait_for_service_ready()
894 int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar) ath10k_wmi_wait_for_unified_ready() argument
898 ret = wait_for_completion_timeout(&ar->wmi.unified_ready, ath10k_wmi_wait_for_unified_ready()
903 struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len) ath10k_wmi_alloc_skb() argument
908 skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len); ath10k_wmi_alloc_skb()
914 ath10k_warn(ar, "Unaligned WMI skb\n"); ath10k_wmi_alloc_skb()
922 static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_htc_tx_complete() argument
927 int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_cmd_send_nowait() argument
944 ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb); ath10k_wmi_cmd_send_nowait()
945 trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len, ret); ath10k_wmi_cmd_send_nowait()
959 struct ath10k *ar = arvif->ar; ath10k_wmi_tx_beacon_nowait() local
964 spin_lock_bh(&ar->data_lock); ath10k_wmi_tx_beacon_nowait()
979 spin_unlock_bh(&ar->data_lock); ath10k_wmi_tx_beacon_nowait()
981 ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar, ath10k_wmi_tx_beacon_nowait()
988 spin_lock_bh(&ar->data_lock); ath10k_wmi_tx_beacon_nowait()
997 spin_unlock_bh(&ar->data_lock); ath10k_wmi_tx_beacon_nowait()
1008 static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar) ath10k_wmi_tx_beacons_nowait() argument
1010 ieee80211_iterate_active_interfaces_atomic(ar->hw, ath10k_wmi_tx_beacons_nowait()
1016 static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar) ath10k_wmi_op_ep_tx_credits() argument
1019 ath10k_wmi_tx_beacons_nowait(ar); ath10k_wmi_op_ep_tx_credits()
1021 wake_up(&ar->wmi.tx_credits_wq); ath10k_wmi_op_ep_tx_credits()
1024 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id) ath10k_wmi_cmd_send() argument
1031 ath10k_warn(ar, "wmi command %d is not supported by firmware\n", ath10k_wmi_cmd_send()
1036 wait_event_timeout(ar->wmi.tx_credits_wq, ({ ath10k_wmi_cmd_send()
1038 ath10k_wmi_tx_beacons_nowait(ar); ath10k_wmi_cmd_send()
1040 ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id); ath10k_wmi_cmd_send()
1042 if (ret && test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) ath10k_wmi_cmd_send()
1055 ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu) ath10k_wmi_op_gen_mgmt_tx() argument
1082 skb = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_op_gen_mgmt_tx()
1096 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n", ath10k_wmi_op_gen_mgmt_tx()
1099 trace_ath10k_tx_hdr(ar, skb->data, skb->len); ath10k_wmi_op_gen_mgmt_tx()
1100 trace_ath10k_tx_payload(ar, skb->data, skb->len); ath10k_wmi_op_gen_mgmt_tx()
1105 static void ath10k_wmi_event_scan_started(struct ath10k *ar) ath10k_wmi_event_scan_started() argument
1107 lockdep_assert_held(&ar->data_lock); ath10k_wmi_event_scan_started()
1109 switch (ar->scan.state) { ath10k_wmi_event_scan_started()
1113 ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n", ath10k_wmi_event_scan_started()
1114 ath10k_scan_state_str(ar->scan.state), ath10k_wmi_event_scan_started()
1115 ar->scan.state); ath10k_wmi_event_scan_started()
1118 ar->scan.state = ATH10K_SCAN_RUNNING; ath10k_wmi_event_scan_started()
1120 if (ar->scan.is_roc) ath10k_wmi_event_scan_started()
1121 ieee80211_ready_on_channel(ar->hw); ath10k_wmi_event_scan_started()
1123 complete(&ar->scan.started); ath10k_wmi_event_scan_started()
1128 static void ath10k_wmi_event_scan_start_failed(struct ath10k *ar) ath10k_wmi_event_scan_start_failed() argument
1130 lockdep_assert_held(&ar->data_lock); ath10k_wmi_event_scan_start_failed()
1132 switch (ar->scan.state) { ath10k_wmi_event_scan_start_failed()
1136 ath10k_warn(ar, "received scan start failed event in an invalid scan state: %s (%d)\n", ath10k_wmi_event_scan_start_failed()
1137 ath10k_scan_state_str(ar->scan.state), ath10k_wmi_event_scan_start_failed()
1138 ar->scan.state); ath10k_wmi_event_scan_start_failed()
1141 complete(&ar->scan.started); ath10k_wmi_event_scan_start_failed()
1142 __ath10k_scan_finish(ar); ath10k_wmi_event_scan_start_failed()
1147 static void ath10k_wmi_event_scan_completed(struct ath10k *ar) ath10k_wmi_event_scan_completed() argument
1149 lockdep_assert_held(&ar->data_lock); ath10k_wmi_event_scan_completed()
1151 switch (ar->scan.state) { ath10k_wmi_event_scan_completed()
1162 ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n", ath10k_wmi_event_scan_completed()
1163 ath10k_scan_state_str(ar->scan.state), ath10k_wmi_event_scan_completed()
1164 ar->scan.state); ath10k_wmi_event_scan_completed()
1168 __ath10k_scan_finish(ar); ath10k_wmi_event_scan_completed()
1173 static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar) ath10k_wmi_event_scan_bss_chan() argument
1175 lockdep_assert_held(&ar->data_lock); ath10k_wmi_event_scan_bss_chan()
1177 switch (ar->scan.state) { ath10k_wmi_event_scan_bss_chan()
1180 ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n", ath10k_wmi_event_scan_bss_chan()
1181 ath10k_scan_state_str(ar->scan.state), ath10k_wmi_event_scan_bss_chan()
1182 ar->scan.state); ath10k_wmi_event_scan_bss_chan()
1186 ar->scan_channel = NULL; ath10k_wmi_event_scan_bss_chan()
1191 static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq) ath10k_wmi_event_scan_foreign_chan() argument
1193 lockdep_assert_held(&ar->data_lock); ath10k_wmi_event_scan_foreign_chan()
1195 switch (ar->scan.state) { ath10k_wmi_event_scan_foreign_chan()
1198 ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n", ath10k_wmi_event_scan_foreign_chan()
1199 ath10k_scan_state_str(ar->scan.state), ath10k_wmi_event_scan_foreign_chan()
1200 ar->scan.state); ath10k_wmi_event_scan_foreign_chan()
1204 ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq); ath10k_wmi_event_scan_foreign_chan()
1206 if (ar->scan.is_roc && ar->scan.roc_freq == freq) ath10k_wmi_event_scan_foreign_chan()
1207 complete(&ar->scan.on_channel); ath10k_wmi_event_scan_foreign_chan()
1248 static int ath10k_wmi_op_pull_scan_ev(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_op_pull_scan_ev() argument
1267 int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_scan() argument
1278 ret = ath10k_wmi_pull_scan(ar, skb, &arg); ath10k_wmi_event_scan()
1280 ath10k_warn(ar, "failed to parse scan event: %d\n", ret); ath10k_wmi_event_scan()
1291 spin_lock_bh(&ar->data_lock); ath10k_wmi_event_scan()
1293 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_event_scan()
1297 ath10k_scan_state_str(ar->scan.state), ar->scan.state); ath10k_wmi_event_scan()
1301 ath10k_wmi_event_scan_started(ar); ath10k_wmi_event_scan()
1304 ath10k_wmi_event_scan_completed(ar); ath10k_wmi_event_scan()
1307 ath10k_wmi_event_scan_bss_chan(ar); ath10k_wmi_event_scan()
1310 ath10k_wmi_event_scan_foreign_chan(ar, freq); ath10k_wmi_event_scan()
1313 ath10k_warn(ar, "received scan start failure event\n"); ath10k_wmi_event_scan()
1314 ath10k_wmi_event_scan_start_failed(ar); ath10k_wmi_event_scan()
1322 spin_unlock_bh(&ar->data_lock); ath10k_wmi_event_scan()
1414 static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar, ath10k_wmi_handle_wep_reauth() argument
1434 spin_lock_bh(&ar->data_lock); ath10k_wmi_handle_wep_reauth()
1435 peer_key = ath10k_mac_is_peer_wep_key_set(ar, addr, keyidx); ath10k_wmi_handle_wep_reauth()
1436 spin_unlock_bh(&ar->data_lock); ath10k_wmi_handle_wep_reauth()
1439 ath10k_dbg(ar, ATH10K_DBG_MAC, ath10k_wmi_handle_wep_reauth()
1445 static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_op_pull_mgmt_rx_ev() argument
1454 if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) { ath10k_wmi_op_pull_mgmt_rx_ev()
1487 int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_mgmt_rx() argument
1501 ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg); ath10k_wmi_event_mgmt_rx()
1503 ath10k_warn(ar, "failed to parse mgmt rx event: %d\n", ret); ath10k_wmi_event_mgmt_rx()
1516 ath10k_dbg(ar, ATH10K_DBG_MGMT, ath10k_wmi_event_mgmt_rx()
1519 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) { ath10k_wmi_event_mgmt_rx()
1560 ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n"); ath10k_wmi_event_mgmt_rx()
1569 ath10k_wmi_handle_wep_reauth(ar, skb, status); ath10k_wmi_event_mgmt_rx()
1588 ath10k_dbg(ar, ATH10K_DBG_MGMT, ath10k_wmi_event_mgmt_rx()
1593 ath10k_dbg(ar, ATH10K_DBG_MGMT, ath10k_wmi_event_mgmt_rx()
1598 ieee80211_rx(ar->hw, skb); ath10k_wmi_event_mgmt_rx()
1602 static int freq_to_idx(struct ath10k *ar, int freq) freq_to_idx() argument
1608 sband = ar->hw->wiphy->bands[band]; freq_to_idx()
1621 static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_op_pull_ch_info_ev() argument
1640 void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_chan_info() argument
1647 ret = ath10k_wmi_pull_ch_info(ar, skb, &arg); ath10k_wmi_event_chan_info()
1649 ath10k_warn(ar, "failed to parse chan info event: %d\n", ret); ath10k_wmi_event_chan_info()
1660 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_event_chan_info()
1665 spin_lock_bh(&ar->data_lock); ath10k_wmi_event_chan_info()
1667 switch (ar->scan.state) { ath10k_wmi_event_chan_info()
1670 ath10k_warn(ar, "received chan info event without a scan request, ignoring\n"); ath10k_wmi_event_chan_info()
1677 idx = freq_to_idx(ar, freq); ath10k_wmi_event_chan_info()
1678 if (idx >= ARRAY_SIZE(ar->survey)) { ath10k_wmi_event_chan_info()
1679 ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n", ath10k_wmi_event_chan_info()
1689 cycle_count -= ar->survey_last_cycle_count; ath10k_wmi_event_chan_info()
1690 rx_clear_count -= ar->survey_last_rx_clear_count; ath10k_wmi_event_chan_info()
1692 survey = &ar->survey[idx]; ath10k_wmi_event_chan_info()
1701 ar->survey_last_rx_clear_count = rx_clear_count; ath10k_wmi_event_chan_info()
1702 ar->survey_last_cycle_count = cycle_count; ath10k_wmi_event_chan_info()
1705 spin_unlock_bh(&ar->data_lock); ath10k_wmi_event_chan_info()
1708 void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_echo() argument
1710 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n"); ath10k_wmi_event_echo()
1713 int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_debug_mesg() argument
1715 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n", ath10k_wmi_event_debug_mesg()
1718 trace_ath10k_wmi_dbglog(ar, skb->data, skb->len); ath10k_wmi_event_debug_mesg()
1800 static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar, ath10k_wmi_main_op_pull_fw_stats() argument
1855 static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar, ath10k_wmi_10x_op_pull_fw_stats() argument
1914 static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar, ath10k_wmi_10_2_op_pull_fw_stats() argument
1993 static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar, ath10k_wmi_10_2_4_op_pull_fw_stats() argument
2072 void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_update_stats() argument
2074 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n"); ath10k_wmi_event_update_stats()
2075 ath10k_debug_fw_stats_process(ar, skb); ath10k_wmi_event_update_stats()
2079 ath10k_wmi_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_op_pull_vdev_start_ev() argument
2096 void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_vdev_start_resp() argument
2101 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n"); ath10k_wmi_event_vdev_start_resp()
2103 ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg); ath10k_wmi_event_vdev_start_resp()
2105 ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret); ath10k_wmi_event_vdev_start_resp()
2112 complete(&ar->vdev_setup_done); ath10k_wmi_event_vdev_start_resp()
2115 void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_vdev_stopped() argument
2117 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n"); ath10k_wmi_event_vdev_stopped()
2118 complete(&ar->vdev_setup_done); ath10k_wmi_event_vdev_stopped()
2122 ath10k_wmi_op_pull_peer_kick_ev(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_op_pull_peer_kick_ev() argument
2136 void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_peer_sta_kickout() argument
2142 ret = ath10k_wmi_pull_peer_kick(ar, skb, &arg); ath10k_wmi_event_peer_sta_kickout()
2144 ath10k_warn(ar, "failed to parse peer kickout event: %d\n", ath10k_wmi_event_peer_sta_kickout()
2149 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n", ath10k_wmi_event_peer_sta_kickout()
2154 sta = ieee80211_find_sta_by_ifaddr(ar->hw, arg.mac_addr, NULL); ath10k_wmi_event_peer_sta_kickout()
2156 ath10k_warn(ar, "Spurious quick kickout for STA %pM\n", ath10k_wmi_event_peer_sta_kickout()
2191 static void ath10k_wmi_update_tim(struct ath10k *ar, ath10k_wmi_update_tim() argument
2235 ath10k_warn(ar, "no tim ie found\n"); ath10k_wmi_update_tim()
2255 ath10k_warn(ar, "tim expansion failed\n"); ath10k_wmi_update_tim()
2260 ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len); ath10k_wmi_update_tim()
2274 ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n", ath10k_wmi_update_tim()
2340 static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif, ath10k_wmi_update_noa() argument
2350 ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed); ath10k_wmi_update_noa()
2362 spin_lock_bh(&ar->data_lock); ath10k_wmi_update_noa()
2365 spin_unlock_bh(&ar->data_lock); ath10k_wmi_update_noa()
2377 spin_lock_bh(&ar->data_lock); ath10k_wmi_update_noa()
2380 spin_unlock_bh(&ar->data_lock); ath10k_wmi_update_noa()
2384 static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_op_pull_swba_ev() argument
2415 void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_host_swba() argument
2427 ret = ath10k_wmi_pull_swba(ar, skb, &arg); ath10k_wmi_event_host_swba()
2429 ath10k_warn(ar, "failed to parse swba event: %d\n", ret); ath10k_wmi_event_host_swba()
2435 ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n", ath10k_wmi_event_host_swba()
2445 ath10k_warn(ar, "swba has corrupted vdev map\n"); ath10k_wmi_event_host_swba()
2452 ath10k_dbg(ar, ATH10K_DBG_MGMT, ath10k_wmi_event_host_swba()
2464 arvif = ath10k_get_arvif(ar, vdev_id); ath10k_wmi_event_host_swba()
2466 ath10k_warn(ar, "no vif for vdev_id %d found\n", ath10k_wmi_event_host_swba()
2482 bcn = ieee80211_beacon_get(ar->hw, arvif->vif); ath10k_wmi_event_host_swba()
2484 ath10k_warn(ar, "could not get mac80211 beacon\n"); ath10k_wmi_event_host_swba()
2489 ath10k_wmi_update_tim(ar, arvif, bcn, tim_info); ath10k_wmi_event_host_swba()
2490 ath10k_wmi_update_noa(ar, arvif, bcn, noa_info); ath10k_wmi_event_host_swba()
2492 spin_lock_bh(&ar->data_lock); ath10k_wmi_event_host_swba()
2499 ath10k_warn(ar, "SWBA overrun on vdev %d, skipped old beacon\n", ath10k_wmi_event_host_swba()
2503 ath10k_warn(ar, "SWBA overrun on vdev %d, skipped new beacon\n", ath10k_wmi_event_host_swba()
2513 paddr = dma_map_single(arvif->ar->dev, bcn->data, ath10k_wmi_event_host_swba()
2515 ret = dma_mapping_error(arvif->ar->dev, paddr); ath10k_wmi_event_host_swba()
2517 ath10k_warn(ar, "failed to map beacon: %d\n", ath10k_wmi_event_host_swba()
2527 ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n", ath10k_wmi_event_host_swba()
2538 trace_ath10k_tx_hdr(ar, bcn->data, bcn->len); ath10k_wmi_event_host_swba()
2539 trace_ath10k_tx_payload(ar, bcn->data, bcn->len); ath10k_wmi_event_host_swba()
2542 spin_unlock_bh(&ar->data_lock); ath10k_wmi_event_host_swba()
2545 ath10k_wmi_tx_beacons_nowait(ar); ath10k_wmi_event_host_swba()
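Each vif has a single pending-beacon slot in the SWBA path; when a new beacon is generated while the previous one is still in flight, the handler logs an "SWBA overrun" and one of the two is dropped. A loose sketch of that single-slot hand-off (beacon type and drop policy are simplified; the driver frees the old DMA-mapped skb):

#include <stdio.h>

struct beacon { int seq; };

/* Replace the per-vif pending slot, reporting an overrun if it
 * was still occupied. Returns the beacon that must be freed. */
static struct beacon *beacon_slot_replace(struct beacon **slot,
                                          struct beacon *fresh)
{
    struct beacon *dropped = *slot;

    if (dropped)
        fprintf(stderr, "SWBA overrun, dropping beacon %d\n",
                dropped->seq);
    *slot = fresh;
    return dropped;
}

int main(void)
{
    struct beacon *slot = NULL;
    struct beacon a = { .seq = 1 }, b = { .seq = 2 };

    beacon_slot_replace(&slot, &a);   /* slot was empty, no overrun */
    beacon_slot_replace(&slot, &b);   /* overrun: beacon 1 dropped  */
    printf("pending beacon %d\n", slot->seq);
    return 0;
}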
2548 void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_tbttoffset_update() argument
2550 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n"); ath10k_wmi_event_tbttoffset_update()
2553 static void ath10k_dfs_radar_report(struct ath10k *ar, ath10k_dfs_radar_report() argument
2566 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, ath10k_dfs_radar_report()
2572 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, ath10k_dfs_radar_report()
2579 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, ath10k_dfs_radar_report()
2584 if (!ar->dfs_detector) ath10k_dfs_radar_report()
2602 pe.freq = ar->hw->conf.chandef.chan->center_freq; ath10k_dfs_radar_report()
2606 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, ath10k_dfs_radar_report()
2610 ATH10K_DFS_STAT_INC(ar, pulses_detected); ath10k_dfs_radar_report()
2612 if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) { ath10k_dfs_radar_report()
2613 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, ath10k_dfs_radar_report()
2618 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n"); ath10k_dfs_radar_report()
2619 ATH10K_DFS_STAT_INC(ar, radar_detected); ath10k_dfs_radar_report()
2623 if (ar->dfs_block_radar_events) { ath10k_dfs_radar_report()
2624 ath10k_info(ar, "DFS Radar detected, but ignored as requested\n"); ath10k_dfs_radar_report()
2628 ieee80211_radar_detected(ar->hw); ath10k_dfs_radar_report()
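ath10k_dfs_radar_report() is a gatekeeper: each phyerr becomes one pulse event, pulses feed the pattern detector, and only a completed pattern raises a radar notification, which the dfs_block_radar_events debug knob can still veto. A toy model of that flow; the every-third-pulse rule is invented, the real driver defers to the shared DFS pattern detector:

#include <stdbool.h>
#include <stdio.h>

struct pulse_event { unsigned long ts; int freq, width, rssi; };

struct detector {
    int pulses;
    bool (*add_pulse)(struct detector *d, const struct pulse_event *pe);
};

static bool toy_add_pulse(struct detector *d, const struct pulse_event *pe)
{
    (void)pe;
    return ++d->pulses % 3 == 0;   /* invented "pattern" rule */
}

static void report_pulse(struct detector *d, const struct pulse_event *pe,
                         bool block_radar_events)
{
    if (!d->add_pulse(d, pe))
        return;                    /* no complete pattern yet */
    if (block_radar_events) {
        puts("radar detected, but ignored as requested");
        return;                    /* testing/debug override */
    }
    puts("radar detected, notifying the stack");
}

int main(void)
{
    struct detector d = { 0, toy_add_pulse };
    struct pulse_event pe = { 0, 5260, 1, 20 };

    for (int i = 0; i < 6; i++)
        report_pulse(&d, &pe, false);
    return 0;
}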
2631 static int ath10k_dfs_fft_report(struct ath10k *ar, ath10k_dfs_fft_report() argument
2643 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, ath10k_dfs_fft_report()
2649 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, ath10k_dfs_fft_report()
2661 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n"); ath10k_dfs_fft_report()
2662 ATH10K_DFS_STAT_INC(ar, pulses_discarded); ath10k_dfs_fft_report()
2669 void ath10k_wmi_event_dfs(struct ath10k *ar, ath10k_wmi_event_dfs() argument
2680 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, ath10k_wmi_event_dfs()
2689 ATH10K_DFS_STAT_INC(ar, pulses_total); ath10k_wmi_event_dfs()
2693 ath10k_warn(ar, "too short buf for tlv header (%d)\n", ath10k_wmi_event_dfs()
2701 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, ath10k_wmi_event_dfs()
2708 ath10k_warn(ar, "too short radar pulse summary (%d)\n", ath10k_wmi_event_dfs()
2714 ath10k_dfs_radar_report(ar, phyerr, rr, tsf); ath10k_wmi_event_dfs()
2718 ath10k_warn(ar, "too short fft report (%d)\n", ath10k_wmi_event_dfs()
2724 res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf); ath10k_wmi_event_dfs()
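The DFS event payload is a TLV stream, and the loop above checks lengths twice per record: once that a full header still fits in the remaining buffer, and again that the advertised payload fits, before dispatching on the tag. A self-contained sketch of that defensive walk (header layout simplified; assumes a little-endian host, matching the firmware byte order):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_hdr { uint16_t tag; uint16_t len; };

/* Walk buf, invoking cb for each well-formed TLV.
 * Returns 0, or -1 on a truncated header or payload. */
static int tlv_walk(const uint8_t *buf, size_t buf_len,
                    void (*cb)(uint16_t tag, const uint8_t *p, uint16_t len))
{
    size_t i = 0;

    while (i < buf_len) {
        struct tlv_hdr hdr;

        if (buf_len - i < sizeof(hdr))
            return -1;             /* too short for a tlv header */
        memcpy(&hdr, buf + i, sizeof(hdr));
        i += sizeof(hdr);

        if (buf_len - i < hdr.len)
            return -1;             /* payload overruns the buffer */
        cb(hdr.tag, buf + i, hdr.len);
        i += hdr.len;
    }
    return 0;
}

static void dump(uint16_t tag, const uint8_t *p, uint16_t len)
{
    (void)p;
    printf("tlv tag %u len %u\n", tag, len);
}

int main(void)
{
    /* tag=1 len=2 payload "ab", then tag=2 len=0 */
    const uint8_t buf[] = { 1, 0, 2, 0, 'a', 'b', 2, 0, 0, 0 };

    return tlv_walk(buf, sizeof(buf), dump) ? 1 : 0;
}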
2734 void ath10k_wmi_event_spectral_scan(struct ath10k *ar, ath10k_wmi_event_spectral_scan() argument
2748 ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n", ath10k_wmi_event_spectral_scan()
2758 ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n", ath10k_wmi_event_spectral_scan()
2766 ath10k_warn(ar, "failed to parse fft report at byte %d\n", ath10k_wmi_event_spectral_scan()
2773 res = ath10k_spectral_process_fft(ar, phyerr, ath10k_wmi_event_spectral_scan()
2777 ath10k_warn(ar, "failed to process fft report: %d\n", ath10k_wmi_event_spectral_scan()
2788 static int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_op_pull_phyerr_ev() argument
2805 void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_phyerr() argument
2813 ATH10K_DFS_STAT_INC(ar, phy_errors); ath10k_wmi_event_phyerr()
2815 ret = ath10k_wmi_pull_phyerr(ar, skb, &arg); ath10k_wmi_event_phyerr()
2817 ath10k_warn(ar, "failed to parse phyerr event: %d\n", ret); ath10k_wmi_event_phyerr()
2830 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_event_phyerr()
2838 ath10k_warn(ar, "single event (%d) wrong head len\n", ath10k_wmi_event_phyerr()
2849 ath10k_warn(ar, "single event (%d) wrong buf len\n", i); ath10k_wmi_event_phyerr()
2857 ath10k_wmi_event_dfs(ar, phyerr, tsf); ath10k_wmi_event_phyerr()
2860 ath10k_wmi_event_spectral_scan(ar, phyerr, tsf); ath10k_wmi_event_phyerr()
2863 ath10k_wmi_event_dfs(ar, phyerr, tsf); ath10k_wmi_event_phyerr()
2864 ath10k_wmi_event_spectral_scan(ar, phyerr, tsf); ath10k_wmi_event_phyerr()
2874 void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_roam() argument
2876 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n"); ath10k_wmi_event_roam()
2879 void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_profile_match() argument
2881 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n"); ath10k_wmi_event_profile_match()
2884 void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_debug_print() argument
2905 ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len); ath10k_wmi_event_debug_print()
2914 ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf); ath10k_wmi_event_debug_print()
2917 void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_pdev_qvit() argument
2919 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n"); ath10k_wmi_event_pdev_qvit()
2922 void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_wlan_profile_data() argument
2924 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n"); ath10k_wmi_event_wlan_profile_data()
2927 void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar, ath10k_wmi_event_rtt_measurement_report() argument
2930 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n"); ath10k_wmi_event_rtt_measurement_report()
2933 void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar, ath10k_wmi_event_tsf_measurement_report() argument
2936 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n"); ath10k_wmi_event_tsf_measurement_report()
2939 void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_rtt_error_report() argument
2941 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n"); ath10k_wmi_event_rtt_error_report()
2944 void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_wow_wakeup_host() argument
2946 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n"); ath10k_wmi_event_wow_wakeup_host()
2949 void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_dcs_interference() argument
2951 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n"); ath10k_wmi_event_dcs_interference()
2954 void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_pdev_tpc_config() argument
2956 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n"); ath10k_wmi_event_pdev_tpc_config()
2959 void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_pdev_ftm_intg() argument
2961 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n"); ath10k_wmi_event_pdev_ftm_intg()
2964 void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_gtk_offload_status() argument
2966 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n"); ath10k_wmi_event_gtk_offload_status()
2969 void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_gtk_rekey_fail() argument
2971 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n"); ath10k_wmi_event_gtk_rekey_fail()
2974 void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_delba_complete() argument
2976 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n"); ath10k_wmi_event_delba_complete()
2979 void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_addba_complete() argument
2981 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n"); ath10k_wmi_event_addba_complete()
2984 void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar, ath10k_wmi_event_vdev_install_key_complete() argument
2987 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n"); ath10k_wmi_event_vdev_install_key_complete()
2990 void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_inst_rssi_stats() argument
2992 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n"); ath10k_wmi_event_inst_rssi_stats()
2995 void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_vdev_standby_req() argument
2997 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n"); ath10k_wmi_event_vdev_standby_req()
3000 void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_vdev_resume_req() argument
3002 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n"); ath10k_wmi_event_vdev_resume_req()
3005 static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id, ath10k_wmi_alloc_host_mem() argument
3010 int idx = ar->wmi.num_mem_chunks; ath10k_wmi_alloc_host_mem()
3017 ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev, ath10k_wmi_alloc_host_mem()
3021 if (!ar->wmi.mem_chunks[idx].vaddr) { ath10k_wmi_alloc_host_mem()
3022 ath10k_warn(ar, "failed to allocate memory chunk\n"); ath10k_wmi_alloc_host_mem()
3026 memset(ar->wmi.mem_chunks[idx].vaddr, 0, pool_size); ath10k_wmi_alloc_host_mem()
3028 ar->wmi.mem_chunks[idx].paddr = paddr; ath10k_wmi_alloc_host_mem()
3029 ar->wmi.mem_chunks[idx].len = pool_size; ath10k_wmi_alloc_host_mem()
3030 ar->wmi.mem_chunks[idx].req_id = req_id; ath10k_wmi_alloc_host_mem()
3031 ar->wmi.num_mem_chunks++; ath10k_wmi_alloc_host_mem()
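ath10k_wmi_alloc_host_mem() appends a {vaddr, paddr, len, req_id} record to a fixed per-device array each time the firmware requests host memory; ath10k_wmi_detach() at the end of this file walks the same array to free everything. A sketch of that bookkeeping with calloc() standing in for dma_alloc_coherent() (the chunk limit is chosen arbitrarily here):

#include <stdint.h>
#include <stdlib.h>

#define MAX_MEM_CHUNKS 16

struct mem_chunk { void *vaddr; size_t len; uint32_t req_id; };
struct wmi_state { struct mem_chunk chunks[MAX_MEM_CHUNKS]; int num; };

static int alloc_host_mem(struct wmi_state *w, uint32_t req_id,
                          size_t num_units, size_t unit_len)
{
    size_t pool_size = num_units * unit_len;
    struct mem_chunk *c;

    if (w->num >= MAX_MEM_CHUNKS)
        return -1;
    c = &w->chunks[w->num];
    c->vaddr = calloc(1, pool_size);   /* zeroed, like the driver */
    if (!c->vaddr)
        return -1;
    c->len = pool_size;
    c->req_id = req_id;
    w->num++;
    return 0;
}

static void wmi_detach(struct wmi_state *w)
{
    for (int i = 0; i < w->num; i++)
        free(w->chunks[i].vaddr);
    w->num = 0;
}

int main(void)
{
    struct wmi_state w = { .num = 0 };

    alloc_host_mem(&w, 1, 4, 256);
    alloc_host_mem(&w, 2, 8, 128);
    wmi_detach(&w);
    return 0;
}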
3037 ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_main_op_pull_svc_rdy_ev() argument
3074 ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_10x_op_pull_svc_rdy_ev() argument
3109 void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_service_ready() argument
3115 ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg); ath10k_wmi_event_service_ready()
3117 ath10k_warn(ar, "failed to parse service ready: %d\n", ret); ath10k_wmi_event_service_ready()
3121 memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map)); ath10k_wmi_event_service_ready()
3122 ath10k_wmi_map_svc(ar, arg.service_map, ar->wmi.svc_map, ath10k_wmi_event_service_ready()
3125 ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power); ath10k_wmi_event_service_ready()
3126 ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power); ath10k_wmi_event_service_ready()
3127 ar->ht_cap_info = __le32_to_cpu(arg.ht_cap); ath10k_wmi_event_service_ready()
3128 ar->vht_cap_info = __le32_to_cpu(arg.vht_cap); ath10k_wmi_event_service_ready()
3129 ar->fw_version_major = ath10k_wmi_event_service_ready()
3131 ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff); ath10k_wmi_event_service_ready()
3132 ar->fw_version_release = ath10k_wmi_event_service_ready()
3134 ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff); ath10k_wmi_event_service_ready()
3135 ar->phy_capability = __le32_to_cpu(arg.phy_capab); ath10k_wmi_event_service_ready()
3136 ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains); ath10k_wmi_event_service_ready()
3137 ar->ath_common.regulatory.current_rd = __le32_to_cpu(arg.eeprom_rd); ath10k_wmi_event_service_ready()
3139 ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ", ath10k_wmi_event_service_ready()
3143 if (ar->fw_api == 1 && ar->fw_version_build > 636) ath10k_wmi_event_service_ready()
3144 set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features); ath10k_wmi_event_service_ready()
3146 if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) { ath10k_wmi_event_service_ready()
3147 ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n", ath10k_wmi_event_service_ready()
3148 ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM); ath10k_wmi_event_service_ready()
3149 ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM; ath10k_wmi_event_service_ready()
3152 ar->supp_tx_chainmask = (1 << ar->num_rf_chains) - 1; ath10k_wmi_event_service_ready()
3153 ar->supp_rx_chainmask = (1 << ar->num_rf_chains) - 1; ath10k_wmi_event_service_ready()
3155 if (strlen(ar->hw->wiphy->fw_version) == 0) { ath10k_wmi_event_service_ready()
3156 snprintf(ar->hw->wiphy->fw_version, ath10k_wmi_event_service_ready()
3157 sizeof(ar->hw->wiphy->fw_version), ath10k_wmi_event_service_ready()
3159 ar->fw_version_major, ath10k_wmi_event_service_ready()
3160 ar->fw_version_minor, ath10k_wmi_event_service_ready()
3161 ar->fw_version_release, ath10k_wmi_event_service_ready()
3162 ar->fw_version_build); ath10k_wmi_event_service_ready()
3167 ath10k_warn(ar, "requested number of memory chunks (%d) exceeds the limit\n", ath10k_wmi_event_service_ready()
3187 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_event_service_ready()
3195 ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units, ath10k_wmi_event_service_ready()
3201 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_event_service_ready()
3215 complete(&ar->wmi.service_ready); ath10k_wmi_event_service_ready()
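Note how the service ready handler unpacks four firmware version fields from two 32-bit words (major in the top byte of sw_ver0, minor in its low 24 bits, release and build splitting sw_ver1) and derives the tx/rx chainmasks as (1 << num_rf_chains) - 1. A standalone check of that bit slicing; the major and release shifts are inferred from the masks visible in the listing, and the example words are made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t sw_ver0 = 0x02000103;   /* invented example values */
    uint32_t sw_ver1 = 0x0001027d;
    int num_rf_chains = 3;

    unsigned major   = (sw_ver0 & 0xff000000) >> 24;
    unsigned minor   =  sw_ver0 & 0x00ffffff;
    unsigned release = (sw_ver1 & 0xffff0000) >> 16;
    unsigned build   =  sw_ver1 & 0x0000ffff;
    unsigned chainmask = (1u << num_rf_chains) - 1;

    printf("fw %u.%u.%u.%u chainmask 0x%x\n",
           major, minor, release, build, chainmask);
    /* prints: fw 2.259.1.637 chainmask 0x7 */
    return 0;
}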
3218 static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_op_pull_rdy_ev() argument
3235 int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_ready() argument
3240 ret = ath10k_wmi_pull_rdy(ar, skb, &arg); ath10k_wmi_event_ready()
3242 ath10k_warn(ar, "failed to parse ready event: %d\n", ret); ath10k_wmi_event_ready()
3246 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_event_ready()
3253 ether_addr_copy(ar->mac_addr, arg.mac_addr); ath10k_wmi_event_ready()
3254 complete(&ar->wmi.unified_ready); ath10k_wmi_event_ready()
3258 static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_temperature() argument
3266 ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature)); ath10k_wmi_event_temperature()
3270 static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_op_rx() argument
3281 trace_ath10k_wmi_event(ar, id, skb->data, skb->len); ath10k_wmi_op_rx()
3285 ath10k_wmi_event_mgmt_rx(ar, skb); ath10k_wmi_op_rx()
3289 ath10k_wmi_event_scan(ar, skb); ath10k_wmi_op_rx()
3292 ath10k_wmi_event_chan_info(ar, skb); ath10k_wmi_op_rx()
3295 ath10k_wmi_event_echo(ar, skb); ath10k_wmi_op_rx()
3298 ath10k_wmi_event_debug_mesg(ar, skb); ath10k_wmi_op_rx()
3301 ath10k_wmi_event_update_stats(ar, skb); ath10k_wmi_op_rx()
3304 ath10k_wmi_event_vdev_start_resp(ar, skb); ath10k_wmi_op_rx()
3307 ath10k_wmi_event_vdev_stopped(ar, skb); ath10k_wmi_op_rx()
3310 ath10k_wmi_event_peer_sta_kickout(ar, skb); ath10k_wmi_op_rx()
3313 ath10k_wmi_event_host_swba(ar, skb); ath10k_wmi_op_rx()
3316 ath10k_wmi_event_tbttoffset_update(ar, skb); ath10k_wmi_op_rx()
3319 ath10k_wmi_event_phyerr(ar, skb); ath10k_wmi_op_rx()
3322 ath10k_wmi_event_roam(ar, skb); ath10k_wmi_op_rx()
3325 ath10k_wmi_event_profile_match(ar, skb); ath10k_wmi_op_rx()
3328 ath10k_wmi_event_debug_print(ar, skb); ath10k_wmi_op_rx()
3331 ath10k_wmi_event_pdev_qvit(ar, skb); ath10k_wmi_op_rx()
3334 ath10k_wmi_event_wlan_profile_data(ar, skb); ath10k_wmi_op_rx()
3337 ath10k_wmi_event_rtt_measurement_report(ar, skb); ath10k_wmi_op_rx()
3340 ath10k_wmi_event_tsf_measurement_report(ar, skb); ath10k_wmi_op_rx()
3343 ath10k_wmi_event_rtt_error_report(ar, skb); ath10k_wmi_op_rx()
3346 ath10k_wmi_event_wow_wakeup_host(ar, skb); ath10k_wmi_op_rx()
3349 ath10k_wmi_event_dcs_interference(ar, skb); ath10k_wmi_op_rx()
3352 ath10k_wmi_event_pdev_tpc_config(ar, skb); ath10k_wmi_op_rx()
3355 ath10k_wmi_event_pdev_ftm_intg(ar, skb); ath10k_wmi_op_rx()
3358 ath10k_wmi_event_gtk_offload_status(ar, skb); ath10k_wmi_op_rx()
3361 ath10k_wmi_event_gtk_rekey_fail(ar, skb); ath10k_wmi_op_rx()
3364 ath10k_wmi_event_delba_complete(ar, skb); ath10k_wmi_op_rx()
3367 ath10k_wmi_event_addba_complete(ar, skb); ath10k_wmi_op_rx()
3370 ath10k_wmi_event_vdev_install_key_complete(ar, skb); ath10k_wmi_op_rx()
3373 ath10k_wmi_event_service_ready(ar, skb); ath10k_wmi_op_rx()
3376 ath10k_wmi_event_ready(ar, skb); ath10k_wmi_op_rx()
3379 ath10k_warn(ar, "Unknown eventid: %d\n", id); ath10k_wmi_op_rx()
3386 static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_10_1_op_rx() argument
3398 trace_ath10k_wmi_event(ar, id, skb->data, skb->len); ath10k_wmi_10_1_op_rx()
3400 consumed = ath10k_tm_event_wmi(ar, id, skb); ath10k_wmi_10_1_op_rx()
3407 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_10_1_op_rx()
3414 ath10k_wmi_event_mgmt_rx(ar, skb); ath10k_wmi_10_1_op_rx()
3418 ath10k_wmi_event_scan(ar, skb); ath10k_wmi_10_1_op_rx()
3421 ath10k_wmi_event_chan_info(ar, skb); ath10k_wmi_10_1_op_rx()
3424 ath10k_wmi_event_echo(ar, skb); ath10k_wmi_10_1_op_rx()
3427 ath10k_wmi_event_debug_mesg(ar, skb); ath10k_wmi_10_1_op_rx()
3430 ath10k_wmi_event_update_stats(ar, skb); ath10k_wmi_10_1_op_rx()
3433 ath10k_wmi_event_vdev_start_resp(ar, skb); ath10k_wmi_10_1_op_rx()
3436 ath10k_wmi_event_vdev_stopped(ar, skb); ath10k_wmi_10_1_op_rx()
3439 ath10k_wmi_event_peer_sta_kickout(ar, skb); ath10k_wmi_10_1_op_rx()
3442 ath10k_wmi_event_host_swba(ar, skb); ath10k_wmi_10_1_op_rx()
3445 ath10k_wmi_event_tbttoffset_update(ar, skb); ath10k_wmi_10_1_op_rx()
3448 ath10k_wmi_event_phyerr(ar, skb); ath10k_wmi_10_1_op_rx()
3451 ath10k_wmi_event_roam(ar, skb); ath10k_wmi_10_1_op_rx()
3454 ath10k_wmi_event_profile_match(ar, skb); ath10k_wmi_10_1_op_rx()
3457 ath10k_wmi_event_debug_print(ar, skb); ath10k_wmi_10_1_op_rx()
3460 ath10k_wmi_event_pdev_qvit(ar, skb); ath10k_wmi_10_1_op_rx()
3463 ath10k_wmi_event_wlan_profile_data(ar, skb); ath10k_wmi_10_1_op_rx()
3466 ath10k_wmi_event_rtt_measurement_report(ar, skb); ath10k_wmi_10_1_op_rx()
3469 ath10k_wmi_event_tsf_measurement_report(ar, skb); ath10k_wmi_10_1_op_rx()
3472 ath10k_wmi_event_rtt_error_report(ar, skb); ath10k_wmi_10_1_op_rx()
3475 ath10k_wmi_event_wow_wakeup_host(ar, skb); ath10k_wmi_10_1_op_rx()
3478 ath10k_wmi_event_dcs_interference(ar, skb); ath10k_wmi_10_1_op_rx()
3481 ath10k_wmi_event_pdev_tpc_config(ar, skb); ath10k_wmi_10_1_op_rx()
3484 ath10k_wmi_event_inst_rssi_stats(ar, skb); ath10k_wmi_10_1_op_rx()
3487 ath10k_wmi_event_vdev_standby_req(ar, skb); ath10k_wmi_10_1_op_rx()
3490 ath10k_wmi_event_vdev_resume_req(ar, skb); ath10k_wmi_10_1_op_rx()
3493 ath10k_wmi_event_service_ready(ar, skb); ath10k_wmi_10_1_op_rx()
3496 ath10k_wmi_event_ready(ar, skb); ath10k_wmi_10_1_op_rx()
3502 ath10k_warn(ar, "Unknown eventid: %d\n", id); ath10k_wmi_10_1_op_rx()
3510 static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_10_2_op_rx() argument
3521 trace_ath10k_wmi_event(ar, id, skb->data, skb->len); ath10k_wmi_10_2_op_rx()
3525 ath10k_wmi_event_mgmt_rx(ar, skb); ath10k_wmi_10_2_op_rx()
3529 ath10k_wmi_event_scan(ar, skb); ath10k_wmi_10_2_op_rx()
3532 ath10k_wmi_event_chan_info(ar, skb); ath10k_wmi_10_2_op_rx()
3535 ath10k_wmi_event_echo(ar, skb); ath10k_wmi_10_2_op_rx()
3538 ath10k_wmi_event_debug_mesg(ar, skb); ath10k_wmi_10_2_op_rx()
3541 ath10k_wmi_event_update_stats(ar, skb); ath10k_wmi_10_2_op_rx()
3544 ath10k_wmi_event_vdev_start_resp(ar, skb); ath10k_wmi_10_2_op_rx()
3547 ath10k_wmi_event_vdev_stopped(ar, skb); ath10k_wmi_10_2_op_rx()
3550 ath10k_wmi_event_peer_sta_kickout(ar, skb); ath10k_wmi_10_2_op_rx()
3553 ath10k_wmi_event_host_swba(ar, skb); ath10k_wmi_10_2_op_rx()
3556 ath10k_wmi_event_tbttoffset_update(ar, skb); ath10k_wmi_10_2_op_rx()
3559 ath10k_wmi_event_phyerr(ar, skb); ath10k_wmi_10_2_op_rx()
3562 ath10k_wmi_event_roam(ar, skb); ath10k_wmi_10_2_op_rx()
3565 ath10k_wmi_event_profile_match(ar, skb); ath10k_wmi_10_2_op_rx()
3568 ath10k_wmi_event_debug_print(ar, skb); ath10k_wmi_10_2_op_rx()
3571 ath10k_wmi_event_pdev_qvit(ar, skb); ath10k_wmi_10_2_op_rx()
3574 ath10k_wmi_event_wlan_profile_data(ar, skb); ath10k_wmi_10_2_op_rx()
3577 ath10k_wmi_event_rtt_measurement_report(ar, skb); ath10k_wmi_10_2_op_rx()
3580 ath10k_wmi_event_tsf_measurement_report(ar, skb); ath10k_wmi_10_2_op_rx()
3583 ath10k_wmi_event_rtt_error_report(ar, skb); ath10k_wmi_10_2_op_rx()
3586 ath10k_wmi_event_wow_wakeup_host(ar, skb); ath10k_wmi_10_2_op_rx()
3589 ath10k_wmi_event_dcs_interference(ar, skb); ath10k_wmi_10_2_op_rx()
3592 ath10k_wmi_event_pdev_tpc_config(ar, skb); ath10k_wmi_10_2_op_rx()
3595 ath10k_wmi_event_inst_rssi_stats(ar, skb); ath10k_wmi_10_2_op_rx()
3598 ath10k_wmi_event_vdev_standby_req(ar, skb); ath10k_wmi_10_2_op_rx()
3601 ath10k_wmi_event_vdev_resume_req(ar, skb); ath10k_wmi_10_2_op_rx()
3604 ath10k_wmi_event_service_ready(ar, skb); ath10k_wmi_10_2_op_rx()
3607 ath10k_wmi_event_ready(ar, skb); ath10k_wmi_10_2_op_rx()
3610 ath10k_wmi_event_temperature(ar, skb); ath10k_wmi_10_2_op_rx()
3619 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_10_2_op_rx()
3623 ath10k_warn(ar, "Unknown eventid: %d\n", id); ath10k_wmi_10_2_op_rx()
3630 static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_process_rx() argument
3634 ret = ath10k_wmi_rx(ar, skb); ath10k_wmi_process_rx()
3636 ath10k_warn(ar, "failed to process wmi rx: %d\n", ret); ath10k_wmi_process_rx()
3639 int ath10k_wmi_connect(struct ath10k *ar) ath10k_wmi_connect() argument
3656 status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp); ath10k_wmi_connect()
3658 ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n", ath10k_wmi_connect()
3663 ar->wmi.eid = conn_resp.eid; ath10k_wmi_connect()
3668 ath10k_wmi_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g, ath10k_wmi_op_gen_pdev_set_rd() argument
3675 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_pdev_set_rd()
3686 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_op_gen_pdev_set_rd()
3693 ath10k_wmi_10x_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16 ath10k_wmi_10x_op_gen_pdev_set_rd() argument
3700 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_10x_op_gen_pdev_set_rd()
3712 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_10x_op_gen_pdev_set_rd()
3719 ath10k_wmi_op_gen_pdev_suspend(struct ath10k *ar, u32 suspend_opt) ath10k_wmi_op_gen_pdev_suspend() argument
3724 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_pdev_suspend()
3735 ath10k_wmi_op_gen_pdev_resume(struct ath10k *ar) ath10k_wmi_op_gen_pdev_resume() argument
3739 skb = ath10k_wmi_alloc_skb(ar, 0); ath10k_wmi_op_gen_pdev_resume()
3747 ath10k_wmi_op_gen_pdev_set_param(struct ath10k *ar, u32 id, u32 value) ath10k_wmi_op_gen_pdev_set_param() argument
3753 ath10k_warn(ar, "pdev param %d not supported by firmware\n", ath10k_wmi_op_gen_pdev_set_param()
3758 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_pdev_set_param()
3766 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n", ath10k_wmi_op_gen_pdev_set_param()
3771 void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar, ath10k_wmi_put_host_mem_chunks() argument
3777 chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks); ath10k_wmi_put_host_mem_chunks()
3779 for (i = 0; i < ar->wmi.num_mem_chunks; i++) { ath10k_wmi_put_host_mem_chunks()
3781 chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); ath10k_wmi_put_host_mem_chunks()
3782 chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len); ath10k_wmi_put_host_mem_chunks()
3783 chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); ath10k_wmi_put_host_mem_chunks()
3785 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_put_host_mem_chunks()
3788 ar->wmi.mem_chunks[i].len, ath10k_wmi_put_host_mem_chunks()
3789 (unsigned long long)ar->wmi.mem_chunks[i].paddr); ath10k_wmi_put_host_mem_chunks()
3793 static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar) ath10k_wmi_op_gen_init() argument
3852 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks); ath10k_wmi_op_gen_init()
3854 buf = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_op_gen_init()
3861 ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks); ath10k_wmi_op_gen_init()
3863 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n"); ath10k_wmi_op_gen_init()
3867 static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar) ath10k_wmi_10_1_op_gen_init() argument
3918 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks); ath10k_wmi_10_1_op_gen_init()
3920 buf = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_10_1_op_gen_init()
3927 ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks); ath10k_wmi_10_1_op_gen_init()
3929 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n"); ath10k_wmi_10_1_op_gen_init()
3933 static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar) ath10k_wmi_10_2_op_gen_init() argument
3984 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks); ath10k_wmi_10_2_op_gen_init()
3986 buf = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_10_2_op_gen_init()
3996 ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks); ath10k_wmi_10_2_op_gen_init()
3998 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n"); ath10k_wmi_10_2_op_gen_init()
4149 ath10k_wmi_op_gen_start_scan(struct ath10k *ar, ath10k_wmi_op_gen_start_scan() argument
4162 skb = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_op_gen_start_scan()
4173 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n"); ath10k_wmi_op_gen_start_scan()
4178 ath10k_wmi_10x_op_gen_start_scan(struct ath10k *ar, ath10k_wmi_10x_op_gen_start_scan() argument
4191 skb = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_10x_op_gen_start_scan()
4200 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n"); ath10k_wmi_10x_op_gen_start_scan()
4204 void ath10k_wmi_start_scan_init(struct ath10k *ar, ath10k_wmi_start_scan_init() argument
4231 ath10k_wmi_op_gen_stop_scan(struct ath10k *ar, ath10k_wmi_op_gen_stop_scan() argument
4244 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_stop_scan()
4260 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_op_gen_stop_scan()
4267 ath10k_wmi_op_gen_vdev_create(struct ath10k *ar, u32 vdev_id, ath10k_wmi_op_gen_vdev_create() argument
4275 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_vdev_create()
4285 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_op_gen_vdev_create()
4292 ath10k_wmi_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id) ath10k_wmi_op_gen_vdev_delete() argument
4297 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_vdev_delete()
4304 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_op_gen_vdev_delete()
4310 ath10k_wmi_op_gen_vdev_start(struct ath10k *ar, ath10k_wmi_op_gen_vdev_start() argument
4331 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_vdev_start()
4356 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_op_gen_vdev_start()
4366 ath10k_wmi_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id) ath10k_wmi_op_gen_vdev_stop() argument
4371 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_vdev_stop()
4378 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id); ath10k_wmi_op_gen_vdev_stop()
4383 ath10k_wmi_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, ath10k_wmi_op_gen_vdev_up() argument
4389 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_vdev_up()
4398 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_op_gen_vdev_up()
4405 ath10k_wmi_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id) ath10k_wmi_op_gen_vdev_down() argument
4410 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_vdev_down()
4417 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_op_gen_vdev_down()
4423 ath10k_wmi_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id, ath10k_wmi_op_gen_vdev_set_param() argument
4430 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_op_gen_vdev_set_param()
4436 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_vdev_set_param()
4445 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_op_gen_vdev_set_param()
4452 ath10k_wmi_op_gen_vdev_install_key(struct ath10k *ar, ath10k_wmi_op_gen_vdev_install_key() argument
4463 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len); ath10k_wmi_op_gen_vdev_install_key()
4481 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_op_gen_vdev_install_key()
4488 ath10k_wmi_op_gen_vdev_spectral_conf(struct ath10k *ar, ath10k_wmi_op_gen_vdev_spectral_conf() argument
4494 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_vdev_spectral_conf()
4523 ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, ath10k_wmi_op_gen_vdev_spectral_enable() argument
4529 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_vdev_spectral_enable()
4542 ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id, ath10k_wmi_op_gen_peer_create() argument
4548 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_peer_create()
4556 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_op_gen_peer_create()
4563 ath10k_wmi_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id, ath10k_wmi_op_gen_peer_delete() argument
4569 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_peer_delete()
4577 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_op_gen_peer_delete()
4584 ath10k_wmi_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id, ath10k_wmi_op_gen_peer_flush() argument
4590 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_peer_flush()
4599 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_op_gen_peer_flush()
4606 ath10k_wmi_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id, ath10k_wmi_op_gen_peer_set_param() argument
4614 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_peer_set_param()
4624 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_op_gen_peer_set_param()
4631 ath10k_wmi_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id, ath10k_wmi_op_gen_set_psmode() argument
4637 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_set_psmode()
4645 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_op_gen_set_psmode()
4652 ath10k_wmi_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id, ath10k_wmi_op_gen_set_sta_ps() argument
4659 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_set_sta_ps()
4668 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_op_gen_set_sta_ps()
4675 ath10k_wmi_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac, ath10k_wmi_op_gen_set_ap_ps() argument
4684 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_set_ap_ps()
4694 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_op_gen_set_ap_ps()
4701 ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar, ath10k_wmi_op_gen_scan_chan_list() argument
4713 skb = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_op_gen_scan_chan_list()
4731 ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf, ath10k_wmi_peer_assoc_fill() argument
4773 ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf, ath10k_wmi_peer_assoc_fill_main() argument
4778 ath10k_wmi_peer_assoc_fill(ar, buf, arg); ath10k_wmi_peer_assoc_fill_main()
4783 ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf, ath10k_wmi_peer_assoc_fill_10_1() argument
4786 ath10k_wmi_peer_assoc_fill(ar, buf, arg); ath10k_wmi_peer_assoc_fill_10_1()
4790 ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf, ath10k_wmi_peer_assoc_fill_10_2() argument
4804 ath10k_wmi_peer_assoc_fill(ar, buf, arg); ath10k_wmi_peer_assoc_fill_10_2()
4822 ath10k_wmi_op_gen_peer_assoc(struct ath10k *ar, ath10k_wmi_op_gen_peer_assoc() argument
4833 skb = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_op_gen_peer_assoc()
4837 ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg); ath10k_wmi_op_gen_peer_assoc()
4839 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_op_gen_peer_assoc()
4847 ath10k_wmi_10_1_op_gen_peer_assoc(struct ath10k *ar, ath10k_wmi_10_1_op_gen_peer_assoc() argument
4858 skb = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_10_1_op_gen_peer_assoc()
4862 ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg); ath10k_wmi_10_1_op_gen_peer_assoc()
4864 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_10_1_op_gen_peer_assoc()
4872 ath10k_wmi_10_2_op_gen_peer_assoc(struct ath10k *ar, ath10k_wmi_10_2_op_gen_peer_assoc() argument
4883 skb = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_10_2_op_gen_peer_assoc()
4887 ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg); ath10k_wmi_10_2_op_gen_peer_assoc()
4889 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_10_2_op_gen_peer_assoc()
4897 ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar) ath10k_wmi_10_2_op_gen_pdev_get_temperature() argument
4901 skb = ath10k_wmi_alloc_skb(ar, 0); ath10k_wmi_10_2_op_gen_pdev_get_temperature()
4905 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature\n"); ath10k_wmi_10_2_op_gen_pdev_get_temperature()
4911 ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn, ath10k_wmi_op_gen_beacon_dma() argument
4920 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_beacon_dma()
4957 ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar, ath10k_wmi_op_gen_pdev_set_wmm() argument
4963 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_pdev_set_wmm()
4973 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n"); ath10k_wmi_op_gen_pdev_set_wmm()
4978 ath10k_wmi_op_gen_request_stats(struct ath10k *ar, u32 stats_mask) ath10k_wmi_op_gen_request_stats() argument
4983 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_request_stats()
4990 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats 0x%08x\n", ath10k_wmi_op_gen_request_stats()
4996 ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar, ath10k_wmi_op_gen_force_fw_hang() argument
5002 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_force_fw_hang()
5010 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n", ath10k_wmi_op_gen_force_fw_hang()
5016 ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable, ath10k_wmi_op_gen_dbglog_cfg() argument
5023 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_dbglog_cfg()
5044 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_op_gen_dbglog_cfg()
5054 ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap) ath10k_wmi_op_gen_pktlog_enable() argument
5059 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_pktlog_enable()
5068 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi enable pktlog filter 0x%08x\n", ath10k_wmi_op_gen_pktlog_enable()
5074 ath10k_wmi_op_gen_pktlog_disable(struct ath10k *ar) ath10k_wmi_op_gen_pktlog_disable() argument
5078 skb = ath10k_wmi_alloc_skb(ar, 0); ath10k_wmi_op_gen_pktlog_disable()
5082 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n"); ath10k_wmi_op_gen_pktlog_disable()
5087 ath10k_wmi_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period, ath10k_wmi_op_gen_pdev_set_quiet_mode() argument
5094 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_pdev_set_quiet_mode()
5104 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_op_gen_pdev_set_quiet_mode()
5111 ath10k_wmi_op_gen_addba_clear_resp(struct ath10k *ar, u32 vdev_id, ath10k_wmi_op_gen_addba_clear_resp() argument
5120 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_addba_clear_resp()
5128 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_op_gen_addba_clear_resp()
5135 ath10k_wmi_op_gen_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac, ath10k_wmi_op_gen_addba_send() argument
5144 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_addba_send()
5154 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_op_gen_addba_send()
5161 ath10k_wmi_op_gen_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac, ath10k_wmi_op_gen_addba_set_resp() argument
5170 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_addba_set_resp()
5180 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_op_gen_addba_set_resp()
5187 ath10k_wmi_op_gen_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac, ath10k_wmi_op_gen_delba_send() argument
5196 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_op_gen_delba_send()
5207 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_op_gen_delba_send()
5458 int ath10k_wmi_attach(struct ath10k *ar) ath10k_wmi_attach() argument
5460 switch (ar->wmi.op_version) { ath10k_wmi_attach()
5462 ar->wmi.cmd = &wmi_10_2_4_cmd_map; ath10k_wmi_attach()
5463 ar->wmi.ops = &wmi_10_2_4_ops; ath10k_wmi_attach()
5464 ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map; ath10k_wmi_attach()
5465 ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map; ath10k_wmi_attach()
5468 ar->wmi.cmd = &wmi_10_2_cmd_map; ath10k_wmi_attach()
5469 ar->wmi.ops = &wmi_10_2_ops; ath10k_wmi_attach()
5470 ar->wmi.vdev_param = &wmi_10x_vdev_param_map; ath10k_wmi_attach()
5471 ar->wmi.pdev_param = &wmi_10x_pdev_param_map; ath10k_wmi_attach()
5474 ar->wmi.cmd = &wmi_10x_cmd_map; ath10k_wmi_attach()
5475 ar->wmi.ops = &wmi_10_1_ops; ath10k_wmi_attach()
5476 ar->wmi.vdev_param = &wmi_10x_vdev_param_map; ath10k_wmi_attach()
5477 ar->wmi.pdev_param = &wmi_10x_pdev_param_map; ath10k_wmi_attach()
5480 ar->wmi.cmd = &wmi_cmd_map; ath10k_wmi_attach()
5481 ar->wmi.ops = &wmi_ops; ath10k_wmi_attach()
5482 ar->wmi.vdev_param = &wmi_vdev_param_map; ath10k_wmi_attach()
5483 ar->wmi.pdev_param = &wmi_pdev_param_map; ath10k_wmi_attach()
5486 ath10k_wmi_tlv_attach(ar); ath10k_wmi_attach()
5490 ath10k_err(ar, "unsupported WMI op version: %d\n", ath10k_wmi_attach()
5491 ar->wmi.op_version); ath10k_wmi_attach()
5495 init_completion(&ar->wmi.service_ready); ath10k_wmi_attach()
5496 init_completion(&ar->wmi.unified_ready); ath10k_wmi_attach()
5501 void ath10k_wmi_detach(struct ath10k *ar) ath10k_wmi_detach() argument
5506 for (i = 0; i < ar->wmi.num_mem_chunks; i++) { ath10k_wmi_detach()
5507 dma_free_coherent(ar->dev, ath10k_wmi_detach()
5508 ar->wmi.mem_chunks[i].len, ath10k_wmi_detach()
5509 ar->wmi.mem_chunks[i].vaddr, ath10k_wmi_detach()
5510 ar->wmi.mem_chunks[i].paddr); ath10k_wmi_detach()
5513 ar->wmi.num_mem_chunks = 0; ath10k_wmi_detach()
H A Dhtt.c40 status = ath10k_htc_connect_service(&htt->ar->htc, &conn_req, ath10k_htt_connect()
51 int ath10k_htt_init(struct ath10k *ar) ath10k_htt_init() argument
53 struct ath10k_htt *htt = &ar->htt; ath10k_htt_init()
55 htt->ar = ar; ath10k_htt_init()
76 struct ath10k *ar = htt->ar; ath10k_htt_verify_version() local
78 ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt target version %d.%d\n", ath10k_htt_verify_version()
83 ath10k_err(ar, "unsupported htt major version %d. supported versions are 2 and 3\n", ath10k_htt_verify_version()
93 struct ath10k *ar = htt->ar; ath10k_htt_setup() local
105 ath10k_warn(ar, "htt version request timed out\n"); ath10k_htt_setup()
H A Dmac.h27 struct ath10k *ar; member in struct:ath10k_generic_iter
32 void ath10k_mac_destroy(struct ath10k *ar);
33 int ath10k_mac_register(struct ath10k *ar);
34 void ath10k_mac_unregister(struct ath10k *ar);
35 struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
36 void __ath10k_scan_finish(struct ath10k *ar);
37 void ath10k_scan_finish(struct ath10k *ar);
39 void ath10k_offchan_tx_purge(struct ath10k *ar);
41 void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar);
43 void ath10k_halt(struct ath10k *ar);
45 void ath10k_drain_tx(struct ath10k *ar);
46 bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
H A Dhtc.c33 ath10k_hif_send_complete_check(ep->htc->ar, ep->ul_pipe_id, force); ath10k_htc_send_complete_check()
36 static void ath10k_htc_control_tx_complete(struct ath10k *ar, ath10k_htc_control_tx_complete() argument
42 static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar) ath10k_htc_build_tx_ctrl_skb() argument
57 ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb); ath10k_htc_build_tx_ctrl_skb()
66 dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE); ath10k_htc_restore_tx_skb()
73 struct ath10k *ar = ep->htc->ar; ath10k_htc_notify_tx_completion() local
75 ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__, ath10k_htc_notify_tx_completion()
81 ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid); ath10k_htc_notify_tx_completion()
86 ep->ep_ops.ep_tx_complete(ep->htc->ar, skb); ath10k_htc_notify_tx_completion()
92 struct ath10k *ar = ep->htc->ar; ath10k_htc_ep_need_credit_update() local
99 ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n", ath10k_htc_ep_need_credit_update()
128 struct ath10k *ar = htc->ar; ath10k_htc_send() local
132 struct device *dev = htc->ar->dev; ath10k_htc_send()
136 if (htc->ar->state == ATH10K_STATE_WEDGED) ath10k_htc_send()
140 ath10k_warn(ar, "Invalid endpoint id: %d\n", eid); ath10k_htc_send()
155 ath10k_dbg(ar, ATH10K_DBG_HTC, ath10k_htc_send()
177 ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1); ath10k_htc_send()
189 ath10k_dbg(ar, ATH10K_DBG_HTC, ath10k_htc_send()
195 ep->ep_ops.ep_tx_credits(htc->ar); ath10k_htc_send()
202 static int ath10k_htc_tx_completion_handler(struct ath10k *ar, ath10k_htc_tx_completion_handler() argument
205 struct ath10k_htc *htc = &ar->htc; ath10k_htc_tx_completion_handler()
231 struct ath10k *ar = htc->ar; ath10k_htc_process_credit_report() local
236 ath10k_warn(ar, "Uneven credit report len %d", len); ath10k_htc_process_credit_report()
248 ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n", ath10k_htc_process_credit_report()
253 ep->ep_ops.ep_tx_credits(htc->ar); ath10k_htc_process_credit_report()
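HTC transmit is credit flow controlled: a send consumes credits proportional to the frame length, a target credit report replenishes the endpoint, and the ep_tx_credits callback is then invoked so queued traffic can be retried. A minimal model of that accounting (the callback and credit size are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct endpoint {
    int credits;
    int credit_size;                 /* bytes covered by one credit */
    void (*tx_credits)(struct endpoint *ep);
};

/* Consume ceil(len / credit_size) credits, or refuse the send. */
static bool ep_send(struct endpoint *ep, int len)
{
    int needed = (len + ep->credit_size - 1) / ep->credit_size;

    if (ep->credits < needed)
        return false;                /* back-pressure the caller */
    ep->credits -= needed;
    return true;
}

/* Credit report from the target: replenish, then kick the queue. */
static void ep_credit_report(struct endpoint *ep, int returned)
{
    ep->credits += returned;
    if (ep->tx_credits)
        ep->tx_credits(ep);
}

static void kick(struct endpoint *ep)
{
    printf("credits now %d, retry queued frames\n", ep->credits);
}

int main(void)
{
    struct endpoint ep = { .credits = 2, .credit_size = 1792,
                           .tx_credits = kick };

    printf("send ok=%d\n", ep_send(&ep, 3000));  /* uses 2 credits */
    printf("send ok=%d\n", ep_send(&ep, 100));   /* refused        */
    ep_credit_report(&ep, 2);
    return 0;
}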
265 struct ath10k *ar = htc->ar; ath10k_htc_process_trailer() local
285 ath10k_warn(ar, "Invalid record length: %d\n", ath10k_htc_process_trailer()
295 ath10k_warn(ar, "Credit report too long\n"); ath10k_htc_process_trailer()
305 ath10k_warn(ar, "Unhandled record: id:%d length:%d\n", ath10k_htc_process_trailer()
319 ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "", ath10k_htc_process_trailer()
325 static int ath10k_htc_rx_completion_handler(struct ath10k *ar, ath10k_htc_rx_completion_handler() argument
329 struct ath10k_htc *htc = &ar->htc; ath10k_htc_rx_completion_handler()
344 ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid); ath10k_htc_rx_completion_handler()
345 ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "", ath10k_htc_rx_completion_handler()
365 ath10k_warn(ar, "HTC rx frame too long, len: %zu\n", ath10k_htc_rx_completion_handler()
367 ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "", ath10k_htc_rx_completion_handler()
374 ath10k_dbg(ar, ATH10K_DBG_HTC, ath10k_htc_rx_completion_handler()
377 ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", ath10k_htc_rx_completion_handler()
393 ath10k_warn(ar, "Invalid trailer length: %d\n", ath10k_htc_rx_completion_handler()
426 ath10k_warn(ar, "HTC rx ctrl still processing\n"); ath10k_htc_rx_completion_handler()
442 htc->htc_ops.target_send_suspend_complete(ar); ath10k_htc_rx_completion_handler()
447 ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n", ath10k_htc_rx_completion_handler()
449 ep->ep_ops.ep_rx_complete(ar, skb); ath10k_htc_rx_completion_handler()
459 static void ath10k_htc_control_rx_complete(struct ath10k *ar, ath10k_htc_control_rx_complete() argument
464 ath10k_warn(ar, "unexpected htc rx\n"); ath10k_htc_control_rx_complete()
551 struct ath10k *ar = htc->ar; ath10k_htc_wait_target() local
569 ath10k_warn(ar, "failed to receive control response completion, polling...\n"); ath10k_htc_wait_target()
572 ath10k_hif_send_complete_check(htc->ar, i, 1); ath10k_htc_wait_target()
582 ath10k_err(ar, "ctl_resp never came in (%d)\n", status); ath10k_htc_wait_target()
587 ath10k_err(ar, "Invalid HTC ready msg len:%d\n", ath10k_htc_wait_target()
598 ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id); ath10k_htc_wait_target()
605 ath10k_dbg(ar, ATH10K_DBG_HTC, ath10k_htc_wait_target()
612 ath10k_err(ar, "Invalid credit size received\n"); ath10k_htc_wait_target()
629 ath10k_err(ar, "could not connect to htc service (%d)\n", ath10k_htc_wait_target()
641 struct ath10k *ar = htc->ar; ath10k_htc_connect_service() local
667 ath10k_dbg(ar, ATH10K_DBG_BOOT, ath10k_htc_connect_service()
671 skb = ath10k_htc_build_tx_ctrl_skb(htc->ar); ath10k_htc_connect_service()
673 ath10k_err(ar, "Failed to allocate HTC packet\n"); ath10k_htc_connect_service()
709 ath10k_err(ar, "Service connect timeout: %d\n", status); ath10k_htc_connect_service()
722 ath10k_err(ar, "Invalid resp message ID 0x%x", message_id); ath10k_htc_connect_service()
726 ath10k_dbg(ar, ATH10K_DBG_HTC, ath10k_htc_connect_service()
735 ath10k_err(ar, "HTC Service %s connect request failed: 0x%x\n", ath10k_htc_connect_service()
777 status = ath10k_hif_map_service_to_pipe(htc->ar, ath10k_htc_connect_service()
786 ath10k_dbg(ar, ATH10K_DBG_BOOT, ath10k_htc_connect_service()
791 ath10k_dbg(ar, ATH10K_DBG_BOOT, ath10k_htc_connect_service()
797 ath10k_dbg(ar, ATH10K_DBG_BOOT, ath10k_htc_connect_service()
805 struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size) ath10k_htc_alloc_skb() argument
817 ath10k_warn(ar, "Unaligned HTC tx skb\n"); ath10k_htc_alloc_skb()
824 struct ath10k *ar = htc->ar; ath10k_htc_start() local
829 skb = ath10k_htc_build_tx_ctrl_skb(htc->ar); ath10k_htc_start()
840 ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n"); ath10k_htc_start()
852 int ath10k_htc_init(struct ath10k *ar) ath10k_htc_init() argument
856 struct ath10k_htc *htc = &ar->htc; ath10k_htc_init()
865 htc->ar = ar; ath10k_htc_init()
870 ath10k_hif_set_callbacks(ar, &htc_callbacks); ath10k_htc_init()
871 ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id); ath10k_htc_init()
H A Ddebugfs_sta.c27 struct ath10k *ar = arsta->arvif->ar; ath10k_dbg_sta_read_aggr_mode() local
31 mutex_lock(&ar->conf_mutex); ath10k_dbg_sta_read_aggr_mode()
35 mutex_unlock(&ar->conf_mutex); ath10k_dbg_sta_read_aggr_mode()
46 struct ath10k *ar = arsta->arvif->ar; ath10k_dbg_sta_write_aggr_mode() local
56 mutex_lock(&ar->conf_mutex); ath10k_dbg_sta_write_aggr_mode()
57 if ((ar->state != ATH10K_STATE_ON) || ath10k_dbg_sta_write_aggr_mode()
63 ret = ath10k_wmi_addba_clear_resp(ar, arsta->arvif->vdev_id, sta->addr); ath10k_dbg_sta_write_aggr_mode()
65 ath10k_warn(ar, "failed to clear addba session ret: %d\n", ret); ath10k_dbg_sta_write_aggr_mode()
71 mutex_unlock(&ar->conf_mutex); ath10k_dbg_sta_write_aggr_mode()
89 struct ath10k *ar = arsta->arvif->ar; ath10k_dbg_sta_write_addba() local
107 mutex_lock(&ar->conf_mutex); ath10k_dbg_sta_write_addba()
108 if ((ar->state != ATH10K_STATE_ON) || ath10k_dbg_sta_write_addba()
114 ret = ath10k_wmi_addba_send(ar, arsta->arvif->vdev_id, sta->addr, ath10k_dbg_sta_write_addba()
117 ath10k_warn(ar, "failed to send addba request: vdev_id %u peer %pM tid %u buf_size %u\n", ath10k_dbg_sta_write_addba()
123 mutex_unlock(&ar->conf_mutex); ath10k_dbg_sta_write_addba()
140 struct ath10k *ar = arsta->arvif->ar; ath10k_dbg_sta_write_addba_resp() local
158 mutex_lock(&ar->conf_mutex); ath10k_dbg_sta_write_addba_resp()
159 if ((ar->state != ATH10K_STATE_ON) || ath10k_dbg_sta_write_addba_resp()
165 ret = ath10k_wmi_addba_set_resp(ar, arsta->arvif->vdev_id, sta->addr, ath10k_dbg_sta_write_addba_resp()
168 ath10k_warn(ar, "failed to send addba response: vdev_id %u peer %pM tid %u status%u\n", ath10k_dbg_sta_write_addba_resp()
173 mutex_unlock(&ar->conf_mutex); ath10k_dbg_sta_write_addba_resp()
190 struct ath10k *ar = arsta->arvif->ar; ath10k_dbg_sta_write_delba() local
208 mutex_lock(&ar->conf_mutex); ath10k_dbg_sta_write_delba()
209 if ((ar->state != ATH10K_STATE_ON) || ath10k_dbg_sta_write_delba()
215 ret = ath10k_wmi_delba_send(ar, arsta->arvif->vdev_id, sta->addr, ath10k_dbg_sta_write_delba()
218 ath10k_warn(ar, "failed to send delba: vdev_id %u peer %pM tid %u initiator %u reason %u\n", ath10k_dbg_sta_write_delba()
224 mutex_unlock(&ar->conf_mutex); ath10k_dbg_sta_write_delba()
H A Dhtt_tx.c29 ieee80211_wake_queues(htt->ar->hw); __ath10k_htt_tx_dec_pending()
52 ieee80211_stop_queues(htt->ar->hw); ath10k_htt_tx_inc_pending()
61 struct ath10k *ar = htt->ar; ath10k_htt_tx_alloc_msdu_id() local
68 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret); ath10k_htt_tx_alloc_msdu_id()
75 struct ath10k *ar = htt->ar; ath10k_htt_tx_free_msdu_id() local
79 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id); ath10k_htt_tx_free_msdu_id()
86 struct ath10k *ar = htt->ar; ath10k_htt_tx_alloc() local
88 ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n", ath10k_htt_tx_alloc()
94 htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev, ath10k_htt_tx_alloc()
106 struct ath10k *ar = ctx; ath10k_htt_tx_clean_up_pending() local
107 struct ath10k_htt *htt = &ar->htt; ath10k_htt_tx_clean_up_pending()
110 ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id); ath10k_htt_tx_clean_up_pending()
124 idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar); ath10k_htt_tx_free()
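HTT tx tags each outgoing MSDU with a small integer msdu_id (allocated from an idr in the driver) so the completion path can find the owning buffer again, and teardown force-completes whatever is still pending. A toy fixed-size allocator showing the same alloc/free/for-each shape; the kernel's idr replaces this array in the real code:

#include <stdio.h>

#define MAX_PENDING 8

static void *pending[MAX_PENDING];   /* msdu_id -> cookie, NULL = free */

static int msdu_id_alloc(void *cookie)
{
    for (int id = 0; id < MAX_PENDING; id++)
        if (!pending[id]) {
            pending[id] = cookie;
            return id;
        }
    return -1;                       /* all ids in flight */
}

static void msdu_id_free(int id)
{
    pending[id] = NULL;
}

/* Teardown: force-complete everything still in flight. */
static void clean_up_pending(void (*complete)(int id, void *cookie))
{
    for (int id = 0; id < MAX_PENDING; id++)
        if (pending[id])
            complete(id, pending[id]);
}

static void force_complete(int id, void *cookie)
{
    (void)cookie;
    printf("force cleanup msdu_id %d\n", id);
    msdu_id_free(id);
}

int main(void)
{
    int a = msdu_id_alloc((void *)0x1);

    msdu_id_alloc((void *)0x2);
    msdu_id_free(a);
    clean_up_pending(force_complete); /* completes only the second id */
    return 0;
}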
129 void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) ath10k_htt_htc_tx_complete() argument
136 struct ath10k *ar = htt->ar; ath10k_htt_h2t_ver_req_msg() local
145 skb = ath10k_htc_alloc_skb(ar, len); ath10k_htt_h2t_ver_req_msg()
153 ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); ath10k_htt_h2t_ver_req_msg()
164 struct ath10k *ar = htt->ar; ath10k_htt_h2t_stats_req() local
173 skb = ath10k_htc_alloc_skb(ar, len); ath10k_htt_h2t_stats_req()
193 ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); ath10k_htt_h2t_stats_req()
195 ath10k_warn(ar, "failed to send htt type stats request: %d", ath10k_htt_h2t_stats_req()
206 struct ath10k *ar = htt->ar; ath10k_htt_send_rx_ring_cfg_ll() local
225 skb = ath10k_htc_alloc_skb(ar, len); ath10k_htt_send_rx_ring_cfg_ll()
281 ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); ath10k_htt_send_rx_ring_cfg_ll()
294 struct ath10k *ar = htt->ar; ath10k_htt_h2t_aggr_cfg_msg() local
312 skb = ath10k_htc_alloc_skb(ar, len); ath10k_htt_h2t_aggr_cfg_msg()
324 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d", ath10k_htt_h2t_aggr_cfg_msg()
328 ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); ath10k_htt_h2t_aggr_cfg_msg()
339 struct ath10k *ar = htt->ar; ath10k_htt_mgmt_tx() local
340 struct device *dev = ar->dev; ath10k_htt_mgmt_tx()
365 txdesc = ath10k_htc_alloc_skb(ar, len); ath10k_htt_mgmt_tx()
391 res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc); ath10k_htt_mgmt_tx()
413 struct ath10k *ar = htt->ar; ath10k_htt_tx() local
414 struct device *dev = ar->dev; ath10k_htt_tx()
542 trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid); ath10k_htt_tx()
543 ath10k_dbg(ar, ATH10K_DBG_HTT, ath10k_htt_tx()
547 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ", ath10k_htt_tx()
549 trace_ath10k_tx_hdr(ar, msdu->data, msdu->len); ath10k_htt_tx()
550 trace_ath10k_tx_payload(ar, msdu->data, msdu->len); ath10k_htt_tx()
567 res = ath10k_hif_tx_sg(htt->ar, ath10k_htt_tx()
568 htt->ar->htc.endpoint[htt->eid].ul_pipe_id, ath10k_htt_tx()
H A Dspectral.h49 int ath10k_spectral_process_fft(struct ath10k *ar,
53 int ath10k_spectral_start(struct ath10k *ar);
55 int ath10k_spectral_create(struct ath10k *ar);
56 void ath10k_spectral_destroy(struct ath10k *ar);
61 ath10k_spectral_process_fft(struct ath10k *ar, ath10k_spectral_process_fft() argument
69 static inline int ath10k_spectral_start(struct ath10k *ar) ath10k_spectral_start() argument
79 static inline int ath10k_spectral_create(struct ath10k *ar) ath10k_spectral_create() argument
84 static inline void ath10k_spectral_destroy(struct ath10k *ar) ath10k_spectral_destroy() argument
H A Dthermal.h39 int ath10k_thermal_register(struct ath10k *ar);
40 void ath10k_thermal_unregister(struct ath10k *ar);
41 void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature);
43 static inline int ath10k_thermal_register(struct ath10k *ar) ath10k_thermal_register() argument
48 static inline void ath10k_thermal_unregister(struct ath10k *ar) ath10k_thermal_unregister() argument
52 static inline void ath10k_thermal_event_temperature(struct ath10k *ar, ath10k_thermal_event_temperature() argument
H A Dhtt_rx.c38 ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr) ath10k_htt_rx_find_skb_paddr() argument
42 hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr) ath10k_htt_rx_find_skb_paddr()
60 dma_unmap_single(htt->ar->dev, rxcb->paddr, ath10k_htt_rx_ring_free()
73 dma_unmap_single(htt->ar->dev, rxcb->paddr, ath10k_htt_rx_ring_free()
118 paddr = dma_map_single(htt->ar->dev, skb->data, __ath10k_htt_rx_ring_fill_n()
122 if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) { __ath10k_htt_rx_ring_fill_n()
207 int ath10k_htt_rx_ring_refill(struct ath10k *ar) ath10k_htt_rx_ring_refill() argument
209 struct ath10k_htt *htt = &ar->htt; ath10k_htt_rx_ring_refill()
235 dma_free_coherent(htt->ar->dev, ath10k_htt_rx_free()
241 dma_free_coherent(htt->ar->dev, ath10k_htt_rx_free()
251 struct ath10k *ar = htt->ar; ath10k_htt_rx_netbuf_pop() local
258 ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n"); ath10k_htt_rx_netbuf_pop()
272 dma_unmap_single(htt->ar->dev, ath10k_htt_rx_netbuf_pop()
276 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ", ath10k_htt_rx_netbuf_pop()
287 struct ath10k *ar = htt->ar; ath10k_htt_rx_amsdu_pop() local
400 trace_ath10k_htt_rx_desc(ar, &rx_desc->attention, ath10k_htt_rx_amsdu_pop()
436 struct ath10k *ar = htt->ar; ath10k_htt_rx_pop_paddr() local
442 msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr); ath10k_htt_rx_pop_paddr()
450 dma_unmap_single(htt->ar->dev, rxcb->paddr, ath10k_htt_rx_pop_paddr()
453 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ", ath10k_htt_rx_pop_paddr()
463 struct ath10k *ar = htt->ar; ath10k_htt_rx_pop_paddr_list() local
490 trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd)); ath10k_htt_rx_pop_paddr_list()
498 ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n"); ath10k_htt_rx_pop_paddr_list()
511 struct ath10k *ar = htt->ar; ath10k_htt_rx_alloc() local
527 ath10k_warn(ar, "htt rx ring size is not power of 2\n"); ath10k_htt_rx_alloc()
539 vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_DMA); ath10k_htt_rx_alloc()
546 vaddr = dma_alloc_coherent(htt->ar->dev, ath10k_htt_rx_alloc()
576 ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n", ath10k_htt_rx_alloc()
581 dma_free_coherent(htt->ar->dev, ath10k_htt_rx_alloc()
592 static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar, ath10k_htt_rx_crypto_param_len() argument
611 ath10k_warn(ar, "unsupported encryption type %d\n", type); ath10k_htt_rx_crypto_param_len()
617 static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar, ath10k_htt_rx_crypto_tail_len() argument
636 ath10k_warn(ar, "unsupported encryption type %d\n", type); ath10k_htt_rx_crypto_tail_len()
673 static void ath10k_htt_rx_h_rates(struct ath10k *ar, ath10k_htt_rx_h_rates() argument
776 static bool ath10k_htt_rx_h_channel(struct ath10k *ar, ath10k_htt_rx_h_channel() argument
781 spin_lock_bh(&ar->data_lock); ath10k_htt_rx_h_channel()
782 ch = ar->scan_channel; ath10k_htt_rx_h_channel()
784 ch = ar->rx_channel; ath10k_htt_rx_h_channel()
785 spin_unlock_bh(&ar->data_lock); ath10k_htt_rx_h_channel()
796 static void ath10k_htt_rx_h_signal(struct ath10k *ar, ath10k_htt_rx_h_signal() argument
806 static void ath10k_htt_rx_h_mactime(struct ath10k *ar, ath10k_htt_rx_h_mactime() argument
820 static void ath10k_htt_rx_h_ppdu(struct ath10k *ar, ath10k_htt_rx_h_ppdu() argument
853 ath10k_htt_rx_h_signal(ar, status, rxd); ath10k_htt_rx_h_ppdu()
854 ath10k_htt_rx_h_channel(ar, status); ath10k_htt_rx_h_ppdu()
855 ath10k_htt_rx_h_rates(ar, status, rxd); ath10k_htt_rx_h_ppdu()
859 ath10k_htt_rx_h_mactime(ar, status, rxd); ath10k_htt_rx_h_ppdu()
891 static void ath10k_process_rx(struct ath10k *ar, ath10k_process_rx() argument
902 ath10k_dbg(ar, ATH10K_DBG_DATA, ath10k_process_rx()
924 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ", ath10k_process_rx()
926 trace_ath10k_rx_hdr(ar, skb->data, skb->len); ath10k_process_rx()
927 trace_ath10k_rx_payload(ar, skb->data, skb->len); ath10k_process_rx()
929 ieee80211_rx(ar->hw, skb); ath10k_process_rx()
938 static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar, ath10k_htt_rx_h_undecap_raw() argument
995 skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype)); ath10k_htt_rx_h_undecap_raw()
1004 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); ath10k_htt_rx_h_undecap_raw()
1011 static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar, ath10k_htt_rx_h_undecap_nwifi() argument
1052 static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar, ath10k_htt_rx_h_find_rfc1042() argument
1075 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); ath10k_htt_rx_h_find_rfc1042()
1087 static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar, ath10k_htt_rx_h_undecap_eth() argument
1105 rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype); ath10k_htt_rx_h_undecap_eth()
1132 static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar, ath10k_htt_rx_h_undecap_snap() argument
1153 static void ath10k_htt_rx_h_undecap(struct ath10k *ar, ath10k_htt_rx_h_undecap() argument
1182 ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype, ath10k_htt_rx_h_undecap()
1186 ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr); ath10k_htt_rx_h_undecap()
1189 ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype); ath10k_htt_rx_h_undecap()
1192 ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr); ath10k_htt_rx_h_undecap()
1233 static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, ath10k_htt_rx_h_mpdu() argument
1315 ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype, skb_queue_walk()
1330 static void ath10k_htt_rx_h_deliver(struct ath10k *ar, ath10k_htt_rx_h_deliver() argument
1343 ath10k_process_rx(ar, status, msdu); ath10k_htt_rx_h_deliver()
1390 static void ath10k_htt_rx_h_unchain(struct ath10k *ar, ath10k_htt_rx_h_unchain() argument
1420 static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar, ath10k_htt_rx_amsdu_allowed() argument
1437 ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n"); ath10k_htt_rx_amsdu_allowed()
1454 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n"); ath10k_htt_rx_amsdu_allowed()
1458 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) { ath10k_htt_rx_amsdu_allowed()
1459 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n"); ath10k_htt_rx_amsdu_allowed()
1466 static void ath10k_htt_rx_h_filter(struct ath10k *ar, ath10k_htt_rx_h_filter() argument
1473 if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status)) ath10k_htt_rx_h_filter()
1482 struct ath10k *ar = htt->ar; ath10k_htt_rx_handler() local
1503 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ", ath10k_htt_rx_handler()
1516 ath10k_warn(ar, "rx ring became corrupted: %d\n", ret); ath10k_htt_rx_handler()
1525 ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status); ath10k_htt_rx_handler()
1526 ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0); ath10k_htt_rx_handler()
1527 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); ath10k_htt_rx_handler()
1528 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status); ath10k_htt_rx_handler()
1529 ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status); ath10k_htt_rx_handler()
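The five h_* calls closing ath10k_htt_rx_handler above are the per-A-MSDU rx pipeline; the fragment and in-order paths below reuse the same stages minus the unchain step. A condensed restatement as it would sit inside htt_rx.c, since the stages are the static helpers hit above (the driver runs these calls inline per MPDU range, with error handling around the pop):

/* Condensed rx pipeline from the calls above; a sketch, not the
 * driver's exact code. */
static void ath10k_htt_rx_run_pipeline(struct ath10k *ar,
				       struct sk_buff_head *amsdu,
				       struct ieee80211_rx_status *rx_status,
				       bool chained)
{
	ath10k_htt_rx_h_ppdu(ar, amsdu, rx_status);	/* rate/channel/signal */
	ath10k_htt_rx_h_unchain(ar, amsdu, chained);	/* merge chained buffers */
	ath10k_htt_rx_h_filter(ar, amsdu, rx_status);	/* drop disallowed frames */
	ath10k_htt_rx_h_mpdu(ar, amsdu, rx_status);	/* decrypt and undecap */
	ath10k_htt_rx_h_deliver(ar, amsdu, rx_status);	/* hand off to mac80211 */
}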
1538 struct ath10k *ar = htt->ar; ath10k_htt_rx_frag_handler() local
1557 ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n"); ath10k_htt_rx_frag_handler()
1560 ath10k_warn(ar, "failed to pop amsdu from htt rx ring for fragmented rx %d\n", ath10k_htt_rx_frag_handler()
1567 ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n"); ath10k_htt_rx_frag_handler()
1572 ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status); ath10k_htt_rx_frag_handler()
1573 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); ath10k_htt_rx_frag_handler()
1574 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status); ath10k_htt_rx_frag_handler()
1575 ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status); ath10k_htt_rx_frag_handler()
1578 ath10k_dbg(ar, ATH10K_DBG_HTT, ath10k_htt_rx_frag_handler()
1584 static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar, ath10k_htt_rx_frm_tx_compl() argument
1587 struct ath10k_htt *htt = &ar->htt; ath10k_htt_rx_frm_tx_compl()
1608 ath10k_warn(ar, "unhandled tx completion status %d\n", status); ath10k_htt_rx_frm_tx_compl()
1613 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n", ath10k_htt_rx_frm_tx_compl()
1623 static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp) ath10k_htt_rx_addba() argument
1634 ath10k_dbg(ar, ATH10K_DBG_HTT, ath10k_htt_rx_addba()
1638 spin_lock_bh(&ar->data_lock); ath10k_htt_rx_addba()
1639 peer = ath10k_peer_find_by_id(ar, peer_id); ath10k_htt_rx_addba()
1641 ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n", ath10k_htt_rx_addba()
1643 spin_unlock_bh(&ar->data_lock); ath10k_htt_rx_addba()
1647 arvif = ath10k_get_arvif(ar, peer->vdev_id); ath10k_htt_rx_addba()
1649 ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n", ath10k_htt_rx_addba()
1651 spin_unlock_bh(&ar->data_lock); ath10k_htt_rx_addba()
1655 ath10k_dbg(ar, ATH10K_DBG_HTT, ath10k_htt_rx_addba()
1660 spin_unlock_bh(&ar->data_lock); ath10k_htt_rx_addba()
1663 static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp) ath10k_htt_rx_delba() argument
1674 ath10k_dbg(ar, ATH10K_DBG_HTT, ath10k_htt_rx_delba()
1678 spin_lock_bh(&ar->data_lock); ath10k_htt_rx_delba()
1679 peer = ath10k_peer_find_by_id(ar, peer_id); ath10k_htt_rx_delba()
1681 ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n", ath10k_htt_rx_delba()
1683 spin_unlock_bh(&ar->data_lock); ath10k_htt_rx_delba()
1687 arvif = ath10k_get_arvif(ar, peer->vdev_id); ath10k_htt_rx_delba()
1689 ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n", ath10k_htt_rx_delba()
1691 spin_unlock_bh(&ar->data_lock); ath10k_htt_rx_delba()
1695 ath10k_dbg(ar, ATH10K_DBG_HTT, ath10k_htt_rx_delba()
1700 spin_unlock_bh(&ar->data_lock); ath10k_htt_rx_delba()
1754 static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar, ath10k_htt_rx_h_rx_offload() argument
1757 struct ath10k_htt *htt = &ar->htt; ath10k_htt_rx_h_rx_offload()
1774 ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n"); ath10k_htt_rx_h_rx_offload()
1799 ath10k_htt_rx_h_channel(ar, status); ath10k_htt_rx_h_rx_offload()
1800 ath10k_process_rx(ar, status, msdu); ath10k_htt_rx_h_rx_offload()
1804 static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) ath10k_htt_rx_in_ord_ind() argument
1806 struct ath10k_htt *htt = &ar->htt; ath10k_htt_rx_in_ord_ind()
1835 ath10k_dbg(ar, ATH10K_DBG_HTT, ath10k_htt_rx_in_ord_ind()
1840 ath10k_warn(ar, "dropping invalid in order rx indication\n"); ath10k_htt_rx_in_ord_ind()
1850 ath10k_warn(ar, "failed to pop paddr list: %d\n", ret); ath10k_htt_rx_in_ord_ind()
1859 ath10k_htt_rx_h_rx_offload(ar, &list); ath10k_htt_rx_in_ord_ind()
1872 ath10k_htt_rx_h_ppdu(ar, &amsdu, status); ath10k_htt_rx_in_ord_ind()
1873 ath10k_htt_rx_h_filter(ar, &amsdu, status); ath10k_htt_rx_in_ord_ind()
1874 ath10k_htt_rx_h_mpdu(ar, &amsdu, status); ath10k_htt_rx_in_ord_ind()
1875 ath10k_htt_rx_h_deliver(ar, &amsdu, status); ath10k_htt_rx_in_ord_ind()
1881 ath10k_warn(ar, "failed to extract amsdu: %d\n", ret); ath10k_htt_rx_in_ord_ind()
1891 void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) ath10k_htt_t2h_msg_handler() argument
1893 struct ath10k_htt *htt = &ar->htt; ath10k_htt_t2h_msg_handler()
1898 ath10k_warn(ar, "unaligned htt message, expect trouble\n"); ath10k_htt_t2h_msg_handler()
1900 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n", ath10k_htt_t2h_msg_handler()
1961 struct ath10k *ar = htt->ar; ath10k_htt_t2h_msg_handler() local
1964 ath10k_dbg(ar, ATH10K_DBG_HTT, ath10k_htt_t2h_msg_handler()
1969 complete(&ar->install_key_done); ath10k_htt_t2h_msg_handler()
1973 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", ath10k_htt_t2h_msg_handler()
1982 trace_ath10k_htt_stats(ar, skb->data, skb->len); ath10k_htt_t2h_msg_handler()
1990 ath10k_warn(ar, "received an unexpected htt tx inspect event\n"); ath10k_htt_t2h_msg_handler()
1993 ath10k_htt_rx_addba(ar, resp); ath10k_htt_t2h_msg_handler()
1996 ath10k_htt_rx_delba(ar, resp); ath10k_htt_t2h_msg_handler()
2002 trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload, ath10k_htt_t2h_msg_handler()
2028 ath10k_warn(ar, "htt event (%d) not handled\n", ath10k_htt_t2h_msg_handler()
2030 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", ath10k_htt_t2h_msg_handler()
2042 struct ath10k *ar = htt->ar; ath10k_htt_txrx_compl_task() local
2048 ath10k_htt_rx_frm_tx_compl(htt->ar, skb); ath10k_htt_txrx_compl_task()
2061 ath10k_htt_rx_in_ord_ind(ar, skb); ath10k_htt_txrx_compl_task()
H A Dwmi-tlv.c68 ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len, ath10k_wmi_tlv_iter() argument
69 int (*iter)(struct ath10k *ar, u16 tag, u16 len, ath10k_wmi_tlv_iter()
80 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_tlv_iter()
93 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_tlv_iter()
102 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_tlv_iter()
109 ret = iter(ar, tlv_tag, tlv_len, ptr, data); ath10k_wmi_tlv_iter()
120 static int ath10k_wmi_tlv_iter_parse(struct ath10k *ar, u16 tag, u16 len, ath10k_wmi_tlv_iter_parse() argument
131 static int ath10k_wmi_tlv_parse(struct ath10k *ar, const void **tb, ath10k_wmi_tlv_parse() argument
134 return ath10k_wmi_tlv_iter(ar, ptr, len, ath10k_wmi_tlv_iter_parse, ath10k_wmi_tlv_parse()
139 ath10k_wmi_tlv_parse_alloc(struct ath10k *ar, const void *ptr, ath10k_wmi_tlv_parse_alloc() argument
149 ret = ath10k_wmi_tlv_parse(ar, tb, ptr, len); ath10k_wmi_tlv_parse_alloc()
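The three helpers above are a classic bounds-checked TLV walk: ath10k_wmi_tlv_iter steps through 16-bit tag/length headers and calls back per element, ath10k_wmi_tlv_iter_parse stores each value pointer into a tag-indexed table, and parse_alloc allocates that table first. A self-contained sketch of the same walk; struct tlv_hdr and its field order are assumptions, since the driver's wmi_tlv layout is not visible in these hits:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical TLV header; the real struct wmi_tlv layout may differ. */
struct tlv_hdr {
	uint16_t tag;
	uint16_t len;	/* payload length, header excluded */
};

typedef int (*tlv_visit_t)(uint16_t tag, uint16_t len,
			   const void *value, void *data);

/* Walk [ptr, ptr + len) as a sequence of TLVs, calling visit() per
 * element; stop on a truncated buffer or a callback error. */
static int tlv_iter(const void *ptr, size_t len, tlv_visit_t visit, void *data)
{
	const uint8_t *p = ptr;

	while (len > 0) {
		struct tlv_hdr hdr;
		int ret;

		if (len < sizeof(hdr))
			return -1;		/* truncated header */
		memcpy(&hdr, p, sizeof(hdr));	/* avoid unaligned loads */
		p += sizeof(hdr);
		len -= sizeof(hdr);

		if (hdr.len > len)
			return -1;		/* truncated payload */

		ret = visit(hdr.tag, hdr.len, p, data);
		if (ret)
			return ret;

		p += hdr.len;
		len -= hdr.len;
	}
	return 0;
}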
166 static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar, ath10k_wmi_tlv_event_bcn_tx_status() argument
174 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); ath10k_wmi_tlv_event_bcn_tx_status()
177 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); ath10k_wmi_tlv_event_bcn_tx_status()
199 ath10k_warn(ar, "received bcn tmpl tx status on vdev %i: %d", ath10k_wmi_tlv_event_bcn_tx_status()
208 static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar, ath10k_wmi_tlv_event_diag_data() argument
217 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); ath10k_wmi_tlv_event_diag_data()
220 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); ath10k_wmi_tlv_event_diag_data()
238 ath10k_warn(ar, "failed to parse diag data: can't fit item header\n"); ath10k_wmi_tlv_event_diag_data()
245 ath10k_warn(ar, "failed to parse diag data: item is too long\n"); ath10k_wmi_tlv_event_diag_data()
249 trace_ath10k_wmi_diag_container(ar, ath10k_wmi_tlv_event_diag_data()
264 ath10k_warn(ar, "failed to parse diag data event: num_items %d len %d\n", ath10k_wmi_tlv_event_diag_data()
271 static int ath10k_wmi_tlv_event_diag(struct ath10k *ar, ath10k_wmi_tlv_event_diag() argument
278 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); ath10k_wmi_tlv_event_diag()
281 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); ath10k_wmi_tlv_event_diag()
292 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv diag event len %d\n", len); ath10k_wmi_tlv_event_diag()
293 trace_ath10k_wmi_diag(ar, data, len); ath10k_wmi_tlv_event_diag()
303 static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_tlv_op_rx() argument
314 trace_ath10k_wmi_event(ar, id, skb->data, skb->len); ath10k_wmi_tlv_op_rx()
318 ath10k_wmi_event_mgmt_rx(ar, skb); ath10k_wmi_tlv_op_rx()
322 ath10k_wmi_event_scan(ar, skb); ath10k_wmi_tlv_op_rx()
325 ath10k_wmi_event_chan_info(ar, skb); ath10k_wmi_tlv_op_rx()
328 ath10k_wmi_event_echo(ar, skb); ath10k_wmi_tlv_op_rx()
331 ath10k_wmi_event_debug_mesg(ar, skb); ath10k_wmi_tlv_op_rx()
334 ath10k_wmi_event_update_stats(ar, skb); ath10k_wmi_tlv_op_rx()
337 ath10k_wmi_event_vdev_start_resp(ar, skb); ath10k_wmi_tlv_op_rx()
340 ath10k_wmi_event_vdev_stopped(ar, skb); ath10k_wmi_tlv_op_rx()
343 ath10k_wmi_event_peer_sta_kickout(ar, skb); ath10k_wmi_tlv_op_rx()
346 ath10k_wmi_event_host_swba(ar, skb); ath10k_wmi_tlv_op_rx()
349 ath10k_wmi_event_tbttoffset_update(ar, skb); ath10k_wmi_tlv_op_rx()
352 ath10k_wmi_event_phyerr(ar, skb); ath10k_wmi_tlv_op_rx()
355 ath10k_wmi_event_roam(ar, skb); ath10k_wmi_tlv_op_rx()
358 ath10k_wmi_event_profile_match(ar, skb); ath10k_wmi_tlv_op_rx()
361 ath10k_wmi_event_debug_print(ar, skb); ath10k_wmi_tlv_op_rx()
364 ath10k_wmi_event_pdev_qvit(ar, skb); ath10k_wmi_tlv_op_rx()
367 ath10k_wmi_event_wlan_profile_data(ar, skb); ath10k_wmi_tlv_op_rx()
370 ath10k_wmi_event_rtt_measurement_report(ar, skb); ath10k_wmi_tlv_op_rx()
373 ath10k_wmi_event_tsf_measurement_report(ar, skb); ath10k_wmi_tlv_op_rx()
376 ath10k_wmi_event_rtt_error_report(ar, skb); ath10k_wmi_tlv_op_rx()
379 ath10k_wmi_event_wow_wakeup_host(ar, skb); ath10k_wmi_tlv_op_rx()
382 ath10k_wmi_event_dcs_interference(ar, skb); ath10k_wmi_tlv_op_rx()
385 ath10k_wmi_event_pdev_tpc_config(ar, skb); ath10k_wmi_tlv_op_rx()
388 ath10k_wmi_event_pdev_ftm_intg(ar, skb); ath10k_wmi_tlv_op_rx()
391 ath10k_wmi_event_gtk_offload_status(ar, skb); ath10k_wmi_tlv_op_rx()
394 ath10k_wmi_event_gtk_rekey_fail(ar, skb); ath10k_wmi_tlv_op_rx()
397 ath10k_wmi_event_delba_complete(ar, skb); ath10k_wmi_tlv_op_rx()
400 ath10k_wmi_event_addba_complete(ar, skb); ath10k_wmi_tlv_op_rx()
403 ath10k_wmi_event_vdev_install_key_complete(ar, skb); ath10k_wmi_tlv_op_rx()
406 ath10k_wmi_event_service_ready(ar, skb); ath10k_wmi_tlv_op_rx()
409 ath10k_wmi_event_ready(ar, skb); ath10k_wmi_tlv_op_rx()
412 ath10k_wmi_tlv_event_bcn_tx_status(ar, skb); ath10k_wmi_tlv_op_rx()
415 ath10k_wmi_tlv_event_diag_data(ar, skb); ath10k_wmi_tlv_op_rx()
418 ath10k_wmi_tlv_event_diag(ar, skb); ath10k_wmi_tlv_op_rx()
421 ath10k_warn(ar, "Unknown eventid: %d\n", id); ath10k_wmi_tlv_op_rx()
428 static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar, ath10k_wmi_tlv_op_pull_scan_ev() argument
436 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); ath10k_wmi_tlv_op_pull_scan_ev()
439 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); ath10k_wmi_tlv_op_pull_scan_ev()
460 static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar, ath10k_wmi_tlv_op_pull_mgmt_rx_ev() argument
470 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); ath10k_wmi_tlv_op_pull_mgmt_rx_ev()
473 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); ath10k_wmi_tlv_op_pull_mgmt_rx_ev()
509 static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar, ath10k_wmi_tlv_op_pull_ch_info_ev() argument
517 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); ath10k_wmi_tlv_op_pull_ch_info_ev()
520 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); ath10k_wmi_tlv_op_pull_ch_info_ev()
542 ath10k_wmi_tlv_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_tlv_op_pull_vdev_start_ev() argument
549 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); ath10k_wmi_tlv_op_pull_vdev_start_ev()
552 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); ath10k_wmi_tlv_op_pull_vdev_start_ev()
572 static int ath10k_wmi_tlv_op_pull_peer_kick_ev(struct ath10k *ar, ath10k_wmi_tlv_op_pull_peer_kick_ev() argument
580 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); ath10k_wmi_tlv_op_pull_peer_kick_ev()
583 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); ath10k_wmi_tlv_op_pull_peer_kick_ev()
608 static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len, ath10k_wmi_tlv_swba_tim_parse() argument
623 static int ath10k_wmi_tlv_swba_noa_parse(struct ath10k *ar, u16 tag, u16 len, ath10k_wmi_tlv_swba_noa_parse() argument
638 static int ath10k_wmi_tlv_swba_parse(struct ath10k *ar, u16 tag, u16 len, ath10k_wmi_tlv_swba_parse() argument
651 ret = ath10k_wmi_tlv_iter(ar, ptr, len, ath10k_wmi_tlv_swba_parse()
658 ret = ath10k_wmi_tlv_iter(ar, ptr, len, ath10k_wmi_tlv_swba_parse()
671 static int ath10k_wmi_tlv_op_pull_swba_ev(struct ath10k *ar, ath10k_wmi_tlv_op_pull_swba_ev() argument
680 ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len, ath10k_wmi_tlv_op_pull_swba_ev()
683 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); ath10k_wmi_tlv_op_pull_swba_ev()
703 static int ath10k_wmi_tlv_op_pull_phyerr_ev(struct ath10k *ar, ath10k_wmi_tlv_op_pull_phyerr_ev() argument
712 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); ath10k_wmi_tlv_op_pull_phyerr_ev()
715 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); ath10k_wmi_tlv_op_pull_phyerr_ev()
749 ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len, ath10k_wmi_tlv_parse_mem_reqs() argument
768 static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar, ath10k_wmi_tlv_op_pull_svc_rdy_ev() argument
779 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); ath10k_wmi_tlv_op_pull_svc_rdy_ev()
782 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); ath10k_wmi_tlv_op_pull_svc_rdy_ev()
799 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_tlv_op_pull_svc_rdy_ev()
830 ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs), ath10k_wmi_tlv_op_pull_svc_rdy_ev()
834 ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret); ath10k_wmi_tlv_op_pull_svc_rdy_ev()
842 static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar, ath10k_wmi_tlv_op_pull_rdy_ev() argument
850 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); ath10k_wmi_tlv_op_pull_rdy_ev()
853 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); ath10k_wmi_tlv_op_pull_rdy_ev()
908 static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar, ath10k_wmi_tlv_op_pull_fw_stats() argument
924 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); ath10k_wmi_tlv_op_pull_fw_stats()
927 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); ath10k_wmi_tlv_op_pull_fw_stats()
946 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_tlv_op_pull_fw_stats()
1016 ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt) ath10k_wmi_tlv_op_gen_pdev_suspend() argument
1022 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); ath10k_wmi_tlv_op_gen_pdev_suspend()
1032 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev suspend\n"); ath10k_wmi_tlv_op_gen_pdev_suspend()
1037 ath10k_wmi_tlv_op_gen_pdev_resume(struct ath10k *ar) ath10k_wmi_tlv_op_gen_pdev_resume() argument
1043 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); ath10k_wmi_tlv_op_gen_pdev_resume()
1053 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev resume\n"); ath10k_wmi_tlv_op_gen_pdev_resume()
1058 ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar, ath10k_wmi_tlv_op_gen_pdev_set_rd() argument
1067 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); ath10k_wmi_tlv_op_gen_pdev_set_rd()
1081 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set rd\n"); ath10k_wmi_tlv_op_gen_pdev_set_rd()
1086 ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id, ath10k_wmi_tlv_op_gen_pdev_set_param() argument
1093 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); ath10k_wmi_tlv_op_gen_pdev_set_param()
1104 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param\n"); ath10k_wmi_tlv_op_gen_pdev_set_param()
1108 static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar) ath10k_wmi_tlv_op_gen_init() argument
1118 chunks_len = ar->wmi.num_mem_chunks * sizeof(struct host_memory_chunk); ath10k_wmi_tlv_op_gen_init()
1123 skb = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_tlv_op_gen_init()
1157 cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); ath10k_wmi_tlv_op_gen_init()
1162 if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) { ath10k_wmi_tlv_op_gen_init()
1206 ath10k_wmi_put_host_mem_chunks(ar, chunks); ath10k_wmi_tlv_op_gen_init()
1208 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n"); ath10k_wmi_tlv_op_gen_init()
1213 ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar, ath10k_wmi_tlv_op_gen_start_scan() argument
1240 skb = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_tlv_op_gen_start_scan()
1306 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start scan\n"); ath10k_wmi_tlv_op_gen_start_scan()
1311 ath10k_wmi_tlv_op_gen_stop_scan(struct ath10k *ar, ath10k_wmi_tlv_op_gen_stop_scan() argument
1325 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); ath10k_wmi_tlv_op_gen_stop_scan()
1344 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop scan\n"); ath10k_wmi_tlv_op_gen_stop_scan()
1349 ath10k_wmi_tlv_op_gen_vdev_create(struct ath10k *ar, ath10k_wmi_tlv_op_gen_vdev_create() argument
1359 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); ath10k_wmi_tlv_op_gen_vdev_create()
1372 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev create\n"); ath10k_wmi_tlv_op_gen_vdev_create()
1377 ath10k_wmi_tlv_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id) ath10k_wmi_tlv_op_gen_vdev_delete() argument
1383 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); ath10k_wmi_tlv_op_gen_vdev_delete()
1393 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev delete\n"); ath10k_wmi_tlv_op_gen_vdev_delete()
1398 ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar, ath10k_wmi_tlv_op_gen_vdev_start() argument
1421 skb = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_tlv_op_gen_vdev_start()
1473 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev start\n"); ath10k_wmi_tlv_op_gen_vdev_start()
1478 ath10k_wmi_tlv_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id) ath10k_wmi_tlv_op_gen_vdev_stop() argument
1484 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); ath10k_wmi_tlv_op_gen_vdev_stop()
1494 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev stop\n"); ath10k_wmi_tlv_op_gen_vdev_stop()
1499 ath10k_wmi_tlv_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, ath10k_wmi_tlv_op_gen_vdev_up() argument
1507 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); ath10k_wmi_tlv_op_gen_vdev_up()
1519 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev up\n"); ath10k_wmi_tlv_op_gen_vdev_up()
1524 ath10k_wmi_tlv_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id) ath10k_wmi_tlv_op_gen_vdev_down() argument
1530 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); ath10k_wmi_tlv_op_gen_vdev_down()
1540 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev down\n"); ath10k_wmi_tlv_op_gen_vdev_down()
1545 ath10k_wmi_tlv_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id, ath10k_wmi_tlv_op_gen_vdev_set_param() argument
1552 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); ath10k_wmi_tlv_op_gen_vdev_set_param()
1564 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev set param\n"); ath10k_wmi_tlv_op_gen_vdev_set_param()
1569 ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar, ath10k_wmi_tlv_op_gen_vdev_install_key() argument
1585 skb = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_tlv_op_gen_vdev_install_key()
1617 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev install key\n"); ath10k_wmi_tlv_op_gen_vdev_install_key()
1621 static void *ath10k_wmi_tlv_put_uapsd_ac(struct ath10k *ar, void *ptr, ath10k_wmi_tlv_put_uapsd_ac() argument
1638 ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_wmi_tlv_put_uapsd_ac()
1647 ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id, ath10k_wmi_tlv_op_gen_vdev_sta_uapsd() argument
1664 skb = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_tlv_op_gen_vdev_sta_uapsd()
1687 ptr = ath10k_wmi_tlv_put_uapsd_ac(ar, ptr, &args[i]); ath10k_wmi_tlv_op_gen_vdev_sta_uapsd()
1689 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev sta uapsd auto trigger\n"); ath10k_wmi_tlv_op_gen_vdev_sta_uapsd()
1709 ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id, ath10k_wmi_tlv_op_gen_vdev_wmm_conf() argument
1719 skb = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_tlv_op_gen_vdev_wmm_conf()
1735 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n"); ath10k_wmi_tlv_op_gen_vdev_wmm_conf()
1740 ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar, ath10k_wmi_tlv_op_gen_sta_keepalive() argument
1752 skb = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_tlv_op_gen_sta_keepalive()
1778 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d interval %d\n", ath10k_wmi_tlv_op_gen_sta_keepalive()
1784 ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id, ath10k_wmi_tlv_op_gen_peer_create() argument
1791 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); ath10k_wmi_tlv_op_gen_peer_create()
1803 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n"); ath10k_wmi_tlv_op_gen_peer_create()
1808 ath10k_wmi_tlv_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id, ath10k_wmi_tlv_op_gen_peer_delete() argument
1815 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); ath10k_wmi_tlv_op_gen_peer_delete()
1826 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete\n"); ath10k_wmi_tlv_op_gen_peer_delete()
1831 ath10k_wmi_tlv_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id, ath10k_wmi_tlv_op_gen_peer_flush() argument
1838 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); ath10k_wmi_tlv_op_gen_peer_flush()
1850 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer flush\n"); ath10k_wmi_tlv_op_gen_peer_flush()
1855 ath10k_wmi_tlv_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id, ath10k_wmi_tlv_op_gen_peer_set_param() argument
1864 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); ath10k_wmi_tlv_op_gen_peer_set_param()
1877 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer set param\n"); ath10k_wmi_tlv_op_gen_peer_set_param()
1882 ath10k_wmi_tlv_op_gen_peer_assoc(struct ath10k *ar, ath10k_wmi_tlv_op_gen_peer_assoc() argument
1906 skb = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_tlv_op_gen_peer_assoc()
1967 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer assoc\n"); ath10k_wmi_tlv_op_gen_peer_assoc()
1972 ath10k_wmi_tlv_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id, ath10k_wmi_tlv_op_gen_set_psmode() argument
1979 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); ath10k_wmi_tlv_op_gen_set_psmode()
1990 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set psmode\n"); ath10k_wmi_tlv_op_gen_set_psmode()
1995 ath10k_wmi_tlv_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id, ath10k_wmi_tlv_op_gen_set_sta_ps() argument
2003 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); ath10k_wmi_tlv_op_gen_set_sta_ps()
2015 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set sta ps\n"); ath10k_wmi_tlv_op_gen_set_sta_ps()
2020 ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac, ath10k_wmi_tlv_op_gen_set_ap_ps() argument
2030 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); ath10k_wmi_tlv_op_gen_set_ap_ps()
2043 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv ap ps param\n"); ath10k_wmi_tlv_op_gen_set_ap_ps()
2048 ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar, ath10k_wmi_tlv_op_gen_scan_chan_list() argument
2064 skb = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_tlv_op_gen_scan_chan_list()
2100 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan chan list\n"); ath10k_wmi_tlv_op_gen_scan_chan_list()
2105 ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, ath10k_wmi_tlv_op_gen_beacon_dma() argument
2117 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); ath10k_wmi_tlv_op_gen_beacon_dma()
2141 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n"); ath10k_wmi_tlv_op_gen_beacon_dma()
2146 ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar, ath10k_wmi_tlv_op_gen_pdev_set_wmm() argument
2158 skb = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_tlv_op_gen_pdev_set_wmm()
2179 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set wmm\n"); ath10k_wmi_tlv_op_gen_pdev_set_wmm()
2184 ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask) ath10k_wmi_tlv_op_gen_request_stats() argument
2190 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); ath10k_wmi_tlv_op_gen_request_stats()
2200 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n"); ath10k_wmi_tlv_op_gen_request_stats()
2205 ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar, ath10k_wmi_tlv_op_gen_force_fw_hang() argument
2213 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); ath10k_wmi_tlv_op_gen_force_fw_hang()
2224 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv force fw hang\n"); ath10k_wmi_tlv_op_gen_force_fw_hang()
2229 ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable, ath10k_wmi_tlv_op_gen_dbglog_cfg() argument
2250 skb = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_tlv_op_gen_dbglog_cfg()
2275 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv dbglog value 0x%08x\n", value); ath10k_wmi_tlv_op_gen_dbglog_cfg()
2280 ath10k_wmi_tlv_op_gen_pktlog_enable(struct ath10k *ar, u32 filter) ath10k_wmi_tlv_op_gen_pktlog_enable() argument
2289 skb = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_tlv_op_gen_pktlog_enable()
2303 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog enable filter 0x%08x\n", ath10k_wmi_tlv_op_gen_pktlog_enable()
2309 ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar) ath10k_wmi_tlv_op_gen_pktlog_disable() argument
2318 skb = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_tlv_op_gen_pktlog_disable()
2331 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog disable\n"); ath10k_wmi_tlv_op_gen_pktlog_disable()
2336 ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id, ath10k_wmi_tlv_op_gen_bcn_tmpl() argument
2354 skb = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_tlv_op_gen_bcn_tmpl()
2394 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv bcn tmpl vdev_id %i\n", ath10k_wmi_tlv_op_gen_bcn_tmpl()
2400 ath10k_wmi_tlv_op_gen_prb_tmpl(struct ath10k *ar, u32 vdev_id, ath10k_wmi_tlv_op_gen_prb_tmpl() argument
2413 skb = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_tlv_op_gen_prb_tmpl()
2443 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv prb tmpl vdev_id %i\n", ath10k_wmi_tlv_op_gen_prb_tmpl()
2449 ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie() argument
2460 skb = ath10k_wmi_alloc_skb(ar, len); ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie()
2483 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv p2p go bcn ie for vdev %i\n", ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie()
2790 void ath10k_wmi_tlv_attach(struct ath10k *ar) ath10k_wmi_tlv_attach() argument
2792 ar->wmi.cmd = &wmi_tlv_cmd_map; ath10k_wmi_tlv_attach()
2793 ar->wmi.vdev_param = &wmi_tlv_vdev_param_map; ath10k_wmi_tlv_attach()
2794 ar->wmi.pdev_param = &wmi_tlv_pdev_param_map; ath10k_wmi_tlv_attach()
2795 ar->wmi.ops = &wmi_tlv_ops; ath10k_wmi_tlv_attach()
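Every gen_* builder above follows one template: allocate a wmi skb sized for a TLV header plus the command struct, stamp the header's tag and length, fill the command body, and return the skb for ath10k_wmi_cmd_send. A sketch of that template; only the sizeof(*tlv) + sizeof(*cmd) sizing is taken from the hits, and the wmi_tlv field names and order here are assumptions:

/* Template behind the gen_* builders; struct layout is sketched,
 * not copied from the driver. */
struct wmi_tlv_sketch {
	__le16 tag;	/* assumed field names and order */
	__le16 len;
	u8 value[0];
};

static struct sk_buff *gen_cmd_sketch(struct ath10k *ar, u16 tag,
				      size_t cmd_len)
{
	struct wmi_tlv_sketch *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + cmd_len);
	if (!skb)
		return NULL;

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(tag);
	tlv->len = __cpu_to_le16(cmd_len);
	/* the caller fills tlv->value with the command body */

	return skb;
}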
H A Dbmi.h186 void ath10k_bmi_start(struct ath10k *ar);
187 int ath10k_bmi_done(struct ath10k *ar);
188 int ath10k_bmi_get_target_info(struct ath10k *ar,
190 int ath10k_bmi_read_memory(struct ath10k *ar, u32 address,
192 int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
195 #define ath10k_bmi_read32(ar, item, val) \
202 ret = ath10k_bmi_read_memory(ar, addr, (u8 *)&tmp, 4); \
208 #define ath10k_bmi_write32(ar, item, val) \
215 ret = ath10k_bmi_write_memory(ar, address, \
220 int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result);
221 int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
222 int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
223 int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
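ath10k_bmi_read32/write32 above wrap the byte-level BMI memory accessors in GNU statement expressions, so a register-sized access reads like a single call returning an error code. A hedged reconstruction of the read side; the item-to-address lookup is not visible in these hits, so lookup_item_addr() is an illustrative stand-in:

/* Sketch of the read32 convenience macro over the byte accessor. */
#define my_bmi_read32(ar, item, val)					\
	({								\
		u32 __addr = lookup_item_addr(item); /* hypothetical */	\
		__le32 __tmp;						\
		int __ret = ath10k_bmi_read_memory(ar, __addr,		\
						   (u8 *)&__tmp, 4);	\
		if (!__ret)						\
			*(val) = __le32_to_cpu(__tmp);			\
		__ret;							\
	})

The write side mirrors this shape, converting the value to little endian before handing it to ath10k_bmi_write_memory.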
H A Dpci.h163 struct ath10k *ar; member in struct:ath10k_pci
190 static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) ath10k_pci_priv() argument
192 return (struct ath10k_pci *)ar->drv_priv; ath10k_pci_priv()
211 #define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr) \
230 static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, ath10k_pci_write32() argument
233 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_write32()
238 static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) ath10k_pci_read32() argument
240 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_read32()
245 static inline u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr) ath10k_pci_soc_read32() argument
247 return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr); ath10k_pci_soc_read32()
250 static inline void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val) ath10k_pci_soc_write32() argument
252 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val); ath10k_pci_soc_write32()
255 static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr) ath10k_pci_reg_read32() argument
257 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_reg_read32()
262 static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val) ath10k_pci_reg_write32() argument
264 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_reg_write32()
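The pci.h inlines above layer several address spaces over one MMIO window: ath10k_pci_read32/write32 take raw window offsets, the soc_* pair adds RTC_SOC_BASE_ADDRESS, and the reg_* pair also resolves through ar_pci. A sketch of the read side; the iomem field name in ath10k_pci (->mem here) is an assumption, only the layering is taken from the hits:

/* Layered MMIO reads mirroring the pci.h accessors above. */
static inline u32 my_pci_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	return ioread32(ar_pci->mem + offset);	/* raw window offset */
}

static inline u32 my_pci_soc_read32(struct ath10k *ar, u32 addr)
{
	/* SoC registers sit behind a fixed base inside the window */
	return my_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
}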
H A Dtestmode.h21 void ath10k_testmode_destroy(struct ath10k *ar);
23 bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb);
29 static inline void ath10k_testmode_destroy(struct ath10k *ar) ath10k_testmode_destroy() argument
33 static inline bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, ath10k_tm_event_wmi() argument
H A Dtxrx.h25 struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
27 struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id);
28 int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id,
30 int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id,
H A Dhw.h150 #define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
151 #define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
297 #define RTC_STATE_COLD_RESET_MASK ar->regs->rtc_state_cold_reset_mask
306 #define RTC_SOC_BASE_ADDRESS ar->regs->rtc_soc_base_address
307 #define RTC_WMAC_BASE_ADDRESS ar->regs->rtc_wmac_base_address
311 #define SOC_CORE_BASE_ADDRESS ar->regs->soc_core_base_address
320 #define CE_WRAPPER_BASE_ADDRESS ar->regs->ce_wrapper_base_address
321 #define CE0_BASE_ADDRESS ar->regs->ce0_base_address
322 #define CE1_BASE_ADDRESS ar->regs->ce1_base_address
323 #define CE2_BASE_ADDRESS ar->regs->ce2_base_address
324 #define CE3_BASE_ADDRESS ar->regs->ce3_base_address
325 #define CE4_BASE_ADDRESS ar->regs->ce4_base_address
326 #define CE5_BASE_ADDRESS ar->regs->ce5_base_address
327 #define CE6_BASE_ADDRESS ar->regs->ce6_base_address
328 #define CE7_BASE_ADDRESS ar->regs->ce7_base_address
335 #define SOC_RESET_CONTROL_SI0_RST_MASK ar->regs->soc_reset_control_si0_rst_mask
336 #define SOC_RESET_CONTROL_CE_RST_MASK ar->regs->soc_reset_control_ce_rst_mask
350 #define SOC_CHIP_ID_ADDRESS ar->regs->soc_chip_id_address
406 #define SCRATCH_3_ADDRESS ar->regs->scratch_3_address
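Rather than compile-time constants, hw.h resolves these register addresses through ar->regs, so one driver binary serves QCA988X and QCA6174 (see the QCA_REV_* tests above) by swapping the table at probe time. Note the consequence: any code using RTC_SOC_BASE_ADDRESS and friends implicitly needs an 'ar' in scope. A minimal sketch of the indirection, with illustrative names and example-only values:

/* Per-chip register map selected at probe time; names and values
 * here are illustrative. */
struct my_hw_regs {
	u32 rtc_soc_base_address;
	u32 soc_core_base_address;
	/* ... one entry per relocatable register block ... */
};

static const struct my_hw_regs my_qca988x_regs = {
	.rtc_soc_base_address	= 0x00004000,	/* example value only */
	.soc_core_base_address	= 0x00009000,	/* example value only */
};

/* probe-time selection, e.g. ar->regs = &my_qca988x_regs; the macros
 * above then expand against whichever table was chosen. */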
H A Dce.h110 struct ath10k *ar; member in struct:ath10k_ce_pipe
203 int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
205 void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
206 int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
210 void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
241 void ath10k_ce_per_engine_service_any(struct ath10k *ar);
242 void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
243 int ath10k_ce_disable_interrupts(struct ath10k *ar);
244 void ath10k_ce_enable_interrupts(struct ath10k *ar);
397 static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id) ath10k_ce_base_address() argument
433 #define CE_INTERRUPT_SUMMARY(ar) \
435 ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \
H A Dcore.h262 /* protected by ar->data_lock */
269 /* the following are protected by ar->data_lock */
305 struct ath10k *ar; member in struct:ath10k_vif
698 void ath10k_core_destroy(struct ath10k *ar);
700 int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode);
701 int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
702 void ath10k_core_stop(struct ath10k *ar);
703 int ath10k_core_register(struct ath10k *ar, u32 chip_id);
704 void ath10k_core_unregister(struct ath10k *ar);
H A Dhtc.h272 void (*target_send_suspend_complete)(struct ath10k *ar);
331 struct ath10k *ar; member in struct:ath10k_htc
349 int ath10k_htc_init(struct ath10k *ar);
357 struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size);
H A Dwmi.h4870 int ath10k_wmi_attach(struct ath10k *ar);
4871 void ath10k_wmi_detach(struct ath10k *ar);
4872 int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
4873 int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
4875 struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len);
4876 int ath10k_wmi_connect(struct ath10k *ar);
4878 struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len);
4879 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
4880 int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
4882 void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *);
4894 void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
4904 int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb);
4905 int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb);
4906 void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb);
4907 void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb);
4908 int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb);
4909 void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb);
4910 void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb);
4911 void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb);
4912 void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb);
4913 void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb);
4914 void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb);
4915 void ath10k_wmi_event_dfs(struct ath10k *ar,
4917 void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
4920 void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb);
4921 void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb);
4922 void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb);
4923 void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb);
4924 void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb);
4925 void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb);
4926 void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
4928 void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
4930 void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb);
4931 void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb);
4932 void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb);
4933 void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb);
4934 void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb);
4935 void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
4937 void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb);
4938 void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb);
4939 void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb);
4940 void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
4942 void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb);
4943 void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb);
4944 void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb);
4945 void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb);
4946 int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb);
H A Dhtt.h1245 struct ath10k *ar; member in struct:ath10k_htt
1406 int ath10k_htt_init(struct ath10k *ar);
1413 int ath10k_htt_rx_ring_refill(struct ath10k *ar);
1416 void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
1417 void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
/linux-4.1.27/arch/xtensa/include/asm/
H A Dcacheasm.h34 .macro __loop_cache_all ar at insn size line_width
36 movi \ar, 0
38 __loopi \ar, \at, \size, (4 << (\line_width)) variable
39 \insn \ar, 0 << (\line_width) variable
40 \insn \ar, 1 << (\line_width) variable
41 \insn \ar, 2 << (\line_width) variable
42 \insn \ar, 3 << (\line_width) variable
43 __endla \ar, \at, 4 << (\line_width) variable
48 .macro __loop_cache_range ar as at insn line_width
50 extui \at, \ar, 0, \line_width variable
53 __loops \ar, \as, \at, \line_width variable
54 \insn \ar, 0 variable
55 __endla \ar, \at, (1 << (\line_width)) variable
60 .macro __loop_cache_page ar at insn line_width
62 __loopi \ar, \at, PAGE_SIZE, 4 << (\line_width)
63 \insn \ar, 0 << (\line_width) variable
64 \insn \ar, 1 << (\line_width) variable
65 \insn \ar, 2 << (\line_width) variable
66 \insn \ar, 3 << (\line_width) variable
67 __endla \ar, \at, 4 << (\line_width) variable
74 .macro ___unlock_dcache_all ar at
76 __loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
84 .macro ___unlock_icache_all ar at
86 __loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE XCHAL_ICACHE_LINEWIDTH
91 .macro ___flush_invalidate_dcache_all ar at
93 __loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
98 .macro ___flush_dcache_all ar at
100 __loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
105 .macro ___invalidate_dcache_all ar at
107 __loop_cache_all \ar \at dii __stringify(DCACHE_WAY_SIZE) \
113 .macro ___invalidate_icache_all ar at
115 __loop_cache_all \ar \at iii __stringify(ICACHE_WAY_SIZE) \
122 .macro ___flush_invalidate_dcache_range ar as at
124 __loop_cache_range \ar \as \at dhwbi XCHAL_DCACHE_LINEWIDTH
129 .macro ___flush_dcache_range ar as at
131 __loop_cache_range \ar \as \at dhwb XCHAL_DCACHE_LINEWIDTH
136 .macro ___invalidate_dcache_range ar as at
138 __loop_cache_range \ar \as \at dhi XCHAL_DCACHE_LINEWIDTH
143 .macro ___invalidate_icache_range ar as at
145 __loop_cache_range \ar \as \at ihi XCHAL_ICACHE_LINEWIDTH
151 .macro ___flush_invalidate_dcache_page ar as
153 __loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH
158 .macro ___flush_dcache_page ar as
160 __loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH
165 .macro ___invalidate_dcache_page ar as
167 __loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH
172 .macro ___invalidate_icache_page ar as
174 __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH
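Every __loop_cache_* macro above expands to the same skeleton: step an address register across a range in (1 << line_width)-byte strides, issuing one cache-line instruction per step. A C analogue of __loop_cache_range, purely illustrative since the real code is Xtensa assembly:

#include <stdint.h>
#include <stddef.h>

/* C analogue of __loop_cache_range: apply a per-line cache op to
 * every line overlapping [start, start + size). */
static inline void cache_op_range(uintptr_t start, size_t size,
				  unsigned int line_width,
				  void (*line_op)(uintptr_t addr))
{
	uintptr_t line = (uintptr_t)1 << line_width;
	uintptr_t addr = start & ~(line - 1);	/* align down to a line */
	uintptr_t end = start + size;

	for (; addr < end; addr += line)
		line_op(addr);
}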
H A Dasmmacro.h20 * __loopi ar, at, size, inc
21 * ar register initialized with the start address
26 * __loops ar, as, at, inc_log2[, mask_log2][, cond][, ncond]
27 * ar register initialized with the start address
38 * __endla ar, at, incr
39 * ar start address (modified)
48 .macro __loopi ar, at, size, incr
54 addi \at, \ar, \size
64 .macro __loops ar, as, at, incr_log2, mask_log2, cond, ncond
90 add \at, \ar, \at
92 add \at, \ar, \as
100 * loop from ar to ax
103 .macro __loopt ar, as, at, incr_log2
106 sub \at, \as, \ar
136 .macro __endl ar, as
138 bltu \ar, \as, 98b variable
147 .macro __endla ar, as, incr
148 addi \ar, \ar, \incr variable
149 __endl \ar \as
/linux-4.1.27/drivers/net/wireless/ath/carl9170/
H A Dmain.c184 static void carl9170_ampdu_gc(struct ar9170 *ar) carl9170_ampdu_gc() argument
190 list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) { carl9170_ampdu_gc()
191 spin_lock_bh(&ar->tx_ampdu_list_lock); carl9170_ampdu_gc()
195 ar->tx_ampdu_list_len--; carl9170_ampdu_gc()
198 spin_unlock_bh(&ar->tx_ampdu_list_lock); carl9170_ampdu_gc()
201 rcu_assign_pointer(ar->tx_ampdu_iter, tid_info); carl9170_ampdu_gc()
212 carl9170_tx_status(ar, skb, false); carl9170_ampdu_gc()
219 static void carl9170_flush(struct ar9170 *ar, bool drop_queued) carl9170_flush() argument
229 for (i = 0; i < ar->hw->queues; i++) { carl9170_flush()
232 while ((skb = skb_dequeue(&ar->tx_pending[i]))) { carl9170_flush()
237 atomic_dec(&ar->tx_ampdu_upload); carl9170_flush()
239 carl9170_tx_status(ar, skb, false); carl9170_flush()
245 if (atomic_read(&ar->tx_total_queued)) carl9170_flush()
246 WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0); carl9170_flush()
249 static void carl9170_flush_ba(struct ar9170 *ar) carl9170_flush_ba() argument
258 spin_lock_bh(&ar->tx_ampdu_list_lock); carl9170_flush_ba()
259 list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) { carl9170_flush_ba()
269 spin_unlock_bh(&ar->tx_ampdu_list_lock); carl9170_flush_ba()
273 carl9170_tx_status(ar, skb, false); carl9170_flush_ba()
276 static void carl9170_zap_queues(struct ar9170 *ar) carl9170_zap_queues() argument
281 carl9170_ampdu_gc(ar); carl9170_zap_queues()
283 carl9170_flush_ba(ar); carl9170_zap_queues()
284 carl9170_flush(ar, true); carl9170_zap_queues()
286 for (i = 0; i < ar->hw->queues; i++) { carl9170_zap_queues()
287 spin_lock_bh(&ar->tx_status[i].lock); carl9170_zap_queues()
288 while (!skb_queue_empty(&ar->tx_status[i])) { carl9170_zap_queues()
291 skb = skb_peek(&ar->tx_status[i]); carl9170_zap_queues()
293 spin_unlock_bh(&ar->tx_status[i].lock); carl9170_zap_queues()
294 carl9170_tx_drop(ar, skb); carl9170_zap_queues()
295 spin_lock_bh(&ar->tx_status[i].lock); carl9170_zap_queues()
298 spin_unlock_bh(&ar->tx_status[i].lock); carl9170_zap_queues()
306 memset(&ar->tx_stats, 0, sizeof(ar->tx_stats)); carl9170_zap_queues()
307 for (i = 0; i < ar->hw->queues; i++) carl9170_zap_queues()
308 ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD; carl9170_zap_queues()
310 for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++) carl9170_zap_queues()
311 ar->mem_bitmap[i] = 0; carl9170_zap_queues()
314 list_for_each_entry_rcu(cvif, &ar->vif_list, list) { carl9170_zap_queues()
315 spin_lock_bh(&ar->beacon_lock); carl9170_zap_queues()
318 spin_unlock_bh(&ar->beacon_lock); carl9170_zap_queues()
322 atomic_set(&ar->tx_ampdu_upload, 0); carl9170_zap_queues()
323 atomic_set(&ar->tx_ampdu_scheduler, 0); carl9170_zap_queues()
324 atomic_set(&ar->tx_total_pending, 0); carl9170_zap_queues()
325 atomic_set(&ar->tx_total_queued, 0); carl9170_zap_queues()
326 atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks); carl9170_zap_queues()
339 struct ar9170 *ar = hw->priv; carl9170_op_start() local
342 mutex_lock(&ar->mutex); carl9170_op_start()
344 carl9170_zap_queues(ar); carl9170_op_start()
347 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VO], 2, 3, 7, 47); carl9170_op_start()
348 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VI], 2, 7, 15, 94); carl9170_op_start()
349 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BE], 3, 15, 1023, 0); carl9170_op_start()
350 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BK], 7, 15, 1023, 0); carl9170_op_start()
351 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_SPECIAL], 2, 3, 7, 0); carl9170_op_start()
353 ar->current_factor = ar->current_density = -1; carl9170_op_start()
355 ar->usedkeys = 1; carl9170_op_start()
356 ar->filter_state = 0; carl9170_op_start()
357 ar->ps.last_action = jiffies; carl9170_op_start()
358 ar->ps.last_slept = jiffies; carl9170_op_start()
359 ar->erp_mode = CARL9170_ERP_AUTO; carl9170_op_start()
364 ar->disable_offload = modparam_nohwcrypt | carl9170_op_start()
365 ar->fw.disable_offload_fw; carl9170_op_start()
366 ar->rx_software_decryption = ar->disable_offload; carl9170_op_start()
368 for (i = 0; i < ar->hw->queues; i++) { carl9170_op_start()
369 ar->queue_stop_timeout[i] = jiffies; carl9170_op_start()
370 ar->max_queue_stop_timeout[i] = 0; carl9170_op_start()
373 atomic_set(&ar->mem_allocs, 0); carl9170_op_start()
375 err = carl9170_usb_open(ar); carl9170_op_start()
379 err = carl9170_init_mac(ar); carl9170_op_start()
383 err = carl9170_set_qos(ar); carl9170_op_start()
387 if (ar->fw.rx_filter) { carl9170_op_start()
388 err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA | carl9170_op_start()
394 err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, carl9170_op_start()
401 err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE, carl9170_op_start()
406 err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE, carl9170_op_start()
412 err = carl9170_disable_key(ar, i); carl9170_op_start()
418 carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED); carl9170_op_start()
420 ieee80211_queue_delayed_work(ar->hw, &ar->stat_work, carl9170_op_start()
423 ieee80211_wake_queues(ar->hw); carl9170_op_start()
427 mutex_unlock(&ar->mutex); carl9170_op_start()
431 static void carl9170_cancel_worker(struct ar9170 *ar) carl9170_cancel_worker() argument
433 cancel_delayed_work_sync(&ar->stat_work); carl9170_cancel_worker()
434 cancel_delayed_work_sync(&ar->tx_janitor); carl9170_cancel_worker()
436 cancel_delayed_work_sync(&ar->led_work); carl9170_cancel_worker()
438 cancel_work_sync(&ar->ps_work); carl9170_cancel_worker()
439 cancel_work_sync(&ar->ping_work); carl9170_cancel_worker()
440 cancel_work_sync(&ar->ampdu_work); carl9170_cancel_worker()
445 struct ar9170 *ar = hw->priv; carl9170_op_stop() local
447 carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE); carl9170_op_stop()
449 ieee80211_stop_queues(ar->hw); carl9170_op_stop()
451 mutex_lock(&ar->mutex); carl9170_op_stop()
452 if (IS_ACCEPTING_CMD(ar)) { carl9170_op_stop()
453 RCU_INIT_POINTER(ar->beacon_iter, NULL); carl9170_op_stop()
455 carl9170_led_set_state(ar, 0); carl9170_op_stop()
458 carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0); carl9170_op_stop()
459 carl9170_usb_stop(ar); carl9170_op_stop()
462 carl9170_zap_queues(ar); carl9170_op_stop()
463 mutex_unlock(&ar->mutex); carl9170_op_stop()
465 carl9170_cancel_worker(ar); carl9170_op_stop()
470 struct ar9170 *ar = container_of(work, struct ar9170, carl9170_restart_work() local
474 ar->usedkeys = 0; carl9170_restart_work()
475 ar->filter_state = 0; carl9170_restart_work()
476 carl9170_cancel_worker(ar); carl9170_restart_work()
478 mutex_lock(&ar->mutex); carl9170_restart_work()
479 if (!ar->force_usb_reset) { carl9170_restart_work()
480 err = carl9170_usb_restart(ar); carl9170_restart_work()
483 dev_err(&ar->udev->dev, "Failed to restart device (%d).\n", err); carl9170_restart_work()
485 dev_info(&ar->udev->dev, "device restarted successfully.\n"); carl9170_restart_work()
488 carl9170_zap_queues(ar); carl9170_restart_work()
489 mutex_unlock(&ar->mutex); carl9170_restart_work()
491 if (!err && !ar->force_usb_reset) { carl9170_restart_work()
492 ar->restart_counter++; carl9170_restart_work()
493 atomic_set(&ar->pending_restarts, 0); carl9170_restart_work()
495 ieee80211_restart_hw(ar->hw); carl9170_restart_work()
503 carl9170_usb_reset(ar); carl9170_restart_work()
507 void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r) carl9170_restart() argument
509 carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE); carl9170_restart()
516 if (atomic_inc_return(&ar->pending_restarts) > 1) { carl9170_restart()
517 dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r); carl9170_restart()
521 ieee80211_stop_queues(ar->hw); carl9170_restart()
523 dev_err(&ar->udev->dev, "restart device (%d)\n", r); carl9170_restart()
527 ar->last_reason = r; carl9170_restart()
529 if (!ar->registered) carl9170_restart()
532 if (!IS_ACCEPTING_CMD(ar) || ar->needs_full_reset) carl9170_restart()
533 ar->force_usb_reset = true; carl9170_restart()
535 ieee80211_queue_work(ar->hw, &ar->restart_work); carl9170_restart()
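carl9170_restart above serializes recovery with an atomic guard: only the caller that takes pending_restarts from 0 to 1 queues restart_work, later callers are dropped while recovery is pending, and carl9170_restart_work re-arms the guard with atomic_set(..., 0) on success. A condensed sketch of the guard, with the error and USB-reset paths elided:

/* One-restart-in-flight guard, condensed from the hits above. */
static void restart_sketch(struct ar9170 *ar)
{
	if (atomic_inc_return(&ar->pending_restarts) > 1)
		return;		/* a restart is already in flight */

	ieee80211_queue_work(ar->hw, &ar->restart_work);
}

static void restart_work_sketch(struct ar9170 *ar)
{
	/* ... recover the device (see carl9170_restart_work above) ... */
	atomic_set(&ar->pending_restarts, 0);	/* allow the next restart */
}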
546 struct ar9170 *ar = container_of(work, struct ar9170, ping_work); carl9170_ping_work() local
549 if (!IS_STARTED(ar)) carl9170_ping_work()
552 mutex_lock(&ar->mutex); carl9170_ping_work()
553 err = carl9170_echo_test(ar, 0xdeadbeef); carl9170_ping_work()
555 carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE); carl9170_ping_work()
556 mutex_unlock(&ar->mutex); carl9170_ping_work()
559 static int carl9170_init_interface(struct ar9170 *ar, carl9170_init_interface() argument
562 struct ath_common *common = &ar->common; carl9170_init_interface()
566 WARN_ON_ONCE(IS_STARTED(ar)); carl9170_init_interface()
582 ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) && carl9170_init_interface()
591 ar->disable_offload |= vif->p2p; carl9170_init_interface()
593 ar->rx_software_decryption = ar->disable_offload; carl9170_init_interface()
595 err = carl9170_set_operating_mode(ar); carl9170_init_interface()
604 struct ar9170 *ar = hw->priv; carl9170_op_add_interface() local
607 mutex_lock(&ar->mutex); carl9170_op_add_interface()
617 spin_lock_bh(&ar->beacon_lock); carl9170_op_add_interface()
620 spin_unlock_bh(&ar->beacon_lock); carl9170_op_add_interface()
630 * The first (from HEAD/TOP) interface in the ar->vif_list is carl9170_op_add_interface()
634 main_vif = carl9170_get_main_vif(ar); carl9170_op_add_interface()
677 vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0); carl9170_op_add_interface()
686 BUG_ON(ar->vif_priv[vif_id].id != vif_id); carl9170_op_add_interface()
691 ar->vifs++; carl9170_op_add_interface()
697 list_add_rcu(&vif_priv->list, &ar->vif_list); carl9170_op_add_interface()
702 list_add_tail_rcu(&vif_priv->list, &ar->vif_list); carl9170_op_add_interface()
704 rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif); carl9170_op_add_interface()
707 main_vif = carl9170_get_main_vif(ar); carl9170_op_add_interface()
710 rcu_assign_pointer(ar->beacon_iter, vif_priv); carl9170_op_add_interface()
718 * But we are still holding ar->mutex, so the carl9170_op_add_interface()
721 err = carl9170_mod_virtual_mac(ar, old_main_priv->id, carl9170_op_add_interface()
727 err = carl9170_init_interface(ar, vif); carl9170_op_add_interface()
732 err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr); carl9170_op_add_interface()
738 if (ar->fw.tx_seq_table) { carl9170_op_add_interface()
739 err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4, carl9170_op_add_interface()
748 bitmap_release_region(&ar->vif_bitmap, vif_id, 0); carl9170_op_add_interface()
749 ar->vifs--; carl9170_op_add_interface()
750 RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL); carl9170_op_add_interface()
752 mutex_unlock(&ar->mutex); carl9170_op_add_interface()
755 if (ar->vifs > 1) carl9170_op_add_interface()
756 ar->ps.off_override |= PS_OFF_VIF; carl9170_op_add_interface()
758 mutex_unlock(&ar->mutex); carl9170_op_add_interface()
769 struct ar9170 *ar = hw->priv; carl9170_op_remove_interface() local
772 mutex_lock(&ar->mutex); carl9170_op_remove_interface()
777 ar->vifs--; carl9170_op_remove_interface()
780 main_vif = carl9170_get_main_vif(ar); carl9170_op_remove_interface()
788 RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL); carl9170_op_remove_interface()
793 if (ar->vifs) { carl9170_op_remove_interface()
794 WARN_ON(carl9170_init_interface(ar, carl9170_op_remove_interface()
795 carl9170_get_main_vif(ar))); carl9170_op_remove_interface()
797 carl9170_set_operating_mode(ar); carl9170_op_remove_interface()
802 WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL)); carl9170_op_remove_interface()
805 carl9170_update_beacon(ar, false); carl9170_op_remove_interface()
806 carl9170_flush_cab(ar, id); carl9170_op_remove_interface()
808 spin_lock_bh(&ar->beacon_lock); carl9170_op_remove_interface()
811 spin_unlock_bh(&ar->beacon_lock); carl9170_op_remove_interface()
813 bitmap_release_region(&ar->vif_bitmap, id, 0); carl9170_op_remove_interface()
815 carl9170_set_beacon_timers(ar); carl9170_op_remove_interface()
817 if (ar->vifs == 1) carl9170_op_remove_interface()
818 ar->ps.off_override &= ~PS_OFF_VIF; carl9170_op_remove_interface()
821 mutex_unlock(&ar->mutex); carl9170_op_remove_interface()
826 void carl9170_ps_check(struct ar9170 *ar) carl9170_ps_check() argument
828 ieee80211_queue_work(ar->hw, &ar->ps_work); carl9170_ps_check()
831 /* caller must hold ar->mutex */ carl9170_ps_update()
832 static int carl9170_ps_update(struct ar9170 *ar) carl9170_ps_update() argument
837 if (!ar->ps.off_override) carl9170_ps_update()
838 ps = (ar->hw->conf.flags & IEEE80211_CONF_PS); carl9170_ps_update()
840 if (ps != ar->ps.state) { carl9170_ps_update()
841 err = carl9170_powersave(ar, ps); carl9170_ps_update()
845 if (ar->ps.state && !ps) { carl9170_ps_update()
846 ar->ps.sleep_ms = jiffies_to_msecs(jiffies - carl9170_ps_update()
847 ar->ps.last_action); carl9170_ps_update()
851 ar->ps.last_slept = jiffies; carl9170_ps_update()
853 ar->ps.last_action = jiffies; carl9170_ps_update()
854 ar->ps.state = ps; carl9170_ps_update()
862 struct ar9170 *ar = container_of(work, struct ar9170, carl9170_ps_work() local
864 mutex_lock(&ar->mutex); carl9170_ps_work()
865 if (IS_STARTED(ar)) carl9170_ps_work()
866 WARN_ON_ONCE(carl9170_ps_update(ar) != 0); carl9170_ps_work()
867 mutex_unlock(&ar->mutex); carl9170_ps_work()
870 static int carl9170_update_survey(struct ar9170 *ar, bool flush, bool noise) carl9170_update_survey() argument
875 err = carl9170_get_noisefloor(ar); carl9170_update_survey()
880 if (ar->fw.hw_counters) { carl9170_update_survey()
881 err = carl9170_collect_tally(ar); carl9170_update_survey()
887 memset(&ar->tally, 0, sizeof(ar->tally)); carl9170_update_survey()
894 struct ar9170 *ar = container_of(work, struct ar9170, stat_work.work); carl9170_stat_work() local
897 mutex_lock(&ar->mutex); carl9170_stat_work()
898 err = carl9170_update_survey(ar, false, true); carl9170_stat_work()
899 mutex_unlock(&ar->mutex); carl9170_stat_work()
904 ieee80211_queue_delayed_work(ar->hw, &ar->stat_work, carl9170_stat_work()
910 struct ar9170 *ar = hw->priv; carl9170_op_config() local
913 mutex_lock(&ar->mutex); carl9170_op_config()
920 err = carl9170_ps_update(ar); carl9170_op_config()
935 err = carl9170_set_slot_time(ar); carl9170_op_config()
939 err = carl9170_update_survey(ar, true, false); carl9170_op_config()
943 err = carl9170_set_channel(ar, hw->conf.chandef.chan, carl9170_op_config()
948 err = carl9170_update_survey(ar, false, true); carl9170_op_config()
952 err = carl9170_set_dyn_sifs_ack(ar); carl9170_op_config()
956 err = carl9170_set_rts_cts_rate(ar); carl9170_op_config()
962 err = carl9170_set_mac_tpc(ar, ar->hw->conf.chandef.chan); carl9170_op_config()
968 mutex_unlock(&ar->mutex); carl9170_op_config()
992 struct ar9170 *ar = hw->priv; carl9170_op_configure_filter() local
995 *new_flags &= FIF_ALLMULTI | ar->rx_filter_caps; carl9170_op_configure_filter()
997 if (!IS_ACCEPTING_CMD(ar)) carl9170_op_configure_filter()
1000 mutex_lock(&ar->mutex); carl9170_op_configure_filter()
1002 ar->filter_state = *new_flags; carl9170_op_configure_filter()
1011 if (multicast != ar->cur_mc_hash) carl9170_op_configure_filter()
1012 WARN_ON(carl9170_update_multicast(ar, multicast)); carl9170_op_configure_filter()
1015 ar->sniffer_enabled = !!(*new_flags & carl9170_op_configure_filter()
1018 WARN_ON(carl9170_set_operating_mode(ar)); carl9170_op_configure_filter()
1021 if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) { carl9170_op_configure_filter()
1024 if (!ar->fw.ba_filter) carl9170_op_configure_filter()
1041 WARN_ON(carl9170_rx_filter(ar, rx_filter)); carl9170_op_configure_filter()
1044 mutex_unlock(&ar->mutex); carl9170_op_configure_filter()
1053 struct ar9170 *ar = hw->priv; carl9170_op_bss_info_changed() local
1054 struct ath_common *common = &ar->common; carl9170_op_bss_info_changed()
1059 mutex_lock(&ar->mutex); carl9170_op_bss_info_changed()
1061 main_vif = carl9170_get_main_vif(ar); carl9170_op_bss_info_changed()
1071 list_for_each_entry_rcu(iter, &ar->vif_list, list) { carl9170_op_bss_info_changed()
1078 ar->beacon_enabled = i; carl9170_op_bss_info_changed()
1082 err = carl9170_update_beacon(ar, false); carl9170_op_bss_info_changed()
1106 err = carl9170_set_beacon_timers(ar); carl9170_op_bss_info_changed()
1128 err = carl9170_set_operating_mode(ar); carl9170_op_bss_info_changed()
1134 ar->common.curaid = bss_conf->aid; carl9170_op_bss_info_changed()
1135 err = carl9170_set_beacon_timers(ar); carl9170_op_bss_info_changed()
1141 err = carl9170_set_slot_time(ar); carl9170_op_bss_info_changed()
1147 err = carl9170_set_mac_rates(ar); carl9170_op_bss_info_changed()
1153 WARN_ON_ONCE(err && IS_STARTED(ar)); carl9170_op_bss_info_changed()
1154 mutex_unlock(&ar->mutex); carl9170_op_bss_info_changed()
1160 struct ar9170 *ar = hw->priv; carl9170_op_get_tsf() local
1164 mutex_lock(&ar->mutex); carl9170_op_get_tsf()
1165 err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF, carl9170_op_get_tsf()
1167 mutex_unlock(&ar->mutex); carl9170_op_get_tsf()
1179 struct ar9170 *ar = hw->priv; carl9170_op_set_key() local
1183 if (ar->disable_offload || !vif) carl9170_op_set_key()
1193 if (!is_main_vif(ar, vif)) { carl9170_op_set_key()
1194 mutex_lock(&ar->mutex); carl9170_op_set_key()
1226 mutex_lock(&ar->mutex); carl9170_op_set_key()
1228 if (!IS_STARTED(ar)) { carl9170_op_set_key()
1239 if (!(ar->usedkeys & BIT(i))) carl9170_op_set_key()
1247 err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL, carl9170_op_set_key()
1254 err = carl9170_upload_key(ar, i, sta ? sta->addr : carl9170_op_set_key()
1268 ar->usedkeys |= BIT(i); carl9170_op_set_key()
1272 if (!IS_STARTED(ar)) { carl9170_op_set_key()
1279 ar->usedkeys &= ~BIT(key->hw_key_idx); carl9170_op_set_key()
1281 err = carl9170_upload_key(ar, key->hw_key_idx, NULL, carl9170_op_set_key()
1288 err = carl9170_upload_key(ar, key->hw_key_idx, carl9170_op_set_key()
1298 err = carl9170_disable_key(ar, key->hw_key_idx); carl9170_op_set_key()
1304 mutex_unlock(&ar->mutex); carl9170_op_set_key()
1308 if (!ar->rx_software_decryption) { carl9170_op_set_key()
1309 ar->rx_software_decryption = true; carl9170_op_set_key()
1310 carl9170_set_operating_mode(ar); carl9170_op_set_key()
1312 mutex_unlock(&ar->mutex); carl9170_op_set_key()
1349 struct ar9170 *ar = hw->priv; carl9170_op_sta_remove() local
1368 spin_lock_bh(&ar->tx_ampdu_list_lock); carl9170_op_sta_remove()
1371 spin_unlock_bh(&ar->tx_ampdu_list_lock); carl9170_op_sta_remove()
1377 carl9170_ampdu_gc(ar); carl9170_op_sta_remove()
1387 struct ar9170 *ar = hw->priv; carl9170_op_conf_tx() local
1390 mutex_lock(&ar->mutex); carl9170_op_conf_tx()
1391 if (queue < ar->hw->queues) { carl9170_op_conf_tx()
1392 memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param)); carl9170_op_conf_tx()
1393 ret = carl9170_set_qos(ar); carl9170_op_conf_tx()
1398 mutex_unlock(&ar->mutex); carl9170_op_conf_tx()
1404 struct ar9170 *ar = container_of(work, struct ar9170, carl9170_ampdu_work() local
1407 if (!IS_STARTED(ar)) carl9170_ampdu_work()
1410 mutex_lock(&ar->mutex); carl9170_ampdu_work()
1411 carl9170_ampdu_gc(ar); carl9170_ampdu_work()
1412 mutex_unlock(&ar->mutex); carl9170_ampdu_work()
1421 struct ar9170 *ar = hw->priv; carl9170_op_ampdu_action() local
1450 spin_lock_bh(&ar->tx_ampdu_list_lock); carl9170_op_ampdu_action()
1451 ar->tx_ampdu_list_len++; carl9170_op_ampdu_action()
1452 list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list); carl9170_op_ampdu_action()
1454 spin_unlock_bh(&ar->tx_ampdu_list_lock); carl9170_op_ampdu_action()
1465 spin_lock_bh(&ar->tx_ampdu_list_lock); carl9170_op_ampdu_action()
1468 spin_unlock_bh(&ar->tx_ampdu_list_lock); carl9170_op_ampdu_action()
1475 ieee80211_queue_work(ar->hw, &ar->ampdu_work); carl9170_op_ampdu_action()
1509 static int carl9170_register_wps_button(struct ar9170 *ar) carl9170_register_wps_button() argument
1514 if (!(ar->features & CARL9170_WPS_BUTTON)) carl9170_register_wps_button()
1521 snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button", carl9170_register_wps_button()
1522 wiphy_name(ar->hw->wiphy)); carl9170_register_wps_button()
1524 snprintf(ar->wps.phys, sizeof(ar->wps.phys), carl9170_register_wps_button()
1525 "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy)); carl9170_register_wps_button()
1527 input->name = ar->wps.name; carl9170_register_wps_button()
1528 input->phys = ar->wps.phys; carl9170_register_wps_button()
1530 input->dev.parent = &ar->hw->wiphy->dev; carl9170_register_wps_button()
1540 ar->wps.pbc = input; carl9170_register_wps_button()
1546 static int carl9170_rng_get(struct ar9170 *ar) carl9170_rng_get() argument
1562 if (!IS_ACCEPTING_CMD(ar) || !ar->rng.initialized) carl9170_rng_get()
1565 count = ARRAY_SIZE(ar->rng.cache); carl9170_rng_get()
1567 err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG, carl9170_rng_get()
1575 ar->rng.cache[off + i] = buf[i]; carl9170_rng_get()
1581 ar->rng.cache_idx = 0; carl9170_rng_get()
1590 struct ar9170 *ar = (struct ar9170 *)rng->priv; carl9170_rng_read() local
1593 mutex_lock(&ar->mutex); carl9170_rng_read()
1594 if (ar->rng.cache_idx >= ARRAY_SIZE(ar->rng.cache)) { carl9170_rng_read()
1595 ret = carl9170_rng_get(ar); carl9170_rng_read()
1597 mutex_unlock(&ar->mutex); carl9170_rng_read()
1602 *data = ar->rng.cache[ar->rng.cache_idx++]; carl9170_rng_read()
1603 mutex_unlock(&ar->mutex); carl9170_rng_read()
1608 static void carl9170_unregister_hwrng(struct ar9170 *ar) carl9170_unregister_hwrng() argument
1610 if (ar->rng.initialized) { carl9170_unregister_hwrng()
1611 hwrng_unregister(&ar->rng.rng); carl9170_unregister_hwrng()
1612 ar->rng.initialized = false; carl9170_unregister_hwrng()
1616 static int carl9170_register_hwrng(struct ar9170 *ar) carl9170_register_hwrng() argument
1620 snprintf(ar->rng.name, ARRAY_SIZE(ar->rng.name), carl9170_register_hwrng()
1621 "%s_%s", KBUILD_MODNAME, wiphy_name(ar->hw->wiphy)); carl9170_register_hwrng()
1622 ar->rng.rng.name = ar->rng.name; carl9170_register_hwrng()
1623 ar->rng.rng.data_read = carl9170_rng_read; carl9170_register_hwrng()
1624 ar->rng.rng.priv = (unsigned long)ar; carl9170_register_hwrng()
1626 if (WARN_ON(ar->rng.initialized)) carl9170_register_hwrng()
1629 err = hwrng_register(&ar->rng.rng); carl9170_register_hwrng()
1631 dev_err(&ar->udev->dev, "Failed to register the random " carl9170_register_hwrng()
1636 ar->rng.initialized = true; carl9170_register_hwrng()
1638 err = carl9170_rng_get(ar); carl9170_register_hwrng()
1640 carl9170_unregister_hwrng(ar); carl9170_register_hwrng()
1651 struct ar9170 *ar = hw->priv; carl9170_op_get_survey() local
1656 chan = ar->channel; carl9170_op_get_survey()
1661 mutex_lock(&ar->mutex); carl9170_op_get_survey()
1662 err = carl9170_update_survey(ar, false, true); carl9170_op_get_survey()
1663 mutex_unlock(&ar->mutex); carl9170_op_get_survey()
1669 band = ar->hw->wiphy->bands[b]; carl9170_op_get_survey()
1684 memcpy(survey, &ar->survey[idx], sizeof(*survey)); carl9170_op_get_survey()
1689 if (ar->channel == chan) carl9170_op_get_survey()
1692 if (ar->fw.hw_counters) { carl9170_op_get_survey()
1705 struct ar9170 *ar = hw->priv; carl9170_op_flush() local
1708 mutex_lock(&ar->mutex); carl9170_op_flush()
1709 for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num) carl9170_op_flush()
1710 carl9170_flush_cab(ar, vid); carl9170_op_flush()
1712 carl9170_flush(ar, drop); carl9170_op_flush()
1713 mutex_unlock(&ar->mutex); carl9170_op_flush()
1719 struct ar9170 *ar = hw->priv; carl9170_op_get_stats() local
1722 stats->dot11ACKFailureCount = ar->tx_ack_failures; carl9170_op_get_stats()
1723 stats->dot11FCSErrorCount = ar->tx_fcs_errors; carl9170_op_get_stats()
1749 struct ar9170 *ar = hw->priv; carl9170_tx_frames_pending() local
1751 return !!atomic_read(&ar->tx_total_queued); carl9170_tx_frames_pending()
1780 struct ar9170 *ar; carl9170_alloc() local
1798 ar = hw->priv; carl9170_alloc()
1799 ar->hw = hw; carl9170_alloc()
1800 ar->rx_failover = skb; carl9170_alloc()
1802 memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head)); carl9170_alloc()
1803 ar->rx_has_plcp = false; carl9170_alloc()
1815 mutex_init(&ar->mutex); carl9170_alloc()
1816 spin_lock_init(&ar->beacon_lock); carl9170_alloc()
1817 spin_lock_init(&ar->cmd_lock); carl9170_alloc()
1818 spin_lock_init(&ar->tx_stats_lock); carl9170_alloc()
1819 spin_lock_init(&ar->tx_ampdu_list_lock); carl9170_alloc()
1820 spin_lock_init(&ar->mem_lock); carl9170_alloc()
1821 spin_lock_init(&ar->state_lock); carl9170_alloc()
1822 atomic_set(&ar->pending_restarts, 0); carl9170_alloc()
1823 ar->vifs = 0; carl9170_alloc()
1824 for (i = 0; i < ar->hw->queues; i++) { carl9170_alloc()
1825 skb_queue_head_init(&ar->tx_status[i]); carl9170_alloc()
1826 skb_queue_head_init(&ar->tx_pending[i]); carl9170_alloc()
1828 INIT_LIST_HEAD(&ar->bar_list[i]); carl9170_alloc()
1829 spin_lock_init(&ar->bar_list_lock[i]); carl9170_alloc()
1831 INIT_WORK(&ar->ps_work, carl9170_ps_work); carl9170_alloc()
1832 INIT_WORK(&ar->ping_work, carl9170_ping_work); carl9170_alloc()
1833 INIT_WORK(&ar->restart_work, carl9170_restart_work); carl9170_alloc()
1834 INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work); carl9170_alloc()
1835 INIT_DELAYED_WORK(&ar->stat_work, carl9170_stat_work); carl9170_alloc()
1836 INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor); carl9170_alloc()
1837 INIT_LIST_HEAD(&ar->tx_ampdu_list); carl9170_alloc()
1838 rcu_assign_pointer(ar->tx_ampdu_iter, carl9170_alloc()
1839 (struct carl9170_sta_tid *) &ar->tx_ampdu_list); carl9170_alloc()
1841 bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num); carl9170_alloc()
1842 INIT_LIST_HEAD(&ar->vif_list); carl9170_alloc()
1843 init_completion(&ar->tx_flush); carl9170_alloc()
1873 for (i = 0; i < ARRAY_SIZE(ar->noise); i++) carl9170_alloc()
1874 ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */ carl9170_alloc()
1876 return ar; carl9170_alloc()
1883 static int carl9170_read_eeprom(struct ar9170 *ar) carl9170_read_eeprom() argument
1887 u8 *eeprom = (void *)&ar->eeprom; carl9170_read_eeprom()
1891 BUILD_BUG_ON(sizeof(ar->eeprom) & 3); carl9170_read_eeprom()
1896 BUILD_BUG_ON(sizeof(ar->eeprom) % RB); carl9170_read_eeprom()
1899 for (i = 0; i < sizeof(ar->eeprom) / RB; i++) { carl9170_read_eeprom()
1904 err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG, carl9170_read_eeprom()
1916 static int carl9170_parse_eeprom(struct ar9170 *ar) carl9170_parse_eeprom() argument
1918 struct ath_regulatory *regulatory = &ar->common.regulatory; carl9170_parse_eeprom()
1923 if (ar->eeprom.length == cpu_to_le16(0xffff)) carl9170_parse_eeprom()
1926 rx_streams = hweight8(ar->eeprom.rx_mask); carl9170_parse_eeprom()
1927 tx_streams = hweight8(ar->eeprom.tx_mask); carl9170_parse_eeprom()
1942 if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) { carl9170_parse_eeprom()
1943 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = carl9170_parse_eeprom()
1948 if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) { carl9170_parse_eeprom()
1949 ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = carl9170_parse_eeprom()
1958 ar->survey = kzalloc(sizeof(struct survey_info) * chans, GFP_KERNEL); carl9170_parse_eeprom()
1959 if (!ar->survey) carl9170_parse_eeprom()
1961 ar->num_channels = chans; carl9170_parse_eeprom()
1963 regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]); carl9170_parse_eeprom()
1966 SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address); carl9170_parse_eeprom()
1975 struct ar9170 *ar = hw->priv; carl9170_reg_notifier() local
1977 ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory); carl9170_reg_notifier()
1980 int carl9170_register(struct ar9170 *ar) carl9170_register() argument
1982 struct ath_regulatory *regulatory = &ar->common.regulatory; carl9170_register()
1985 if (WARN_ON(ar->mem_bitmap)) carl9170_register()
1988 ar->mem_bitmap = kzalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG) * carl9170_register()
1991 if (!ar->mem_bitmap) carl9170_register()
1995 err = carl9170_read_eeprom(ar); carl9170_register()
1999 err = carl9170_parse_eeprom(ar); carl9170_register()
2003 err = ath_regd_init(regulatory, ar->hw->wiphy, carl9170_register()
2013 for (i = 0; i < ar->fw.vif_num; i++) { carl9170_register()
2014 ar->vif_priv[i].id = i; carl9170_register()
2015 ar->vif_priv[i].vif = NULL; carl9170_register()
2018 err = ieee80211_register_hw(ar->hw); carl9170_register()
2023 ar->registered = true; carl9170_register()
2026 regulatory_hint(ar->hw->wiphy, regulatory->alpha2); carl9170_register()
2029 carl9170_debugfs_register(ar); carl9170_register()
2032 err = carl9170_led_init(ar); carl9170_register()
2037 err = carl9170_led_register(ar); carl9170_register()
2043 err = carl9170_register_wps_button(ar); carl9170_register()
2049 err = carl9170_register_hwrng(ar); carl9170_register()
2054 dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n", carl9170_register()
2055 wiphy_name(ar->hw->wiphy)); carl9170_register()
2060 carl9170_unregister(ar); carl9170_register()
2064 void carl9170_unregister(struct ar9170 *ar) carl9170_unregister() argument
2066 if (!ar->registered) carl9170_unregister()
2069 ar->registered = false; carl9170_unregister()
2072 carl9170_led_unregister(ar); carl9170_unregister()
2076 carl9170_debugfs_unregister(ar); carl9170_unregister()
2080 if (ar->wps.pbc) { carl9170_unregister()
2081 input_unregister_device(ar->wps.pbc); carl9170_unregister()
2082 ar->wps.pbc = NULL; carl9170_unregister()
2087 carl9170_unregister_hwrng(ar); carl9170_unregister()
2090 carl9170_cancel_worker(ar); carl9170_unregister()
2091 cancel_work_sync(&ar->restart_work); carl9170_unregister()
2093 ieee80211_unregister_hw(ar->hw); carl9170_unregister()
2096 void carl9170_free(struct ar9170 *ar) carl9170_free() argument
2098 WARN_ON(ar->registered); carl9170_free()
2099 WARN_ON(IS_INITIALIZED(ar)); carl9170_free()
2101 kfree_skb(ar->rx_failover); carl9170_free()
2102 ar->rx_failover = NULL; carl9170_free()
2104 kfree(ar->mem_bitmap); carl9170_free()
2105 ar->mem_bitmap = NULL; carl9170_free()
2107 kfree(ar->survey); carl9170_free()
2108 ar->survey = NULL; carl9170_free()
2110 mutex_destroy(&ar->mutex); carl9170_free()
2112 ieee80211_free_hw(ar->hw); carl9170_free()
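Note: one discipline recurs across the main.c hits above: every mac80211 callback serializes on ar->mutex and re-checks the device state (IS_STARTED()/IS_ACCEPTING_CMD()) before touching firmware, as carl9170_ps_work() and carl9170_op_config() do. Below is a minimal, self-contained sketch of that lock-then-check pattern in portable C with pthreads; dev_state, ps_update() and ps_work() are illustrative names, not driver symbols.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum dev_state { DEV_STOPPED, DEV_IDLE, DEV_STARTED };

    struct dev {
        pthread_mutex_t mutex;   /* serializes all state changes */
        enum dev_state state;
        bool ps_enabled;         /* current power-save setting */
    };

    /* caller must hold dev->mutex -- mirrors carl9170_ps_update() */
    static int ps_update(struct dev *d, bool want_ps)
    {
        if (want_ps == d->ps_enabled)
            return 0;            /* nothing to do */
        /* a real driver would issue a firmware command here */
        d->ps_enabled = want_ps;
        return 0;
    }

    /* work handler: lock, check state, act -- mirrors carl9170_ps_work() */
    static void ps_work(struct dev *d, bool want_ps)
    {
        pthread_mutex_lock(&d->mutex);
        if (d->state == DEV_STARTED)     /* IS_STARTED(ar) analogue */
            ps_update(d, want_ps);
        pthread_mutex_unlock(&d->mutex);
    }

    int main(void)
    {
        struct dev d = { PTHREAD_MUTEX_INITIALIZER, DEV_STARTED, false };
        ps_work(&d, true);
        printf("ps: %d\n", d.ps_enabled);
        return 0;
    }

The re-check under the lock matters because a restart can change the device state between the moment a work item is queued and the moment it runs.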
H A Dled.c43 int carl9170_led_set_state(struct ar9170 *ar, const u32 led_state) carl9170_led_set_state() argument
45 return carl9170_write_reg(ar, AR9170_GPIO_REG_PORT_DATA, led_state); carl9170_led_set_state()
48 int carl9170_led_init(struct ar9170 *ar) carl9170_led_init() argument
54 err = carl9170_write_reg(ar, AR9170_GPIO_REG_PORT_TYPE, 3); carl9170_led_init()
59 err = carl9170_led_set_state(ar, 0); carl9170_led_init()
68 struct ar9170 *ar = container_of(work, struct ar9170, led_work.work); carl9170_led_update() local
73 if (!IS_ACCEPTING_CMD(ar)) carl9170_led_update()
76 mutex_lock(&ar->mutex); carl9170_led_update()
78 if (ar->leds[i].registered) { carl9170_led_update()
79 if (ar->leds[i].last_state || carl9170_led_update()
80 ar->leds[i].toggled) { carl9170_led_update()
82 if (ar->leds[i].toggled) carl9170_led_update()
83 tmp = 70 + 200 / (ar->leds[i].toggled); carl9170_led_update()
89 ar->leds[i].toggled = 0; carl9170_led_update()
95 carl9170_led_set_state(ar, led_val); carl9170_led_update()
96 mutex_unlock(&ar->mutex); carl9170_led_update()
101 ieee80211_queue_delayed_work(ar->hw, carl9170_led_update()
102 &ar->led_work, carl9170_led_update()
110 struct ar9170 *ar = arl->ar; carl9170_led_set_brightness() local
120 if (likely(IS_ACCEPTING_CMD(ar) && arl->toggled)) carl9170_led_set_brightness()
121 ieee80211_queue_delayed_work(ar->hw, &ar->led_work, HZ / 10); carl9170_led_set_brightness()
124 static int carl9170_led_register_led(struct ar9170 *ar, int i, char *name, carl9170_led_register_led() argument
129 snprintf(ar->leds[i].name, sizeof(ar->leds[i].name), carl9170_led_register_led()
130 "carl9170-%s::%s", wiphy_name(ar->hw->wiphy), name); carl9170_led_register_led()
132 ar->leds[i].ar = ar; carl9170_led_register_led()
133 ar->leds[i].l.name = ar->leds[i].name; carl9170_led_register_led()
134 ar->leds[i].l.brightness_set = carl9170_led_set_brightness; carl9170_led_register_led()
135 ar->leds[i].l.brightness = 0; carl9170_led_register_led()
136 ar->leds[i].l.default_trigger = trigger; carl9170_led_register_led()
138 err = led_classdev_register(wiphy_dev(ar->hw->wiphy), carl9170_led_register_led()
139 &ar->leds[i].l); carl9170_led_register_led()
141 wiphy_err(ar->hw->wiphy, "failed to register %s LED (%d).\n", carl9170_led_register_led()
142 ar->leds[i].name, err); carl9170_led_register_led()
144 ar->leds[i].registered = true; carl9170_led_register_led()
150 void carl9170_led_unregister(struct ar9170 *ar) carl9170_led_unregister() argument
155 if (ar->leds[i].registered) { carl9170_led_unregister()
156 led_classdev_unregister(&ar->leds[i].l); carl9170_led_unregister()
157 ar->leds[i].registered = false; carl9170_led_unregister()
158 ar->leds[i].toggled = 0; carl9170_led_unregister()
161 cancel_delayed_work_sync(&ar->led_work); carl9170_led_unregister()
164 int carl9170_led_register(struct ar9170 *ar) carl9170_led_register() argument
168 INIT_DELAYED_WORK(&ar->led_work, carl9170_led_update); carl9170_led_register()
170 err = carl9170_led_register_led(ar, 0, "tx", carl9170_led_register()
171 ieee80211_get_tx_led_name(ar->hw)); carl9170_led_register()
175 if (ar->features & CARL9170_ONE_LED) carl9170_led_register()
178 err = carl9170_led_register_led(ar, 1, "assoc", carl9170_led_register()
179 ieee80211_get_assoc_led_name(ar->hw)); carl9170_led_register()
186 carl9170_led_unregister(ar); carl9170_led_register()
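Note: carl9170_led_update() above derives the blink period directly from activity: an LED toggled t times since the last pass stays lit for 70 + 200/t milliseconds, so busier LEDs blink faster while the on-time never drops below 70 ms. A standalone illustration of that mapping (the function name is mine):

    #include <stdio.h>

    /* blink on-time in ms as a function of activity, as in
     * carl9170_led_update(): more toggles => faster blinking,
     * bounded below by 70 ms as the count grows. */
    static unsigned int blink_ms(unsigned int toggled)
    {
        if (!toggled)
            return 0;               /* LED keeps its last state */
        return 70 + 200 / toggled;
    }

    int main(void)
    {
        for (unsigned int t = 1; t <= 8; t *= 2)
            printf("toggled=%u -> %u ms\n", t, blink_ms(t));
        return 0;
    }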
H A Dfw.c33 static const void *carl9170_fw_find_desc(struct ar9170 *ar, const u8 descid[4], carl9170_fw_find_desc() argument
38 carl9170fw_for_each_hdr(iter, ar->fw.desc) { carl9170_fw_find_desc()
52 static int carl9170_fw_verify_descs(struct ar9170 *ar, carl9170_fw_verify_descs() argument
95 static void carl9170_fw_info(struct ar9170 *ar) carl9170_fw_info() argument
101 dev_info(&ar->udev->dev, "driver API: %s 2%03d-%02d-%02d [%d-%d]\n", carl9170_fw_info()
106 motd_desc = carl9170_fw_find_desc(ar, MOTD_MAGIC, carl9170_fw_info()
115 dev_info(&ar->udev->dev, "firmware API: %.*s 2%03d-%02d-%02d\n", carl9170_fw_info()
121 strlcpy(ar->hw->wiphy->fw_version, motd_desc->release, carl9170_fw_info()
122 sizeof(ar->hw->wiphy->fw_version)); carl9170_fw_info()
144 static int carl9170_fw_checksum(struct ar9170 *ar, const __u8 *data, carl9170_fw_checksum() argument
154 last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC, carl9170_fw_checksum()
159 otus_desc = carl9170_fw_find_desc(ar, OTUS_MAGIC, carl9170_fw_checksum()
162 dev_err(&ar->udev->dev, "failed to find compatible firmware " carl9170_fw_checksum()
167 chk_desc = carl9170_fw_find_desc(ar, CHK_MAGIC, carl9170_fw_checksum()
171 dev_warn(&ar->udev->dev, "Unprotected firmware image.\n"); carl9170_fw_checksum()
189 dev_err(&ar->udev->dev, "fw checksum test failed.\n"); carl9170_fw_checksum()
195 dev_err(&ar->udev->dev, "descriptor check failed.\n"); carl9170_fw_checksum()
201 static int carl9170_fw_tx_sequence(struct ar9170 *ar) carl9170_fw_tx_sequence() argument
205 txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC, sizeof(*txsq_desc), carl9170_fw_tx_sequence()
208 ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr); carl9170_fw_tx_sequence()
209 if (!valid_cpu_addr(ar->fw.tx_seq_table)) carl9170_fw_tx_sequence()
212 ar->fw.tx_seq_table = 0; carl9170_fw_tx_sequence()
218 static void carl9170_fw_set_if_combinations(struct ar9170 *ar, carl9170_fw_set_if_combinations() argument
221 if (ar->fw.vif_num < 2) carl9170_fw_set_if_combinations()
224 ar->if_comb_limits[0].max = ar->fw.vif_num; carl9170_fw_set_if_combinations()
225 ar->if_comb_limits[0].types = if_comb_types; carl9170_fw_set_if_combinations()
227 ar->if_combs[0].num_different_channels = 1; carl9170_fw_set_if_combinations()
228 ar->if_combs[0].max_interfaces = ar->fw.vif_num; carl9170_fw_set_if_combinations()
229 ar->if_combs[0].limits = ar->if_comb_limits; carl9170_fw_set_if_combinations()
230 ar->if_combs[0].n_limits = ARRAY_SIZE(ar->if_comb_limits); carl9170_fw_set_if_combinations()
232 ar->hw->wiphy->iface_combinations = ar->if_combs; carl9170_fw_set_if_combinations()
233 ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ar->if_combs); carl9170_fw_set_if_combinations()
236 static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) carl9170_fw() argument
242 err = carl9170_fw_checksum(ar, data, len); carl9170_fw()
246 otus_desc = carl9170_fw_find_desc(ar, OTUS_MAGIC, carl9170_fw()
256 dev_err(&ar->udev->dev, "invalid firmware descriptor " carl9170_fw()
261 ar->fw.api_version = otus_desc->api_ver; carl9170_fw()
263 if (ar->fw.api_version < CARL9170FW_API_MIN_VER || carl9170_fw()
264 ar->fw.api_version > CARL9170FW_API_MAX_VER) { carl9170_fw()
265 dev_err(&ar->udev->dev, "unsupported firmware api version.\n"); carl9170_fw()
271 dev_err(&ar->udev->dev, "firmware does not support " carl9170_fw()
278 dev_warn(&ar->udev->dev, "driver does not support all " carl9170_fw()
283 dev_info(&ar->udev->dev, "crypto offloading is disabled " carl9170_fw()
285 ar->fw.disable_offload_fw = true; carl9170_fw()
289 ar->hw->flags |= IEEE80211_HW_SUPPORTS_PS; carl9170_fw()
292 dev_err(&ar->udev->dev, "firmware does not provide " carl9170_fw()
298 ar->fw.offset = le16_to_cpu(otus_desc->miniboot_size); carl9170_fw()
300 ar->fw.offset = 0; carl9170_fw()
303 ar->hw->extra_tx_headroom += sizeof(struct ar9170_stream); carl9170_fw()
304 ar->fw.tx_stream = true; carl9170_fw()
308 ar->fw.rx_stream = true; carl9170_fw()
311 ar->fw.rx_filter = true; carl9170_fw()
312 ar->rx_filter_caps = FIF_FCSFAIL | FIF_PLCPFAIL | carl9170_fw()
318 ar->fw.hw_counters = true; carl9170_fw()
321 device_set_wakeup_enable(&ar->udev->dev, true); carl9170_fw()
324 ar->fw.ba_filter = true; carl9170_fw()
329 ar->fw.vif_num = otus_desc->vif_num; carl9170_fw()
330 ar->fw.cmd_bufs = otus_desc->cmd_bufs; carl9170_fw()
331 ar->fw.address = le32_to_cpu(otus_desc->fw_address); carl9170_fw()
332 ar->fw.rx_size = le16_to_cpu(otus_desc->rx_max_frame_len); carl9170_fw()
333 ar->fw.mem_blocks = min_t(unsigned int, otus_desc->tx_descs, 0xfe); carl9170_fw()
334 atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks); carl9170_fw()
335 ar->fw.mem_block_size = le16_to_cpu(otus_desc->tx_frag_len); carl9170_fw()
337 if (ar->fw.vif_num >= AR9170_MAX_VIRTUAL_MAC || !ar->fw.vif_num || carl9170_fw()
338 ar->fw.mem_blocks < 16 || !ar->fw.cmd_bufs || carl9170_fw()
339 ar->fw.mem_block_size < 64 || ar->fw.mem_block_size > 512 || carl9170_fw()
340 ar->fw.rx_size > 32768 || ar->fw.rx_size < 4096 || carl9170_fw()
341 !valid_cpu_addr(ar->fw.address)) { carl9170_fw()
342 dev_err(&ar->udev->dev, "firmware shows obvious signs of " carl9170_fw()
347 ar->fw.beacon_addr = le32_to_cpu(otus_desc->bcn_addr); carl9170_fw()
348 ar->fw.beacon_max_len = le16_to_cpu(otus_desc->bcn_len); carl9170_fw()
350 if (valid_dma_addr(ar->fw.beacon_addr) && ar->fw.beacon_max_len >= carl9170_fw()
352 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); carl9170_fw()
366 carl9170_fw_set_if_combinations(ar, if_comb_types); carl9170_fw()
368 ar->hw->wiphy->interface_modes |= if_comb_types; carl9170_fw()
370 ar->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; carl9170_fw()
373 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | carl9170_fw()
377 return carl9170_fw_tx_sequence(ar); carl9170_fw()
381 carl9170_find_fw_desc(struct ar9170 *ar, const __u8 *fw_data, const size_t len) carl9170_find_fw_desc() argument
387 dev_err(&ar->udev->dev, "firmware size is out of bounds.\n"); carl9170_find_fw_desc()
410 int carl9170_parse_firmware(struct ar9170 *ar) carl9170_parse_firmware() argument
413 const struct firmware *fw = ar->fw.fw; carl9170_parse_firmware()
420 fw_desc = carl9170_find_fw_desc(ar, fw->data, fw->size); carl9170_parse_firmware()
423 dev_err(&ar->udev->dev, "unsupported firmware.\n"); carl9170_parse_firmware()
429 err = carl9170_fw_verify_descs(ar, fw_desc, fw->size - header_offset); carl9170_parse_firmware()
431 dev_err(&ar->udev->dev, "damaged firmware (%d).\n", err); carl9170_parse_firmware()
435 ar->fw.desc = fw_desc; carl9170_parse_firmware()
437 carl9170_fw_info(ar); carl9170_parse_firmware()
439 err = carl9170_fw(ar, fw->data, fw->size); carl9170_parse_firmware()
441 dev_err(&ar->udev->dev, "failed to parse firmware (%d).\n", carl9170_parse_firmware()
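Note: before trusting a firmware image, carl9170_fw() above range-checks everything the descriptor claims about itself: a nonzero virtual-MAC count below AR9170_MAX_VIRTUAL_MAC, at least 16 memory blocks of 64-512 bytes each, and an RX frame size between 4096 and 32768 bytes. A self-contained sketch of that sanity gate, with the bounds copied from the hits above; the struct and field names are illustrative:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_VIFS 7  /* stands in for AR9170_MAX_VIRTUAL_MAC */

    struct fw_desc {                 /* illustrative subset of otus_desc */
        uint8_t  vif_num, cmd_bufs, mem_blocks;
        uint16_t mem_block_size, rx_size;
    };

    /* mirrors the range checks in carl9170_fw() */
    static bool fw_desc_sane(const struct fw_desc *d)
    {
        return d->vif_num && d->vif_num < MAX_VIFS &&
               d->cmd_bufs &&
               d->mem_blocks >= 16 &&
               d->mem_block_size >= 64 && d->mem_block_size <= 512 &&
               d->rx_size >= 4096 && d->rx_size <= 32768;
    }

    int main(void)
    {
        struct fw_desc ok  = { 2, 4, 64, 256, 16384 };
        struct fw_desc bad = { 2, 4,  8, 256, 16384 }; /* too few blocks */
        printf("ok:%d bad:%d\n", fw_desc_sane(&ok), fw_desc_sane(&bad));
        return 0;
    }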
H A Dusb.c131 static void carl9170_usb_submit_data_urb(struct ar9170 *ar) carl9170_usb_submit_data_urb() argument
136 if (atomic_inc_return(&ar->tx_anch_urbs) > AR9170_NUM_TX_URBS) carl9170_usb_submit_data_urb()
139 urb = usb_get_from_anchor(&ar->tx_wait); carl9170_usb_submit_data_urb()
143 usb_anchor_urb(urb, &ar->tx_anch); carl9170_usb_submit_data_urb()
148 dev_err(&ar->udev->dev, "tx submit failed (%d)\n", carl9170_usb_submit_data_urb()
153 usb_anchor_urb(urb, &ar->tx_err); carl9170_usb_submit_data_urb()
162 atomic_dec(&ar->tx_anch_urbs); carl9170_usb_submit_data_urb()
167 struct ar9170 *ar = usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); carl9170_usb_tx_data_complete() local
169 if (WARN_ON_ONCE(!ar)) { carl9170_usb_tx_data_complete()
174 atomic_dec(&ar->tx_anch_urbs); carl9170_usb_tx_data_complete()
179 carl9170_tx_callback(ar, (void *)urb->context); carl9170_usb_tx_data_complete()
192 usb_anchor_urb(urb, &ar->tx_err); carl9170_usb_tx_data_complete()
198 dev_err(&ar->udev->dev, "tx failed (%d)\n", carl9170_usb_tx_data_complete()
202 usb_anchor_urb(urb, &ar->tx_err); carl9170_usb_tx_data_complete()
206 if (likely(IS_STARTED(ar))) carl9170_usb_tx_data_complete()
207 carl9170_usb_submit_data_urb(ar); carl9170_usb_tx_data_complete()
210 static int carl9170_usb_submit_cmd_urb(struct ar9170 *ar) carl9170_usb_submit_cmd_urb() argument
215 if (atomic_inc_return(&ar->tx_cmd_urbs) != 1) { carl9170_usb_submit_cmd_urb()
216 atomic_dec(&ar->tx_cmd_urbs); carl9170_usb_submit_cmd_urb()
220 urb = usb_get_from_anchor(&ar->tx_cmd); carl9170_usb_submit_cmd_urb()
222 atomic_dec(&ar->tx_cmd_urbs); carl9170_usb_submit_cmd_urb()
226 usb_anchor_urb(urb, &ar->tx_anch); carl9170_usb_submit_cmd_urb()
230 atomic_dec(&ar->tx_cmd_urbs); carl9170_usb_submit_cmd_urb()
239 struct ar9170 *ar = urb->context; carl9170_usb_cmd_complete() local
242 if (WARN_ON_ONCE(!ar)) carl9170_usb_cmd_complete()
245 atomic_dec(&ar->tx_cmd_urbs); carl9170_usb_cmd_complete()
264 if (!IS_INITIALIZED(ar)) carl9170_usb_cmd_complete()
268 dev_err(&ar->udev->dev, "submit cmd cb failed (%d).\n", err); carl9170_usb_cmd_complete()
270 err = carl9170_usb_submit_cmd_urb(ar); carl9170_usb_cmd_complete()
272 dev_err(&ar->udev->dev, "submit cmd failed (%d).\n", err); carl9170_usb_cmd_complete()
277 struct ar9170 *ar = urb->context; carl9170_usb_rx_irq_complete() local
279 if (WARN_ON_ONCE(!ar)) carl9170_usb_rx_irq_complete()
305 carl9170_handle_command_response(ar, urb->transfer_buffer, carl9170_usb_rx_irq_complete()
309 usb_anchor_urb(urb, &ar->rx_anch); carl9170_usb_rx_irq_complete()
314 static int carl9170_usb_submit_rx_urb(struct ar9170 *ar, gfp_t gfp) carl9170_usb_submit_rx_urb() argument
319 while ((atomic_read(&ar->rx_anch_urbs) < AR9170_NUM_RX_URBS) && carl9170_usb_submit_rx_urb()
322 urb = usb_get_from_anchor(&ar->rx_pool); carl9170_usb_submit_rx_urb()
324 usb_anchor_urb(urb, &ar->rx_anch); carl9170_usb_submit_rx_urb()
328 usb_anchor_urb(urb, &ar->rx_pool); carl9170_usb_submit_rx_urb()
330 atomic_dec(&ar->rx_pool_urbs); carl9170_usb_submit_rx_urb()
331 atomic_inc(&ar->rx_anch_urbs); carl9170_usb_submit_rx_urb()
340 static void carl9170_usb_rx_work(struct ar9170 *ar) carl9170_usb_rx_work() argument
346 urb = usb_get_from_anchor(&ar->rx_work); carl9170_usb_rx_work()
350 atomic_dec(&ar->rx_work_urbs); carl9170_usb_rx_work()
351 if (IS_INITIALIZED(ar)) { carl9170_usb_rx_work()
352 carl9170_rx(ar, urb->transfer_buffer, carl9170_usb_rx_work()
356 usb_anchor_urb(urb, &ar->rx_pool); carl9170_usb_rx_work()
357 atomic_inc(&ar->rx_pool_urbs); carl9170_usb_rx_work()
361 carl9170_usb_submit_rx_urb(ar, GFP_ATOMIC); carl9170_usb_rx_work()
365 void carl9170_usb_handle_tx_err(struct ar9170 *ar) carl9170_usb_handle_tx_err() argument
369 while ((urb = usb_get_from_anchor(&ar->tx_err))) { carl9170_usb_handle_tx_err()
372 carl9170_tx_drop(ar, skb); carl9170_usb_handle_tx_err()
373 carl9170_tx_callback(ar, skb); carl9170_usb_handle_tx_err()
380 struct ar9170 *ar = (struct ar9170 *) data; carl9170_usb_tasklet() local
382 if (!IS_INITIALIZED(ar)) carl9170_usb_tasklet()
385 carl9170_usb_rx_work(ar); carl9170_usb_tasklet()
392 if (IS_STARTED(ar)) carl9170_usb_tasklet()
393 carl9170_tx_scheduler(ar); carl9170_usb_tasklet()
398 struct ar9170 *ar = (struct ar9170 *)urb->context; carl9170_usb_rx_complete() local
401 if (WARN_ON_ONCE(!ar)) carl9170_usb_rx_complete()
404 atomic_dec(&ar->rx_anch_urbs); carl9170_usb_rx_complete()
409 usb_anchor_urb(urb, &ar->rx_work); carl9170_usb_rx_complete()
410 atomic_inc(&ar->rx_work_urbs); carl9170_usb_rx_complete()
422 usb_anchor_urb(urb, &ar->rx_pool); carl9170_usb_rx_complete()
423 atomic_inc(&ar->rx_pool_urbs); carl9170_usb_rx_complete()
427 err = carl9170_usb_submit_rx_urb(ar, GFP_ATOMIC); carl9170_usb_rx_complete()
435 tasklet_hi_schedule(&ar->usb_tasklet); carl9170_usb_rx_complete()
437 if (atomic_read(&ar->rx_anch_urbs) == 0) { carl9170_usb_rx_complete()
446 ieee80211_queue_work(ar->hw, &ar->ping_work); carl9170_usb_rx_complete()
453 tasklet_hi_schedule(&ar->usb_tasklet); carl9170_usb_rx_complete()
457 static struct urb *carl9170_usb_alloc_rx_urb(struct ar9170 *ar, gfp_t gfp) carl9170_usb_alloc_rx_urb() argument
462 buf = kmalloc(ar->fw.rx_size, gfp); carl9170_usb_alloc_rx_urb()
472 usb_fill_bulk_urb(urb, ar->udev, usb_rcvbulkpipe(ar->udev, carl9170_usb_alloc_rx_urb()
473 AR9170_USB_EP_RX), buf, ar->fw.rx_size, carl9170_usb_alloc_rx_urb()
474 carl9170_usb_rx_complete, ar); carl9170_usb_alloc_rx_urb()
481 static int carl9170_usb_send_rx_irq_urb(struct ar9170 *ar) carl9170_usb_send_rx_irq_urb() argument
495 usb_fill_int_urb(urb, ar->udev, usb_rcvintpipe(ar->udev, carl9170_usb_send_rx_irq_urb()
497 carl9170_usb_rx_irq_complete, ar, 1); carl9170_usb_send_rx_irq_urb()
501 usb_anchor_urb(urb, &ar->rx_anch); carl9170_usb_send_rx_irq_urb()
511 static int carl9170_usb_init_rx_bulk_urbs(struct ar9170 *ar) carl9170_usb_init_rx_bulk_urbs() argument
527 urb = carl9170_usb_alloc_rx_urb(ar, GFP_KERNEL); carl9170_usb_init_rx_bulk_urbs()
533 usb_anchor_urb(urb, &ar->rx_pool); carl9170_usb_init_rx_bulk_urbs()
534 atomic_inc(&ar->rx_pool_urbs); carl9170_usb_init_rx_bulk_urbs()
538 err = carl9170_usb_submit_rx_urb(ar, GFP_KERNEL); carl9170_usb_init_rx_bulk_urbs()
543 carl9170_set_state_when(ar, CARL9170_STOPPED, CARL9170_IDLE); carl9170_usb_init_rx_bulk_urbs()
548 usb_scuttle_anchored_urbs(&ar->rx_pool); carl9170_usb_init_rx_bulk_urbs()
549 usb_scuttle_anchored_urbs(&ar->rx_work); carl9170_usb_init_rx_bulk_urbs()
550 usb_kill_anchored_urbs(&ar->rx_anch); carl9170_usb_init_rx_bulk_urbs()
554 static int carl9170_usb_flush(struct ar9170 *ar) carl9170_usb_flush() argument
559 while ((urb = usb_get_from_anchor(&ar->tx_wait))) { carl9170_usb_flush()
561 carl9170_tx_drop(ar, skb); carl9170_usb_flush()
562 carl9170_tx_callback(ar, skb); carl9170_usb_flush()
566 ret = usb_wait_anchor_empty_timeout(&ar->tx_cmd, 1000); carl9170_usb_flush()
571 ret = usb_wait_anchor_empty_timeout(&ar->tx_anch, 1000); carl9170_usb_flush()
575 usb_kill_anchored_urbs(&ar->tx_anch); carl9170_usb_flush()
576 carl9170_usb_handle_tx_err(ar); carl9170_usb_flush()
581 static void carl9170_usb_cancel_urbs(struct ar9170 *ar) carl9170_usb_cancel_urbs() argument
585 carl9170_set_state(ar, CARL9170_UNKNOWN_STATE); carl9170_usb_cancel_urbs()
587 err = carl9170_usb_flush(ar); carl9170_usb_cancel_urbs()
589 dev_err(&ar->udev->dev, "stuck tx urbs!\n"); carl9170_usb_cancel_urbs()
591 usb_poison_anchored_urbs(&ar->tx_anch); carl9170_usb_cancel_urbs()
592 carl9170_usb_handle_tx_err(ar); carl9170_usb_cancel_urbs()
593 usb_poison_anchored_urbs(&ar->rx_anch); carl9170_usb_cancel_urbs()
595 tasklet_kill(&ar->usb_tasklet); carl9170_usb_cancel_urbs()
597 usb_scuttle_anchored_urbs(&ar->rx_work); carl9170_usb_cancel_urbs()
598 usb_scuttle_anchored_urbs(&ar->rx_pool); carl9170_usb_cancel_urbs()
599 usb_scuttle_anchored_urbs(&ar->tx_cmd); carl9170_usb_cancel_urbs()
602 int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd, __carl9170_exec_cmd() argument
608 if (!IS_INITIALIZED(ar)) { __carl9170_exec_cmd()
624 if (ar->usb_ep_cmd_is_bulk) __carl9170_exec_cmd()
625 usb_fill_bulk_urb(urb, ar->udev, __carl9170_exec_cmd()
626 usb_sndbulkpipe(ar->udev, AR9170_USB_EP_CMD), __carl9170_exec_cmd()
628 carl9170_usb_cmd_complete, ar); __carl9170_exec_cmd()
630 usb_fill_int_urb(urb, ar->udev, __carl9170_exec_cmd()
631 usb_sndintpipe(ar->udev, AR9170_USB_EP_CMD), __carl9170_exec_cmd()
633 carl9170_usb_cmd_complete, ar, 1); __carl9170_exec_cmd()
638 usb_anchor_urb(urb, &ar->tx_cmd); __carl9170_exec_cmd()
641 return carl9170_usb_submit_cmd_urb(ar); __carl9170_exec_cmd()
650 int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd, carl9170_exec_cmd() argument
655 if (!IS_ACCEPTING_CMD(ar)) carl9170_exec_cmd()
661 ar->cmd.hdr.len = plen; carl9170_exec_cmd()
662 ar->cmd.hdr.cmd = cmd; carl9170_exec_cmd()
664 if (plen && payload != (u8 *)(ar->cmd.data)) carl9170_exec_cmd()
665 memcpy(ar->cmd.data, payload, plen); carl9170_exec_cmd()
667 spin_lock_bh(&ar->cmd_lock); carl9170_exec_cmd()
668 ar->readbuf = (u8 *)out; carl9170_exec_cmd()
669 ar->readlen = outlen; carl9170_exec_cmd()
670 spin_unlock_bh(&ar->cmd_lock); carl9170_exec_cmd()
672 err = __carl9170_exec_cmd(ar, &ar->cmd, false); carl9170_exec_cmd()
675 err = wait_for_completion_timeout(&ar->cmd_wait, HZ); carl9170_exec_cmd()
681 if (ar->readlen != outlen) { carl9170_exec_cmd()
691 if (IS_STARTED(ar)) { carl9170_exec_cmd()
692 dev_err(&ar->udev->dev, "no command feedback " carl9170_exec_cmd()
697 &ar->cmd, plen + 4); carl9170_exec_cmd()
699 carl9170_restart(ar, CARL9170_RR_COMMAND_TIMEOUT); carl9170_exec_cmd()
703 spin_lock_bh(&ar->cmd_lock); carl9170_exec_cmd()
704 ar->readbuf = NULL; carl9170_exec_cmd()
705 ar->readlen = 0; carl9170_exec_cmd()
706 spin_unlock_bh(&ar->cmd_lock); carl9170_exec_cmd()
711 void carl9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb) carl9170_usb_tx() argument
718 if (!IS_STARTED(ar)) carl9170_usb_tx()
725 if (ar->fw.tx_stream) { carl9170_usb_tx()
737 usb_fill_bulk_urb(urb, ar->udev, usb_sndbulkpipe(ar->udev, carl9170_usb_tx()
743 usb_anchor_urb(urb, &ar->tx_wait); carl9170_usb_tx()
747 carl9170_usb_submit_data_urb(ar); carl9170_usb_tx()
751 carl9170_tx_drop(ar, skb); carl9170_usb_tx()
752 carl9170_tx_callback(ar, skb); carl9170_usb_tx()
755 static void carl9170_release_firmware(struct ar9170 *ar) carl9170_release_firmware() argument
757 if (ar->fw.fw) { carl9170_release_firmware()
758 release_firmware(ar->fw.fw); carl9170_release_firmware()
759 memset(&ar->fw, 0, sizeof(ar->fw)); carl9170_release_firmware()
763 void carl9170_usb_stop(struct ar9170 *ar) carl9170_usb_stop() argument
767 carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STOPPED); carl9170_usb_stop()
769 ret = carl9170_usb_flush(ar); carl9170_usb_stop()
771 dev_err(&ar->udev->dev, "kill pending tx urbs.\n"); carl9170_usb_stop()
773 usb_poison_anchored_urbs(&ar->tx_anch); carl9170_usb_stop()
774 carl9170_usb_handle_tx_err(ar); carl9170_usb_stop()
777 spin_lock_bh(&ar->cmd_lock); carl9170_usb_stop()
778 ar->readlen = 0; carl9170_usb_stop()
779 spin_unlock_bh(&ar->cmd_lock); carl9170_usb_stop()
780 complete_all(&ar->cmd_wait); carl9170_usb_stop()
783 reinit_completion(&ar->cmd_wait); carl9170_usb_stop()
792 int carl9170_usb_open(struct ar9170 *ar) carl9170_usb_open() argument
794 usb_unpoison_anchored_urbs(&ar->tx_anch); carl9170_usb_open()
796 carl9170_set_state_when(ar, CARL9170_STOPPED, CARL9170_IDLE); carl9170_usb_open()
800 static int carl9170_usb_load_firmware(struct ar9170 *ar) carl9170_usb_load_firmware() argument
815 data = ar->fw.fw->data; carl9170_usb_load_firmware()
816 len = ar->fw.fw->size; carl9170_usb_load_firmware()
817 addr = ar->fw.address; carl9170_usb_load_firmware()
820 data += ar->fw.offset; carl9170_usb_load_firmware()
821 len -= ar->fw.offset; carl9170_usb_load_firmware()
827 err = usb_control_msg(ar->udev, usb_sndctrlpipe(ar->udev, 0), carl9170_usb_load_firmware()
842 err = usb_control_msg(ar->udev, usb_sndctrlpipe(ar->udev, 0), carl9170_usb_load_firmware()
846 if (wait_for_completion_timeout(&ar->fw_boot_wait, HZ) == 0) { carl9170_usb_load_firmware()
851 err = carl9170_echo_test(ar, 0x4a110123); carl9170_usb_load_firmware()
856 ar->cmd_seq = -1; carl9170_usb_load_firmware()
861 dev_err(&ar->udev->dev, "firmware upload failed (%d).\n", err); carl9170_usb_load_firmware()
865 int carl9170_usb_restart(struct ar9170 *ar) carl9170_usb_restart() argument
869 if (ar->intf->condition != USB_INTERFACE_BOUND) carl9170_usb_restart()
878 ar->cmd_seq = -2; carl9170_usb_restart()
880 err = carl9170_reboot(ar); carl9170_usb_restart()
882 carl9170_usb_stop(ar); carl9170_usb_restart()
887 tasklet_schedule(&ar->usb_tasklet); carl9170_usb_restart()
892 err = carl9170_usb_open(ar); carl9170_usb_restart()
896 err = carl9170_usb_load_firmware(ar); carl9170_usb_restart()
903 carl9170_usb_cancel_urbs(ar); carl9170_usb_restart()
907 void carl9170_usb_reset(struct ar9170 *ar) carl9170_usb_reset() argument
918 carl9170_usb_cancel_urbs(ar); carl9170_usb_reset()
920 carl9170_usb_stop(ar); carl9170_usb_reset()
922 usb_queue_reset_device(ar->intf); carl9170_usb_reset()
925 static int carl9170_usb_init_device(struct ar9170 *ar) carl9170_usb_init_device() argument
936 ar->cmd_seq = -2; carl9170_usb_init_device()
938 err = carl9170_usb_send_rx_irq_urb(ar); carl9170_usb_init_device()
942 err = carl9170_usb_init_rx_bulk_urbs(ar); carl9170_usb_init_device()
946 err = carl9170_usb_open(ar); carl9170_usb_init_device()
950 mutex_lock(&ar->mutex); carl9170_usb_init_device()
951 err = carl9170_usb_load_firmware(ar); carl9170_usb_init_device()
952 mutex_unlock(&ar->mutex); carl9170_usb_init_device()
959 carl9170_usb_stop(ar); carl9170_usb_init_device()
962 carl9170_usb_cancel_urbs(ar); carl9170_usb_init_device()
968 static void carl9170_usb_firmware_failed(struct ar9170 *ar) carl9170_usb_firmware_failed() argument
970 struct device *parent = ar->udev->dev.parent; carl9170_usb_firmware_failed()
977 * driver context (ar). carl9170_usb_firmware_failed()
979 udev = ar->udev; carl9170_usb_firmware_failed()
981 complete(&ar->fw_load_wait); carl9170_usb_firmware_failed()
994 static void carl9170_usb_firmware_finish(struct ar9170 *ar) carl9170_usb_firmware_finish() argument
998 err = carl9170_parse_firmware(ar); carl9170_usb_firmware_finish()
1002 err = carl9170_usb_init_device(ar); carl9170_usb_firmware_finish()
1006 err = carl9170_register(ar); carl9170_usb_firmware_finish()
1008 carl9170_usb_stop(ar); carl9170_usb_firmware_finish()
1012 complete(&ar->fw_load_wait); carl9170_usb_firmware_finish()
1013 usb_put_dev(ar->udev); carl9170_usb_firmware_finish()
1017 carl9170_usb_cancel_urbs(ar); carl9170_usb_firmware_finish()
1020 carl9170_release_firmware(ar); carl9170_usb_firmware_finish()
1021 carl9170_usb_firmware_failed(ar); carl9170_usb_firmware_finish()
1027 struct ar9170 *ar = context; carl9170_usb_firmware_step2() local
1030 ar->fw.fw = fw; carl9170_usb_firmware_step2()
1031 carl9170_usb_firmware_finish(ar); carl9170_usb_firmware_step2()
1035 dev_err(&ar->udev->dev, "firmware not found.\n"); carl9170_usb_firmware_step2()
1036 carl9170_usb_firmware_failed(ar); carl9170_usb_firmware_step2()
1043 struct ar9170 *ar; carl9170_usb_probe() local
1051 ar = carl9170_alloc(sizeof(*ar)); carl9170_usb_probe()
1052 if (IS_ERR(ar)) carl9170_usb_probe()
1053 return PTR_ERR(ar); carl9170_usb_probe()
1057 ar->udev = udev; carl9170_usb_probe()
1058 ar->intf = intf; carl9170_usb_probe()
1059 ar->features = id->driver_info; carl9170_usb_probe()
1073 ar->usb_ep_cmd_is_bulk = true; carl9170_usb_probe()
1076 usb_set_intfdata(intf, ar); carl9170_usb_probe()
1077 SET_IEEE80211_DEV(ar->hw, &intf->dev); carl9170_usb_probe()
1079 init_usb_anchor(&ar->rx_anch); carl9170_usb_probe()
1080 init_usb_anchor(&ar->rx_pool); carl9170_usb_probe()
1081 init_usb_anchor(&ar->rx_work); carl9170_usb_probe()
1082 init_usb_anchor(&ar->tx_wait); carl9170_usb_probe()
1083 init_usb_anchor(&ar->tx_anch); carl9170_usb_probe()
1084 init_usb_anchor(&ar->tx_cmd); carl9170_usb_probe()
1085 init_usb_anchor(&ar->tx_err); carl9170_usb_probe()
1086 init_completion(&ar->cmd_wait); carl9170_usb_probe()
1087 init_completion(&ar->fw_boot_wait); carl9170_usb_probe()
1088 init_completion(&ar->fw_load_wait); carl9170_usb_probe()
1089 tasklet_init(&ar->usb_tasklet, carl9170_usb_tasklet, carl9170_usb_probe()
1090 (unsigned long)ar); carl9170_usb_probe()
1092 atomic_set(&ar->tx_cmd_urbs, 0); carl9170_usb_probe()
1093 atomic_set(&ar->tx_anch_urbs, 0); carl9170_usb_probe()
1094 atomic_set(&ar->rx_work_urbs, 0); carl9170_usb_probe()
1095 atomic_set(&ar->rx_anch_urbs, 0); carl9170_usb_probe()
1096 atomic_set(&ar->rx_pool_urbs, 0); carl9170_usb_probe()
1098 usb_get_dev(ar->udev); carl9170_usb_probe()
1100 carl9170_set_state(ar, CARL9170_STOPPED); carl9170_usb_probe()
1103 &ar->udev->dev, GFP_KERNEL, ar, carl9170_usb_firmware_step2); carl9170_usb_probe()
1107 carl9170_free(ar); carl9170_usb_probe()
1114 struct ar9170 *ar = usb_get_intfdata(intf); carl9170_usb_disconnect() local
1117 if (WARN_ON(!ar)) carl9170_usb_disconnect()
1120 udev = ar->udev; carl9170_usb_disconnect()
1121 wait_for_completion(&ar->fw_load_wait); carl9170_usb_disconnect()
1123 if (IS_INITIALIZED(ar)) { carl9170_usb_disconnect()
1124 carl9170_reboot(ar); carl9170_usb_disconnect()
1125 carl9170_usb_stop(ar); carl9170_usb_disconnect()
1128 carl9170_usb_cancel_urbs(ar); carl9170_usb_disconnect()
1129 carl9170_unregister(ar); carl9170_usb_disconnect()
1133 carl9170_release_firmware(ar); carl9170_usb_disconnect()
1134 carl9170_free(ar); carl9170_usb_disconnect()
1142 struct ar9170 *ar = usb_get_intfdata(intf); carl9170_usb_suspend() local
1144 if (!ar) carl9170_usb_suspend()
1147 carl9170_usb_cancel_urbs(ar); carl9170_usb_suspend()
1154 struct ar9170 *ar = usb_get_intfdata(intf); carl9170_usb_resume() local
1157 if (!ar) carl9170_usb_resume()
1160 usb_unpoison_anchored_urbs(&ar->rx_anch); carl9170_usb_resume()
1161 carl9170_set_state(ar, CARL9170_STOPPED); carl9170_usb_resume()
1174 err = carl9170_usb_init_device(ar); carl9170_usb_resume()
1181 carl9170_usb_cancel_urbs(ar); carl9170_usb_resume()
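Note: carl9170_exec_cmd() above is a synchronous wrapper over an asynchronous USB pipe: it parks the response buffer under ar->cmd_lock, submits the command URB, then blocks in wait_for_completion_timeout() for up to one second; a timeout on a started device escalates to carl9170_restart(). A rough portable-C sketch of that wait-with-deadline shape, assuming pthreads; the rx_path thread stands in for the USB completion handler, and all names are illustrative:

    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    /* one-shot command context, loosely modeled on carl9170_exec_cmd():
     * the submitter parks a response slot, fires the command, and waits
     * up to one second for the completion the RX path signals. */
    struct cmd_ctx {
        pthread_mutex_t lock;
        pthread_cond_t  done;
        int             completed;
        int             response;   /* stands in for ar->readbuf */
    };

    static void *rx_path(void *arg)  /* simulated command feedback */
    {
        struct cmd_ctx *c = arg;
        pthread_mutex_lock(&c->lock);
        c->response = 42;
        c->completed = 1;
        pthread_cond_signal(&c->done);
        pthread_mutex_unlock(&c->lock);
        return NULL;
    }

    static int exec_cmd(struct cmd_ctx *c)
    {
        struct timespec to;
        clock_gettime(CLOCK_REALTIME, &to);
        to.tv_sec += 1;                       /* HZ analogue: 1 s */

        pthread_t rx;
        pthread_create(&rx, NULL, rx_path, c);

        pthread_mutex_lock(&c->lock);
        int err = 0;
        while (!c->completed && !err)
            err = pthread_cond_timedwait(&c->done, &c->lock, &to);
        pthread_mutex_unlock(&c->lock);
        pthread_join(rx, NULL);
        return err ? -1 : c->response;
    }

    int main(void)
    {
        struct cmd_ctx c = { PTHREAD_MUTEX_INITIALIZER,
                             PTHREAD_COND_INITIALIZER, 0, 0 };
        printf("response: %d\n", exec_cmd(&c));
        return 0;
    }

On timeout the sketch returns -1 where the driver would log "no command feedback" and schedule a CARL9170_RR_COMMAND_TIMEOUT restart.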
H A Ddebug.c54 char *(*read)(struct ar9170 *ar, char *buf, size_t bufsize,
66 struct ar9170 *ar; carl9170_debugfs_read() local
74 ar = file->private_data; carl9170_debugfs_read()
76 if (!ar) carl9170_debugfs_read()
89 mutex_lock(&ar->mutex); carl9170_debugfs_read()
90 if (!CHK_DEV_STATE(ar, dfops->req_dev_state)) { carl9170_debugfs_read()
96 res_buf = dfops->read(ar, buf, dfops->read_bufsize, &ret); carl9170_debugfs_read()
108 mutex_unlock(&ar->mutex); carl9170_debugfs_read()
116 struct ar9170 *ar; carl9170_debugfs_write() local
126 ar = file->private_data; carl9170_debugfs_write()
128 if (!ar) carl9170_debugfs_write()
144 if (mutex_trylock(&ar->mutex) == 0) { carl9170_debugfs_write()
149 if (!CHK_DEV_STATE(ar, dfops->req_dev_state)) { carl9170_debugfs_write()
154 err = dfops->write(ar, buf, count); carl9170_debugfs_write()
159 mutex_unlock(&ar->mutex); carl9170_debugfs_write()
205 static char *carl9170_debugfs_ ##name ## _read(struct ar9170 *ar, \
214 static char *carl9170_debugfs_mem_usage_read(struct ar9170 *ar, char *buf, carl9170_debugfs_mem_usage_read() argument
217 spin_lock_bh(&ar->mem_lock); carl9170_debugfs_mem_usage_read()
220 ar->fw.mem_blocks, ar->mem_bitmap); carl9170_debugfs_mem_usage_read()
223 bitmap_weight(ar->mem_bitmap, ar->fw.mem_blocks), carl9170_debugfs_mem_usage_read()
224 ar->fw.mem_blocks, atomic_read(&ar->mem_allocs)); carl9170_debugfs_mem_usage_read()
227 atomic_read(&ar->mem_free_blocks), carl9170_debugfs_mem_usage_read()
228 (atomic_read(&ar->mem_free_blocks) * ar->fw.mem_block_size) / 1024, carl9170_debugfs_mem_usage_read()
229 (ar->fw.mem_blocks * ar->fw.mem_block_size) / 1024); carl9170_debugfs_mem_usage_read()
231 spin_unlock_bh(&ar->mem_lock); carl9170_debugfs_mem_usage_read()
237 static char *carl9170_debugfs_qos_stat_read(struct ar9170 *ar, char *buf, carl9170_debugfs_qos_stat_read() argument
246 spin_lock_bh(&ar->tx_stats_lock); carl9170_debugfs_qos_stat_read()
251 ar->tx_stats[0].len, ar->tx_stats[0].limit, carl9170_debugfs_qos_stat_read()
252 ar->tx_stats[1].len, ar->tx_stats[1].limit, carl9170_debugfs_qos_stat_read()
253 ar->tx_stats[2].len, ar->tx_stats[2].limit, carl9170_debugfs_qos_stat_read()
254 ar->tx_stats[3].len, ar->tx_stats[3].limit); carl9170_debugfs_qos_stat_read()
259 ar->tx_stats[0].count, ar->tx_stats[1].count, carl9170_debugfs_qos_stat_read()
260 ar->tx_stats[2].count, ar->tx_stats[3].count); carl9170_debugfs_qos_stat_read()
262 spin_unlock_bh(&ar->tx_stats_lock); carl9170_debugfs_qos_stat_read()
268 skb_queue_len(&ar->tx_pending[0]), carl9170_debugfs_qos_stat_read()
269 skb_queue_len(&ar->tx_status[0]), carl9170_debugfs_qos_stat_read()
270 skb_queue_len(&ar->tx_pending[1]), carl9170_debugfs_qos_stat_read()
271 skb_queue_len(&ar->tx_status[1]), carl9170_debugfs_qos_stat_read()
272 skb_queue_len(&ar->tx_pending[2]), carl9170_debugfs_qos_stat_read()
273 skb_queue_len(&ar->tx_status[2]), carl9170_debugfs_qos_stat_read()
274 skb_queue_len(&ar->tx_pending[3]), carl9170_debugfs_qos_stat_read()
275 skb_queue_len(&ar->tx_status[3])); carl9170_debugfs_qos_stat_read()
281 static void carl9170_debugfs_format_frame(struct ar9170 *ar, carl9170_debugfs_format_frame() argument
298 static char *carl9170_debugfs_ampdu_state_read(struct ar9170 *ar, char *buf, carl9170_debugfs_ampdu_state_read() argument
307 list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) { carl9170_debugfs_ampdu_state_read()
340 carl9170_debugfs_format_frame(ar, skb, prefix, buf, carl9170_debugfs_ampdu_state_read()
354 static void carl9170_debugfs_queue_dump(struct ar9170 *ar, char *buf, carl9170_debugfs_queue_dump() argument
364 carl9170_debugfs_format_frame(ar, skb, prefix, buf, skb_queue_walk()
372 static char *carl9170_debugfs_##q ##_##qi ##_read(struct ar9170 *ar, \
375 carl9170_debugfs_queue_dump(ar, buf, len, bufsize, &ar->q[qi]); \
380 static char *carl9170_debugfs_sta_psm_read(struct ar9170 *ar, char *buf, carl9170_debugfs_sta_psm_read() argument
383 ADD(buf, *len, bufsize, "psm state: %s\n", (ar->ps.off_override ? carl9170_debugfs_sta_psm_read()
384 "FORCE CAM" : (ar->ps.state ? "PSM" : "CAM"))); carl9170_debugfs_sta_psm_read()
386 ADD(buf, *len, bufsize, "sleep duration: %d ms.\n", ar->ps.sleep_ms); carl9170_debugfs_sta_psm_read()
388 jiffies_to_msecs(jiffies - ar->ps.last_action)); carl9170_debugfs_sta_psm_read()
390 jiffies_to_msecs(jiffies - ar->ps.last_slept)); carl9170_debugfs_sta_psm_read()
396 static char *carl9170_debugfs_tx_stuck_read(struct ar9170 *ar, char *buf, carl9170_debugfs_tx_stuck_read() argument
401 for (i = 0; i < ar->hw->queues; i++) { carl9170_debugfs_tx_stuck_read()
403 i, ieee80211_queue_stopped(ar->hw, i) ? carl9170_debugfs_tx_stuck_read()
404 jiffies_to_msecs(jiffies - ar->queue_stop_timeout[i]) : 0, carl9170_debugfs_tx_stuck_read()
405 jiffies_to_msecs(ar->max_queue_stop_timeout[i])); carl9170_debugfs_tx_stuck_read()
407 ar->max_queue_stop_timeout[i] = 0; carl9170_debugfs_tx_stuck_read()
414 static char *carl9170_debugfs_phy_noise_read(struct ar9170 *ar, char *buf, carl9170_debugfs_phy_noise_read() argument
419 err = carl9170_get_noisefloor(ar); carl9170_debugfs_phy_noise_read()
426 ar->noise[0], ar->noise[2]); carl9170_debugfs_phy_noise_read()
428 ar->noise[1], ar->noise[3]); carl9170_debugfs_phy_noise_read()
434 static char *carl9170_debugfs_vif_dump_read(struct ar9170 *ar, char *buf, carl9170_debugfs_vif_dump_read() argument
441 ar->vifs, ar->fw.vif_num); carl9170_debugfs_vif_dump_read()
444 ar->fw.vif_num, &ar->vif_bitmap); carl9170_debugfs_vif_dump_read()
447 list_for_each_entry_rcu(iter, &ar->vif_list, list) { carl9170_debugfs_vif_dump_read()
450 " mac:%pM %s]\n", i, (carl9170_get_main_vif(ar) == vif ? carl9170_debugfs_vif_dump_read()
461 #define UPDATE_COUNTER(ar, name) ({ \
467 ar->debug.stats.name##_counter[__i] = 0; \
470 if (IS_STARTED(ar)) \
471 __err = carl9170_read_mreg(ar, ARRAY_SIZE(name##_regs), \
472 __tmp, ar->debug.stats.name##_counter); \
475 #define TALLY_SUM_UP(ar, name) do { \
479 ar->debug.stats.name##_sum[__i] += \
480 ar->debug.stats.name##_counter[__i]; \
485 static char *carl9170_debugfs_##name ## _read(struct ar9170 *ar, \
496 err = UPDATE_COUNTER(ar, name); \
502 TALLY_SUM_UP(ar, name); \
506 name##_regs[i].nreg, ar->debug.stats.name ##_sum[i],\
507 ar->debug.stats.name ##_counter[i]); \
515 static char *carl9170_debugfs_##name ## _read(struct ar9170 *ar, \
526 err = UPDATE_COUNTER(ar, name); \
535 ar->debug.stats.name##_counter[i]); \
542 static ssize_t carl9170_debugfs_hw_ioread32_write(struct ar9170 *ar, carl9170_debugfs_hw_ioread32_write() argument
579 err = carl9170_read_reg(ar, reg + (i << 2), &tmp); carl9170_debugfs_hw_ioread32_write()
583 ar->debug.ring[ar->debug.ring_tail].reg = reg + (i << 2); carl9170_debugfs_hw_ioread32_write()
584 ar->debug.ring[ar->debug.ring_tail].value = tmp; carl9170_debugfs_hw_ioread32_write()
585 ar->debug.ring_tail++; carl9170_debugfs_hw_ioread32_write()
586 ar->debug.ring_tail %= CARL9170_DEBUG_RING_SIZE; carl9170_debugfs_hw_ioread32_write()
593 static char *carl9170_debugfs_hw_ioread32_read(struct ar9170 *ar, char *buf, carl9170_debugfs_hw_ioread32_read() argument
598 while (ar->debug.ring_head != ar->debug.ring_tail) { carl9170_debugfs_hw_ioread32_read()
600 ar->debug.ring[ar->debug.ring_head].reg, carl9170_debugfs_hw_ioread32_read()
601 ar->debug.ring[ar->debug.ring_head].value); carl9170_debugfs_hw_ioread32_read()
603 ar->debug.ring_head++; carl9170_debugfs_hw_ioread32_read()
604 ar->debug.ring_head %= CARL9170_DEBUG_RING_SIZE; carl9170_debugfs_hw_ioread32_read()
609 ar->debug.ring_head = ar->debug.ring_tail; carl9170_debugfs_hw_ioread32_read()
614 static ssize_t carl9170_debugfs_bug_write(struct ar9170 *ar, const char *buf, carl9170_debugfs_bug_write() argument
624 ar->needs_full_reset = true; carl9170_debugfs_bug_write()
628 if (!IS_STARTED(ar)) { carl9170_debugfs_bug_write()
633 ar->needs_full_reset = false; carl9170_debugfs_bug_write()
637 err = carl9170_mac_reset(ar); carl9170_debugfs_bug_write()
644 err = carl9170_set_channel(ar, ar->hw->conf.chandef.chan, carl9170_debugfs_bug_write()
645 cfg80211_get_chandef_type(&ar->hw->conf.chandef)); carl9170_debugfs_bug_write()
655 carl9170_restart(ar, CARL9170_RR_USER_REQUEST); carl9170_debugfs_bug_write()
661 static char *carl9170_debugfs_bug_read(struct ar9170 *ar, char *buf, carl9170_debugfs_bug_read() argument
667 ar->restart_counter, ar->last_reason); carl9170_debugfs_bug_read()
669 ar->total_chan_fail, ar->chan_fail); carl9170_debugfs_bug_read()
671 ar->fw.err_counter); carl9170_debugfs_bug_read()
673 ar->fw.bug_counter); carl9170_debugfs_bug_read()
675 atomic_read(&ar->pending_restarts)); carl9170_debugfs_bug_read()
689 static char *carl9170_debugfs_erp_read(struct ar9170 *ar, char *buf, carl9170_debugfs_erp_read() argument
692 ADD(buf, *ret, bufsize, "ERP Setting: (%d) -> %s\n", ar->erp_mode, carl9170_debugfs_erp_read()
693 erp_modes[ar->erp_mode]); carl9170_debugfs_erp_read()
697 static ssize_t carl9170_debugfs_erp_write(struct ar9170 *ar, const char *buf, carl9170_debugfs_erp_write() argument
713 ar->erp_mode = val; carl9170_debugfs_erp_write()
719 static ssize_t carl9170_debugfs_hw_iowrite32_write(struct ar9170 *ar, carl9170_debugfs_hw_iowrite32_write() argument
747 err = carl9170_write_reg(ar, reg, val); carl9170_debugfs_hw_iowrite32_write()
771 atomic_read(&ar->tx_anch_urbs));
773 atomic_read(&ar->rx_anch_urbs));
775 atomic_read(&ar->rx_work_urbs));
777 atomic_read(&ar->rx_pool_urbs));
780 atomic_read(&ar->tx_total_queued));
782 atomic_read(&ar->tx_ampdu_scheduler));
785 atomic_read(&ar->tx_total_pending));
788 ar->tx_ampdu_list_len);
791 atomic_read(&ar->tx_ampdu_upload));
794 jiffies_to_msecs(jiffies - ar->tx_janitor_last_run));
796 DEBUGFS_READONLY_FILE(tx_dropped, 20, "%d", ar->tx_dropped);
798 DEBUGFS_READONLY_FILE(rx_dropped, 20, "%d", ar->rx_dropped);
800 DEBUGFS_READONLY_FILE(sniffer_enabled, 20, "%d", ar->sniffer_enabled);
802 ar->rx_software_decryption);
804 ar->current_factor);
806 ar->current_density);
808 DEBUGFS_READONLY_FILE(beacon_int, 20, "%d TU", ar->global_beacon_int);
809 DEBUGFS_READONLY_FILE(pretbtt, 20, "%d TU", ar->global_pretbtt);
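The DEBUGFS_READONLY_FILE() lines above are a token-pasting macro that stamps out one read callback per statistic. A self-contained sketch of the same idiom, assuming a hypothetical struct dev_stats in place of struct ar9170:

#include <stdio.h>

/* Illustrative stand-in for the driver's ar9170 state. */
struct dev_stats {
	int tx_dropped;
	int rx_dropped;
};

/*
 * Mirror of the DEBUGFS_READONLY_FILE(name, bufsize, fmt, value) idiom:
 * each expansion becomes its own formatter for one statistic.
 */
#define READONLY_FILE(name, fmt, expr)					\
static int read_##name(struct dev_stats *s, char *buf, int len)		\
{									\
	return snprintf(buf, len, fmt "\n", (expr));			\
}

READONLY_FILE(tx_dropped, "%d", s->tx_dropped)
READONLY_FILE(rx_dropped, "%d", s->rx_dropped)

int main(void)
{
	struct dev_stats s = { .tx_dropped = 3, .rx_dropped = 7 };
	char buf[32];

	read_tx_dropped(&s, buf, sizeof(buf));
	fputs(buf, stdout);
	read_rx_dropped(&s, buf, sizeof(buf));
	fputs(buf, stdout);
	return 0;
}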
811 void carl9170_debugfs_register(struct ar9170 *ar) carl9170_debugfs_register() argument
813 ar->debug_dir = debugfs_create_dir(KBUILD_MODNAME, carl9170_debugfs_register()
814 ar->hw->wiphy->debugfsdir); carl9170_debugfs_register()
818 ar->debug_dir, ar, \ carl9170_debugfs_register()
881 void carl9170_debugfs_unregister(struct ar9170 *ar) carl9170_debugfs_unregister() argument
883 debugfs_remove_recursive(ar->debug_dir); carl9170_debugfs_unregister()
H A Dmac.c44 int carl9170_set_dyn_sifs_ack(struct ar9170 *ar) carl9170_set_dyn_sifs_ack() argument
48 if (conf_is_ht40(&ar->hw->conf)) carl9170_set_dyn_sifs_ack()
51 if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) carl9170_set_dyn_sifs_ack()
57 return carl9170_write_reg(ar, AR9170_MAC_REG_DYNAMIC_SIFS_ACK, val); carl9170_set_dyn_sifs_ack()
60 int carl9170_set_rts_cts_rate(struct ar9170 *ar) carl9170_set_rts_cts_rate() argument
64 if (conf_is_ht(&ar->hw->conf)) { carl9170_set_rts_cts_rate()
69 if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) { carl9170_set_rts_cts_rate()
80 return carl9170_write_reg(ar, AR9170_MAC_REG_RTS_CTS_RATE, carl9170_set_rts_cts_rate()
84 int carl9170_set_slot_time(struct ar9170 *ar) carl9170_set_slot_time() argument
90 vif = carl9170_get_main_vif(ar); carl9170_set_slot_time()
96 if ((ar->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ) || carl9170_set_slot_time()
102 return carl9170_write_reg(ar, AR9170_MAC_REG_SLOT_TIME, carl9170_set_slot_time()
106 int carl9170_set_mac_rates(struct ar9170 *ar) carl9170_set_mac_rates() argument
112 vif = carl9170_get_main_vif(ar); carl9170_set_mac_rates()
123 if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ) carl9170_set_mac_rates()
128 carl9170_regwrite_begin(ar); carl9170_set_mac_rates()
136 int carl9170_set_qos(struct ar9170 *ar) carl9170_set_qos() argument
138 carl9170_regwrite_begin(ar); carl9170_set_qos()
140 carl9170_regwrite(AR9170_MAC_REG_AC0_CW, ar->edcf[0].cw_min | carl9170_set_qos()
141 (ar->edcf[0].cw_max << 16)); carl9170_set_qos()
142 carl9170_regwrite(AR9170_MAC_REG_AC1_CW, ar->edcf[1].cw_min | carl9170_set_qos()
143 (ar->edcf[1].cw_max << 16)); carl9170_set_qos()
144 carl9170_regwrite(AR9170_MAC_REG_AC2_CW, ar->edcf[2].cw_min | carl9170_set_qos()
145 (ar->edcf[2].cw_max << 16)); carl9170_set_qos()
146 carl9170_regwrite(AR9170_MAC_REG_AC3_CW, ar->edcf[3].cw_min | carl9170_set_qos()
147 (ar->edcf[3].cw_max << 16)); carl9170_set_qos()
148 carl9170_regwrite(AR9170_MAC_REG_AC4_CW, ar->edcf[4].cw_min | carl9170_set_qos()
149 (ar->edcf[4].cw_max << 16)); carl9170_set_qos()
152 ((ar->edcf[0].aifs * 9 + 10)) | carl9170_set_qos()
153 ((ar->edcf[1].aifs * 9 + 10) << 12) | carl9170_set_qos()
154 ((ar->edcf[2].aifs * 9 + 10) << 24)); carl9170_set_qos()
156 ((ar->edcf[2].aifs * 9 + 10) >> 8) | carl9170_set_qos()
157 ((ar->edcf[3].aifs * 9 + 10) << 4) | carl9170_set_qos()
158 ((ar->edcf[4].aifs * 9 + 10) << 16)); carl9170_set_qos()
161 ar->edcf[0].txop | ar->edcf[1].txop << 16); carl9170_set_qos()
163 ar->edcf[2].txop | ar->edcf[3].txop << 16 | carl9170_set_qos()
164 ar->edcf[4].txop << 24); carl9170_set_qos()
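carl9170_set_qos() packs each access category's contention window into one 32-bit register — cw_min in the low half, cw_max shifted into the high half — and converts the AIFS slot count to microseconds with aifs * 9 + 10 (9 us per slot on top of a 10 us SIFS) before splitting the five ACs across two AIFS registers. A compilable model of just the packing arithmetic (the field values are rough 802.11 best-effort defaults, for illustration only):

#include <stdio.h>
#include <stdint.h>

struct edcf_param {
	uint16_t cw_min, cw_max;
	uint8_t aifs;
};

/* cw_min in bits 0..15, cw_max in bits 16..31 — one register per AC */
static uint32_t pack_cw(const struct edcf_param *p)
{
	return (uint32_t)p->cw_min | ((uint32_t)p->cw_max << 16);
}

/* AIFS is counted in 9 us slots on top of a 10 us SIFS */
static uint32_t aifs_to_usec(const struct edcf_param *p)
{
	return p->aifs * 9 + 10;
}

int main(void)
{
	struct edcf_param be = { .cw_min = 15, .cw_max = 1023, .aifs = 3 };

	printf("AC_BE CW reg:  0x%08x\n", (unsigned)pack_cw(&be));
	printf("AC_BE AIFS us: %u\n", (unsigned)aifs_to_usec(&be));
	return 0;
}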
171 int carl9170_init_mac(struct ar9170 *ar) carl9170_init_mac() argument
173 carl9170_regwrite_begin(ar); carl9170_init_mac()
257 static int carl9170_set_mac_reg(struct ar9170 *ar, carl9170_set_mac_reg() argument
265 carl9170_regwrite_begin(ar); carl9170_set_mac_reg()
275 int carl9170_mod_virtual_mac(struct ar9170 *ar, const unsigned int id, carl9170_mod_virtual_mac() argument
278 if (WARN_ON(id >= ar->fw.vif_num)) carl9170_mod_virtual_mac()
281 return carl9170_set_mac_reg(ar, carl9170_mod_virtual_mac()
285 int carl9170_update_multicast(struct ar9170 *ar, const u64 mc_hash) carl9170_update_multicast() argument
289 carl9170_regwrite_begin(ar); carl9170_update_multicast()
297 ar->cur_mc_hash = mc_hash; carl9170_update_multicast()
301 int carl9170_set_operating_mode(struct ar9170 *ar) carl9170_set_operating_mode() argument
304 struct ath_common *common = &ar->common; carl9170_set_operating_mode()
315 vif = carl9170_get_main_vif(ar); carl9170_set_operating_mode()
371 if (ar->rx_software_decryption) carl9170_set_operating_mode()
374 if (ar->sniffer_enabled) { carl9170_set_operating_mode()
378 err = carl9170_set_mac_reg(ar, AR9170_MAC_REG_MAC_ADDR_L, mac_addr); carl9170_set_operating_mode()
382 err = carl9170_set_mac_reg(ar, AR9170_MAC_REG_BSSID_L, bssid); carl9170_set_operating_mode()
386 carl9170_regwrite_begin(ar); carl9170_set_operating_mode()
396 int carl9170_set_hwretry_limit(struct ar9170 *ar, const unsigned int max_retry) carl9170_set_hwretry_limit() argument
400 return carl9170_write_reg(ar, AR9170_MAC_REG_RETRY_MAX, tmp); carl9170_set_hwretry_limit()
403 int carl9170_set_beacon_timers(struct ar9170 *ar) carl9170_set_beacon_timers() argument
410 vif = carl9170_get_main_vif(ar); carl9170_set_beacon_timers()
416 if (mvif->enable_beacon && !WARN_ON(!ar->beacon_enabled)) { carl9170_set_beacon_timers()
417 ar->global_beacon_int = vif->bss_conf.beacon_int / carl9170_set_beacon_timers()
418 ar->beacon_enabled; carl9170_set_beacon_timers()
436 ar->global_beacon_int = vif->bss_conf.beacon_int; carl9170_set_beacon_timers()
439 ar->hw->conf.ps_dtim_period); carl9170_set_beacon_timers()
445 if (ar->global_beacon_int) { carl9170_set_beacon_timers()
446 if (ar->global_beacon_int < 15) { carl9170_set_beacon_timers()
451 ar->global_pretbtt = ar->global_beacon_int - carl9170_set_beacon_timers()
454 ar->global_pretbtt = 0; carl9170_set_beacon_timers()
457 ar->global_beacon_int = 0; carl9170_set_beacon_timers()
458 ar->global_pretbtt = 0; carl9170_set_beacon_timers()
463 SET_VAL(AR9170_MAC_BCN_PERIOD, v, ar->global_beacon_int); carl9170_set_beacon_timers()
464 SET_VAL(AR9170_MAC_PRETBTT, pretbtt, ar->global_pretbtt); carl9170_set_beacon_timers()
465 SET_VAL(AR9170_MAC_PRETBTT2, pretbtt, ar->global_pretbtt); carl9170_set_beacon_timers()
467 carl9170_regwrite_begin(ar); carl9170_set_beacon_timers()
474 int carl9170_upload_key(struct ar9170 *ar, const u8 id, const u8 *mac, carl9170_upload_key() argument
491 return carl9170_exec_cmd(ar, CARL9170_CMD_EKEY, carl9170_upload_key()
495 int carl9170_disable_key(struct ar9170 *ar, const u8 id) carl9170_disable_key() argument
501 return carl9170_exec_cmd(ar, CARL9170_CMD_DKEY, carl9170_disable_key()
505 int carl9170_set_mac_tpc(struct ar9170 *ar, struct ieee80211_channel *channel) carl9170_set_mac_tpc() argument
509 if (ar->eeprom.tx_mask != 1) carl9170_set_mac_tpc()
516 power = ar->power_2G_ofdm[0] & 0x3f; carl9170_set_mac_tpc()
519 power = ar->power_5G_leg[0] & 0x3f; carl9170_set_mac_tpc()
525 power = min_t(unsigned int, power, ar->hw->conf.power_level * 2); carl9170_set_mac_tpc()
527 carl9170_regwrite_begin(ar); carl9170_set_mac_tpc()
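carl9170_set_mac_tpc() reads a 6-bit EEPROM power value in 0.5 dBm steps and clamps it against mac80211's power_level, which arrives in whole dBm — hence the * 2 before the min_t(). A small model of that unit conversion and clamp:

#include <stdio.h>

/*
 * The hardware takes power in 0.5 dBm steps, capped to 6 bits, while
 * the stack hands the driver a limit in whole dBm.
 */
static unsigned int clamp_tpc(unsigned int eeprom_half_dbm,
			      unsigned int conf_dbm)
{
	unsigned int power = eeprom_half_dbm & 0x3f;	/* 6-bit field */
	unsigned int limit = conf_dbm * 2;		/* dBm -> 0.5 dBm */

	return power < limit ? power : limit;
}

int main(void)
{
	/* eeprom says 20 dBm (40 half-dBm steps), user limit 17 dBm */
	printf("tpc = %u (0.5 dBm units)\n", clamp_tpc(40, 17));
	return 0;
}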
H A Dtx.c48 static inline unsigned int __carl9170_get_queue(struct ar9170 *ar, __carl9170_get_queue() argument
64 static inline unsigned int carl9170_get_queue(struct ar9170 *ar, carl9170_get_queue() argument
67 return __carl9170_get_queue(ar, skb_get_queue_mapping(skb)); carl9170_get_queue()
70 static bool is_mem_full(struct ar9170 *ar) is_mem_full() argument
72 return (DIV_ROUND_UP(IEEE80211_MAX_FRAME_LEN, ar->fw.mem_block_size) > is_mem_full()
73 atomic_read(&ar->mem_free_blocks)); is_mem_full()
76 static void carl9170_tx_accounting(struct ar9170 *ar, struct sk_buff *skb) carl9170_tx_accounting() argument
81 atomic_inc(&ar->tx_total_queued); carl9170_tx_accounting()
84 spin_lock_bh(&ar->tx_stats_lock); carl9170_tx_accounting()
92 ar->tx_stats[queue].len++; carl9170_tx_accounting()
93 ar->tx_stats[queue].count++; carl9170_tx_accounting()
95 mem_full = is_mem_full(ar); carl9170_tx_accounting()
96 for (i = 0; i < ar->hw->queues; i++) { carl9170_tx_accounting()
97 if (mem_full || ar->tx_stats[i].len >= ar->tx_stats[i].limit) { carl9170_tx_accounting()
98 ieee80211_stop_queue(ar->hw, i); carl9170_tx_accounting()
99 ar->queue_stop_timeout[i] = jiffies; carl9170_tx_accounting()
103 spin_unlock_bh(&ar->tx_stats_lock); carl9170_tx_accounting()
107 static struct ieee80211_sta *__carl9170_get_tx_sta(struct ar9170 *ar, __carl9170_get_tx_sta() argument
121 vif = rcu_dereference(ar->vif_priv[vif_id].vif); __carl9170_get_tx_sta()
138 static void carl9170_tx_ps_unblock(struct ar9170 *ar, struct sk_buff *skb) carl9170_tx_ps_unblock() argument
144 sta = __carl9170_get_tx_sta(ar, skb); carl9170_tx_ps_unblock()
150 ieee80211_sta_block_awake(ar->hw, sta, false); carl9170_tx_ps_unblock()
156 static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb) carl9170_tx_accounting_free() argument
162 spin_lock_bh(&ar->tx_stats_lock); carl9170_tx_accounting_free()
164 ar->tx_stats[queue].len--; carl9170_tx_accounting_free()
166 if (!is_mem_full(ar)) { carl9170_tx_accounting_free()
168 for (i = 0; i < ar->hw->queues; i++) { carl9170_tx_accounting_free()
169 if (ar->tx_stats[i].len >= CARL9170_NUM_TX_LIMIT_SOFT) carl9170_tx_accounting_free()
172 if (ieee80211_queue_stopped(ar->hw, i)) { carl9170_tx_accounting_free()
175 tmp = jiffies - ar->queue_stop_timeout[i]; carl9170_tx_accounting_free()
176 if (tmp > ar->max_queue_stop_timeout[i]) carl9170_tx_accounting_free()
177 ar->max_queue_stop_timeout[i] = tmp; carl9170_tx_accounting_free()
180 ieee80211_wake_queue(ar->hw, i); carl9170_tx_accounting_free()
184 spin_unlock_bh(&ar->tx_stats_lock); carl9170_tx_accounting_free()
186 if (atomic_dec_and_test(&ar->tx_total_queued)) carl9170_tx_accounting_free()
187 complete(&ar->tx_flush); carl9170_tx_accounting_free()
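Taken together, carl9170_tx_accounting() and carl9170_tx_accounting_free() implement stop/wake hysteresis: a queue is stopped once it reaches its hard limit (or device memory runs out) and is only woken again after draining below CARL9170_NUM_TX_LIMIT_SOFT, so it cannot flap on every packet. A toy model of the two thresholds (the limit values here are invented):

#include <stdio.h>
#include <stdbool.h>

#define LIMIT_HARD 32	/* stop the queue at this depth */
#define LIMIT_SOFT 16	/* only wake again once below this */

/* Hysteresis keeps the queue from flapping between stop and wake. */
static bool queue_stopped;

static void enqueue(int *len)
{
	if (++(*len) >= LIMIT_HARD && !queue_stopped) {
		queue_stopped = true;
		printf("stop queue at len=%d\n", *len);
	}
}

static void dequeue(int *len)
{
	if (--(*len) < LIMIT_SOFT && queue_stopped) {
		queue_stopped = false;
		printf("wake queue at len=%d\n", *len);
	}
}

int main(void)
{
	int len = 0, i;

	for (i = 0; i < LIMIT_HARD; i++)
		enqueue(&len);
	while (len)
		dequeue(&len);
	return 0;
}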
190 static int carl9170_alloc_dev_space(struct ar9170 *ar, struct sk_buff *skb) carl9170_alloc_dev_space() argument
196 atomic_inc(&ar->mem_allocs); carl9170_alloc_dev_space()
198 chunks = DIV_ROUND_UP(skb->len, ar->fw.mem_block_size); carl9170_alloc_dev_space()
199 if (unlikely(atomic_sub_return(chunks, &ar->mem_free_blocks) < 0)) { carl9170_alloc_dev_space()
200 atomic_add(chunks, &ar->mem_free_blocks); carl9170_alloc_dev_space()
204 spin_lock_bh(&ar->mem_lock); carl9170_alloc_dev_space()
205 cookie = bitmap_find_free_region(ar->mem_bitmap, ar->fw.mem_blocks, 0); carl9170_alloc_dev_space()
206 spin_unlock_bh(&ar->mem_lock); carl9170_alloc_dev_space()
209 atomic_add(chunks, &ar->mem_free_blocks); carl9170_alloc_dev_space()
226 static void carl9170_release_dev_space(struct ar9170 *ar, struct sk_buff *skb) carl9170_release_dev_space() argument
250 WARN_ON_ONCE(cookie > ar->fw.mem_blocks))) carl9170_release_dev_space()
253 atomic_add(DIV_ROUND_UP(skb->len, ar->fw.mem_block_size), carl9170_release_dev_space()
254 &ar->mem_free_blocks); carl9170_release_dev_space()
256 spin_lock_bh(&ar->mem_lock); carl9170_release_dev_space()
257 bitmap_release_region(ar->mem_bitmap, cookie - 1, 0); carl9170_release_dev_space()
258 spin_unlock_bh(&ar->mem_lock); carl9170_release_dev_space()
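carl9170_alloc_dev_space()/carl9170_release_dev_space() combine two structures: an atomic counter of free firmware memory blocks (debited by DIV_ROUND_UP(len, block_size) chunks and rolled back on failure) and a bitmap whose set bit becomes the 1-based cookie carried in the TX descriptor. A single-threaded sketch of the same accounting — the locks and atomics are elided, and MEM_BLOCKS/BLOCK_SIZE are illustrative:

#include <stdio.h>
#include <stdint.h>

#define MEM_BLOCKS	16
#define BLOCK_SIZE	1664	/* illustrative firmware block size */

static uint32_t bitmap;		/* bit set = cookie in use */
static int free_blocks = MEM_BLOCKS;

/* Reserve enough blocks for len bytes; return a 1-based cookie or 0. */
static int alloc_dev_space(unsigned int len)
{
	int chunks = (len + BLOCK_SIZE - 1) / BLOCK_SIZE; /* DIV_ROUND_UP */
	int i;

	if (free_blocks < chunks)
		return 0;		/* driver would defer the frame */
	free_blocks -= chunks;

	/* block-count accounting is separate from the cookie bitmap */
	for (i = 0; i < MEM_BLOCKS; i++) {
		if (!(bitmap & (1u << i))) {
			bitmap |= 1u << i;
			return i + 1;	/* 0 means "no cookie" */
		}
	}
	free_blocks += chunks;		/* roll back, no cookie found */
	return 0;
}

static void release_dev_space(int cookie, unsigned int len)
{
	bitmap &= ~(1u << (cookie - 1));
	free_blocks += (len + BLOCK_SIZE - 1) / BLOCK_SIZE;
}

int main(void)
{
	int cookie = alloc_dev_space(3000);

	printf("cookie=%d free=%d\n", cookie, free_blocks);
	release_dev_space(cookie, 3000);
	printf("after release: free=%d\n", free_blocks);
	return 0;
}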
264 struct ar9170 *ar; carl9170_tx_release() local
274 ar = arinfo->ar; carl9170_tx_release()
275 if (WARN_ON_ONCE(!ar)) carl9170_tx_release()
285 if (atomic_read(&ar->tx_total_queued)) carl9170_tx_release()
286 ar->tx_schedule = true; carl9170_tx_release()
289 if (!atomic_read(&ar->tx_ampdu_upload)) carl9170_tx_release()
290 ar->tx_ampdu_schedule = true; carl9170_tx_release()
316 ieee80211_free_txskb(ar->hw, skb); carl9170_tx_release()
327 ieee80211_tx_status_irqsafe(ar->hw, skb); carl9170_tx_release()
346 static void carl9170_tx_shift_bm(struct ar9170 *ar, carl9170_tx_shift_bm() argument
378 static void carl9170_tx_status_process_ampdu(struct ar9170 *ar, carl9170_tx_status_process_ampdu() argument
393 sta = __carl9170_get_tx_sta(ar, skb); carl9170_tx_status_process_ampdu()
406 carl9170_tx_shift_bm(ar, tid_info, get_seq_h(hdr)); carl9170_tx_status_process_ampdu()
437 static void carl9170_tx_bar_status(struct ar9170 *ar, struct sk_buff *skb, carl9170_tx_bar_status() argument
458 list_for_each_entry_rcu(entry, &ar->bar_list[queue], list) { carl9170_tx_bar_status()
460 spin_lock_bh(&ar->bar_list_lock[queue]); carl9170_tx_bar_status()
462 spin_unlock_bh(&ar->bar_list_lock[queue]); carl9170_tx_bar_status()
476 void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb, carl9170_tx_status() argument
481 carl9170_tx_accounting_free(ar, skb); carl9170_tx_status()
485 carl9170_tx_bar_status(ar, skb, txinfo); carl9170_tx_status()
490 ar->tx_ack_failures++; carl9170_tx_status()
493 carl9170_tx_status_process_ampdu(ar, skb, txinfo); carl9170_tx_status()
495 carl9170_tx_ps_unblock(ar, skb); carl9170_tx_status()
500 void carl9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb) carl9170_tx_callback() argument
504 atomic_dec(&ar->tx_total_pending); carl9170_tx_callback()
507 atomic_dec(&ar->tx_ampdu_upload); carl9170_tx_callback()
510 tasklet_hi_schedule(&ar->usb_tasklet); carl9170_tx_callback()
513 static struct sk_buff *carl9170_get_queued_skb(struct ar9170 *ar, u8 cookie, carl9170_get_queued_skb() argument
528 carl9170_release_dev_space(ar, skb); skb_queue_walk()
536 static void carl9170_tx_fill_rateinfo(struct ar9170 *ar, unsigned int rix, carl9170_tx_fill_rateinfo() argument
558 static void carl9170_check_queue_stop_timeout(struct ar9170 *ar) carl9170_check_queue_stop_timeout() argument
566 for (i = 0; i < ar->hw->queues; i++) { carl9170_check_queue_stop_timeout()
567 spin_lock_bh(&ar->tx_status[i].lock); carl9170_check_queue_stop_timeout()
569 skb = skb_peek(&ar->tx_status[i]); carl9170_check_queue_stop_timeout()
582 spin_unlock_bh(&ar->tx_status[i].lock); carl9170_check_queue_stop_timeout()
599 carl9170_restart(ar, CARL9170_RR_STUCK_TX); carl9170_check_queue_stop_timeout()
603 static void carl9170_tx_ampdu_timeout(struct ar9170 *ar) carl9170_tx_ampdu_timeout() argument
612 list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) { carl9170_tx_ampdu_timeout()
641 struct ar9170 *ar = container_of(work, struct ar9170, carl9170_tx_janitor() local
643 if (!IS_STARTED(ar)) carl9170_tx_janitor()
646 ar->tx_janitor_last_run = jiffies; carl9170_tx_janitor()
648 carl9170_check_queue_stop_timeout(ar); carl9170_tx_janitor()
649 carl9170_tx_ampdu_timeout(ar); carl9170_tx_janitor()
651 if (!atomic_read(&ar->tx_total_queued)) carl9170_tx_janitor()
654 ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor, carl9170_tx_janitor()
658 static void __carl9170_tx_process_status(struct ar9170 *ar, __carl9170_tx_process_status() argument
668 skb = carl9170_get_queued_skb(ar, cookie, &ar->tx_status[q]); __carl9170_tx_process_status()
685 carl9170_tx_fill_rateinfo(ar, r, t, txinfo); __carl9170_tx_process_status()
686 carl9170_tx_status(ar, skb, success); __carl9170_tx_process_status()
689 void carl9170_tx_process_status(struct ar9170 *ar, carl9170_tx_process_status() argument
701 __carl9170_tx_process_status(ar, cmd->_tx_status[i].cookie, carl9170_tx_process_status()
706 static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar, carl9170_tx_rate_tpc_chains() argument
724 txpower = ar->power_2G_ht40; carl9170_tx_rate_tpc_chains()
726 txpower = ar->power_5G_ht40; carl9170_tx_rate_tpc_chains()
729 txpower = ar->power_2G_ht20; carl9170_tx_rate_tpc_chains()
731 txpower = ar->power_5G_ht20; carl9170_tx_rate_tpc_chains()
739 txpower = ar->power_2G_cck; carl9170_tx_rate_tpc_chains()
741 txpower = ar->power_2G_ofdm; carl9170_tx_rate_tpc_chains()
743 txpower = ar->power_5G_leg; carl9170_tx_rate_tpc_chains()
752 if (ar->eeprom.tx_mask == 1) { carl9170_tx_rate_tpc_chains()
762 *tpc = min_t(unsigned int, *tpc, ar->hw->conf.power_level * 2); carl9170_tx_rate_tpc_chains()
765 static __le32 carl9170_tx_physet(struct ar9170 *ar, carl9170_tx_physet() argument
816 carl9170_tx_rate_tpc_chains(ar, info, txrate, carl9170_tx_physet()
825 static bool carl9170_tx_rts_check(struct ar9170 *ar, carl9170_tx_rts_check() argument
829 switch (ar->erp_mode) { carl9170_tx_rts_check()
849 static bool carl9170_tx_cts_check(struct ar9170 *ar, carl9170_tx_cts_check() argument
852 switch (ar->erp_mode) { carl9170_tx_cts_check()
868 static void carl9170_tx_get_rates(struct ar9170 *ar, carl9170_tx_get_rates() argument
885 static void carl9170_tx_apply_rateset(struct ar9170 *ar, carl9170_tx_apply_rateset() argument
917 phy_set = carl9170_tx_physet(ar, info, txrate); carl9170_tx_apply_rateset()
927 if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack)) carl9170_tx_apply_rateset()
929 else if (carl9170_tx_cts_check(ar, txrate)) carl9170_tx_apply_rateset()
943 if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack)) carl9170_tx_apply_rateset()
946 else if (carl9170_tx_cts_check(ar, txrate)) carl9170_tx_apply_rateset()
955 static int carl9170_tx_prepare(struct ar9170 *ar, carl9170_tx_prepare() argument
979 hw_queue = ar9170_qmap[carl9170_get_queue(ar, skb)]; carl9170_tx_prepare()
1070 arinfo->ar = ar; carl9170_tx_prepare()
1079 static void carl9170_set_immba(struct ar9170 *ar, struct sk_buff *skb) carl9170_set_immba() argument
1087 static void carl9170_set_ampdu_params(struct ar9170 *ar, struct sk_buff *skb) carl9170_set_ampdu_params() argument
1108 if (tmp != ar->current_density) { carl9170_set_ampdu_params()
1109 ar->current_density = tmp; carl9170_set_ampdu_params()
1117 if (tmp != ar->current_factor) { carl9170_set_ampdu_params()
1118 ar->current_factor = tmp; carl9170_set_ampdu_params()
1124 static void carl9170_tx_ampdu(struct ar9170 *ar) carl9170_tx_ampdu() argument
1133 atomic_inc(&ar->tx_ampdu_scheduler); carl9170_tx_ampdu()
1134 ar->tx_ampdu_schedule = false; carl9170_tx_ampdu()
1136 if (atomic_read(&ar->tx_ampdu_upload)) carl9170_tx_ampdu()
1139 if (!ar->tx_ampdu_list_len) carl9170_tx_ampdu()
1145 tid_info = rcu_dereference(ar->tx_ampdu_iter); carl9170_tx_ampdu()
1152 list_for_each_entry_continue_rcu(tid_info, &ar->tx_ampdu_list, list) { carl9170_tx_ampdu()
1187 carl9170_tx_get_rates(ar, tid_info->vif, carl9170_tx_ampdu()
1192 carl9170_tx_apply_rateset(ar, tx_info_first, skb); carl9170_tx_ampdu()
1194 atomic_inc(&ar->tx_ampdu_upload); carl9170_tx_ampdu()
1222 carl9170_set_ampdu_params(ar, skb_peek(&agg)); carl9170_tx_ampdu()
1225 carl9170_set_immba(ar, skb_peek_tail(&agg)); carl9170_tx_ampdu()
1227 spin_lock_bh(&ar->tx_pending[queue].lock); carl9170_tx_ampdu()
1228 skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]); carl9170_tx_ampdu()
1229 spin_unlock_bh(&ar->tx_pending[queue].lock); carl9170_tx_ampdu()
1230 ar->tx_schedule = true; carl9170_tx_ampdu()
1235 rcu_assign_pointer(ar->tx_ampdu_iter, tid_info); carl9170_tx_ampdu()
1239 static struct sk_buff *carl9170_tx_pick_skb(struct ar9170 *ar, carl9170_tx_pick_skb() argument
1253 if (carl9170_alloc_dev_space(ar, skb)) carl9170_tx_pick_skb()
1270 void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb) carl9170_tx_drop() argument
1275 ar->tx_dropped++; carl9170_tx_drop()
1279 ar9170_qmap[carl9170_get_queue(ar, skb)]); carl9170_tx_drop()
1280 __carl9170_tx_process_status(ar, super->s.cookie, q); carl9170_tx_drop()
1283 static bool carl9170_tx_ps_drop(struct ar9170 *ar, struct sk_buff *skb) carl9170_tx_ps_drop() argument
1290 sta = __carl9170_get_tx_sta(ar, skb); carl9170_tx_ps_drop()
1303 atomic_dec(&ar->tx_ampdu_upload); carl9170_tx_ps_drop()
1306 carl9170_release_dev_space(ar, skb); carl9170_tx_ps_drop()
1307 carl9170_tx_status(ar, skb, false); carl9170_tx_ps_drop()
1316 static void carl9170_bar_check(struct ar9170 *ar, struct sk_buff *skb) carl9170_bar_check() argument
1329 spin_lock_bh(&ar->bar_list_lock[queue]); carl9170_bar_check()
1330 list_add_tail_rcu(&entry->list, &ar->bar_list[queue]); carl9170_bar_check()
1331 spin_unlock_bh(&ar->bar_list_lock[queue]); carl9170_bar_check()
1336 static void carl9170_tx(struct ar9170 *ar) carl9170_tx() argument
1342 ar->tx_schedule = false; carl9170_tx()
1344 if (unlikely(!IS_STARTED(ar))) carl9170_tx()
1347 carl9170_usb_handle_tx_err(ar); carl9170_tx()
1349 for (i = 0; i < ar->hw->queues; i++) { carl9170_tx()
1350 while (!skb_queue_empty(&ar->tx_pending[i])) { carl9170_tx()
1351 skb = carl9170_tx_pick_skb(ar, &ar->tx_pending[i]); carl9170_tx()
1355 if (unlikely(carl9170_tx_ps_drop(ar, skb))) carl9170_tx()
1358 carl9170_bar_check(ar, skb); carl9170_tx()
1360 atomic_inc(&ar->tx_total_pending); carl9170_tx()
1362 q = __carl9170_get_queue(ar, i); carl9170_tx()
1367 skb_queue_tail(&ar->tx_status[q], skb); carl9170_tx()
1379 carl9170_usb_tx(ar, skb); carl9170_tx()
1387 ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor, carl9170_tx()
1391 static bool carl9170_tx_ampdu_queue(struct ar9170 *ar, carl9170_tx_ampdu_queue() argument
1462 carl9170_tx_status(ar, skb, false); carl9170_tx_ampdu_queue()
1463 ar->tx_dropped++; carl9170_tx_ampdu_queue()
1471 struct ar9170 *ar = hw->priv; carl9170_op_tx() local
1477 if (unlikely(!IS_STARTED(ar))) carl9170_op_tx()
1483 if (unlikely(carl9170_tx_prepare(ar, sta, skb))) carl9170_op_tx()
1486 carl9170_tx_accounting(ar, skb); carl9170_op_tx()
1505 run = carl9170_tx_ampdu_queue(ar, sta, skb, info); carl9170_op_tx()
1507 carl9170_tx_ampdu(ar); carl9170_op_tx()
1512 carl9170_tx_get_rates(ar, vif, sta, skb); carl9170_op_tx()
1513 carl9170_tx_apply_rateset(ar, info, skb); carl9170_op_tx()
1514 skb_queue_tail(&ar->tx_pending[queue], skb); carl9170_op_tx()
1517 carl9170_tx(ar); carl9170_op_tx()
1521 ar->tx_dropped++; carl9170_op_tx()
1522 ieee80211_free_txskb(ar->hw, skb); carl9170_op_tx()
1525 void carl9170_tx_scheduler(struct ar9170 *ar) carl9170_tx_scheduler() argument
1528 if (ar->tx_ampdu_schedule) carl9170_tx_scheduler()
1529 carl9170_tx_ampdu(ar); carl9170_tx_scheduler()
1531 if (ar->tx_schedule) carl9170_tx_scheduler()
1532 carl9170_tx(ar); carl9170_tx_scheduler()
1536 static struct carl9170_vif_info *carl9170_pick_beaconing_vif(struct ar9170 *ar) carl9170_pick_beaconing_vif() argument
1548 cvif = rcu_dereference(ar->beacon_iter); carl9170_pick_beaconing_vif()
1549 if (ar->vifs > 0 && cvif) { carl9170_pick_beaconing_vif()
1551 list_for_each_entry_continue_rcu(cvif, &ar->vif_list, carl9170_pick_beaconing_vif()
1556 } while (ar->beacon_enabled && i--); carl9170_pick_beaconing_vif()
1560 RCU_INIT_POINTER(ar->beacon_iter, cvif); carl9170_pick_beaconing_vif()
1564 static bool carl9170_tx_beacon_physet(struct ar9170 *ar, struct sk_buff *skb, carl9170_tx_beacon_physet() argument
1575 carl9170_tx_rate_tpc_chains(ar, txinfo, rate, plcp, &power, &chains); carl9170_tx_beacon_physet()
1608 int carl9170_update_beacon(struct ar9170 *ar, const bool submit) carl9170_update_beacon() argument
1618 cvif = carl9170_pick_beaconing_vif(ar); carl9170_update_beacon()
1622 skb = ieee80211_beacon_get_tim(ar->hw, carl9170_get_vif(cvif), carl9170_update_beacon()
1630 spin_lock_bh(&ar->beacon_lock); carl9170_update_beacon()
1636 addr = ar->fw.beacon_addr + off; carl9170_update_beacon()
1639 if ((off + len) > ar->fw.beacon_max_len) { carl9170_update_beacon()
1641 wiphy_err(ar->hw->wiphy, "beacon does not " carl9170_update_beacon()
1650 wiphy_err(ar->hw->wiphy, "no support for beacons " carl9170_update_beacon()
1659 ht_rate = carl9170_tx_beacon_physet(ar, skb, &ht1, &plcp); carl9170_update_beacon()
1661 carl9170_async_regwrite_begin(ar); carl9170_update_beacon()
1688 spin_unlock_bh(&ar->beacon_lock); carl9170_update_beacon()
1693 err = carl9170_bcn_ctrl(ar, cvif->id, carl9170_update_beacon()
1705 spin_unlock_bh(&ar->beacon_lock); carl9170_update_beacon()
H A Drx.c49 static void carl9170_dbg_message(struct ar9170 *ar, const char *buf, u32 len) carl9170_dbg_message() argument
56 ar->fw.err_counter++; carl9170_dbg_message()
57 if (ar->fw.err_counter > 3) { carl9170_dbg_message()
64 ar->fw.bug_counter++; carl9170_dbg_message()
70 wiphy_info(ar->hw->wiphy, "FW: %.*s\n", len, buf); carl9170_dbg_message()
73 carl9170_restart(ar, reason); carl9170_dbg_message()
76 static void carl9170_handle_ps(struct ar9170 *ar, struct carl9170_rsp *rsp) carl9170_handle_ps() argument
84 if (ar->ps.state != new_ps) { carl9170_handle_ps()
86 ar->ps.sleep_ms = jiffies_to_msecs(jiffies - carl9170_handle_ps()
87 ar->ps.last_action); carl9170_handle_ps()
90 ar->ps.last_action = jiffies; carl9170_handle_ps()
92 ar->ps.state = new_ps; carl9170_handle_ps()
96 static int carl9170_check_sequence(struct ar9170 *ar, unsigned int seq) carl9170_check_sequence() argument
98 if (ar->cmd_seq < -1) carl9170_check_sequence()
104 if (ar->cmd_seq < 0) carl9170_check_sequence()
105 ar->cmd_seq = seq; carl9170_check_sequence()
113 if (seq != ar->cmd_seq) { carl9170_check_sequence()
116 count = (seq - ar->cmd_seq) % ar->fw.cmd_bufs; carl9170_check_sequence()
118 wiphy_err(ar->hw->wiphy, "lost %d command responses/traps! " carl9170_check_sequence()
119 "w:%d g:%d\n", count, ar->cmd_seq, seq); carl9170_check_sequence()
121 carl9170_restart(ar, CARL9170_RR_LOST_RSP); carl9170_check_sequence()
125 ar->cmd_seq = (ar->cmd_seq + 1) % ar->fw.cmd_bufs; carl9170_check_sequence()
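carl9170_check_sequence() latches the first sequence number it sees, then expects responses to arrive in lock-step modulo the number of firmware command buffers; any gap is reported as lost responses. A simplified model of that check (it resyncs and keeps going where the driver escalates to carl9170_restart()):

#include <stdio.h>

#define CMD_BUFS 8	/* firmware response sequence space */

static int cmd_seq = -1;	/* <0: not yet synchronized */

/* Returns the number of lost responses detected for this seq. */
static int check_sequence(int seq)
{
	int lost = 0;

	if (cmd_seq < 0)
		cmd_seq = seq;		/* first response: just latch */

	if (seq != cmd_seq) {
		/* +CMD_BUFS keeps the modulo positive in this model */
		lost = (seq - cmd_seq + CMD_BUFS) % CMD_BUFS;
		printf("lost %d response(s) (want %d, got %d)\n",
		       lost, cmd_seq, seq);
		cmd_seq = seq;		/* resync instead of restarting */
	}

	cmd_seq = (cmd_seq + 1) % CMD_BUFS;
	return lost;
}

int main(void)
{
	check_sequence(0);	/* sync */
	check_sequence(1);	/* in order */
	check_sequence(3);	/* one trap lost */
	return 0;
}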
129 static void carl9170_cmd_callback(struct ar9170 *ar, u32 len, void *buffer) carl9170_cmd_callback() argument
136 if (unlikely(ar->readlen != (len - 4))) { carl9170_cmd_callback()
137 dev_warn(&ar->udev->dev, "received invalid command response:" carl9170_cmd_callback()
138 "got %d, instead of %d\n", len - 4, ar->readlen); carl9170_cmd_callback()
140 ar->cmd_buf, (ar->cmd.hdr.len + 4) & 0x3f); carl9170_cmd_callback()
147 carl9170_restart(ar, CARL9170_RR_INVALID_RSP); carl9170_cmd_callback()
150 spin_lock(&ar->cmd_lock); carl9170_cmd_callback()
151 if (ar->readbuf) { carl9170_cmd_callback()
153 memcpy(ar->readbuf, buffer + 4, len - 4); carl9170_cmd_callback()
155 ar->readbuf = NULL; carl9170_cmd_callback()
157 complete(&ar->cmd_wait); carl9170_cmd_callback()
158 spin_unlock(&ar->cmd_lock); carl9170_cmd_callback()
161 void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len) carl9170_handle_command_response() argument
168 carl9170_cmd_callback(ar, len, buf); carl9170_handle_command_response()
175 wiphy_err(ar->hw->wiphy, "FW: received over-/under" carl9170_handle_command_response()
191 vif = carl9170_get_main_vif(ar); carl9170_handle_command_response()
200 carl9170_handle_ps(ar, cmd); carl9170_handle_command_response()
206 carl9170_update_beacon(ar, true); carl9170_handle_command_response()
219 carl9170_tx_process_status(ar, cmd); carl9170_handle_command_response()
239 carl9170_restart(ar, CARL9170_RR_WATCHDOG); carl9170_handle_command_response()
244 carl9170_dbg_message(ar, (char *)buf + 4, len - 4); carl9170_handle_command_response()
248 wiphy_dbg(ar->hw->wiphy, "FW: HD %d\n", len - 4); carl9170_handle_command_response()
257 wiphy_info(ar->hw->wiphy, "FW: RADAR! Please report this " carl9170_handle_command_response()
263 if (ar->wps.pbc) { carl9170_handle_command_response()
267 if (state != ar->wps.pbc_state) { carl9170_handle_command_response()
268 ar->wps.pbc_state = state; carl9170_handle_command_response()
269 input_report_key(ar->wps.pbc, KEY_WPS_BUTTON, carl9170_handle_command_response()
271 input_sync(ar->wps.pbc); carl9170_handle_command_response()
278 complete(&ar->fw_boot_wait); carl9170_handle_command_response()
282 wiphy_err(ar->hw->wiphy, "FW: received unhandled event %x\n", carl9170_handle_command_response()
289 static int carl9170_rx_mac_status(struct ar9170 *ar, carl9170_rx_mac_status() argument
302 if (!ar->sniffer_enabled) carl9170_rx_mac_status()
307 if (!(ar->filter_state & FIF_PLCPFAIL)) carl9170_rx_mac_status()
314 ar->tx_fcs_errors++; carl9170_rx_mac_status()
316 if (!(ar->filter_state & FIF_FCSFAIL)) carl9170_rx_mac_status()
332 if (error & AR9170_RX_ERROR_DECRYPT && !ar->sniffer_enabled) carl9170_rx_mac_status()
346 wiphy_dbg(ar->hw->wiphy, "received frame with " carl9170_rx_mac_status()
352 chan = ar->channel; carl9170_rx_mac_status()
377 wiphy_err(ar->hw->wiphy, "invalid plcp cck " carl9170_rx_mac_status()
414 wiphy_err(ar->hw->wiphy, "invalid plcp ofdm " carl9170_rx_mac_status()
442 static void carl9170_rx_phy_status(struct ar9170 *ar, carl9170_rx_phy_status() argument
459 status->signal = ar->noise[0] + phy->rssi_combined; carl9170_rx_phy_status()
518 static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len) carl9170_ps_beacon() argument
522 struct ath_common *common = &ar->common; carl9170_ps_beacon()
527 if (likely(!(ar->hw->conf.flags & IEEE80211_CONF_PS))) carl9170_ps_beacon()
539 ar->ps.last_beacon = jiffies; carl9170_ps_beacon()
551 if (!WARN_ON_ONCE(!ar->hw->conf.ps_dtim_period)) carl9170_ps_beacon()
552 ar->ps.dtim_counter = (tim_ie->dtim_count - 1) % carl9170_ps_beacon()
553 ar->hw->conf.ps_dtim_period; carl9170_ps_beacon()
558 cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid); carl9170_ps_beacon()
565 ar->ps.off_override &= ~PS_OFF_BCN; carl9170_ps_beacon()
566 carl9170_ps_check(ar); carl9170_ps_beacon()
569 ar->ps.off_override |= PS_OFF_BCN; carl9170_ps_beacon()
573 static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len) carl9170_ba_check() argument
590 list_for_each_entry_rcu(entry, &ar->bar_list[queue], list) { carl9170_ba_check()
608 spin_lock_bh(&ar->bar_list_lock[queue]); carl9170_ba_check()
610 spin_unlock_bh(&ar->bar_list_lock[queue]); carl9170_ba_check()
620 static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms, carl9170_ampdu_check() argument
635 rx_status->ampdu_reference = ar->ampdu_ref; carl9170_ampdu_check()
659 static int carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len, carl9170_handle_mpdu() argument
675 carl9170_ps_beacon(ar, buf, len); carl9170_handle_mpdu()
677 carl9170_ba_check(ar, buf, len); carl9170_handle_mpdu()
684 ieee80211_rx(ar->hw, skb); carl9170_handle_mpdu()
697 static void carl9170_rx_untie_data(struct ar9170 *ar, u8 *buf, int len) carl9170_rx_untie_data() argument
706 if (!IS_STARTED(ar)) carl9170_rx_untie_data()
720 ar->ampdu_ref++; carl9170_rx_untie_data()
733 memcpy(&ar->rx_plcp, (void *) buf, carl9170_rx_untie_data()
739 ar->rx_has_plcp = true; carl9170_rx_untie_data()
742 wiphy_err(ar->hw->wiphy, "plcp info " carl9170_rx_untie_data()
763 wiphy_err(ar->hw->wiphy, "frame tail " carl9170_rx_untie_data()
772 if (unlikely(!ar->rx_has_plcp)) { carl9170_rx_untie_data()
776 wiphy_err(ar->hw->wiphy, "rx stream does not start " carl9170_rx_untie_data()
782 head = &ar->rx_plcp; carl9170_rx_untie_data()
805 if (unlikely(carl9170_rx_mac_status(ar, head, mac, &status))) carl9170_rx_untie_data()
808 if (!carl9170_ampdu_check(ar, buf, mac_status, &status)) carl9170_rx_untie_data()
812 carl9170_rx_phy_status(ar, phy, &status); carl9170_rx_untie_data()
816 if (carl9170_handle_mpdu(ar, buf, mpdu_len, &status)) carl9170_rx_untie_data()
821 ar->rx_dropped++; carl9170_rx_untie_data()
824 static void carl9170_rx_untie_cmds(struct ar9170 *ar, const u8 *respbuf, carl9170_rx_untie_cmds() argument
837 if (carl9170_check_sequence(ar, cmd->hdr.seq)) carl9170_rx_untie_cmds()
840 carl9170_handle_command_response(ar, cmd, cmd->hdr.len + 4); carl9170_rx_untie_cmds()
847 wiphy_err(ar->hw->wiphy, "malformed firmware trap:\n"); carl9170_rx_untie_cmds()
853 static void __carl9170_rx(struct ar9170 *ar, u8 *buf, unsigned int len) __carl9170_rx() argument
869 carl9170_rx_untie_cmds(ar, buf, len); __carl9170_rx()
871 carl9170_rx_untie_data(ar, buf, len); __carl9170_rx()
874 static void carl9170_rx_stream(struct ar9170 *ar, void *buf, unsigned int len) carl9170_rx_stream() argument
896 if (!ar->rx_failover_missing) { carl9170_rx_stream()
900 wiphy_err(ar->hw->wiphy, carl9170_rx_stream()
904 __carl9170_rx(ar, tbuf, tlen); carl9170_rx_stream()
908 if (ar->rx_failover_missing > tlen) { carl9170_rx_stream()
910 wiphy_err(ar->hw->wiphy, carl9170_rx_stream()
919 memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen); carl9170_rx_stream()
920 ar->rx_failover_missing -= tlen; carl9170_rx_stream()
922 if (ar->rx_failover_missing <= 0) { carl9170_rx_stream()
931 ar->rx_failover_missing = 0; carl9170_rx_stream()
932 carl9170_rx_stream(ar, ar->rx_failover->data, carl9170_rx_stream()
933 ar->rx_failover->len); carl9170_rx_stream()
935 skb_reset_tail_pointer(ar->rx_failover); carl9170_rx_stream()
936 skb_trim(ar->rx_failover, 0); carl9170_rx_stream()
944 if (ar->rx_failover_missing) { carl9170_rx_stream()
947 wiphy_err(ar->hw->wiphy, "double rx " carl9170_rx_stream()
961 memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen); carl9170_rx_stream()
962 ar->rx_failover_missing = clen - tlen; carl9170_rx_stream()
965 __carl9170_rx(ar, rx_stream->payload, clen); carl9170_rx_stream()
973 wiphy_err(ar->hw->wiphy, "%d bytes of unprocessed " carl9170_rx_stream()
983 wiphy_err(ar->hw->wiphy, "damaged RX stream data [want:%d, " carl9170_rx_stream()
985 ar->rx_failover_missing); carl9170_rx_stream()
987 if (ar->rx_failover_missing) carl9170_rx_stream()
989 ar->rx_failover->data, carl9170_rx_stream()
990 ar->rx_failover->len); carl9170_rx_stream()
995 wiphy_err(ar->hw->wiphy, "please check your hardware and cables, if " carl9170_rx_stream()
999 if (ar->rx_failover_missing) { carl9170_rx_stream()
1000 skb_reset_tail_pointer(ar->rx_failover); carl9170_rx_stream()
1001 skb_trim(ar->rx_failover, 0); carl9170_rx_stream()
1002 ar->rx_failover_missing = 0; carl9170_rx_stream()
1006 void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len) carl9170_rx() argument
1008 if (ar->fw.rx_stream) carl9170_rx()
1009 carl9170_rx_stream(ar, buf, len); carl9170_rx()
1011 __carl9170_rx(ar, buf, len); carl9170_rx()
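carl9170_rx_stream() parses length-delimited units out of USB bulk buffers, and when a unit is split across two transfers it parks the partial bytes in the rx_failover skb until the remainder arrives. The sketch below models the same carry-over logic with a one-byte length prefix instead of the real ar9170_rx_head framing:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/*
 * Each unit on the wire is <1-byte length><payload>.  A unit may be
 * split across two buffers, so unfinished bytes are parked in a carry
 * buffer ("rx_failover" in the driver) until the rest shows up.
 */
static uint8_t carry[64];
static size_t carry_len, carry_missing;

static void deliver(const uint8_t *p, size_t len)
{
	printf("frame: %.*s\n", (int)len, p);
}

static void rx_stream(const uint8_t *buf, size_t len)
{
	while (len) {
		if (carry_missing) {		/* finish a split unit */
			size_t take = len < carry_missing ? len : carry_missing;

			memcpy(carry + carry_len, buf, take);
			carry_len += take;
			carry_missing -= take;
			buf += take;
			len -= take;
			if (!carry_missing) {
				deliver(carry + 1, carry[0]);
				carry_len = 0;
			}
			continue;
		}
		if (len < (size_t)(1 + buf[0])) { /* split: park the head */
			memcpy(carry, buf, len);
			carry_len = len;
			carry_missing = 1 + buf[0] - len;
			return;
		}
		deliver(buf + 1, buf[0]);
		len -= 1 + buf[0];
		buf += 1 + buf[0];
	}
}

int main(void)
{
	const uint8_t a[] = { 5, 'h', 'e', 'l' };	/* first half */
	const uint8_t b[] = { 'l', 'o', 2, 'h', 'i' };	/* rest + next */

	rx_stream(a, sizeof(a));
	rx_stream(b, sizeof(b));
	return 0;
}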
H A Dcmd.c43 int carl9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val) carl9170_write_reg() argument
51 err = carl9170_exec_cmd(ar, CARL9170_CMD_WREG, sizeof(buf), carl9170_write_reg()
55 wiphy_err(ar->hw->wiphy, "writing reg %#x " carl9170_write_reg()
62 int carl9170_read_mreg(struct ar9170 *ar, const int nregs, carl9170_read_mreg() argument
76 err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG, carl9170_read_mreg()
81 wiphy_err(ar->hw->wiphy, "reading regs failed (%d)\n", carl9170_read_mreg()
94 int carl9170_read_reg(struct ar9170 *ar, u32 reg, u32 *val) carl9170_read_reg() argument
96 return carl9170_read_mreg(ar, 1, &reg, val); carl9170_read_reg()
99 int carl9170_echo_test(struct ar9170 *ar, const u32 v) carl9170_echo_test() argument
104 err = carl9170_exec_cmd(ar, CARL9170_CMD_ECHO, carl9170_echo_test()
111 wiphy_info(ar->hw->wiphy, "wrong echo %x != %x", v, echores); carl9170_echo_test()
118 struct carl9170_cmd *carl9170_cmd_buf(struct ar9170 *ar, carl9170_cmd_buf() argument
132 int carl9170_reboot(struct ar9170 *ar) carl9170_reboot() argument
137 cmd = carl9170_cmd_buf(ar, CARL9170_CMD_REBOOT_ASYNC, 0); carl9170_reboot()
141 err = __carl9170_exec_cmd(ar, cmd, true); carl9170_reboot()
145 int carl9170_mac_reset(struct ar9170 *ar) carl9170_mac_reset() argument
147 return carl9170_exec_cmd(ar, CARL9170_CMD_SWRST, carl9170_mac_reset()
151 int carl9170_bcn_ctrl(struct ar9170 *ar, const unsigned int vif_id, carl9170_bcn_ctrl() argument
156 cmd = carl9170_cmd_buf(ar, CARL9170_CMD_BCN_CTRL_ASYNC, carl9170_bcn_ctrl()
166 return __carl9170_exec_cmd(ar, cmd, true); carl9170_bcn_ctrl()
169 int carl9170_collect_tally(struct ar9170 *ar) carl9170_collect_tally() argument
176 err = carl9170_exec_cmd(ar, CARL9170_CMD_TALLY, 0, NULL, carl9170_collect_tally()
183 ar->tally.active += le32_to_cpu(tally.active) / tick; carl9170_collect_tally()
184 ar->tally.cca += le32_to_cpu(tally.cca) / tick; carl9170_collect_tally()
185 ar->tally.tx_time += le32_to_cpu(tally.tx_time) / tick; carl9170_collect_tally()
186 ar->tally.rx_total += le32_to_cpu(tally.rx_total); carl9170_collect_tally()
187 ar->tally.rx_overrun += le32_to_cpu(tally.rx_overrun); carl9170_collect_tally()
189 if (ar->channel) { carl9170_collect_tally()
190 info = &ar->survey[ar->channel->hw_value]; carl9170_collect_tally()
191 info->time = ar->tally.active; carl9170_collect_tally()
192 info->time_busy = ar->tally.cca; carl9170_collect_tally()
193 info->time_tx = ar->tally.tx_time; carl9170_collect_tally()
202 int carl9170_powersave(struct ar9170 *ar, const bool ps) carl9170_powersave() argument
207 cmd = carl9170_cmd_buf(ar, CARL9170_CMD_PSM_ASYNC, carl9170_powersave()
221 return __carl9170_exec_cmd(ar, cmd, true); carl9170_powersave()
H A Dcmd.h45 int carl9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val);
46 int carl9170_read_reg(struct ar9170 *ar, const u32 reg, u32 *val);
47 int carl9170_read_mreg(struct ar9170 *ar, const int nregs,
49 int carl9170_echo_test(struct ar9170 *ar, u32 v);
50 int carl9170_reboot(struct ar9170 *ar);
51 int carl9170_mac_reset(struct ar9170 *ar);
52 int carl9170_powersave(struct ar9170 *ar, const bool power_on);
53 int carl9170_collect_tally(struct ar9170 *ar);
54 int carl9170_bcn_ctrl(struct ar9170 *ar, const unsigned int vif_id,
57 static inline int carl9170_flush_cab(struct ar9170 *ar, carl9170_flush_cab() argument
60 return carl9170_bcn_ctrl(ar, vif_id, CARL9170_BCN_CTRL_DRAIN, 0, 0); carl9170_flush_cab()
63 static inline int carl9170_rx_filter(struct ar9170 *ar, carl9170_rx_filter() argument
68 return carl9170_exec_cmd(ar, CARL9170_CMD_RX_FILTER, carl9170_rx_filter()
73 struct carl9170_cmd *carl9170_cmd_buf(struct ar9170 *ar,
82 #define carl9170_regwrite_begin(ar) \
85 struct ar9170 *__ar = ar;
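carl9170_regwrite_begin() opens a scoped batch: subsequent carl9170_regwrite() calls queue (address, value) pairs into a scratch buffer and the batch is flushed as a single CARL9170_CMD_WREG transfer, instead of paying one USB round-trip per register. A userspace sketch of the batching idiom (the buffer layout and MAX_PAIRS are illustrative):

#include <stdio.h>
#include <stdint.h>

#define MAX_PAIRS 8

struct regwrite_batch {
	uint32_t buf[2 * MAX_PAIRS];	/* addr/value pairs, in order */
	int n;
};

#define regwrite_begin(b)	do { (b)->n = 0; } while (0)

#define regwrite(b, addr, val)					\
	do {							\
		if ((b)->n < MAX_PAIRS) {			\
			(b)->buf[2 * (b)->n] = (addr);		\
			(b)->buf[2 * (b)->n + 1] = (val);	\
			(b)->n++;				\
		}						\
	} while (0)

/* Stand-in for the single multi-register command transfer. */
static int regwrite_finish(struct regwrite_batch *b)
{
	int i;

	for (i = 0; i < b->n; i++)
		printf("WREG 0x%08x <- 0x%08x\n",
		       (unsigned)b->buf[2 * i],
		       (unsigned)b->buf[2 * i + 1]);
	return 0;
}

int main(void)
{
	struct regwrite_batch b;

	regwrite_begin(&b);
	regwrite(&b, 0x1c3510, 0x1);
	regwrite(&b, 0x1c3514, 0x2);
	return regwrite_finish(&b);
}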
H A Dcarl9170.h198 struct ar9170 *ar; member in struct:carl9170_led
495 struct ar9170 *ar; member in struct:carl9170_tx_info
504 static inline void __carl9170_set_state(struct ar9170 *ar, __carl9170_set_state() argument
507 ar->state = newstate; __carl9170_set_state()
510 static inline void carl9170_set_state(struct ar9170 *ar, carl9170_set_state() argument
515 spin_lock_irqsave(&ar->state_lock, flags); carl9170_set_state()
516 __carl9170_set_state(ar, newstate); carl9170_set_state()
517 spin_unlock_irqrestore(&ar->state_lock, flags); carl9170_set_state()
520 static inline void carl9170_set_state_when(struct ar9170 *ar, carl9170_set_state_when() argument
525 spin_lock_irqsave(&ar->state_lock, flags); carl9170_set_state_when()
526 if (CHK_DEV_STATE(ar, min)) carl9170_set_state_when()
527 __carl9170_set_state(ar, newstate); carl9170_set_state_when()
528 spin_unlock_irqrestore(&ar->state_lock, flags); carl9170_set_state_when()
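carl9170_set_state_when() makes a state transition conditional on the device still being at or above a minimum state, so a stale handler cannot revive hardware that has already been torn down. A lock-free model of the guard (the real function holds state_lock; the enum values are illustrative):

#include <stdio.h>

/* Device lifecycle, lowest to highest. */
enum dev_state { DEAD, STOPPED, IDLE, STARTED };

static enum dev_state state = STARTED;

/* Only transition if the device is still at least at `min`. */
static void set_state_when(enum dev_state min, enum dev_state newstate)
{
	if (state >= min)
		state = newstate;
}

int main(void)
{
	set_state_when(IDLE, IDLE);	/* STARTED >= IDLE: applied */
	printf("state=%d\n", state);
	state = DEAD;
	set_state_when(IDLE, STARTED);	/* DEAD < IDLE: ignored */
	printf("state=%d\n", state);
	return 0;
}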
533 int carl9170_register(struct ar9170 *ar);
534 void carl9170_unregister(struct ar9170 *ar);
535 void carl9170_free(struct ar9170 *ar);
536 void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r);
537 void carl9170_ps_check(struct ar9170 *ar);
540 int carl9170_usb_open(struct ar9170 *ar);
541 void carl9170_usb_stop(struct ar9170 *ar);
542 void carl9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb);
543 void carl9170_usb_handle_tx_err(struct ar9170 *ar);
544 int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids,
546 int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
548 int carl9170_usb_restart(struct ar9170 *ar);
549 void carl9170_usb_reset(struct ar9170 *ar);
552 int carl9170_init_mac(struct ar9170 *ar);
553 int carl9170_set_qos(struct ar9170 *ar);
554 int carl9170_update_multicast(struct ar9170 *ar, const u64 mc_hash);
555 int carl9170_mod_virtual_mac(struct ar9170 *ar, const unsigned int id,
557 int carl9170_set_operating_mode(struct ar9170 *ar);
558 int carl9170_set_beacon_timers(struct ar9170 *ar);
559 int carl9170_set_dyn_sifs_ack(struct ar9170 *ar);
560 int carl9170_set_rts_cts_rate(struct ar9170 *ar);
561 int carl9170_set_ampdu_settings(struct ar9170 *ar);
562 int carl9170_set_slot_time(struct ar9170 *ar);
563 int carl9170_set_mac_rates(struct ar9170 *ar);
564 int carl9170_set_hwretry_limit(struct ar9170 *ar, const u32 max_retry);
565 int carl9170_upload_key(struct ar9170 *ar, const u8 id, const u8 *mac,
567 int carl9170_disable_key(struct ar9170 *ar, const u8 id);
568 int carl9170_set_mac_tpc(struct ar9170 *ar, struct ieee80211_channel *channel);
571 void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len);
572 void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
579 void carl9170_tx_process_status(struct ar9170 *ar,
581 void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
583 void carl9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb);
584 void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb);
585 void carl9170_tx_scheduler(struct ar9170 *ar);
588 int carl9170_update_beacon(struct ar9170 *ar, const bool submit);
592 int carl9170_led_register(struct ar9170 *ar);
593 void carl9170_led_unregister(struct ar9170 *ar);
595 int carl9170_led_init(struct ar9170 *ar);
596 int carl9170_led_set_state(struct ar9170 *ar, const u32 led_state);
599 int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
601 int carl9170_get_noisefloor(struct ar9170 *ar);
604 int carl9170_parse_firmware(struct ar9170 *ar);
647 /* Protected by ar->mutex or RCU */ carl9170_get_main_vif()
648 static inline struct ieee80211_vif *carl9170_get_main_vif(struct ar9170 *ar) carl9170_get_main_vif() argument
652 list_for_each_entry_rcu(cvif, &ar->vif_list, list) { list_for_each_entry_rcu()
660 static inline bool is_main_vif(struct ar9170 *ar, struct ieee80211_vif *vif) is_main_vif() argument
665 ret = (carl9170_get_main_vif(ar) == vif); is_main_vif()
H A Dphy.c44 static int carl9170_init_power_cal(struct ar9170 *ar) carl9170_init_power_cal() argument
46 carl9170_regwrite_begin(ar); carl9170_init_power_cal()
432 static int carl9170_init_phy_from_eeprom(struct ar9170 *ar, carl9170_init_phy_from_eeprom() argument
440 struct ar9170_eeprom_modal *m = &ar->eeprom.modal_header[is_2ghz]; carl9170_init_phy_from_eeprom()
443 carl9170_regwrite_begin(ar); carl9170_init_phy_from_eeprom()
536 carl9170_regwrite(AR9170_PHY_REG_RX_CHAINMASK, ar->eeprom.rx_mask); carl9170_init_phy_from_eeprom()
537 carl9170_regwrite(AR9170_PHY_REG_CAL_CHAINMASK, ar->eeprom.rx_mask); carl9170_init_phy_from_eeprom()
543 static int carl9170_init_phy(struct ar9170 *ar, enum ieee80211_band band) carl9170_init_phy() argument
548 bool is_40mhz = conf_is_ht40(&ar->hw->conf); carl9170_init_phy()
550 carl9170_regwrite_begin(ar); carl9170_init_phy()
573 err = carl9170_init_phy_from_eeprom(ar, is_2ghz, is_40mhz); carl9170_init_phy()
577 err = carl9170_init_power_cal(ar); carl9170_init_phy()
581 if (!ar->fw.hw_counters) { carl9170_init_phy()
582 err = carl9170_write_reg(ar, AR9170_PWR_REG_PLL_ADDAC, carl9170_init_phy()
672 static int carl9170_init_rf_banks_0_7(struct ar9170 *ar, bool band5ghz) carl9170_init_rf_banks_0_7() argument
676 carl9170_regwrite_begin(ar); carl9170_init_rf_banks_0_7()
686 wiphy_err(ar->hw->wiphy, "rf init failed\n"); carl9170_init_rf_banks_0_7()
966 static int carl9170_init_rf_bank4_pwr(struct ar9170 *ar, bool band5ghz, carl9170_init_rf_bank4_pwr() argument
1021 carl9170_regwrite_begin(ar); carl9170_init_rf_bank4_pwr()
1117 static int carl9170_set_freq_cal_data(struct ar9170 *ar, carl9170_set_freq_cal_data() argument
1130 cal_freq_pier = ar->eeprom.cal_freq_pier_2G; carl9170_set_freq_cal_data()
1136 cal_freq_pier = ar->eeprom.cal_freq_pier_5G; carl9170_set_freq_cal_data()
1153 carl9170_regwrite_begin(ar); carl9170_set_freq_cal_data()
1162 cal_pier_data = &ar->eeprom. carl9170_set_freq_cal_data()
1167 cal_pier_data = &ar->eeprom. carl9170_set_freq_cal_data()
1217 static u8 carl9170_get_max_edge_power(struct ar9170 *ar, carl9170_get_max_edge_power() argument
1259 static u8 carl9170_get_heavy_clip(struct ar9170 *ar, u32 freq, carl9170_get_heavy_clip() argument
1289 * and apply them to ar->power* (derived from otus hal/hpmain.c, line 3706)
1291 static void carl9170_calc_ctl(struct ar9170 *ar, u32 freq, enum carl9170_bw bw) carl9170_calc_ctl() argument
1308 { CTL_11B, 0, ar->power_2G_cck, 4 }, carl9170_calc_ctl()
1309 { CTL_11G, 0, ar->power_2G_ofdm, 4 }, carl9170_calc_ctl()
1310 { CTL_2GHT20, 0, ar->power_2G_ht20, 8 }, carl9170_calc_ctl()
1311 { CTL_2GHT40, 0, ar->power_2G_ht40, 8 }, carl9170_calc_ctl()
1314 { CTL_11A, 0, ar->power_5G_leg, 4 }, carl9170_calc_ctl()
1315 { CTL_5GHT20, 0, ar->power_5G_ht20, 8 }, carl9170_calc_ctl()
1316 { CTL_5GHT40, 0, ar->power_5G_ht40, 8 }, carl9170_calc_ctl()
1320 #define EDGES(c, n) (ar->eeprom.ctl_data[c].control_edges[n]) carl9170_calc_ctl()
1322 ar->heavy_clip = 0; carl9170_calc_ctl()
1332 ctl_grp = ath_regd_get_band_ctl(&ar->common.regulatory, carl9170_calc_ctl()
1333 ar->hw->conf.chandef.chan->band); carl9170_calc_ctl()
1343 if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) { carl9170_calc_ctl()
1354 if (c == ar->eeprom.ctl_index[ctl_idx]) carl9170_calc_ctl()
1364 ar->heavy_clip = carl9170_calc_ctl()
1365 carl9170_get_heavy_clip(ar, carl9170_calc_ctl()
1379 carl9170_get_max_edge_power(ar, carl9170_calc_ctl()
1407 /* apply max power to pwr_cal_data (ar->power_*) */ carl9170_calc_ctl()
1414 if (ar->heavy_clip & 0xf0) { carl9170_calc_ctl()
1415 ar->power_2G_ht40[0]--; carl9170_calc_ctl()
1416 ar->power_2G_ht40[1]--; carl9170_calc_ctl()
1417 ar->power_2G_ht40[2]--; carl9170_calc_ctl()
1419 if (ar->heavy_clip & 0xf) { carl9170_calc_ctl()
1420 ar->power_2G_ht20[0]++; carl9170_calc_ctl()
1421 ar->power_2G_ht20[1]++; carl9170_calc_ctl()
1422 ar->power_2G_ht20[2]++; carl9170_calc_ctl()
1428 static void carl9170_set_power_cal(struct ar9170 *ar, u32 freq, carl9170_set_power_cal() argument
1452 ctpl = &ar->eeprom.cal_tgt_pwr_5G[0]; carl9170_set_power_cal()
1454 ctpres = ar->power_5G_leg; carl9170_set_power_cal()
1457 ctpl = &ar->eeprom.cal_tgt_pwr_2G_cck[0]; carl9170_set_power_cal()
1459 ctpres = ar->power_2G_cck; carl9170_set_power_cal()
1462 ctpl = &ar->eeprom.cal_tgt_pwr_2G_ofdm[0]; carl9170_set_power_cal()
1464 ctpres = ar->power_2G_ofdm; carl9170_set_power_cal()
1487 ctph = &ar->eeprom.cal_tgt_pwr_5G_ht20[0]; carl9170_set_power_cal()
1489 ctpres = ar->power_5G_ht20; carl9170_set_power_cal()
1492 ctph = &ar->eeprom.cal_tgt_pwr_5G_ht40[0]; carl9170_set_power_cal()
1494 ctpres = ar->power_5G_ht40; carl9170_set_power_cal()
1497 ctph = &ar->eeprom.cal_tgt_pwr_2G_ht20[0]; carl9170_set_power_cal()
1499 ctpres = ar->power_2G_ht20; carl9170_set_power_cal()
1502 ctph = &ar->eeprom.cal_tgt_pwr_2G_ht40[0]; carl9170_set_power_cal()
1504 ctpres = ar->power_2G_ht40; carl9170_set_power_cal()
1523 /* calc. conformance test limits and apply to ar->power*[] */ carl9170_set_power_cal()
1524 carl9170_calc_ctl(ar, freq, bw); carl9170_set_power_cal()
1527 int carl9170_get_noisefloor(struct ar9170 *ar) carl9170_get_noisefloor() argument
1535 BUILD_BUG_ON(ARRAY_SIZE(phy_regs) != ARRAY_SIZE(ar->noise)); carl9170_get_noisefloor()
1537 err = carl9170_read_mreg(ar, ARRAY_SIZE(phy_regs), phy_regs, phy_res); carl9170_get_noisefloor()
1542 ar->noise[i] = sign_extend32(GET_VAL( carl9170_get_noisefloor()
1545 ar->noise[i + 2] = sign_extend32(GET_VAL( carl9170_get_noisefloor()
1549 if (ar->channel) carl9170_get_noisefloor()
1550 ar->survey[ar->channel->hw_value].noise = ar->noise[0]; carl9170_get_noisefloor()
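The noise-floor registers return narrow two's-complement fields, which carl9170_get_noisefloor() widens with the kernel's sign_extend32(). A standalone equivalent — the 9-bit raw value below is made up:

#include <stdio.h>
#include <stdint.h>

/* Equivalent of the kernel's sign_extend32(value, sign-bit index). */
static int32_t sign_extend32(uint32_t value, int index)
{
	int shift = 31 - index;

	/* shift left as unsigned, then arithmetic shift right */
	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	/* 0x1F2 is -14 in 9-bit two's complement (illustrative value) */
	uint32_t raw = 0x1F2;

	printf("noise field = %d\n", (int)sign_extend32(raw, 8));
	return 0;
}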
1570 int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel, carl9170_set_channel() argument
1583 if (conf_is_ht(&ar->hw->conf)) carl9170_set_channel()
1586 if (conf_is_ht40(&ar->hw->conf)) carl9170_set_channel()
1590 if (ar->channel) { carl9170_set_channel()
1591 old_channel = ar->channel; carl9170_set_channel()
1592 ar->channel = NULL; carl9170_set_channel()
1596 err = carl9170_write_reg(ar, AR9170_PWR_REG_RESET, carl9170_set_channel()
1601 err = carl9170_write_reg(ar, AR9170_PWR_REG_RESET, 0x0); carl9170_set_channel()
1605 err = carl9170_init_phy(ar, channel->band); carl9170_set_channel()
1609 err = carl9170_init_rf_banks_0_7(ar, carl9170_set_channel()
1614 err = carl9170_exec_cmd(ar, CARL9170_CMD_FREQ_START, 0, NULL, 0, NULL); carl9170_set_channel()
1618 err = carl9170_write_reg(ar, AR9170_PHY_REG_HEAVY_CLIP_ENABLE, carl9170_set_channel()
1623 err = carl9170_init_rf_bank4_pwr(ar, carl9170_set_channel()
1651 if (ar->eeprom.tx_mask != 1) carl9170_set_channel()
1654 err = carl9170_write_reg(ar, AR9170_PHY_REG_TURBO, tmp); carl9170_set_channel()
1658 err = carl9170_set_freq_cal_data(ar, channel); carl9170_set_channel()
1662 carl9170_set_power_cal(ar, channel->center_freq, bw); carl9170_set_channel()
1664 err = carl9170_set_mac_tpc(ar, channel); carl9170_set_channel()
1671 if (conf_is_ht40(&ar->hw->conf)) carl9170_set_channel()
1680 err = carl9170_exec_cmd(ar, CARL9170_CMD_RF_INIT, sizeof(rf), &rf, carl9170_set_channel()
1687 ar->chan_fail++; carl9170_set_channel()
1688 ar->total_chan_fail++; carl9170_set_channel()
1690 wiphy_err(ar->hw->wiphy, "channel change: %d -> %d " carl9170_set_channel()
1695 if (ar->chan_fail > 3) { carl9170_set_channel()
1702 carl9170_restart(ar, CARL9170_RR_TOO_MANY_PHY_ERRORS); carl9170_set_channel()
1706 err = carl9170_set_channel(ar, channel, _bw); carl9170_set_channel()
1710 ar->chan_fail = 0; carl9170_set_channel()
1713 if (ar->heavy_clip) { carl9170_set_channel()
1714 err = carl9170_write_reg(ar, AR9170_PHY_REG_HEAVY_CLIP_ENABLE, carl9170_set_channel()
1715 0x200 | ar->heavy_clip); carl9170_set_channel()
1718 wiphy_err(ar->hw->wiphy, "failed to set " carl9170_set_channel()
1726 ar->channel = channel; carl9170_set_channel()
1727 ar->ht_settings = new_ht; carl9170_set_channel()
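The error path of carl9170_set_channel() counts consecutive failures in chan_fail and retries; past three failures it gives up and requests a full restart with CARL9170_RR_TOO_MANY_PHY_ERRORS. A toy model of that retry/escalate loop (the transient failure is simulated):

#include <stdio.h>

static int chan_fail;
static int attempts;

/* Pretend the RF init fails transiently on the first two tries. */
static int rf_init(void)
{
	return ++attempts < 3 ? -1 : 0;
}

static int set_channel(void)
{
	while (rf_init()) {
		if (++chan_fail > 3) {
			printf("too many PHY errors: full restart\n");
			return -1;	/* carl9170_restart() in the driver */
		}
		printf("channel change failed, retry %d\n", chan_fail);
	}
	chan_fail = 0;			/* success clears the counter */
	return 0;
}

int main(void)
{
	return set_channel() ? 1 : 0;
}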
H A Ddebug.h132 void carl9170_debugfs_register(struct ar9170 *ar);
133 void carl9170_debugfs_unregister(struct ar9170 *ar);
/linux-4.1.27/drivers/net/wireless/ath/ar5523/
H A Dar5523.c45 static int ar5523_submit_rx_cmd(struct ar5523 *ar);
46 static void ar5523_data_tx_pkt_put(struct ar5523 *ar);
48 static void ar5523_read_reply(struct ar5523 *ar, struct ar5523_cmd_hdr *hdr, ar5523_read_reply() argument
61 ar5523_dbg(ar, "Code = %d len = %d\n", be32_to_cpu(hdr->code) & 0xff, ar5523_read_reply()
77 ar5523_err(ar, "olen too small %d < %d\n", ar5523_read_reply()
94 struct ar5523 *ar = urb->context; ar5523_cmd_rx_cb() local
95 struct ar5523_tx_cmd *cmd = &ar->tx_cmd; ar5523_cmd_rx_cb()
96 struct ar5523_cmd_hdr *hdr = ar->rx_cmd_buf; ar5523_cmd_rx_cb()
102 ar5523_err(ar, "RX USB error %d.\n", urb->status); ar5523_cmd_rx_cb()
107 ar5523_err(ar, "RX USB too short.\n"); ar5523_cmd_rx_cb()
111 ar5523_dbg(ar, "%s code %02x priv %d\n", __func__, ar5523_cmd_rx_cb()
121 ar5523_err(ar, "Unexpected command id: %02x\n", ar5523_cmd_rx_cb()
125 ar5523_read_reply(ar, hdr, cmd); ar5523_cmd_rx_cb()
129 ar5523_dbg(ar, "WDCMSG_DEVICE_AVAIL\n"); ar5523_cmd_rx_cb()
136 ar5523_dbg(ar, "WDCMSG_SEND_COMPLETE: %d pending\n", ar5523_cmd_rx_cb()
137 atomic_read(&ar->tx_nr_pending)); ar5523_cmd_rx_cb()
138 if (!test_bit(AR5523_HW_UP, &ar->flags)) ar5523_cmd_rx_cb()
139 ar5523_dbg(ar, "Unexpected WDCMSG_SEND_COMPLETE\n"); ar5523_cmd_rx_cb()
141 mod_timer(&ar->tx_wd_timer, ar5523_cmd_rx_cb()
143 ar5523_data_tx_pkt_put(ar); ar5523_cmd_rx_cb()
153 ar5523_err(ar, "Invalid reply to WDCMSG_TARGET_START"); ar5523_cmd_rx_cb()
163 ar5523_dbg(ar, "WDCMSG_STATS_UPDATE\n"); ar5523_cmd_rx_cb()
168 ar5523_submit_rx_cmd(ar); ar5523_cmd_rx_cb()
171 static int ar5523_alloc_rx_cmd(struct ar5523 *ar) ar5523_alloc_rx_cmd() argument
173 ar->rx_cmd_urb = usb_alloc_urb(0, GFP_KERNEL); ar5523_alloc_rx_cmd()
174 if (!ar->rx_cmd_urb) ar5523_alloc_rx_cmd()
177 ar->rx_cmd_buf = usb_alloc_coherent(ar->dev, AR5523_MAX_RXCMDSZ, ar5523_alloc_rx_cmd()
179 &ar->rx_cmd_urb->transfer_dma); ar5523_alloc_rx_cmd()
180 if (!ar->rx_cmd_buf) { ar5523_alloc_rx_cmd()
181 usb_free_urb(ar->rx_cmd_urb); ar5523_alloc_rx_cmd()
187 static void ar5523_cancel_rx_cmd(struct ar5523 *ar) ar5523_cancel_rx_cmd() argument
189 usb_kill_urb(ar->rx_cmd_urb); ar5523_cancel_rx_cmd()
192 static void ar5523_free_rx_cmd(struct ar5523 *ar) ar5523_free_rx_cmd() argument
194 usb_free_coherent(ar->dev, AR5523_MAX_RXCMDSZ, ar5523_free_rx_cmd()
195 ar->rx_cmd_buf, ar->rx_cmd_urb->transfer_dma); ar5523_free_rx_cmd()
196 usb_free_urb(ar->rx_cmd_urb); ar5523_free_rx_cmd()
199 static int ar5523_submit_rx_cmd(struct ar5523 *ar) ar5523_submit_rx_cmd() argument
203 usb_fill_bulk_urb(ar->rx_cmd_urb, ar->dev, ar5523_submit_rx_cmd()
204 ar5523_cmd_rx_pipe(ar->dev), ar->rx_cmd_buf, ar5523_submit_rx_cmd()
205 AR5523_MAX_RXCMDSZ, ar5523_cmd_rx_cb, ar); ar5523_submit_rx_cmd()
206 ar->rx_cmd_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; ar5523_submit_rx_cmd()
208 error = usb_submit_urb(ar->rx_cmd_urb, GFP_ATOMIC); ar5523_submit_rx_cmd()
211 ar5523_err(ar, "error %d when submitting rx urb\n", ar5523_submit_rx_cmd()
224 struct ar5523 *ar = cmd->ar; ar5523_cmd_tx_cb() local
227 ar5523_err(ar, "Failed to TX command. Status = %d\n", ar5523_cmd_tx_cb()
240 static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata, ar5523_cmd() argument
244 struct ar5523_tx_cmd *cmd = &ar->tx_cmd; ar5523_cmd()
264 ar5523_dbg(ar, "do cmd %02x\n", code); ar5523_cmd()
266 usb_fill_bulk_urb(cmd->urb_tx, ar->dev, ar5523_cmd_tx_pipe(ar->dev), ar5523_cmd()
272 ar5523_err(ar, "could not send command 0x%x, error=%d\n", ar5523_cmd()
279 ar5523_err(ar, "timeout waiting for command %02x reply\n", ar5523_cmd()
286 static int ar5523_cmd_write(struct ar5523 *ar, u32 code, const void *data, ar5523_cmd_write() argument
290 return ar5523_cmd(ar, code, data, len, NULL, 0, flags); ar5523_cmd_write()
293 static int ar5523_cmd_read(struct ar5523 *ar, u32 code, const void *idata, ar5523_cmd_read() argument
297 return ar5523_cmd(ar, code, idata, ilen, odata, olen, flags); ar5523_cmd_read()
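ar5523_cmd() is the synchronous core both wrappers above funnel into: it fills and submits a bulk URB, then sleeps on a completion that ar5523_cmd_rx_cb() signals when the matching reply arrives. A userspace model of that submit-then-wait shape using pthreads (the timeout handling is elided; build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int done;

/* Plays the role of the RX-command URB callback. */
static void *rx_callback(void *arg)
{
	usleep(10000);			/* "device" latency */
	pthread_mutex_lock(&lock);
	done = 1;			/* complete(&cmd->done) */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

static int do_cmd(void)
{
	pthread_t rx;

	done = 0;
	pthread_create(&rx, NULL, rx_callback, NULL);	/* submit urb */

	pthread_mutex_lock(&lock);
	while (!done)			/* wait_for_completion(...) */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(rx, NULL);
	return 0;
}

int main(void)
{
	printf("cmd returned %d\n", do_cmd());
	return 0;
}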
300 static int ar5523_config(struct ar5523 *ar, u32 reg, u32 val) ar5523_config() argument
309 error = ar5523_cmd_write(ar, WDCMSG_TARGET_SET_CONFIG, &write, ar5523_config()
312 ar5523_err(ar, "could not write register 0x%02x\n", reg); ar5523_config()
316 static int ar5523_config_multi(struct ar5523 *ar, u32 reg, const void *data, ar5523_config_multi() argument
327 error = ar5523_cmd_write(ar, WDCMSG_TARGET_SET_CONFIG, &write, ar5523_config_multi()
330 ar5523_err(ar, "could not write %d bytes to register 0x%02x\n", ar5523_config_multi()
335 static int ar5523_get_status(struct ar5523 *ar, u32 which, void *odata, ar5523_get_status() argument
342 error = ar5523_cmd_read(ar, WDCMSG_TARGET_GET_STATUS, ar5523_get_status()
345 ar5523_err(ar, "could not read EEPROM offset 0x%02x\n", which); ar5523_get_status()
349 static int ar5523_get_capability(struct ar5523 *ar, u32 cap, u32 *val) ar5523_get_capability() argument
355 error = ar5523_cmd_read(ar, WDCMSG_TARGET_GET_CAPABILITY, &cap_be, ar5523_get_capability()
359 ar5523_err(ar, "could not read capability %u\n", cap); ar5523_get_capability()
366 static int ar5523_get_devcap(struct ar5523 *ar) ar5523_get_devcap() argument
369 error = ar5523_get_capability(ar, x, &cap); \ ar5523_get_devcap()
372 ar5523_info(ar, "Cap: " \ ar5523_get_devcap()
415 static int ar5523_set_ledsteady(struct ar5523 *ar, int lednum, int ledmode) ar5523_set_ledsteady() argument
422 ar5523_dbg(ar, "set %s led %s (steady)\n", ar5523_set_ledsteady()
425 return ar5523_cmd_write(ar, WDCMSG_SET_LED_STEADY, &led, sizeof(led), ar5523_set_ledsteady()
429 static int ar5523_set_rxfilter(struct ar5523 *ar, u32 bits, u32 op) ar5523_set_rxfilter() argument
436 ar5523_dbg(ar, "setting Rx filter=0x%x flags=0x%x\n", bits, op); ar5523_set_rxfilter()
437 return ar5523_cmd_write(ar, WDCMSG_RX_FILTER, &rxfilter, ar5523_set_rxfilter()
441 static int ar5523_reset_tx_queues(struct ar5523 *ar) ar5523_reset_tx_queues() argument
445 ar5523_dbg(ar, "resetting Tx queue\n"); ar5523_reset_tx_queues()
446 return ar5523_cmd_write(ar, WDCMSG_RELEASE_TX_QUEUE, ar5523_reset_tx_queues()
450 static int ar5523_set_chan(struct ar5523 *ar) ar5523_set_chan() argument
452 struct ieee80211_conf *conf = &ar->hw->conf; ar5523_set_chan()
464 ar5523_dbg(ar, "set chan flags 0x%x freq %d\n", ar5523_set_chan()
467 return ar5523_cmd_write(ar, WDCMSG_RESET, &reset, sizeof(reset), 0); ar5523_set_chan()
470 static int ar5523_queue_init(struct ar5523 *ar) ar5523_queue_init() argument
474 ar5523_dbg(ar, "setting up Tx queue\n"); ar5523_queue_init()
484 return ar5523_cmd_write(ar, WDCMSG_SETUP_TX_QUEUE, &qinfo, ar5523_queue_init()
488 static int ar5523_switch_chan(struct ar5523 *ar) ar5523_switch_chan() argument
492 error = ar5523_set_chan(ar); ar5523_switch_chan()
494 ar5523_err(ar, "could not set chan, error %d\n", error); ar5523_switch_chan()
499 error = ar5523_reset_tx_queues(ar); ar5523_switch_chan()
501 ar5523_err(ar, "could not reset Tx queues, error %d\n", ar5523_switch_chan()
506 error = ar5523_queue_init(ar); ar5523_switch_chan()
508 ar5523_err(ar, "could not init wme, error %d\n", error); ar5523_switch_chan()
514 static void ar5523_rx_data_put(struct ar5523 *ar, ar5523_rx_data_put() argument
518 spin_lock_irqsave(&ar->rx_data_list_lock, flags); ar5523_rx_data_put()
519 list_move(&data->list, &ar->rx_data_free); ar5523_rx_data_put()
520 spin_unlock_irqrestore(&ar->rx_data_list_lock, flags); ar5523_rx_data_put()
526 struct ar5523 *ar = data->ar; ar5523_data_rx_cb() local
529 struct ieee80211_hw *hw = ar->hw; ar5523_data_rx_cb()
535 ar5523_dbg(ar, "%s\n", __func__); ar5523_data_rx_cb()
539 ar5523_err(ar, "%s: USB err: %d\n", __func__, ar5523_data_rx_cb()
545 ar5523_err(ar, "RX: wrong xfer size (usblen=%d)\n", usblen); ar5523_data_rx_cb()
553 ar5523_dbg(ar, "RX: No final flag. s: %d f: %02x l: %d\n", ar5523_data_rx_cb()
564 if (rxlen > ar->rxbufsz) { ar5523_data_rx_cb()
565 ar5523_dbg(ar, "RX: Bad descriptor (len=%d)\n", ar5523_data_rx_cb()
571 ar5523_dbg(ar, "RX: rxlen is 0\n"); ar5523_data_rx_cb()
576 ar5523_dbg(ar, "Bad RX status (0x%x len = %d). Skip\n", ar5523_data_rx_cb()
586 ar5523_dbg(ar, "eek, alignment workaround activated\n"); ar5523_data_rx_cb()
608 ar5523_rx_data_put(ar, data); ar5523_data_rx_cb()
609 if (atomic_inc_return(&ar->rx_data_free_cnt) >= ar5523_data_rx_cb()
611 test_bit(AR5523_HW_UP, &ar->flags)) ar5523_data_rx_cb()
612 queue_work(ar->wq, &ar->rx_refill_work); ar5523_data_rx_cb()
617 struct ar5523 *ar = container_of(work, struct ar5523, rx_refill_work); ar5523_rx_refill_work() local
622 ar5523_dbg(ar, "%s\n", __func__); ar5523_rx_refill_work()
624 spin_lock_irqsave(&ar->rx_data_list_lock, flags); ar5523_rx_refill_work()
626 if (!list_empty(&ar->rx_data_free)) ar5523_rx_refill_work()
627 data = (struct ar5523_rx_data *) ar->rx_data_free.next; ar5523_rx_refill_work()
630 spin_unlock_irqrestore(&ar->rx_data_list_lock, flags); ar5523_rx_refill_work()
635 data->skb = alloc_skb(ar->rxbufsz, GFP_KERNEL); ar5523_rx_refill_work()
637 ar5523_err(ar, "could not allocate rx skbuff\n"); ar5523_rx_refill_work()
641 usb_fill_bulk_urb(data->urb, ar->dev, ar5523_rx_refill_work()
642 ar5523_data_rx_pipe(ar->dev), data->skb->data, ar5523_rx_refill_work()
643 ar->rxbufsz, ar5523_data_rx_cb, data); ar5523_rx_refill_work()
645 spin_lock_irqsave(&ar->rx_data_list_lock, flags); ar5523_rx_refill_work()
646 list_move(&data->list, &ar->rx_data_used); ar5523_rx_refill_work()
647 spin_unlock_irqrestore(&ar->rx_data_list_lock, flags); ar5523_rx_refill_work()
648 atomic_dec(&ar->rx_data_free_cnt); ar5523_rx_refill_work()
654 ar5523_err(ar, "Err sending rx data urb %d\n", ar5523_rx_refill_work()
656 ar5523_rx_data_put(ar, data); ar5523_rx_refill_work()
657 atomic_inc(&ar->rx_data_free_cnt); ar5523_rx_refill_work()
666 static void ar5523_cancel_rx_bufs(struct ar5523 *ar) ar5523_cancel_rx_bufs() argument
672 spin_lock_irqsave(&ar->rx_data_list_lock, flags); ar5523_cancel_rx_bufs()
673 if (!list_empty(&ar->rx_data_used)) ar5523_cancel_rx_bufs()
674 data = (struct ar5523_rx_data *) ar->rx_data_used.next; ar5523_cancel_rx_bufs()
677 spin_unlock_irqrestore(&ar->rx_data_list_lock, flags); ar5523_cancel_rx_bufs()
683 list_move(&data->list, &ar->rx_data_free); ar5523_cancel_rx_bufs()
684 atomic_inc(&ar->rx_data_free_cnt); ar5523_cancel_rx_bufs()
688 static void ar5523_free_rx_bufs(struct ar5523 *ar) ar5523_free_rx_bufs() argument
692 ar5523_cancel_rx_bufs(ar); ar5523_free_rx_bufs()
693 while (!list_empty(&ar->rx_data_free)) { ar5523_free_rx_bufs()
694 data = (struct ar5523_rx_data *) ar->rx_data_free.next; ar5523_free_rx_bufs()
700 static int ar5523_alloc_rx_bufs(struct ar5523 *ar) ar5523_alloc_rx_bufs() argument
705 struct ar5523_rx_data *data = &ar->rx_data[i]; ar5523_alloc_rx_bufs()
707 data->ar = ar; ar5523_alloc_rx_bufs()
710 ar5523_err(ar, "could not allocate rx data urb\n"); ar5523_alloc_rx_bufs()
713 list_add_tail(&data->list, &ar->rx_data_free); ar5523_alloc_rx_bufs()
714 atomic_inc(&ar->rx_data_free_cnt); ar5523_alloc_rx_bufs()
719 ar5523_free_rx_bufs(ar); ar5523_alloc_rx_bufs()
723 static void ar5523_data_tx_pkt_put(struct ar5523 *ar) ar5523_data_tx_pkt_put() argument
725 atomic_dec(&ar->tx_nr_total); ar5523_data_tx_pkt_put()
726 if (!atomic_dec_return(&ar->tx_nr_pending)) { ar5523_data_tx_pkt_put()
727 del_timer(&ar->tx_wd_timer); ar5523_data_tx_pkt_put()
728 wake_up(&ar->tx_flush_waitq); ar5523_data_tx_pkt_put()
731 if (atomic_read(&ar->tx_nr_total) < AR5523_TX_DATA_RESTART_COUNT) { ar5523_data_tx_pkt_put()
732 ar5523_dbg(ar, "restart tx queue\n"); ar5523_data_tx_pkt_put()
733 ieee80211_wake_queues(ar->hw); ar5523_data_tx_pkt_put()
743 struct ar5523 *ar = data->ar; ar5523_data_tx_cb() local
746 ar5523_dbg(ar, "data tx urb completed: %d\n", urb->status); ar5523_data_tx_cb()
748 spin_lock_irqsave(&ar->tx_data_list_lock, flags); ar5523_data_tx_cb()
750 spin_unlock_irqrestore(&ar->tx_data_list_lock, flags); ar5523_data_tx_cb()
753 ar5523_dbg(ar, "%s: urb status: %d\n", __func__, urb->status); ar5523_data_tx_cb()
754 ar5523_data_tx_pkt_put(ar); ar5523_data_tx_cb()
755 ieee80211_free_txskb(ar->hw, skb); ar5523_data_tx_cb()
758 ieee80211_tx_status_irqsafe(ar->hw, skb); ar5523_data_tx_cb()
770 struct ar5523 *ar = hw->priv; ar5523_tx() local
773 ar5523_dbg(ar, "tx called\n"); ar5523_tx()
774 if (atomic_inc_return(&ar->tx_nr_total) >= AR5523_TX_DATA_COUNT) { ar5523_tx()
775 ar5523_dbg(ar, "tx queue full\n"); ar5523_tx()
776 ar5523_dbg(ar, "stop queues (tot %d pend %d)\n", ar5523_tx()
777 atomic_read(&ar->tx_nr_total), ar5523_tx()
778 atomic_read(&ar->tx_nr_pending)); ar5523_tx()
782 spin_lock_irqsave(&ar->tx_data_list_lock, flags); ar5523_tx()
783 list_add_tail(&data->list, &ar->tx_queue_pending); ar5523_tx()
784 spin_unlock_irqrestore(&ar->tx_data_list_lock, flags); ar5523_tx()
786 ieee80211_queue_work(ar->hw, &ar->tx_work); ar5523_tx()
789 static void ar5523_tx_work_locked(struct ar5523 *ar) ar5523_tx_work_locked() argument
804 ar5523_dbg(ar, "%s\n", __func__); ar5523_tx_work_locked()
806 spin_lock_irqsave(&ar->tx_data_list_lock, flags); ar5523_tx_work_locked()
807 if (!list_empty(&ar->tx_queue_pending)) { ar5523_tx_work_locked()
809 ar->tx_queue_pending.next; ar5523_tx_work_locked()
813 spin_unlock_irqrestore(&ar->tx_data_list_lock, flags); ar5523_tx_work_locked()
827 ar5523_err(ar, "Failed to allocate TX urb\n"); ar5523_tx_work_locked()
828 ieee80211_free_txskb(ar->hw, skb); ar5523_tx_work_locked()
832 data->ar = ar; ar5523_tx_work_locked()
848 if (test_bit(AR5523_CONNECTED, &ar->flags)) ar5523_tx_work_locked()
859 usb_fill_bulk_urb(urb, ar->dev, ar5523_data_tx_pipe(ar->dev), ar5523_tx_work_locked()
862 spin_lock_irqsave(&ar->tx_data_list_lock, flags); ar5523_tx_work_locked()
863 list_add_tail(&data->list, &ar->tx_queue_submitted); ar5523_tx_work_locked()
864 spin_unlock_irqrestore(&ar->tx_data_list_lock, flags); ar5523_tx_work_locked()
865 mod_timer(&ar->tx_wd_timer, jiffies + AR5523_TX_WD_TIMEOUT); ar5523_tx_work_locked()
866 atomic_inc(&ar->tx_nr_pending); ar5523_tx_work_locked()
868 ar5523_dbg(ar, "TX Frame (%d pending)\n", ar5523_tx_work_locked()
869 atomic_read(&ar->tx_nr_pending)); ar5523_tx_work_locked()
872 ar5523_err(ar, "error %d when submitting tx urb\n", ar5523_tx_work_locked()
874 spin_lock_irqsave(&ar->tx_data_list_lock, flags); ar5523_tx_work_locked()
876 spin_unlock_irqrestore(&ar->tx_data_list_lock, flags); ar5523_tx_work_locked()
877 atomic_dec(&ar->tx_nr_pending); ar5523_tx_work_locked()
878 ar5523_data_tx_pkt_put(ar); ar5523_tx_work_locked()
880 ieee80211_free_txskb(ar->hw, skb); ar5523_tx_work_locked()
887 struct ar5523 *ar = container_of(work, struct ar5523, tx_work); ar5523_tx_work() local
889 ar5523_dbg(ar, "%s\n", __func__); ar5523_tx_work()
890 mutex_lock(&ar->mutex); ar5523_tx_work()
891 ar5523_tx_work_locked(ar); ar5523_tx_work()
892 mutex_unlock(&ar->mutex); ar5523_tx_work()
897 struct ar5523 *ar = (struct ar5523 *) arg; ar5523_tx_wd_timer() local
899 ar5523_dbg(ar, "TX watchdog timer triggered\n"); ar5523_tx_wd_timer()
900 ieee80211_queue_work(ar->hw, &ar->tx_wd_work); ar5523_tx_wd_timer()
905 struct ar5523 *ar = container_of(work, struct ar5523, tx_wd_work); ar5523_tx_wd_work() local
911 mutex_lock(&ar->mutex); ar5523_tx_wd_work()
912 ar5523_err(ar, "TX queue stuck (tot %d pend %d)\n", ar5523_tx_wd_work()
913 atomic_read(&ar->tx_nr_total), ar5523_tx_wd_work()
914 atomic_read(&ar->tx_nr_pending)); ar5523_tx_wd_work()
916 ar5523_err(ar, "Will restart dongle.\n"); ar5523_tx_wd_work()
917 ar5523_cmd_write(ar, WDCMSG_TARGET_RESET, NULL, 0, 0); ar5523_tx_wd_work()
918 mutex_unlock(&ar->mutex); ar5523_tx_wd_work()
921 static void ar5523_flush_tx(struct ar5523 *ar) ar5523_flush_tx() argument
923 ar5523_tx_work_locked(ar); ar5523_flush_tx()
926 if (test_bit(AR5523_USB_DISCONNECTED, &ar->flags)) ar5523_flush_tx()
928 if (!wait_event_timeout(ar->tx_flush_waitq, ar5523_flush_tx()
929 !atomic_read(&ar->tx_nr_pending), AR5523_FLUSH_TIMEOUT)) ar5523_flush_tx()
930 ar5523_err(ar, "flush timeout (tot %d pend %d)\n", ar5523_flush_tx()
931 atomic_read(&ar->tx_nr_total), ar5523_flush_tx()
932 atomic_read(&ar->tx_nr_pending)); ar5523_flush_tx()
935 static void ar5523_free_tx_cmd(struct ar5523 *ar) ar5523_free_tx_cmd() argument
937 struct ar5523_tx_cmd *cmd = &ar->tx_cmd; ar5523_free_tx_cmd()
939 usb_free_coherent(ar->dev, AR5523_MAX_RXCMDSZ, cmd->buf_tx, ar5523_free_tx_cmd()
944 static int ar5523_alloc_tx_cmd(struct ar5523 *ar) ar5523_alloc_tx_cmd() argument
946 struct ar5523_tx_cmd *cmd = &ar->tx_cmd; ar5523_alloc_tx_cmd()
948 cmd->ar = ar; ar5523_alloc_tx_cmd()
953 ar5523_err(ar, "could not allocate urb\n"); ar5523_alloc_tx_cmd()
956 cmd->buf_tx = usb_alloc_coherent(ar->dev, AR5523_MAX_TXCMDSZ, ar5523_alloc_tx_cmd()
972 struct ar5523 *ar = container_of(work, struct ar5523, stat_work.work); ar5523_stat_work() local
975 ar5523_dbg(ar, "%s\n", __func__); ar5523_stat_work()
976 mutex_lock(&ar->mutex); ar5523_stat_work()
982 error = ar5523_cmd_write(ar, WDCMSG_TARGET_GET_STATS, NULL, 0, 0); ar5523_stat_work()
984 ar5523_err(ar, "could not query stats, error %d\n", error); ar5523_stat_work()
985 mutex_unlock(&ar->mutex); ar5523_stat_work()
986 ieee80211_queue_delayed_work(ar->hw, &ar->stat_work, HZ); ar5523_stat_work()
994 struct ar5523 *ar = hw->priv; ar5523_start() local
998 ar5523_dbg(ar, "start called\n"); ar5523_start()
1000 mutex_lock(&ar->mutex); ar5523_start()
1002 ar5523_cmd_write(ar, WDCMSG_BIND, &val, sizeof(val), 0); ar5523_start()
1005 ar5523_config_multi(ar, CFG_MAC_ADDR, &ar->hw->wiphy->perm_addr, ar5523_start()
1009 ar5523_config(ar, CFG_RATE_CONTROL_ENABLE, 0x00000001); ar5523_start()
1010 ar5523_config(ar, CFG_DIVERSITY_CTL, 0x00000001); ar5523_start()
1011 ar5523_config(ar, CFG_ABOLT, 0x0000003f); ar5523_start()
1012 ar5523_config(ar, CFG_WME_ENABLED, 0x00000000); ar5523_start()
1014 ar5523_config(ar, CFG_SERVICE_TYPE, 1); ar5523_start()
1015 ar5523_config(ar, CFG_TP_SCALE, 0x00000000); ar5523_start()
1016 ar5523_config(ar, CFG_TPC_HALF_DBM5, 0x0000003c); ar5523_start()
1017 ar5523_config(ar, CFG_TPC_HALF_DBM2, 0x0000003c); ar5523_start()
1018 ar5523_config(ar, CFG_OVERRD_TX_POWER, 0x00000000); ar5523_start()
1019 ar5523_config(ar, CFG_GMODE_PROTECTION, 0x00000000); ar5523_start()
1020 ar5523_config(ar, CFG_GMODE_PROTECT_RATE_INDEX, 0x00000003); ar5523_start()
1021 ar5523_config(ar, CFG_PROTECTION_TYPE, 0x00000000); ar5523_start()
1022 ar5523_config(ar, CFG_MODE_CTS, 0x00000002); ar5523_start()
1024 error = ar5523_cmd_read(ar, WDCMSG_TARGET_START, NULL, 0, ar5523_start()
1027 ar5523_dbg(ar, "could not start target, error %d\n", error); ar5523_start()
1030 ar5523_dbg(ar, "WDCMSG_TARGET_START returns handle: 0x%x\n", ar5523_start()
1033 ar5523_switch_chan(ar); ar5523_start()
1036 ar5523_cmd_write(ar, WDCMSG_SET_PWR_MODE, &val, sizeof(val), 0); ar5523_start()
1038 ar5523_cmd_write(ar, WDCMSG_RESET_KEY_CACHE, NULL, 0, 0); ar5523_start()
1040 set_bit(AR5523_HW_UP, &ar->flags); ar5523_start()
1041 queue_work(ar->wq, &ar->rx_refill_work); ar5523_start()
1044 ar5523_set_rxfilter(ar, 0, UATH_FILTER_OP_INIT); ar5523_start()
1045 ar5523_set_rxfilter(ar, ar5523_start()
1050 ar5523_set_ledsteady(ar, UATH_LED_ACTIVITY, UATH_LED_ON); ar5523_start()
1051 ar5523_dbg(ar, "start OK\n"); ar5523_start()
1054 mutex_unlock(&ar->mutex); ar5523_start()
1060 struct ar5523 *ar = hw->priv; ar5523_stop() local
1062 ar5523_dbg(ar, "stop called\n"); ar5523_stop()
1064 cancel_delayed_work_sync(&ar->stat_work); ar5523_stop()
1065 mutex_lock(&ar->mutex); ar5523_stop()
1066 clear_bit(AR5523_HW_UP, &ar->flags); ar5523_stop()
1068 ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_OFF); ar5523_stop()
1069 ar5523_set_ledsteady(ar, UATH_LED_ACTIVITY, UATH_LED_OFF); ar5523_stop()
1071 ar5523_cmd_write(ar, WDCMSG_TARGET_STOP, NULL, 0, 0); ar5523_stop()
1073 del_timer_sync(&ar->tx_wd_timer); ar5523_stop()
1074 cancel_work_sync(&ar->tx_wd_work); ar5523_stop()
1075 cancel_work_sync(&ar->rx_refill_work); ar5523_stop()
1076 ar5523_cancel_rx_bufs(ar); ar5523_stop()
1077 mutex_unlock(&ar->mutex); ar5523_stop()
1082 struct ar5523 *ar = hw->priv; ar5523_set_rts_threshold() local
1085 ar5523_dbg(ar, "set_rts_threshold called\n"); ar5523_set_rts_threshold()
1086 mutex_lock(&ar->mutex); ar5523_set_rts_threshold()
1088 ret = ar5523_config(ar, CFG_USER_RTS_THRESHOLD, value); ar5523_set_rts_threshold()
1090 mutex_unlock(&ar->mutex); ar5523_set_rts_threshold()
1097 struct ar5523 *ar = hw->priv; ar5523_flush() local
1099 ar5523_dbg(ar, "flush called\n"); ar5523_flush()
1100 ar5523_flush_tx(ar); ar5523_flush()
1106 struct ar5523 *ar = hw->priv; ar5523_add_interface() local
1108 ar5523_dbg(ar, "add interface called\n"); ar5523_add_interface()
1110 if (ar->vif) { ar5523_add_interface()
1111 ar5523_dbg(ar, "invalid add_interface\n"); ar5523_add_interface()
1117 ar->vif = vif; ar5523_add_interface()
1128 struct ar5523 *ar = hw->priv; ar5523_remove_interface() local
1130 ar5523_dbg(ar, "remove interface called\n"); ar5523_remove_interface()
1131 ar->vif = NULL; ar5523_remove_interface()
1136 struct ar5523 *ar = hw->priv; ar5523_hwconfig() local
1138 ar5523_dbg(ar, "config called\n"); ar5523_hwconfig()
1139 mutex_lock(&ar->mutex); ar5523_hwconfig()
1141 ar5523_dbg(ar, "Do channel switch\n"); ar5523_hwconfig()
1142 ar5523_flush_tx(ar); ar5523_hwconfig()
1143 ar5523_switch_chan(ar); ar5523_hwconfig()
1145 mutex_unlock(&ar->mutex); ar5523_hwconfig()
1149 static int ar5523_get_wlan_mode(struct ar5523 *ar, ar5523_get_wlan_mode() argument
1157 band = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band]; ar5523_get_wlan_mode()
1158 sta = ieee80211_find_sta(ar->vif, bss_conf->bssid); ar5523_get_wlan_mode()
1160 ar5523_info(ar, "STA not found!\n"); ar5523_get_wlan_mode()
1163 sta_rate_set = sta->supp_rates[ar->hw->conf.chandef.chan->band]; ar5523_get_wlan_mode()
1185 static void ar5523_create_rateset(struct ar5523 *ar, ar5523_create_rateset() argument
1195 sta = ieee80211_find_sta(ar->vif, bss_conf->bssid); ar5523_create_rateset()
1198 ar5523_info(ar, "STA not found. Cannot set rates\n"); ar5523_create_rateset()
1201 sta_rate_set = sta->supp_rates[ar->hw->conf.chandef.chan->band]; ar5523_create_rateset()
1203 ar5523_dbg(ar, "sta rate_set = %08x\n", sta_rate_set); ar5523_create_rateset()
1205 band = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band]; ar5523_create_rateset()
1208 ar5523_dbg(ar, "Considering rate %d : %d\n", ar5523_create_rateset()
1223 static int ar5523_set_basic_rates(struct ar5523 *ar, ar5523_set_basic_rates() argument
1231 ar5523_create_rateset(ar, bss, &rates.rateset, true); ar5523_set_basic_rates()
1233 return ar5523_cmd_write(ar, WDCMSG_SET_BASIC_RATE, &rates, ar5523_set_basic_rates()
1237 static int ar5523_create_connection(struct ar5523 *ar, ar5523_create_connection() argument
1250 ar5523_create_rateset(ar, bss, &create.connattr.rateset, false); ar5523_create_connection()
1252 wlan_mode = ar5523_get_wlan_mode(ar, bss); ar5523_create_connection()
1255 return ar5523_cmd_write(ar, WDCMSG_CREATE_CONNECTION, &create, ar5523_create_connection()
1259 static int ar5523_write_associd(struct ar5523 *ar, ar5523_write_associd() argument
1269 return ar5523_cmd_write(ar, WDCMSG_WRITE_ASSOCID, &associd, ar5523_write_associd()
1278 struct ar5523 *ar = hw->priv; ar5523_bss_info_changed() local
1281 ar5523_dbg(ar, "bss_info_changed called\n"); ar5523_bss_info_changed()
1282 mutex_lock(&ar->mutex); ar5523_bss_info_changed()
1288 error = ar5523_create_connection(ar, vif, bss); ar5523_bss_info_changed()
1290 ar5523_err(ar, "could not create connection\n"); ar5523_bss_info_changed()
1294 error = ar5523_set_basic_rates(ar, bss); ar5523_bss_info_changed()
1296 ar5523_err(ar, "could not set negotiated rate set\n"); ar5523_bss_info_changed()
1300 error = ar5523_write_associd(ar, bss); ar5523_bss_info_changed()
1302 ar5523_err(ar, "could not set association\n"); ar5523_bss_info_changed()
1307 ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_ON); ar5523_bss_info_changed()
1308 set_bit(AR5523_CONNECTED, &ar->flags); ar5523_bss_info_changed()
1309 ieee80211_queue_delayed_work(hw, &ar->stat_work, HZ); ar5523_bss_info_changed()
1312 cancel_delayed_work(&ar->stat_work); ar5523_bss_info_changed()
1313 clear_bit(AR5523_CONNECTED, &ar->flags); ar5523_bss_info_changed()
1314 ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_OFF); ar5523_bss_info_changed()
1318 mutex_unlock(&ar->mutex); ar5523_bss_info_changed()
1332 struct ar5523 *ar = hw->priv; ar5523_configure_filter() local
1335 ar5523_dbg(ar, "configure_filter called\n"); ar5523_configure_filter()
1336 mutex_lock(&ar->mutex); ar5523_configure_filter()
1337 ar5523_flush_tx(ar); ar5523_configure_filter()
1349 ar5523_set_rxfilter(ar, 0, UATH_FILTER_OP_INIT); ar5523_configure_filter()
1350 ar5523_set_rxfilter(ar, filter, UATH_FILTER_OP_SET); ar5523_configure_filter()
1352 mutex_unlock(&ar->mutex); ar5523_configure_filter()
1368 static int ar5523_host_available(struct ar5523 *ar) ar5523_host_available() argument
1377 return ar5523_cmd_read(ar, WDCMSG_HOST_AVAILABLE, ar5523_host_available()
1381 static int ar5523_get_devstatus(struct ar5523 *ar) ar5523_get_devstatus() argument
1387 error = ar5523_get_status(ar, ST_MAC_ADDR, macaddr, ETH_ALEN); ar5523_get_devstatus()
1389 ar5523_err(ar, "could not read MAC address\n"); ar5523_get_devstatus()
1393 SET_IEEE80211_PERM_ADDR(ar->hw, macaddr); ar5523_get_devstatus()
1395 error = ar5523_get_status(ar, ST_SERIAL_NUMBER, ar5523_get_devstatus()
1396 &ar->serial[0], sizeof(ar->serial)); ar5523_get_devstatus()
1398 ar5523_err(ar, "could not read device serial number\n"); ar5523_get_devstatus()
1406 static int ar5523_get_max_rxsz(struct ar5523 *ar) ar5523_get_max_rxsz() argument
1412 error = ar5523_get_status(ar, ST_WDC_TRANSPORT_CHUNK_SIZE, &rxsize, ar5523_get_max_rxsz()
1415 ar5523_err(ar, "could not read max RX size\n"); ar5523_get_max_rxsz()
1419 ar->rxbufsz = be32_to_cpu(rxsize); ar5523_get_max_rxsz()
1421 if (!ar->rxbufsz || ar->rxbufsz > AR5523_SANE_RXBUFSZ) { ar5523_get_max_rxsz()
1422 ar5523_err(ar, "Bad rxbufsz from device. Using %d instead\n", ar5523_get_max_rxsz()
1424 ar->rxbufsz = AR5523_SANE_RXBUFSZ; ar5523_get_max_rxsz()
1427 ar5523_dbg(ar, "Max RX buf size: %d\n", ar->rxbufsz); ar5523_get_max_rxsz()
1467 static int ar5523_init_modes(struct ar5523 *ar) ar5523_init_modes() argument
1469 BUILD_BUG_ON(sizeof(ar->channels) != sizeof(ar5523_channels)); ar5523_init_modes()
1470 BUILD_BUG_ON(sizeof(ar->rates) != sizeof(ar5523_rates)); ar5523_init_modes()
1472 memcpy(ar->channels, ar5523_channels, sizeof(ar5523_channels)); ar5523_init_modes()
1473 memcpy(ar->rates, ar5523_rates, sizeof(ar5523_rates)); ar5523_init_modes()
1475 ar->band.band = IEEE80211_BAND_2GHZ; ar5523_init_modes()
1476 ar->band.channels = ar->channels; ar5523_init_modes()
1477 ar->band.n_channels = ARRAY_SIZE(ar5523_channels); ar5523_init_modes()
1478 ar->band.bitrates = ar->rates; ar5523_init_modes()
1479 ar->band.n_bitrates = ARRAY_SIZE(ar5523_rates); ar5523_init_modes()
1480 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar->band; ar5523_init_modes()
1585 struct ar5523 *ar; ar5523_probe() local
1597 hw = ieee80211_alloc_hw(sizeof(*ar), &ar5523_ops); ar5523_probe()
1602 ar = hw->priv; ar5523_probe()
1603 ar->hw = hw; ar5523_probe()
1604 ar->dev = dev; ar5523_probe()
1605 mutex_init(&ar->mutex); ar5523_probe()
1607 INIT_DELAYED_WORK(&ar->stat_work, ar5523_stat_work); ar5523_probe()
1608 init_timer(&ar->tx_wd_timer); ar5523_probe()
1609 setup_timer(&ar->tx_wd_timer, ar5523_tx_wd_timer, (unsigned long) ar); ar5523_probe()
1610 INIT_WORK(&ar->tx_wd_work, ar5523_tx_wd_work); ar5523_probe()
1611 INIT_WORK(&ar->tx_work, ar5523_tx_work); ar5523_probe()
1612 INIT_LIST_HEAD(&ar->tx_queue_pending); ar5523_probe()
1613 INIT_LIST_HEAD(&ar->tx_queue_submitted); ar5523_probe()
1614 spin_lock_init(&ar->tx_data_list_lock); ar5523_probe()
1615 atomic_set(&ar->tx_nr_total, 0); ar5523_probe()
1616 atomic_set(&ar->tx_nr_pending, 0); ar5523_probe()
1617 init_waitqueue_head(&ar->tx_flush_waitq); ar5523_probe()
1619 atomic_set(&ar->rx_data_free_cnt, 0); ar5523_probe()
1620 INIT_WORK(&ar->rx_refill_work, ar5523_rx_refill_work); ar5523_probe()
1621 INIT_LIST_HEAD(&ar->rx_data_free); ar5523_probe()
1622 INIT_LIST_HEAD(&ar->rx_data_used); ar5523_probe()
1623 spin_lock_init(&ar->rx_data_list_lock); ar5523_probe()
1625 ar->wq = create_singlethread_workqueue("ar5523"); ar5523_probe()
1626 if (!ar->wq) { ar5523_probe()
1627 ar5523_err(ar, "Could not create wq\n"); ar5523_probe()
1631 error = ar5523_alloc_rx_bufs(ar); ar5523_probe()
1633 ar5523_err(ar, "Could not allocate rx buffers\n"); ar5523_probe()
1637 error = ar5523_alloc_rx_cmd(ar); ar5523_probe()
1639 ar5523_err(ar, "Could not allocate rx command buffers\n"); ar5523_probe()
1643 error = ar5523_alloc_tx_cmd(ar); ar5523_probe()
1645 ar5523_err(ar, "Could not allocate tx command buffers\n"); ar5523_probe()
1649 error = ar5523_submit_rx_cmd(ar); ar5523_probe()
1651 ar5523_err(ar, "Failed to submit rx cmd\n"); ar5523_probe()
1658 error = ar5523_host_available(ar); ar5523_probe()
1660 ar5523_err(ar, "could not initialize adapter\n"); ar5523_probe()
1664 error = ar5523_get_max_rxsz(ar); ar5523_probe()
1666 ar5523_err(ar, "could not get caps from adapter\n"); ar5523_probe()
1670 error = ar5523_get_devcap(ar); ar5523_probe()
1672 ar5523_err(ar, "could not get caps from adapter\n"); ar5523_probe()
1676 error = ar5523_get_devstatus(ar); ar5523_probe()
1678 ar5523_err(ar, "could not get device status\n"); ar5523_probe()
1682 ar5523_info(ar, "MAC/BBP AR5523, RF AR%c112\n", ar5523_probe()
1685 ar->vif = NULL; ar5523_probe()
1694 error = ar5523_init_modes(ar); ar5523_probe()
1702 ar5523_err(ar, "could not register device\n"); ar5523_probe()
1706 ar5523_info(ar, "Found and initialized AR5523 device\n"); ar5523_probe()
1710 ar5523_cancel_rx_cmd(ar); ar5523_probe()
1712 ar5523_free_tx_cmd(ar); ar5523_probe()
1714 ar5523_free_rx_cmd(ar); ar5523_probe()
1716 ar5523_free_rx_bufs(ar); ar5523_probe()
1718 destroy_workqueue(ar->wq); ar5523_probe()
1728 struct ar5523 *ar = hw->priv; ar5523_disconnect() local
1730 ar5523_dbg(ar, "detaching\n"); ar5523_disconnect()
1731 set_bit(AR5523_USB_DISCONNECTED, &ar->flags); ar5523_disconnect()
1735 ar5523_cancel_rx_cmd(ar); ar5523_disconnect()
1736 ar5523_free_tx_cmd(ar); ar5523_disconnect()
1737 ar5523_free_rx_cmd(ar); ar5523_disconnect()
1738 ar5523_free_rx_bufs(ar); ar5523_disconnect()
1740 destroy_workqueue(ar->wq); ar5523_disconnect()
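The ar5523.c hits above repeatedly exercise one pattern: RX buffers migrate between a free list and a used list under rx_data_list_lock, with an atomic counter tracking availability. Below is a minimal sketch of that pattern, not the driver's code; all names (struct buf, buf_get, buf_put) are illustrative.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

struct buf {
	struct list_head list;
	/* payload omitted */
};

static LIST_HEAD(buf_free);
static LIST_HEAD(buf_used);
static DEFINE_SPINLOCK(buf_lock);
static atomic_t buf_free_cnt = ATOMIC_INIT(0);

/* Claim one free buffer, as the RX refill worker does. */
static struct buf *buf_get(void)
{
	struct buf *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&buf_lock, flags);
	if (!list_empty(&buf_free)) {
		b = list_first_entry(&buf_free, struct buf, list);
		list_move(&b->list, &buf_used);
		atomic_dec(&buf_free_cnt);
	}
	spin_unlock_irqrestore(&buf_lock, flags);
	return b;
}

/* Return a buffer to the pool, as the URB completion callback does. */
static void buf_put(struct buf *b)
{
	unsigned long flags;

	spin_lock_irqsave(&buf_lock, flags);
	list_move(&b->list, &buf_free);
	spin_unlock_irqrestore(&buf_lock, flags);
	atomic_inc(&buf_free_cnt);
}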
H A Dar5523.h61 struct ar5523 *ar; member in struct:ar5523_tx_cmd
76 struct ar5523 *ar; member in struct:ar5523_tx_data
82 struct ar5523 *ar; member in struct:ar5523_rx_data
137 #define ar5523_dbg(ar, format, arg...) \
138 dev_dbg(&(ar)->dev->dev, format, ## arg)
144 #define ar5523_err(ar, format, arg...) \
146 if (!test_bit(AR5523_USB_DISCONNECTED, &ar->flags)) { \
147 dev_err(&(ar)->dev->dev, format, ## arg); \
150 #define ar5523_info(ar, format, arg...) \
151 dev_info(&(ar)->dev->dev, format, ## arg)
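Another pattern visible in the ar5523.c hits: TX accounting drives both a stuck-queue watchdog and a flush wait. Each submitted frame bumps a pending counter and re-arms the watchdog; the completion that drains the queue stops the timer and wakes flushers. A condensed, illustrative sketch follows (the timer setup and the 5-second timeout are assumptions, not the driver's values):

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/atomic.h>
#include <linux/wait.h>

static atomic_t tx_pending = ATOMIC_INIT(0);
static struct timer_list tx_wd;	/* assumed initialized elsewhere */
static DECLARE_WAIT_QUEUE_HEAD(tx_flush_wq);

static void tx_submitted(void)
{
	atomic_inc(&tx_pending);
	mod_timer(&tx_wd, jiffies + 5 * HZ);	/* illustrative timeout */
}

static void tx_completed(void)
{
	if (!atomic_dec_return(&tx_pending)) {
		del_timer(&tx_wd);	/* queue drained: no stall possible */
		wake_up(&tx_flush_wq);	/* release ar5523_flush_tx-style waiters */
	}
}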
/linux-4.1.27/drivers/media/platform/
H A Darv.c109 struct ar { struct
125 static struct ar ardev;
249 static void wait_for_vertical_sync(struct ar *ar, int exp_line) wait_for_vertical_sync() argument
264 v4l2_err(&ar->v4l2_dev, "lost %d -> %d\n", exp_line, l); wait_for_vertical_sync()
273 struct ar *ar = video_drvdata(file); ar_read() local
274 long ret = ar->frame_bytes; /* return read bytes */ ar_read()
286 if (ar->size == AR_SIZE_QVGA) ar_read()
288 if (ar->mode == AR_MODE_NORMAL) ar_read()
291 mutex_lock(&ar->lock); ar_read()
302 ar_outl(ar->line_buff, M32R_DMA0CDA_PORTL); /* destination addr. */ ar_read()
303 ar_outl(ar->line_buff, M32R_DMA0RDA_PORTL); /* reload address */ ar_read()
304 ar_outl(ar->line_bytes, M32R_DMA0CBCUT_PORTL); /* byte count (bytes) */ ar_read()
305 ar_outl(ar->line_bytes, M32R_DMA0RBCUT_PORTL); /* reload count (bytes) */ ar_read()
310 ar->start_capture = -1; ar_read()
314 wait_event_interruptible(ar->wait, ar->start_capture == 0); ar_read()
329 ar_outl(ar->line_bytes, M32R_DMA0CBCUT_PORTL); ar_read()
330 ar_outl(ar->line_bytes, M32R_DMA0RBCUT_PORTL); ar_read()
335 if (ar->mode == AR_MODE_INTERLACE && ar->size == AR_SIZE_VGA) { ar_read()
336 for (h = 0; h < ar->height; h++) { ar_read()
337 wait_for_vertical_sync(ar, h); ar_read()
342 ar_outl(virt_to_phys(ar->frame[l]), M32R_DMA0CDA_PORTL); ar_read()
351 for (h = 0; h < ar->height; h++) { ar_read()
352 wait_for_vertical_sync(ar, h); ar_read()
353 ar_outl(virt_to_phys(ar->frame[h]), M32R_DMA0CDA_PORTL); ar_read()
377 pu = py + (ar->frame_bytes / 2); ar_read()
378 pv = pu + (ar->frame_bytes / 4); ar_read()
379 for (h = 0; h < ar->height; h++) { ar_read()
380 p = ar->frame[h]; ar_read()
381 for (w = 0; w < ar->line_bytes; w += 4) { ar_read()
388 if (copy_to_user(buf, yuv, ar->frame_bytes)) { ar_read()
389 v4l2_err(&ar->v4l2_dev, "failed while copy_to_user yuv.\n"); ar_read()
395 mutex_unlock(&ar->lock); ar_read()
402 struct ar *ar = video_drvdata(file); ar_querycap() local
404 strlcpy(vcap->driver, ar->vdev.name, sizeof(vcap->driver)); ar_querycap()
438 struct ar *ar = video_drvdata(file); ar_g_fmt_vid_cap() local
441 pix->width = ar->width; ar_g_fmt_vid_cap()
442 pix->height = ar->height; ar_g_fmt_vid_cap()
444 pix->field = (ar->mode == AR_MODE_NORMAL) ? V4L2_FIELD_NONE : V4L2_FIELD_INTERLACED; ar_g_fmt_vid_cap()
445 pix->bytesperline = ar->width; ar_g_fmt_vid_cap()
446 pix->sizeimage = 2 * ar->width * ar->height; ar_g_fmt_vid_cap()
454 struct ar *ar = video_drvdata(file); ar_try_fmt_vid_cap() local
467 pix->bytesperline = ar->width; ar_try_fmt_vid_cap()
468 pix->sizeimage = 2 * ar->width * ar->height; ar_try_fmt_vid_cap()
476 struct ar *ar = video_drvdata(file); ar_s_fmt_vid_cap() local
482 mutex_lock(&ar->lock); ar_s_fmt_vid_cap()
483 ar->width = pix->width; ar_s_fmt_vid_cap()
484 ar->height = pix->height; ar_s_fmt_vid_cap()
485 if (ar->width == AR_WIDTH_VGA) { ar_s_fmt_vid_cap()
486 ar->size = AR_SIZE_VGA; ar_s_fmt_vid_cap()
487 ar->frame_bytes = AR_FRAME_BYTES_VGA; ar_s_fmt_vid_cap()
488 ar->line_bytes = AR_LINE_BYTES_VGA; ar_s_fmt_vid_cap()
490 ar->mode = AR_MODE_INTERLACE; ar_s_fmt_vid_cap()
492 ar->mode = AR_MODE_NORMAL; ar_s_fmt_vid_cap()
494 ar->size = AR_SIZE_QVGA; ar_s_fmt_vid_cap()
495 ar->frame_bytes = AR_FRAME_BYTES_QVGA; ar_s_fmt_vid_cap()
496 ar->line_bytes = AR_LINE_BYTES_QVGA; ar_s_fmt_vid_cap()
497 ar->mode = AR_MODE_INTERLACE; ar_s_fmt_vid_cap()
500 mutex_unlock(&ar->lock); ar_s_fmt_vid_cap()
528 struct ar *ar = dev; ar_interrupt() local
534 if (ar->mode == AR_MODE_INTERLACE && ar->size == AR_SIZE_VGA) { ar_interrupt()
552 ar_outl(ar->line_buff, M32R_DMA0CDA_PORTL); /* needless? */ ar_interrupt()
554 memcpy(ar->frame[0], ar->line_buff, ar->line_bytes); ar_interrupt()
559 ar->start_capture = 1; /* during capture */ ar_interrupt()
563 if (ar->start_capture == 1 && line_number <= (ar->height - 1)) { ar_interrupt()
565 memcpy(ar->frame[line_number], ar->line_buff, ar->line_bytes); ar_interrupt()
571 if (line_number == (ar->height - 1)) { /* end of line */ ar_interrupt()
573 ar->start_capture = 0; ar_interrupt()
579 wake_up_interruptible(&ar->wait); ar_interrupt()
582 ar_outl(ar->line_buff, M32R_DMA0CDA_PORTL); ar_interrupt()
600 static int ar_initialize(struct ar *ar) ar_initialize() argument
620 if (ar->size == AR_SIZE_QVGA) ar_initialize()
622 if (ar->mode == AR_MODE_NORMAL) ar_initialize()
642 v4l2_info(&ar->v4l2_dev, "Initializing "); ar_initialize()
735 struct ar *ar; ar_init() local
740 ar = &ardev; ar_init()
741 v4l2_dev = &ar->v4l2_dev; ar_init()
754 ar->line_buff = kmalloc(MAX_AR_LINE_BYTES, GFP_KERNEL | GFP_DMA); ar_init()
755 if (ar->line_buff == NULL || !ALIGN4(ar->line_buff)) { ar_init()
763 ar->frame[i] = kmalloc(MAX_AR_LINE_BYTES, GFP_KERNEL); ar_init()
764 if (ar->frame[i] == NULL || !ALIGN4(ar->frame[i])) { ar_init()
771 strlcpy(ar->vdev.name, "Colour AR VGA", sizeof(ar->vdev.name)); ar_init()
772 ar->vdev.v4l2_dev = v4l2_dev; ar_init()
773 ar->vdev.fops = &ar_fops; ar_init()
774 ar->vdev.ioctl_ops = &ar_ioctl_ops; ar_init()
775 ar->vdev.release = video_device_release_empty; ar_init()
776 video_set_drvdata(&ar->vdev, ar); ar_init()
779 ar->width = AR_WIDTH_VGA; ar_init()
780 ar->height = AR_HEIGHT_VGA; ar_init()
781 ar->size = AR_SIZE_VGA; ar_init()
782 ar->frame_bytes = AR_FRAME_BYTES_VGA; ar_init()
783 ar->line_bytes = AR_LINE_BYTES_VGA; ar_init()
785 ar->mode = AR_MODE_INTERLACE; ar_init()
787 ar->mode = AR_MODE_NORMAL; ar_init()
789 ar->width = AR_WIDTH_QVGA; ar_init()
790 ar->height = AR_HEIGHT_QVGA; ar_init()
791 ar->size = AR_SIZE_QVGA; ar_init()
792 ar->frame_bytes = AR_FRAME_BYTES_QVGA; ar_init()
793 ar->line_bytes = AR_LINE_BYTES_QVGA; ar_init()
794 ar->mode = AR_MODE_INTERLACE; ar_init()
796 mutex_init(&ar->lock); ar_init()
797 init_waitqueue_head(&ar->wait); ar_init()
800 if (request_irq(M32R_IRQ_INT3, ar_interrupt, 0, "arv", ar)) { ar_init()
807 if (ar_initialize(ar) != 0) { ar_init()
819 if (video_register_device(&ar->vdev, VFL_TYPE_GRABBER, video_nr) != 0) { ar_init()
827 video_device_node_name(&ar->vdev), M32R_IRQ_INT3, freq); ar_init()
833 free_irq(M32R_IRQ_INT3, ar); ar_init()
838 kfree(ar->frame[i]); ar_init()
842 kfree(ar->line_buff); ar_init()
846 v4l2_device_unregister(&ar->v4l2_dev); ar_init()
862 struct ar *ar; ar_cleanup_module() local
865 ar = &ardev; ar_cleanup_module()
866 video_unregister_device(&ar->vdev); ar_cleanup_module()
868 free_irq(M32R_IRQ_INT3, ar); ar_cleanup_module()
871 kfree(ar->frame[i]); ar_cleanup_module()
873 kfree(ar->line_buff); ar_cleanup_module()
875 v4l2_device_unregister(&ar->v4l2_dev); ar_cleanup_module()
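The arv.c hits show a classic reader/ISR handshake: ar_read() arms start_capture and sleeps on a wait queue, while the line interrupt copies one scan line per IRQ and wakes the reader after the final line. A stripped-down sketch of that handshake (names are illustrative, and the DMA programming is elided):

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(cap_wait);
static int start_capture;	/* -1 = armed, 1 = capturing, 0 = frame done */

static int capture_one_frame(void)
{
	start_capture = -1;	/* arm; DMA/IRQ setup elided */
	return wait_event_interruptible(cap_wait, start_capture == 0);
}

/* Called from the per-line interrupt handler. */
static void capture_irq_line(int line, int last_line)
{
	if (start_capture == -1 && line == 0)
		start_capture = 1;	/* first line of an armed frame */
	if (start_capture == 1 && line == last_line) {
		start_capture = 0;	/* frame complete */
		wake_up_interruptible(&cap_wait);
	}
}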
/linux-4.1.27/arch/ia64/kernel/
H A Dminstate.h8 /* read ar.itc in advance, and use it before leaving bank 0 */
10 (pUStk) mov.m r20=ar.itc;
34 * r8 = contents of ar.ccv
35 * r9 = contents of ar.csd
36 * r10 = contents of ar.ssd
49 mov r27=ar.rsc; /* M */ \
51 mov r25=ar.unat; /* M */ \
53 mov r26=ar.pfs; /* I */ \
55 mov r21=ar.fpsr; /* M */ \
69 (pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
71 (pUStk) mov.m r24=ar.rnat; \
77 (pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
79 (pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
82 (pUStk) mov r18=ar.bsp; \
83 (pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
108 mov r8=ar.ccv; \
109 mov r9=ar.csd; \
110 mov r10=ar.ssd; \
113 st8 [r16]=r25,16; /* save ar.unat */ \
114 st8 [r17]=r26,16; /* save ar.pfs */ \
115 shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
117 st8 [r16]=r27,16; /* save ar.rsc */ \
118 (pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \
121 (pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \
126 st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
134 .mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \
159 * r8: contents of ar.ccv
160 * r9: contents of ar.csd
161 * r10: contents of ar.ssd
193 mov ar.fpsr=r11; /* M-unit */ \
194 st8 [r2]=r8,8; /* ar.ccv */ \
210 st8 [r24]=r9; /* ar.csd */ \
211 st8 [r25]=r10; /* ar.ssd */ \
230 mov ar.pfs=r17; \
237 mov ar.rsc=r0 \
241 mov ar.bspstore=r22 \
243 mov r18=ar.bsp; \
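Several minstate.h lines above (and the same sequence later in mca_asm.S and ivt.S) compute the value used with "loadrs": the size of the dirty RBS partition, ar.bsp - ar.bspstore, shifted into the loadrs field of ar.rsc, which begins at bit 16. The arithmetic, restated in C (illustrative helper name):

static inline unsigned long rsc_loadrs(unsigned long bsp,
				       unsigned long bspstore)
{
	/* dirty bytes on the register backing store -> ar.rsc.loadrs */
	return (bsp - bspstore) << 16;
}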
H A Drelocate_kernel.S23 alloc r31=ar.pfs,4,0,0,0
41 mov ar.rsc=0 // put RSE in enforced lazy mode
46 mov r18=ar.rnat
47 mov ar.bspstore=r8
54 mov ar.rnat=r18
83 mov ar.lc=r20
156 mov ar.lc=r14;;
191 alloc loc0=ar.pfs,1,2,0,0
193 mov ar.rsc=0 // put RSE in enforced lazy mode
205 mov r4=ar.rnat
252 extr.u r5=r5, 0, 38 // ar.pfs.pfm
260 mov r5=ar.rsc
263 st8 [loc1]=r5, 8 // ar.rsc
264 mov r4=ar.bsp
267 st8 [loc1]=r4, 8 // ar.bsp
268 mov r5=ar.bspstore
271 st8 [loc1]=r5, 8 // ar.bspstore
272 mov r4=ar.rnat
275 st8 [loc1]=r4, 8 // ar.rnat
276 mov r5=ar.ccv
279 st8 [loc1]=r5, 8 // ar.ccv
280 mov r4=ar.unat
283 st8 [loc1]=r4, 8 // ar.unat
284 mov r5 = ar.fpsr
287 st8 [loc1]=r5, 8 // ar.fpsr
288 mov r4 = ar.unat
292 mov r5 = ar.fpsr
296 mov r4 = ar.pfs
299 st8 [loc1]=r4, 8 // ar.pfs
300 mov r5 = ar.lc
303 st8 [loc1]=r5, 8 // ar.lc
304 mov r4 = ar.ec
307 st8 [loc1]=r4, 8 // ar.ec
308 mov r5 = ar.csd
311 st8 [loc1]=r5, 8 // ar.csd
312 mov r4 = ar.ssd
315 st8 [loc1]=r4, 8 // ar.ssd
320 mov ar.pfs=loc0
H A Dentry.h31 .spillsp ar.pfs, PT(CR_IFS)+16+(off); \
32 .spillsp ar.unat, PT(AR_UNAT)+16+(off); \
33 .spillsp ar.fpsr, PT(AR_FPSR)+16+(off); \
42 .savesp ar.unat,SW(CALLER_UNAT)+16+(off); \
43 .savesp ar.fpsr,SW(AR_FPSR)+16+(off); \
59 .spillsp ar.pfs,SW(AR_PFS)+16+(off); .spillsp ar.lc,SW(AR_LC)+16+(off); \
61 .spillsp ar.rnat,SW(AR_RNAT)+16+(off); \
62 .spillsp ar.bspstore,SW(AR_BSPSTORE)+16+(off); \
H A Dentry.S64 alloc loc1=ar.pfs,8,2,3,0
74 mov ar.pfs=loc1 // restore ar.pfs
79 (p6) mov ar.pfs=r0 // clear ar.pfs on success
88 mov ar.unat=0; mov ar.lc=0
112 alloc r16=ar.pfs,8,2,6,0
116 mov loc1=r16 // save ar.pfs across do_fork
129 mov ar.pfs=loc1
143 alloc r16=ar.pfs,8,2,6,0
147 mov loc1=r16 // save ar.pfs across do_fork
160 mov ar.pfs=loc1
174 alloc r16=ar.pfs,1,0,0,0
230 * ar.bspstore is still pointing to the old kernel backing store area. Since ar.rsc,
231 * ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts, this is not a
239 * - r16 holds ar.pfs
248 mov r17=ar.unat // preserve caller's
281 mov.m ar.rsc=0 // put RSE in mode: enforced lazy, little endian, pl 0
285 mov.m r18=ar.fpsr // preserve fpsr
289 mov.m r19=ar.rnat
296 // since we're done with the spills, read and save ar.unat:
297 mov.m r29=ar.unat
298 mov.m r20=ar.bspstore
311 mov r21=ar.lc // I-unit
316 st8 [r15]=r16,SW(AR_LC)-SW(AR_PFS) // save ar.pfs
321 st8 [r15]=r21 // save ar.lc
347 st8 [r2]=r29,SW(AR_RNAT)-SW(AR_UNAT) // save ar.unat
351 st8 [r2]=r19,SW(AR_BSPSTORE)-SW(AR_RNAT) // save ar.rnat
354 st8 [r2]=r20 // save ar.bspstore
356 mov ar.rsc=3 // put RSE back into eager mode, pl 0
374 mov ar.rsc=0 // put RSE into enforced lazy mode
390 ld8 r16=[r2],(SW(PR)-SW(AR_PFS)) // ar.pfs
391 ld8 r17=[r3],(SW(AR_RNAT)-SW(AR_LC)) // ar.lc
426 mov ar.bspstore=r27
427 mov ar.unat=r29 // establish unat holding the NaT bits for r4-r7
440 mov ar.pfs=r16
444 mov ar.lc=r17
453 mov ar.unat=r18 // restore caller's unat
454 mov ar.rnat=r30 // must restore after bspstore but before rsc!
455 mov ar.fpsr=r19 // restore fpsr
456 mov ar.rsc=3 // put RSE back into eager mode, pl 0
487 * manipulate ar.pfs.
589 alloc loc1=ar.pfs,0,3,1,0
601 mov ar.pfs=loc1
670 * r20: user-level ar.fpsr
673 * r23: user-level ar.bspstore
674 * r24: user-level ar.rnat
675 * r25: user-level ar.unat
676 * r26: user-level ar.pfs
677 * r27: user-level ar.rsc
687 * ar.unat: restored (user-level ar.unat)
688 * ar.pfs: restored (user-level ar.pfs)
689 * ar.rsc: restored (user-level ar.rsc)
690 * ar.rnat: restored (user-level ar.rnat)
691 * ar.bspstore: restored (user-level ar.bspstore)
692 * ar.fpsr: restored (user-level ar.fpsr)
693 * ar.ccv: cleared
694 * ar.csd: cleared
695 * ar.ssd: cleared
733 ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs"
742 ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs"
746 mov r16=ar.bsp // M2 get existing backing store pointer
750 ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE) // load ar.bspstore (may be garbage)
769 ld8 r25=[r3],16 // M0|1 load ar.unat
776 ld8 r25=[r3],16 // M0|1 load ar.unat
780 ld8 r26=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs
785 ld8 r27=[r3],PT(PR)-PT(AR_RSC) // M0|1 load ar.rsc
788 ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT) // M0|1 load ar.rnat (may be garbage)
792 ld8 r20=[r2],PT(R12)-PT(AR_FPSR) // M0|1 load ar.fpsr
817 mov r19=ar.bsp // M2 get new backing store pointer
825 mov r19=ar.bsp // M2 get new backing store pointer
832 mov.m ar.csd=r0 // M2 clear ar.csd
833 mov.m ar.ccv=r0 // M2 clear ar.ccv
836 mov.m ar.ssd=r0 // M2 clear ar.ssd
889 ld8 r15=[r30] // load ar.ccv
893 ld8 r30=[r3],16 // load ar.csd
896 ld8 r31=[r2],16 // load ar.ssd
910 mov ar.csd=r30
911 mov ar.ssd=r31
942 mov ar.ccv=r15
967 ld8 r25=[r17],16 // load ar.unat
969 ld8 r26=[r16],16 // load ar.pfs
970 ld8 r27=[r17],16 // load ar.rsc
973 ld8 r24=[r16],16 // load ar.rnat (may be garbage)
974 ld8 r23=[r17],16 // load ar.bspstore (may be garbage)
979 ld8 r19=[r16],16 // load ar.rsc value for "loadrs"
990 ld8 r20=[r16],16 // ar.fpsr
1013 mov r16=ar.bsp // get existing backing store pointer
1019 mov r16=ar.bsp // get existing backing store pointer
1032 mov r19=ar.bsp // get new backing store pointer
1056 alloc loc0=ar.pfs,2,Nregs-2,2,0
1060 mov ar.rsc=r19 // load ar.rsc to be used for "loadrs"
1069 alloc loc0=ar.pfs,2,Nregs-2,2,0
1096 alloc loc0=ar.pfs,2,Nregs-2,2,0
1120 alloc r17=ar.pfs,0,0,0,0 // drop current register frame
1125 mov ar.unat=r25 // M2
1129 (pUStk) mov ar.bspstore=r23 // M2
1134 mov ar.pfs=r26 // I0
1141 mov ar.fpsr=r20 // M2
1145 (pUStk) mov ar.rnat=r24 // M2 must happen with RSE in lazy mode
1149 mov ar.rsc=r27 // M2
1217 alloc loc1=ar.pfs,8,2,1,0
1222 .ret11: mov ar.pfs=loc1
1237 alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
1238 mov r9=ar.unat
1246 .spillsp ar.unat, 16
1247 st8 [sp]=r9,-16 // allocate space for ar.unat and save it
1248 st8 [out1]=loc1,-8 // save ar.pfs, out1=&sigscratch
1257 mov ar.unat=r9
1258 mov ar.pfs=loc1
1267 alloc r2=ar.pfs,8,0,1,0
1296 ld8 r9=[sp] // load new ar.unat
1299 mov ar.unat=r9
1306 * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
1323 alloc loc1=ar.pfs,2,3,3,0
1356 mov ar.pfs=loc1
1371 alloc out0 = ar.pfs, 8, 0, 4, 0
1390 alloc loc0 = ar.pfs, 4, 4, 2, 0
1403 mov ar.pfs = loc0
1421 alloc loc0 = ar.pfs, 4, 4, 2, 0
1434 mov ar.pfs = loc0
1454 mov ar.pfs = r40
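entry.S toggles between "mov ar.rsc=0" (enforced lazy, so ar.rnat and ar.bspstore can be switched safely) and "mov ar.rsc=3" (eager, pl 0, little-endian), as the comments above note. A small, illustrative decoder for the low bits of a saved ar.rsc value (field layout per the IA-64 architecture manual):

static inline unsigned int rsc_mode(unsigned long rsc)
{
	return rsc & 0x3;		/* 0 = enforced lazy, 3 = eager */
}

static inline unsigned int rsc_pl(unsigned long rsc)
{
	return (rsc >> 2) & 0x3;	/* privilege level of RSE accesses */
}

static inline unsigned int rsc_be(unsigned long rsc)
{
	return (rsc >> 4) & 0x1;	/* 1 = big-endian backing store */
}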
H A Dpal.S32 alloc r3=ar.pfs,1,0,0,0
57 alloc loc1 = ar.pfs,4,5,0,0
67 mov loc4=ar.rsc // save RSE configuration
69 mov ar.rsc=0 // put RSE in enforced lazy, LE mode
83 mov ar.rsc = loc4 // restore RSE configuration
84 mov ar.pfs = loc1
100 alloc loc1 = ar.pfs,4,4,4,0
119 mov ar.pfs = loc1
147 alloc loc1 = ar.pfs,4,7,0,0
164 mov loc4=ar.rsc // save RSE configuration
169 mov ar.rsc=0 // put RSE in enforced lazy, LE mode
182 mov ar.rsc=0 // put RSE in enforced lazy, LE mode
189 mov ar.pfs = loc1
192 mov ar.rsc=loc4 // restore RSE configuration
206 alloc loc1 = ar.pfs,5,7,4,0
217 mov loc4=ar.rsc // save RSE configuration
220 mov ar.rsc=0 // put RSE in enforced lazy, LE mode
239 mov ar.rsc=0 // put RSE in enforced lazy, LE mode
246 mov ar.pfs = loc1
249 mov ar.rsc=loc4 // restore RSE configuration
265 alloc r3=ar.pfs,1,0,0,0
286 alloc r3=ar.pfs,1,0,0,0
H A Dgate.S104 .savesp ar.unat, UNAT_OFF+SIGCONTEXT_OFF; \
105 .savesp ar.fpsr, FPSR_OFF+SIGCONTEXT_OFF; \
108 .savesp ar.pfs, CFM_OFF+SIGCONTEXT_OFF; \
128 mov.m r9=ar.bsp // fetch ar.bsp
129 .spillsp.p p1, ar.rnat, RNAT_OFF+SIGCONTEXT_OFF
132 alloc r8=ar.pfs,0,0,3,0
143 .spillsp ar.bsp, BSP_OFF+SIGCONTEXT_OFF
167 mov r14=ar.bsp
197 mov ar.rsc=0 // put RSE into enforced lazy mode
199 .save ar.rnat, r19
200 mov r19=ar.rnat // save RNaT before switching backing store area
203 mov r18=ar.bspstore
204 mov ar.bspstore=r15 // switch over to new register backing store area
207 .spillsp ar.rnat, RNAT_OFF+SIGCONTEXT_OFF
210 mov.m r16=ar.bsp // sc_loadrs <- (new bsp - new bspstore) << 16
217 mov ar.rsc=0xf // set RSE into eager mode, pl 3
228 .spillsp ar.rnat, RNAT_OFF+SIGCONTEXT_OFF
235 // Here, we need to calculate bspstore0, the value that ar.bspstore needs
244 alloc r2=ar.pfs,0,0,0,0 // alloc null frame
252 mov ar.rsc=r17 // put RSE into enforced lazy mode
291 mov ar.bspstore=r15 // switch back to old register backing store area
293 mov ar.rnat=r16 // restore RNaT
294 mov ar.rsc=0xf // (will be restored later on from sc_ar_rsc)
301 * r11 = saved ar.pfs
306 * r11 = saved ar.pfs
340 mov r21=ar.fpsr // M2 (12 cyc)
342 mov.i r26=ar.pfs // I0 (would stall anyhow due to srlz.d...)
356 mov r27=ar.rsc // M2 (12 cyc)
H A Desi_stub.S49 alloc loc1=ar.pfs,2,7,8,0
70 mov loc4=ar.rsc // save RSE configuration
71 mov ar.rsc=0 // put RSE in enforced lazy, LE mode
83 .ret0: mov loc5=r19 // old ar.bsp
86 .ret1: mov ar.rsc=0 // put RSE in enforced lazy, LE mode
91 .ret2: mov ar.rsc=loc4 // restore RSE configuration
92 mov ar.pfs=loc1
H A Dmca_asm.S82 mov ar.lc=r20
262 mov ar.rsc=3 // set eager mode for C handler
267 alloc r14=ar.pfs,0,0,3,0
283 alloc r14=ar.pfs,0,0,0,0 // remove the MCA handler frame
371 mov ar.rsc=3 // set eager mode for C handler
376 alloc r14=ar.pfs,0,0,3,0
397 alloc r14=ar.pfs,0,0,0,0 // remove the INIT handler frame
524 mov temp3=ar.csd
525 mov temp4=ar.ssd
528 st8 [temp1]=temp3,PT(AR_UNAT)-PT(AR_CSD) // save ar.csd
529 st8 [temp2]=temp4,PT(AR_PFS)-PT(AR_SSD) // save ar.ssd
530 mov temp3=ar.unat
531 mov temp4=ar.pfs
533 st8 [temp1]=temp3,PT(AR_RNAT)-PT(AR_UNAT) // save ar.unat
534 st8 [temp2]=temp4,PT(AR_BSPSTORE)-PT(AR_PFS) // save ar.pfs
535 mov temp3=ar.rnat
536 mov temp4=ar.bspstore
538 st8 [temp1]=temp3,PT(LOADRS)-PT(AR_RNAT) // save ar.rnat
539 st8 [temp2]=temp4,PT(AR_FPSR)-PT(AR_BSPSTORE) // save ar.bspstore
540 mov temp3=ar.bsp
542 sub temp3=temp3, temp4 // ar.bsp - ar.bspstore
543 mov temp4=ar.fpsr
545 shl temp3=temp3,16 // compute ar.rsc to be used for "loadrs"
548 st8 [temp2]=temp4,PT(F6)-PT(AR_FPSR) // save ar.fpsr
549 mov temp3=ar.ccv
551 st8 [temp1]=temp3,PT(F7)-PT(AR_CCV) // save ar.ccv
614 mov temp3=ar.lc
616 st8 [temp1]=temp3 // save ar.lc
774 ld8 temp3=[temp1] // restore ar.lc
776 mov ar.lc=temp3
790 ld8 temp3=[temp1],PT(AR_UNAT)-PT(AR_CSD) // restore ar.csd
791 ld8 temp4=[temp2],PT(AR_PFS)-PT(AR_SSD) // restore ar.ssd
793 mov ar.csd=temp3
794 mov ar.ssd=temp4
795 ld8 temp3=[temp1] // restore ar.unat
797 ld8 temp4=[temp2],PT(AR_FPSR)-PT(AR_PFS) // restore ar.pfs
799 mov ar.unat=temp3
800 mov ar.pfs=temp4
801 // ar.rnat, ar.bspstore, loadrs are restored in ia64_old_stack. ia64_state_restore()

802 ld8 temp3=[temp1],PT(F6)-PT(AR_CCV) // restore ar.ccv
803 ld8 temp4=[temp2],PT(F7)-PT(AR_FPSR) // restore ar.fpsr
805 mov ar.ccv=temp3
806 mov ar.fpsr=temp4
936 mov ar.bspstore=temp4 // switch RBS to MCA/INIT stack
989 ld8 temp3=[temp1],PT(AR_RNAT)-PT(AR_BSPSTORE) // restore ar.bspstore
990 mov ar.rsc=temp2
993 ld8 temp4=[temp1] // restore ar.rnat
995 mov ar.bspstore=temp3 // back to old stack
997 mov ar.rnat=temp4
1061 mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
1080 // address of an rnat slot, if that address is below the current ar.bspstore
1082 // ar.rnat.
1084 alloc r14=ar.pfs,1,0,0,0
1085 mov ar.rsc=0
1087 mov r14=ar.bspstore
1092 (p7) mov r8=ar.rnat
1093 mov ar.rsc=3
H A Defi_stub.S47 alloc loc1=ar.pfs,8,7,7,0
53 mov loc4=ar.rsc // save RSE configuration
54 mov ar.rsc=0 // put RSE in enforced lazy, LE mode
76 .ret1: mov ar.rsc=0 // put RSE in enforced lazy, LE mode
81 .ret2: mov ar.rsc=loc4 // restore RSE configuration
82 mov ar.pfs=loc1
H A Dhead.S50 mov ar.lc=IA64_NUM_DBG_REGS-1;; \
58 mov ar.lc=IA64_NUM_DBG_REGS-1;; \
89 mov ar.lc=0x08-1;; \
117 SAVE_FROM_REG(ar.fpsr,_reg1,_reg2);; \
118 SAVE_FROM_REG(ar.pfs,_reg1,_reg2);; \
119 SAVE_FROM_REG(ar.rnat,_reg1,_reg2);; \
120 SAVE_FROM_REG(ar.unat,_reg1,_reg2);; \
121 SAVE_FROM_REG(ar.bspstore,_reg1,_reg2);; \
135 SAVE_FROM_REG(ar.lc, _reg1, _reg2);; \
289 mov ar.fpsr=r2
344 // load the "current" pointer (r13) and ar.k6 with the current task
358 mov ar.rsc=0 // place RSE in enforced lazy mode
388 mov ar.bspstore=r2 // establish the new RSE stack
390 mov ar.rsc=0x3 // place RSE in eager mode
448 alloc r2=ar.pfs,8,0,2,0
460 alloc r16=ar.pfs,1,0,0,0
461 mov r20=ar.lc // preserve ar.lc
462 mov ar.lc=IA64_NUM_DBG_REGS-1
478 mov ar.lc=r20 // restore ar.lc
483 alloc r16=ar.pfs,1,0,0,0
485 mov r20=ar.lc // preserve ar.lc
487 mov ar.lc=IA64_NUM_DBG_REGS-1
502 mov ar.lc=r20 // restore ar.lc
507 alloc r2=ar.pfs,1,4,0,0
672 alloc r2=ar.pfs,1,2,0,0
918 * r19 = old virtual address of ar.bsp
937 mov r19=ar.bsp
949 mov r18=ar.rnat // save ar.rnat
950 mov ar.bspstore=r17 // this steps on ar.rnat
954 mov ar.rnat=r18 // restore ar.rnat
1000 mov r18=ar.rnat // save ar.rnat
1001 mov ar.bspstore=r19 // this steps on ar.rnat
1005 mov ar.rnat=r18 // restore ar.rnat
1015 .save ar.lc,r2
1016 mov r2=ar.lc
1019 mov ar.lc=r32
1027 mov ar.lc=r2
1051 mov.m r9=ar.itc // fetch cycle-counter (35 cyc)
1075 alloc r16=ar.pfs,1,0,0,0
1101 alloc r16=ar.pfs,1,0,0,0; \
1119 alloc r16=ar.pfs,1,0,0,0;;
1132 mov ar.rsc=0 // Put RSE in enforced lazy, LE mode
1157 RESTORE_REG(ar.fpsr, r25, r17);;
1158 RESTORE_REG(ar.pfs, r25, r17);;
1159 RESTORE_REG(ar.rnat, r25, r17);;
1160 RESTORE_REG(ar.unat, r25, r17);;
1161 RESTORE_REG(ar.bspstore, r25, r17);;
1177 RESTORE_REG(ar.lc, r25, r17);;
H A Divt.S69 * This lets you track the last eight faults that occurred on the CPU. Make sure ar.k2 isn't
72 # define DBG_FAULT(i) mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;;mov ar.k2=r16
545 mov r28=ar.ccv // save ar.ccv
549 mov ar.ccv=r18 // set compare value for cmpxchg
553 (p6) cmpxchg8.acq r26=[r17],r25,ar.ccv // Only update if page is present
572 mov ar.ccv=r28
611 mov r28=ar.ccv // save ar.ccv
615 mov ar.ccv=r18 // set compare value for cmpxchg
619 (p6) cmpxchg8.acq r26=[r17],r25,ar.ccv // Only if page present
638 mov ar.ccv=r28
666 mov r28=ar.ccv // save ar.ccv
670 mov ar.ccv=r18 // set compare value for cmpxchg
674 (p6) cmpxchg8.acq r26=[r17],r25,ar.ccv // Only if page is present
691 mov ar.ccv=r28
717 * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
731 mov.m r27=ar.rsc // M2 (12 cyc)
734 mov.m ar.rsc=0 // M2
735 mov.m r21=ar.fpsr // M2 (12 cyc)
738 mov.m r23=ar.bspstore // M2 (12 cyc)
739 mov.m r24=ar.rnat // M2 (5 cyc)
740 mov.i r26=ar.pfs // I0 (2 cyc)
778 mov.m ar.bspstore=r22 // M2 switch to kernel RBS
795 mov.m r25=ar.unat // M2 (5 cyc)
812 mov r18=ar.bsp // M2 (12 cyc)
821 // mov.m r30=ar.itc is called in advance, and r13 is current
844 mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0
866 mov ar.rnat=r24 // M2 restore kernel's AR.RNAT
868 mov ar.bspstore=r23 // M2 restore kernel's AR.BSPSTORE
907 * - r11: original contents (saved ar.pfs to be saved)
914 * - r21: saved ar.fpsr
916 * - r23: saved ar.bspstore
917 * - r24: saved ar.rnat
918 * - r25: saved ar.unat
919 * - r26: saved ar.pfs
920 * - r27: saved ar.rsc
923 * - r30: ar.itc for accounting (don't touch)
937 * - ar.fpsr: set to kernel settings
949 alloc r19=ar.pfs,8,0,0,0 // ensure in0-in7 are writable
958 st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs
963 st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT) // save ar.unat
968 st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS) // store ar.pfs.pfm in cr.ifs
969 extr.u r11=r19,7,7 // I0 // get sol of ar.pfs
970 and r8=0x7f,r19 // A // get sof of ar.pfs
972 st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
989 (pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
990 (pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
991 shl r18=r18,16 // compute ar.rsc to be used for "loadrs"
997 st8 [r16]=r18,PT(R12)-PT(LOADRS) // save ar.rsc value for "loadrs"
1006 st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr
1028 mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
1057 // mov.m r20=ar.itc is called in advance, and r13 is current
1524 alloc r15=ar.pfs,0,0,3,0
1540 mov ar.rsc=r27 // restore ar.rsc before SAVE_MIN_WITH_COVER
1549 alloc r14=ar.pfs,0,0,2,0
1577 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
1598 alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
1629 alloc r14=ar.pfs,0,0,5,0
1668 alloc r14=ar.pfs,0,0,1,0 // must be first in insn group
1669 mov out0=ar.ec
1676 alloc r14=ar.pfs,0,0,3,0 // must be first in insn group
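The DBG_FAULT macro near the top of the ivt.S hits keeps the last eight fault vectors in ar.k2 as a byte-wide shift register: shift left by 8, then add the new vector number. The same bookkeeping in C (hypothetical helper, for illustration only):

static unsigned long fault_history;	/* stands in for ar.k2 */

static void record_fault(unsigned int vector)
{
	/* drop the oldest byte, append the newest */
	fault_history = (fault_history << 8) | (vector & 0xff);
}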
H A Dsigframe.h2 unsigned long scratch_unat; /* ar.unat for the general registers saved in pt */
H A Dptrace.c394 * backing store (i.e., it's the address that would be in ar.bsp after
500 * store. This is the address that would have been stored in ar.bsp
794 dprintk("ptrace: failed to set ar.unat\n"); access_nat_bits()
804 dprintk("ptrace: failed to read ar.unat\n"); access_nat_bits()
864 retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]); ptrace_getregs()
865 retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]); ptrace_getregs()
866 retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]); ptrace_getregs()
867 retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]); ptrace_getregs()
868 retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]); ptrace_getregs()
869 retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]); ptrace_getregs()
871 retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]); ptrace_getregs()
872 retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]); ptrace_getregs()
873 retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]); ptrace_getregs()
874 retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]); ptrace_getregs()
1000 retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]); ptrace_setregs()
1001 retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]); ptrace_setregs()
1002 retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]); ptrace_setregs()
1003 retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]); ptrace_setregs()
1004 retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]); ptrace_setregs()
1005 retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]); ptrace_setregs()
1007 retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]); ptrace_setregs()
1008 retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]); ptrace_setregs()
1009 retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]); ptrace_setregs()
1010 retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]); ptrace_setregs()
1369 * to get the real value of ar.bsp at the time access_elf_areg()
1398 * of ar.bsp: access_elf_areg()
1512 * ar.rsc ar.bsp ar.bspstore ar.rnat do_gpregs_get()
1513 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec do_gpregs_get()
1575 /* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat do_gpregs_get()
1576 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd do_gpregs_get()
1663 /* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat do_gpregs_set()
1664 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd do_gpregs_set()
H A Djprobes.S73 mov r16=ar.rsc
75 mov ar.rsc=r0
79 mov ar.rsc=r16
H A Dfsys.S34 * r11 = saved ar.pfs (a user-level value)
39 * ar.pfs = previous frame-state (a user-level value)
44 * r11 = saved ar.pfs (as passed into the fsyscall handler)
48 * ar.pfs = previous frame-state (as passed into the fsyscall handler)
157 // r11 = preserved: saved ar.pfs
180 // p8 = timesource ar.itc
247 (p7) mov ar.ccv = r25 // more than last_cycle. Prep for cmpxchg
249 (p7) cmpxchg8.rel r3 = [r19],r2,ar.ccv
400 mov r27=ar.rsc
401 mov r21=ar.fpsr
402 mov r26=ar.pfs
428 * - r21: ar.fpsr
429 * - r26: ar.pfs
430 * - r27: ar.rsc
484 mov ar.rsc=0 // M2 set enforced lazy mode, pl 0, LE, loadrs=0
492 mov r23=ar.bspstore // M2 (12 cyc) save ar.bspstore
493 mov.m r24=ar.rnat // M2 (5 cyc) read ar.rnat (dual-issues!)
496 mov ar.bspstore=r22 // M2 (6 cyc) switch to kernel RBS
499 mov r25=ar.unat // M2 (5 cyc) save ar.unat
507 mov r18=ar.bsp // M2 save (kernel) ar.bsp (12 cyc)
512 // mov.m r30=ar.itc is called in advance
533 mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0
H A Dprocess.c327 * integer to address X causes bit N in ar.unat to be set to the NaT bit of the register,
475 * ar.rsc ar.bsp ar.bspstore ar.rnat do_copy_task_regs()
476 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec do_copy_task_regs()
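The process.c comment above refers to the st8.spill rule: spilling a register to address X deposits its NaT bit into bit X{8:3} of ar.unat, so 64 consecutive doublewords share one UNaT word. The index computation, as a sketch (hypothetical helper):

static inline unsigned int unat_bit_for(unsigned long spill_addr)
{
	return (spill_addr >> 3) & 0x3f;	/* doubleword index mod 64 */
}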
H A Dsignal.c81 err |= __get_user(scr->pt.ar_ccv, &sc->sc_ar_ccv); /* ar.ccv */ restore_sigcontext()
84 err |= __copy_from_user(&scr->pt.ar_csd, &sc->sc_ar25, 2*8); /* ar.csd & ar.ssd */ restore_sigcontext()
252 err |= __put_user(scr->pt.ar_unat, &sc->sc_ar_unat); /* ar.unat */ setup_sigcontext()
253 err |= __put_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr); /* ar.fpsr */ setup_sigcontext()
266 err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv); /* ar.ccv */ setup_sigcontext()
269 err |= __copy_to_user(&sc->sc_ar25, &scr->pt.ar_csd, 2*8); /* ar.csd & ar.ssd */ setup_sigcontext()
H A Dsetup.c424 * value firmware left in ar.k0. io_port_init()
426 * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute io_port_init()
429 * *physical* address in ar.k0 to mmap the appropriate area from io_port_init()
433 * ar.k0 is not involved in kernel I/O port accesses, which can use io_port_init()
941 * Set ar.k3 so that assembly code in MCA handler can compute cpu_init()
943 * phys = ar.k3 + &per_cpu_var cpu_init()
H A Dparavirt.c405 #define __DEFINE_GET_AR(id, reg) __DEFINE_GET_REG(AR_ ## id, ar.reg)
483 #define __DEFINE_SET_AR(id, reg) __DEFINE_SET_REG(AR_ ## id, ar.reg)
674 IA64_NATIVE_PATCH_DEFINE_REG(ar_ ## name, ar.reg)
H A Dsmpboot.c246 * Synchronize ar.itc of the current (slave) CPU with the ar.itc of the MASTER CPU
264 * The goal is to adjust the slave's ar.itc such that tm falls exactly half-way between t0
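The smpboot.c comment spells out the ITC synchronization target: with master samples t0 and t1 bracketing the slave's sample tm, the slave's ar.itc is adjusted so that tm lands at the midpoint. The adjustment, written out (a sketch of the idea, not the kernel's full retry loop):

static inline long itc_adjust(unsigned long t0, unsigned long tm,
			      unsigned long t1)
{
	/* delta to add to the slave's ar.itc */
	return (long)(t0 + (t1 - t0) / 2 - tm);
}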
H A Dunwind.c230 "pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
232 "ar.unat", "pr", "ar.lc", "ar.fpsr",
573 UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n", unw_access_ar()
1916 UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __func__); unw_unwind()
1959 /* as we unwind, the saved ar.unat becomes the primary unat: */ unw_unwind()
H A Dftrace.c23 0x02, 0x40, 0x31, 0x10, 0x80, 0x05, /* alloc r40=ar.pfs,12,8,0 */
H A Dpalinfo.c737 unsigned long ar:3; tr_info() member in struct:gr_reg
799 gr_reg->pl, gr_reg->ar, rid_reg->rid, gr_reg->p, gr_reg->ma, tr_info()
H A Dmca.c1059 msg = "inconsistent ar.bspstore and r13"; ia64_mca_modify_original_stack()
1068 msg = "inconsistent ar.bsp and r13"; ia64_mca_modify_original_stack()
1096 * ar.pfs is set to 0. ia64_mca_modify_original_stack()
1124 * Registers from ar.bspstore through ar.bsp at the time of the event ia64_mca_modify_original_stack()
/linux-4.1.27/arch/ia64/lib/
H A Dcopy_page.S39 .save ar.pfs, saved_pfs
40 alloc saved_pfs=ar.pfs,3,Nrot-3,0,Nrot
46 .save ar.lc, saved_lc
47 mov saved_lc=ar.lc
48 mov ar.ec=PIPE_DEPTH
63 mov ar.lc=lcount
95 mov ar.pfs=saved_pfs
96 mov ar.lc=saved_lc
H A Dclear_page.S36 .save ar.lc, saved_lc
37 mov saved_lc = ar.lc
40 mov ar.lc = (PREFETCH_LINES - 1)
50 mov ar.lc = r16 // one L3 line per iteration
74 mov ar.lc = saved_lc // restore lc
H A Dmemcpy.S48 .save ar.pfs, saved_pfs
49 alloc saved_pfs=ar.pfs,3,Nrot,0,Nrot
50 .save ar.lc, saved_lc
51 mov saved_lc=ar.lc
73 mov ar.ec=N
77 mov ar.lc=cnt
106 mov ar.lc=saved_lc
108 mov ar.pfs=saved_pfs
120 mov ar.ec=MEM_LAT
123 mov ar.lc=cnt
149 mov ar.lc=saved_lc
151 mov ar.pfs=saved_pfs
182 alloc t3=ar.pfs,3,Nrot,0,Nrot // resize register frame
227 mov ar.ec=N
243 mov ar.lc=t2
262 mov ar.lc=saved_lc
266 mov ar.pfs=saved_pfs
H A Dclear_user.S56 .save ar.pfs, saved_pfs
57 alloc saved_pfs=ar.pfs,2,0,0,0
59 .save ar.lc, saved_lc
60 mov saved_lc=ar.lc // preserve ar.lc (slow)
68 mov ar.lc=tmp // initialize lc for small count
70 ;; // WAR on ar.lc
83 ;; // avoid RAW on ar.lc
88 mov ret0=len // faster than using ar.lc
89 mov ar.lc=saved_lc
125 mov ar.lc=tmp
138 // way would be to use ar.lc and derive how many bytes were left by
139 // doing: left = 16*ar.lc + 16. This would avoid the addition at
153 mov ar.lc=saved_lc
207 mov ar.lc=saved_lc
H A Dcopy_user.S75 .save ar.pfs, saved_pfs
76 alloc saved_pfs=ar.pfs,3,((2*PIPE_DEPTH+7)&~7),0,((2*PIPE_DEPTH+7)&~7)
86 .save ar.lc, saved_lc
87 mov saved_lc=ar.lc // preserve ar.lc (slow)
98 mov ar.ec=PIPE_DEPTH
102 mov ar.lc=len2 // initialize lc for small count
117 mov ar.lc=saved_lc
119 mov ar.pfs=saved_pfs // restore ar.ec
190 mov ar.ec=PIPE_DEPTH
192 mov ar.lc=cnt
216 mov ar.lc=cnt
217 mov ar.ec=PIPE_DEPTH
301 mov ar.ec=PIPE_DEPTH
303 mov ar.lc=len1
310 mov ar.lc=saved_lc
312 mov ar.pfs=saved_pfs
369 mov ar.lc=tmp
401 mov ar.lc=saved_lc
407 mov ar.pfs=saved_pfs
445 mov ar.lc=saved_lc
446 mov ar.pfs=saved_pfs
549 mov ar.lc=len // Continue with a stupid byte store.
556 mov ar.lc=saved_lc
557 mov ar.pfs=saved_pfs
565 // ar.lc and ar.ec are setup correctly at this point
585 mov ar.lc=saved_lc
586 mov ar.pfs=saved_pfs
596 mov ar.lc=saved_lc
597 mov ar.pfs=saved_pfs
606 mov ar.lc=saved_lc
608 mov ar.pfs=saved_pfs
H A Dstrlen_user.S85 .save ar.pfs, saved_pfs
86 alloc saved_pfs=ar.pfs,11,0,0,8
108 mov ar.ec=r0 // clear epilogue counter (saved in ar.pfs)
151 mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what
172 // ar.ec is still zero here
187 mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what
196 mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what
H A Dmemcpy_mck.S109 .save ar.pfs, saved_pfs
110 alloc saved_pfs=ar.pfs,3,Nrot-3,0,Nrot
118 .save ar.lc, saved_lc
119 mov saved_lc=ar.lc
125 (p7) mov ar.lc=cnt // prefetch count
126 (p8) mov ar.lc=r0
147 mov ar.lc=cnt // loop setup
149 mov ar.ec=2
168 mov ar.lc=saved_lc
169 mov ar.pfs=saved_pfs
193 mov ar.ec = 1 // special unrolled loop
201 mov ar.lc = 2*PREFETCH_DIST - 1
216 mov ar.lc = cnt
217 mov ar.ec = N // # of stages in pipeline
277 .save ar.pfs, saved_pfs
278 alloc saved_pfs=ar.pfs,3,5,0,8
279 .save ar.lc, saved_lc
280 mov saved_lc=ar.lc
306 (p7) mov ar.lc = cnt
307 (p8) mov ar.lc = r0
335 mov ar.lc=cnt
339 mov ar.ec=2 // loop setup
394 mov ar.pfs=saved_pfs
396 mov ar.lc=saved_lc
591 mov ar.lc=saved_lc
592 mov ar.pfs=saved_pfs
629 alloc saved_pfs_stack=ar.pfs,3,3,3,0
661 mov ar.pfs=saved_pfs_stack
H A Dstrlen.S83 .save ar.pfs, saved_pfs
84 alloc saved_pfs=ar.pfs,11,0,0,8 // rotating must be multiple of 8
106 mov ar.ec=r0 // clear epilogue counter (saved in ar.pfs)
148 mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what
175 // ar.ec is still zero here
190 mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what
H A Dcopy_page_mck.S102 alloc r8 = ar.pfs, 2, Nrot-2, 0, Nrot
107 .save ar.lc, saved_lc
108 mov saved_lc = ar.lc
115 mov ar.ec = 1 // special unrolled loop
118 mov ar.lc = 2*PREFETCH_DIST - 1
137 mov ar.lc = t1 // with 64KB pages, t1 is too big to fit in 8 bits!
138 mov ar.ec = N // # of stages in pipeline
182 mov ar.lc = saved_lc
H A Dmemset.S59 alloc tmp = ar.pfs, 3, 0, 0, 0
61 .save ar.lc, save_lc
62 mov.i save_lc = ar.lc
144 mov.i ar.lc = loopcnt //
154 mov.i ar.lc = tmp //
214 mov.i ar.lc = loopcnt
224 mov.i ar.lc = tmp
262 mov.i ar.lc = loopcnt
316 mov.i ar.lc = save_lc
335 mov.i ar.lc = save_lc
H A Ddo_csum.S128 .save ar.pfs, saved_pfs
129 alloc saved_pfs=ar.pfs,2,16,0,16
155 .save ar.lc, saved_lc
156 mov saved_lc=ar.lc // save lc
218 mov ar.ec=PIPE_DEPTH
219 mov ar.lc=count // set lc
284 mov ar.pfs=saved_pfs // restore ar.ec
287 mov ar.lc=saved_lc
313 // mov ar.pfs=saved_pfs // restore ar.ec
317 // mov ar.lc=saved_lc
H A Dip_fast_csum.S80 .save ar.pfs, r35
81 alloc r35=ar.pfs,2,2,2,0
91 mov ar.pfs=r35
/linux-4.1.27/tools/build/tests/ex/
H A DMakefile4 export AR := ar
/linux-4.1.27/arch/ia64/include/uapi/asm/
H A Dgcc_intrin.h40 asm volatile ("mov ar%0=%1" :: \
80 asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res) \
310 asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
311 asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv": \
319 asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
320 asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv": \
328 asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
329 asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv": \
337 asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
339 asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv": \
347 asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
348 asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv": \
356 asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
357 asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv": \
365 asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
366 asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv": \
374 asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
376 asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv": \
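
The sequences above all follow the same two-step shape: load the comparison value into ar.ccv, then let cmpxchgN.acq/.rel compare the memory operand against ar.ccv and store the new value only on a match. A minimal portable sketch of the same compare-exchange contract, using C11 atomics rather than the ia64 intrinsics (the helper name cmpxchg4_acq and the sample values are illustrative, not the kernel macro):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Portable analogue of the ia64 "mov ar.ccv=old; cmpxchg4.acq" pair:
     * the expected value plays the role of ar.ccv, and the exchange
     * succeeds only if the memory word still equals it. */
    static unsigned cmpxchg4_acq(_Atomic unsigned *ptr, unsigned old,
                                 unsigned newval)
    {
        unsigned expected = old;                 /* ar.ccv stand-in */
        atomic_compare_exchange_strong_explicit(ptr, &expected, newval,
                                                memory_order_acquire,
                                                memory_order_acquire);
        return expected;                         /* prior memory value */
    }

    int main(void)
    {
        _Atomic unsigned word = 5;
        unsigned prev = cmpxchg4_acq(&word, 5, 9);   /* succeeds: 5 -> 9 */
        printf("prev=%u now=%u\n", prev, atomic_load(&word));
        return 0;
    }
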
H A Drse.h42 * ar.rnat.
54 * registers, calculate ar.bsp.
H A Dptrace.h53 * Note that ar.ec is not saved explicitly in pt_reg or switch_stack.
54 * This is because ar.ec is saved as part of ar.pfs.
92 * contains syscall's ar.pfs.pfm:
217 unsigned long ar[128]; member in struct:pt_all_user_regs
H A Dsigcontext.h23 * be found in ar.bsp after executing a "cover" instruction the context in which the
46 unsigned long sc_ar_unat; /* ar.unat of interrupted context */
H A Dperfmon_default_smpl.h68 unsigned long tstamp; /* ar.itc when entering perfmon intr. handler */
H A Dsignal.h88 * ar.rsc.loadrs is 14 bits, we can assume that they'll never take up
/linux-4.1.27/arch/ia64/include/asm/
H A Dkregs.h15 #define IA64_KR_IO_BASE 0 /* ar.k0: legacy I/O base address */
16 #define IA64_KR_TSSD 1 /* ar.k1: IVE uses this as the TSSD */
17 #define IA64_KR_PER_CPU_DATA 3 /* ar.k3: physical per-CPU base */
18 #define IA64_KR_CURRENT_STACK 4 /* ar.k4: what's mapped in IA64_TR_CURRENT_STACK */
19 #define IA64_KR_FPU_OWNER 5 /* ar.k5: fpu-owner (UP only, at the moment) */
20 #define IA64_KR_CURRENT 6 /* ar.k6: "current" task pointer */
21 #define IA64_KR_PT_BASE 7 /* ar.k7: page table base address (physical) */
24 #define _IA64_KR_PREFIX(n) _IA64_KR_PASTE(ar.k, n)
H A Dfutex.h32 " mov ar.ccv=%2;; \n" \
33 "[2:] cmpxchg4.acq %1=[%4],%3,ar.ccv;; \n" \
113 " mov ar.ccv=%4;; \n" futex_atomic_cmpxchg_inatomic()
114 "[1:] cmpxchg4.acq %1=[%2],%3,ar.ccv \n" futex_atomic_cmpxchg_inatomic()
H A Dmca_asm.h87 mov ar.rsc = 0 ; \
90 mov temp2 = ar.bspstore; \
94 mov temp1 = ar.rnat; \
96 mov ar.bspstore = temp2; \
98 mov ar.rnat = temp1; \
170 mov ar.rsc = 0; \
173 mov r13 = ar.k6; \
174 mov temp2 = ar.bspstore; \
178 mov temp1 = ar.rnat; \
180 mov ar.bspstore = temp2; \
182 mov ar.rnat = temp1; \
H A Delf.h151 * ar.rsc ar.bsp ar.bspstore ar.rnat
152 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
H A Dspinlock.h208 "mov ar.ccv = r0\n" arch_write_lock_flags()
221 "cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n" arch_write_lock_flags()
225 : "ar.ccv", "p6", "p7", "r2", "r29", "memory"); arch_write_lock_flags()
235 "mov ar.ccv = r0\n" \
237 "cmpxchg4.acq %0 = [%1], r29, ar.ccv\n" \
238 : "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory"); \
H A Dasmmacro.h94 mov r16=ar.pfs; \
99 mov ar.pfs=r16; \
H A Dparavirt_privop.h190 * ar.ccv
200 * ar.ccv: scratch
269 "ar.ccv"
H A Dsal.h381 ar : 1, member in struct:sal_processor_static_info::__anon1531
389 u64 ar[128]; member in struct:sal_processor_static_info
H A Dpal.h451 ar : 1, /* App regs valid */ member in struct:pal_process_state_info_s
689 #define pmci_proc_app_regs_valid pme_processor.ar
759 u64 pmsa_rsc; /* ar.rsc */
/linux-4.1.27/arch/s390/kvm/
H A Dpriv.c39 ar_t ar; handle_set_clock() local
45 op2 = kvm_s390_get_base_disp_s(vcpu, &ar); handle_set_clock()
48 rc = read_guest(vcpu, op2, ar, &val, sizeof(val)); handle_set_clock()
72 ar_t ar; handle_set_prefix() local
79 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); handle_set_prefix()
86 rc = read_guest(vcpu, operand2, ar, &address, sizeof(address)); handle_set_prefix()
112 ar_t ar; handle_store_prefix() local
119 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); handle_store_prefix()
128 rc = write_guest(vcpu, operand2, ar, &address, sizeof(address)); handle_store_prefix()
142 ar_t ar; handle_store_cpu_address() local
149 ga = kvm_s390_get_base_disp_s(vcpu, &ar); handle_store_cpu_address()
154 rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id)); handle_store_cpu_address()
238 ar_t ar; handle_tpi() local
240 addr = kvm_s390_get_base_disp_s(vcpu, &ar); handle_tpi()
259 rc = write_guest(vcpu, addr, ar, &tpi_data, len); handle_tpi()
406 ar_t ar; kvm_s390_handle_lpsw() local
411 addr = kvm_s390_get_base_disp_s(vcpu, &ar); kvm_s390_handle_lpsw()
415 rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw)); kvm_s390_handle_lpsw()
433 ar_t ar; handle_lpswe() local
438 addr = kvm_s390_get_base_disp_s(vcpu, &ar); handle_lpswe()
441 rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw)); handle_lpswe()
455 ar_t ar; handle_stidp() local
462 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); handle_stidp()
467 rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data)); handle_stidp()
502 static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar, insert_stsi_usr_data() argument
507 vcpu->run->s390_stsi.ar = ar; insert_stsi_usr_data()
521 ar_t ar; handle_stsi() local
544 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); handle_stsi()
568 rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE); handle_stsi()
574 insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2); handle_stsi()
816 ar_t ar; kvm_s390_handle_lctl() local
823 ga = kvm_s390_get_base_disp_rs(vcpu, &ar); kvm_s390_handle_lctl()
832 rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32)); kvm_s390_handle_lctl()
855 ar_t ar; kvm_s390_handle_stctl() local
862 ga = kvm_s390_get_base_disp_rs(vcpu, &ar); kvm_s390_handle_stctl()
878 rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32)); kvm_s390_handle_stctl()
889 ar_t ar; handle_lctlg() local
896 ga = kvm_s390_get_base_disp_rsy(vcpu, &ar); handle_lctlg()
905 rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64)); handle_lctlg()
927 ar_t ar; handle_stctg() local
934 ga = kvm_s390_get_base_disp_rsy(vcpu, &ar); handle_stctg()
950 rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64)); handle_stctg()
975 ar_t ar; handle_tprot() local
982 kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL); handle_tprot()
991 ret = guest_translate_address(vcpu, address1, ar, &gpa, 1); handle_tprot()
995 ret = guest_translate_address(vcpu, address1, ar, &gpa, 0); handle_tprot()
H A Dkvm-s390.h75 static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar) kvm_s390_get_base_disp_s() argument
80 if (ar) kvm_s390_get_base_disp_s()
81 *ar = base2; kvm_s390_get_base_disp_s()
112 static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar) kvm_s390_get_base_disp_rsy() argument
121 if (ar) kvm_s390_get_base_disp_rsy()
122 *ar = base2; kvm_s390_get_base_disp_rsy()
127 static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, ar_t *ar) kvm_s390_get_base_disp_rs() argument
132 if (ar) kvm_s390_get_base_disp_rs()
133 *ar = base2; kvm_s390_get_base_disp_rs()
H A Dgaccess.h159 ar_t ar, unsigned long *gpa, int write);
160 int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
163 int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
173 * @ar: access register
215 int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, write_guest() argument
218 return access_guest(vcpu, ga, ar, data, len, 1); write_guest()
225 * @ar: access register
235 int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, read_guest() argument
238 return access_guest(vcpu, ga, ar, data, len, 0); read_guest()
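
read_guest() and write_guest() above are deliberately thin: both forward to access_guest() and differ only in the final write flag. A stubbed, standalone sketch of that wrapper pattern follows; the struct vcpu, the in-process guest "memory", and the bounds check are stand-ins, not the s390 implementation:

    #include <stdio.h>
    #include <string.h>

    struct vcpu { unsigned char mem[64]; };      /* toy guest memory */

    /* One worker with an explicit write flag, as in gaccess.c. */
    static int access_guest(struct vcpu *v, unsigned long ga, unsigned char ar,
                            void *data, unsigned long len, int write)
    {
        (void)ar;                    /* real code: pick ASCE via access reg */
        if (ga + len > sizeof(v->mem))
            return -1;               /* fault */
        if (write)
            memcpy(v->mem + ga, data, len);
        else
            memcpy(data, v->mem + ga, len);
        return 0;
    }

    /* Thin wrappers pin the direction down, mirroring gaccess.h. */
    static int write_guest(struct vcpu *v, unsigned long ga, unsigned char ar,
                           void *data, unsigned long len)
    {
        return access_guest(v, ga, ar, data, len, 1);
    }

    static int read_guest(struct vcpu *v, unsigned long ga, unsigned char ar,
                          void *data, unsigned long len)
    {
        return access_guest(v, ga, ar, data, len, 0);
    }

    int main(void)
    {
        struct vcpu v = { { 0 } };
        unsigned int val = 0xdeadbeef, back = 0;
        write_guest(&v, 8, 0, &val, sizeof(val));
        read_guest(&v, 8, 0, &back, sizeof(back));
        printf("back=0x%x\n", back);
        return 0;
    }
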
H A Dgaccess.c359 static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar, ar_translation() argument
370 if (ar >= NUM_ACRS) ar_translation()
374 alet.val = vcpu->run->s.regs.acrs[ar]; ar_translation()
376 if (ar == 0 || alet.val == 0) { ar_translation()
464 ar_t ar, int write) get_vcpu_asce()
493 rc = ar_translation(vcpu, asce, ar, write); get_vcpu_asce()
500 vcpu->arch.pgm.exc_access_id = ar; get_vcpu_asce()
754 int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, access_guest() argument
767 rc = get_vcpu_asce(vcpu, &asce, ar, write); access_guest()
827 int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar, guest_translate_address() argument
838 rc = get_vcpu_asce(vcpu, &asce, ar, write); guest_translate_address()
869 int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar, check_gva_range() argument
879 rc = guest_translate_address(vcpu, gva, ar, &gpa, is_write); check_gva_range()
463 get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar, int write) get_vcpu_asce() argument
H A Dkvm-s390.c2362 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false); kvm_s390_guest_mem_op()
2365 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); kvm_s390_guest_mem_op()
2373 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true); kvm_s390_guest_mem_op()
2380 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); kvm_s390_guest_mem_op()
/linux-4.1.27/drivers/edac/
H A Dcell_edac.c34 static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar) cell_edac_count_ce() argument
40 dev_dbg(mci->pdev, "ECC CE err on node %d, channel %d, ar = 0x%016llx\n", cell_edac_count_ce()
41 priv->node, chan, ar); cell_edac_count_ce()
44 address = (ar & 0xffffffffe0000000ul) >> 29; cell_edac_count_ce()
49 syndrome = (ar & 0x000000001fe00000ul) >> 21; cell_edac_count_ce()
57 static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar) cell_edac_count_ue() argument
63 dev_dbg(mci->pdev, "ECC UE err on node %d, channel %d, ar = 0x%016llx\n", cell_edac_count_ue()
64 priv->node, chan, ar); cell_edac_count_ue()
67 address = (ar & 0xffffffffe0000000ul) >> 29; cell_edac_count_ue()
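
Both counters decode the same 64-bit AR value: the failing address sits in bits 63:29 and the ECC syndrome in bits 28:21, using exactly the masks quoted above. A standalone sketch of the extraction (the helper name and the sample value are made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Field extraction as in cell_edac_count_ce()/_ue() above. */
    static void cell_edac_decode_ar(uint64_t ar)
    {
        uint64_t address  = (ar & 0xffffffffe0000000ull) >> 29;
        uint64_t syndrome = (ar & 0x000000001fe00000ull) >> 21;
        printf("address=0x%llx syndrome=0x%02llx\n",
               (unsigned long long)address, (unsigned long long)syndrome);
    }

    int main(void)
    {
        cell_edac_decode_ar(0x0000000123e00000ull);  /* made-up sample */
        return 0;
    }
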
/linux-4.1.27/tools/lib/api/
H A DMakefile12 AR = $(CROSS_COMPILE)ar
/linux-4.1.27/arch/ia64/oprofile/
H A Dbacktrace.c56 * has ar.pfs == r0. Leaf functions do not modify ar.pfs so ar.pfs remains next_frame()
57 * as 0, stopping the backtrace. Record the previous ar.pfs when the current next_frame()
59 * after unwind then use pt_regs.ar_pfs which is where the real ar.pfs is for next_frame()
/linux-4.1.27/drivers/usb/misc/
H A Drio500_usb.h3 Copyright (C) 2000 Cesar Miquel (miquel@df.uba.ar)
H A Drio500.c6 * Cesar Miquel (miquel@df.uba.ar)
50 #define DRIVER_AUTHOR "Cesar Miquel <miquel@df.uba.ar>"
/linux-4.1.27/scripts/
H A Dkallsyms.c95 struct addr_range *ar; check_symbol_range() local
98 ar = &ranges[i]; check_symbol_range()
100 if (strcmp(sym, ar->start_sym) == 0) { check_symbol_range()
101 ar->start = addr; check_symbol_range()
103 } else if (strcmp(sym, ar->end_sym) == 0) { check_symbol_range()
104 ar->end = addr; check_symbol_range()
184 struct addr_range *ar; symbol_in_range() local
187 ar = &ranges[i]; symbol_in_range()
189 if (s->addr >= ar->start && s->addr <= ar->end) symbol_in_range()
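
check_symbol_range() latches a range's start or end address when the matching delimiter symbol streams past, and symbol_in_range() then performs an inclusive bounds test. A self-contained sketch of that bookkeeping, with the struct trimmed to the fields used above and illustrative symbol names:

    #include <stdio.h>
    #include <string.h>

    struct addr_range {
        const char *start_sym, *end_sym;    /* delimiter symbol names */
        unsigned long long start, end;      /* latched addresses */
    };

    /* Record the address of a delimiter symbol when it is seen. */
    static void check_symbol_range(const char *sym, unsigned long long addr,
                                   struct addr_range *ranges, int entries)
    {
        for (int i = 0; i < entries; i++) {
            struct addr_range *ar = &ranges[i];
            if (strcmp(sym, ar->start_sym) == 0)
                ar->start = addr;
            else if (strcmp(sym, ar->end_sym) == 0)
                ar->end = addr;
        }
    }

    /* Inclusive membership test, as in symbol_in_range() above. */
    static int symbol_in_range(unsigned long long addr,
                               const struct addr_range *ranges, int entries)
    {
        for (int i = 0; i < entries; i++)
            if (addr >= ranges[i].start && addr <= ranges[i].end)
                return 1;
        return 0;
    }

    int main(void)
    {
        struct addr_range r[] = { { "_stext", "_etext", 0, 0 } };
        check_symbol_range("_stext", 0x1000, r, 1);
        check_symbol_range("_etext", 0x2000, r, 1);
        printf("0x1800 in text: %d\n", symbol_in_range(0x1800, r, 1));
        return 0;
    }
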
/linux-4.1.27/arch/ia64/hp/sim/boot/
H A Dboot_head.S95 mov r9=ar.lc
98 mov ar.lc=r8
108 mov ar.lc=r9
/linux-4.1.27/fs/ext4/
H A Dindirect.c322 struct ext4_allocation_request *ar, ext4_alloc_branch()
333 new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err); ext4_alloc_branch()
335 ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle, ext4_alloc_branch()
336 ar->inode, ar->goal, ext4_alloc_branch()
337 ar->flags & EXT4_MB_DELALLOC_RESERVED, ext4_alloc_branch()
347 bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]); ext4_alloc_branch()
365 len = ar->len; ext4_alloc_branch()
374 err = ext4_handle_dirty_metadata(handle, ar->inode, bh); ext4_alloc_branch()
388 ext4_forget(handle, 1, ar->inode, branch[i].bh, ext4_alloc_branch()
390 ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i], ext4_alloc_branch()
391 (i == indirect_blks) ? ar->len : 1, 0); ext4_alloc_branch()
412 struct ext4_allocation_request *ar, ext4_splice_branch()
438 if (num == 0 && ar->len > 1) { ext4_splice_branch()
440 for (i = 1; i < ar->len; i++) ext4_splice_branch()
457 err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh); ext4_splice_branch()
464 ext4_mark_inode_dirty(handle, ar->inode); ext4_splice_branch()
476 ext4_free_blocks(handle, ar->inode, where[i].bh, 0, 1, ext4_splice_branch()
479 ext4_free_blocks(handle, ar->inode, NULL, le32_to_cpu(where[num].key), ext4_splice_branch()
480 ar->len, 0); ext4_splice_branch()
517 struct ext4_allocation_request ar; ext4_ind_map_blocks() local
572 memset(&ar, 0, sizeof(ar)); ext4_ind_map_blocks()
573 ar.inode = inode; ext4_ind_map_blocks()
574 ar.logical = map->m_lblk; ext4_ind_map_blocks()
576 ar.flags = EXT4_MB_HINT_DATA; ext4_ind_map_blocks()
578 ar.flags |= EXT4_MB_DELALLOC_RESERVED; ext4_ind_map_blocks()
580 ar.goal = ext4_find_goal(inode, map->m_lblk, partial); ext4_ind_map_blocks()
589 ar.len = ext4_blks_to_allocate(partial, indirect_blks, ext4_ind_map_blocks()
595 err = ext4_alloc_branch(handle, &ar, indirect_blks, ext4_ind_map_blocks()
606 err = ext4_splice_branch(handle, &ar, partial, indirect_blks); ext4_ind_map_blocks()
613 count = ar.len; ext4_ind_map_blocks()
321 ext4_alloc_branch(handle_t *handle, struct ext4_allocation_request *ar, int indirect_blks, ext4_lblk_t *offsets, Indirect *branch) ext4_alloc_branch() argument
411 ext4_splice_branch(handle_t *handle, struct ext4_allocation_request *ar, Indirect *where, int num) ext4_splice_branch() argument
H A Dmballoc.c3006 struct ext4_allocation_request *ar) ext4_mb_normalize_request()
3091 if (ar->pleft && start <= ar->lleft) { ext4_mb_normalize_request()
3092 size -= ar->lleft + 1 - start; ext4_mb_normalize_request()
3093 start = ar->lleft + 1; ext4_mb_normalize_request()
3095 if (ar->pright && start + size - 1 >= ar->lright) ext4_mb_normalize_request()
3096 size -= start + size - ar->lright; ext4_mb_normalize_request()
3173 if (ar->pright && (ar->lright == (start + size))) { ext4_mb_normalize_request()
3175 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size, ext4_mb_normalize_request()
3180 if (ar->pleft && (ar->lleft + 1 == start)) { ext4_mb_normalize_request()
3182 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1, ext4_mb_normalize_request()
4150 struct ext4_allocation_request *ar) ext4_mb_initialize_context()
4152 struct super_block *sb = ar->inode->i_sb; ext4_mb_initialize_context()
4161 len = ar->len; ext4_mb_initialize_context()
4168 goal = ar->goal; ext4_mb_initialize_context()
4175 ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical); ext4_mb_initialize_context()
4178 ac->ac_inode = ar->inode; ext4_mb_initialize_context()
4184 ac->ac_flags = ar->flags; ext4_mb_initialize_context()
4192 (unsigned) ar->len, (unsigned) ar->logical, ext4_mb_initialize_context()
4193 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order, ext4_mb_initialize_context()
4194 (unsigned) ar->lleft, (unsigned) ar->pleft, ext4_mb_initialize_context()
4195 (unsigned) ar->lright, (unsigned) ar->pright, ext4_mb_initialize_context()
4196 atomic_read(&ar->inode->i_writecount) ? "" : "non-"); ext4_mb_initialize_context()
4395 struct ext4_allocation_request *ar, int *errp) ext4_mb_new_blocks()
4406 sb = ar->inode->i_sb; ext4_mb_new_blocks()
4409 trace_ext4_request_blocks(ar); ext4_mb_new_blocks()
4412 if (IS_NOQUOTA(ar->inode)) ext4_mb_new_blocks()
4413 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS; ext4_mb_new_blocks()
4415 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) { ext4_mb_new_blocks()
4420 while (ar->len && ext4_mb_new_blocks()
4421 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { ext4_mb_new_blocks()
4425 ar->len = ar->len >> 1; ext4_mb_new_blocks()
4427 if (!ar->len) { ext4_mb_new_blocks()
4431 reserv_clstrs = ar->len; ext4_mb_new_blocks()
4432 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) { ext4_mb_new_blocks()
4433 dquot_alloc_block_nofail(ar->inode, ext4_mb_new_blocks()
4434 EXT4_C2B(sbi, ar->len)); ext4_mb_new_blocks()
4436 while (ar->len && ext4_mb_new_blocks()
4437 dquot_alloc_block(ar->inode, ext4_mb_new_blocks()
4438 EXT4_C2B(sbi, ar->len))) { ext4_mb_new_blocks()
4440 ar->flags |= EXT4_MB_HINT_NOPREALLOC; ext4_mb_new_blocks()
4441 ar->len--; ext4_mb_new_blocks()
4444 inquota = ar->len; ext4_mb_new_blocks()
4445 if (ar->len == 0) { ext4_mb_new_blocks()
4453 ar->len = 0; ext4_mb_new_blocks()
4458 *errp = ext4_mb_initialize_context(ac, ar); ext4_mb_new_blocks()
4460 ar->len = 0; ext4_mb_new_blocks()
4467 ext4_mb_normalize_request(ac, ar); ext4_mb_new_blocks()
4504 ar->len = ac->ac_b_ex.fe_len; ext4_mb_new_blocks()
4516 ar->len = 0; ext4_mb_new_blocks()
4523 if (inquota && ar->len < inquota) ext4_mb_new_blocks()
4524 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len)); ext4_mb_new_blocks()
4525 if (!ar->len) { ext4_mb_new_blocks()
4526 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) ext4_mb_new_blocks()
4532 trace_ext4_allocate_blocks(ar, (unsigned long long)block); ext4_mb_new_blocks()
3005 ext4_mb_normalize_request(struct ext4_allocation_context *ac, struct ext4_allocation_request *ar) ext4_mb_normalize_request() argument
4149 ext4_mb_initialize_context(struct ext4_allocation_context *ac, struct ext4_allocation_request *ar) ext4_mb_initialize_context() argument
4394 ext4_mb_new_blocks(handle_t *handle, struct ext4_allocation_request *ar, int *errp) ext4_mb_new_blocks() argument
H A Dballoc.c625 struct ext4_allocation_request ar; ext4_new_meta_blocks() local
628 memset(&ar, 0, sizeof(ar)); ext4_new_meta_blocks()
630 ar.inode = inode; ext4_new_meta_blocks()
631 ar.goal = goal; ext4_new_meta_blocks()
632 ar.len = count ? *count : 1; ext4_new_meta_blocks()
633 ar.flags = flags; ext4_new_meta_blocks()
635 ret = ext4_mb_new_blocks(handle, &ar, errp); ext4_new_meta_blocks()
637 *count = ar.len; ext4_new_meta_blocks()
644 EXT4_C2B(EXT4_SB(inode->i_sb), ar.len)); ext4_new_meta_blocks()
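
ext4_new_meta_blocks() shows the canonical calling convention for the mballoc entry point: zero an ext4_allocation_request on the stack, fill in inode, goal, len and flags, call ext4_mb_new_blocks(), and read the possibly trimmed length back out of the request. A toy standalone sketch of that fill-and-call shape (the struct and the allocator here are stand-ins, not the ext4 definitions):

    #include <stdio.h>
    #include <string.h>

    struct allocation_request {
        void *inode;
        unsigned long long goal;    /* preferred physical block */
        unsigned int len;           /* blocks wanted in, granted out */
        unsigned int flags;
    };

    static unsigned long long mb_new_blocks(struct allocation_request *ar,
                                            int *errp)
    {
        *errp = 0;
        return ar->goal;            /* pretend the goal block was free */
    }

    static unsigned long long new_meta_blocks(void *inode,
                                              unsigned long long goal,
                                              unsigned int flags,
                                              unsigned int *count)
    {
        struct allocation_request ar;
        int err;

        memset(&ar, 0, sizeof(ar)); /* same zero-then-fill idiom as balloc.c */
        ar.inode = inode;
        ar.goal = goal;
        ar.len = count ? *count : 1;
        ar.flags = flags;

        unsigned long long ret = mb_new_blocks(&ar, &err);
        if (count)
            *count = ar.len;        /* allocator may trim the request */
        return err ? 0 : ret;
    }

    int main(void)
    {
        unsigned int count = 4;
        printf("block=%llu count=%u\n",
               new_meta_blocks(NULL, 100, 0, &count), count);
        return 0;
    }
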
H A Dextents.c4280 struct ext4_allocation_request ar; ext4_ext_map_blocks() local
4386 ar.len = allocated = map->m_len; ext4_ext_map_blocks()
4393 ar.lleft = map->m_lblk; ext4_ext_map_blocks()
4394 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); ext4_ext_map_blocks()
4397 ar.lright = map->m_lblk; ext4_ext_map_blocks()
4399 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2); ext4_ext_map_blocks()
4407 ar.len = allocated = map->m_len; ext4_ext_map_blocks()
4435 ar.inode = inode; ext4_ext_map_blocks()
4436 ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); ext4_ext_map_blocks()
4437 ar.logical = map->m_lblk; ext4_ext_map_blocks()
4447 ar.len = EXT4_NUM_B2C(sbi, offset+allocated); ext4_ext_map_blocks()
4448 ar.goal -= offset; ext4_ext_map_blocks()
4449 ar.logical -= offset; ext4_ext_map_blocks()
4451 ar.flags = EXT4_MB_HINT_DATA; ext4_ext_map_blocks()
4454 ar.flags = 0; ext4_ext_map_blocks()
4456 ar.flags |= EXT4_MB_HINT_NOPREALLOC; ext4_ext_map_blocks()
4458 ar.flags |= EXT4_MB_DELALLOC_RESERVED; ext4_ext_map_blocks()
4459 newblock = ext4_mb_new_blocks(handle, &ar, &err); ext4_ext_map_blocks()
4463 ar.goal, newblock, allocated); ext4_ext_map_blocks()
4465 allocated_clusters = ar.len; ext4_ext_map_blocks()
4466 ar.len = EXT4_C2B(sbi, ar.len) - offset; ext4_ext_map_blocks()
4467 if (ar.len > allocated) ext4_ext_map_blocks()
4468 ar.len = allocated; ext4_ext_map_blocks()
4473 newex.ee_len = cpu_to_le16(ar.len); ext4_ext_map_blocks()
4492 path, ar.len); ext4_ext_map_blocks()
/linux-4.1.27/drivers/staging/lustre/lustre/osc/
H A Dosc_object.c141 struct osc_async_rc *ar = &oinfo->loi_ar; osc_object_print() local
146 ar->ar_rc, ar->ar_force_sync, ar->ar_min_xid); osc_object_print()
H A Dosc_cache.c1761 static void osc_process_ar(struct osc_async_rc *ar, __u64 xid, osc_process_ar() argument
1765 if (!ar->ar_rc) osc_process_ar()
1766 ar->ar_rc = rc; osc_process_ar()
1768 ar->ar_force_sync = 1; osc_process_ar()
1769 ar->ar_min_xid = ptlrpc_sample_next_xid(); osc_process_ar()
1774 if (ar->ar_force_sync && (xid >= ar->ar_min_xid)) osc_process_ar()
1775 ar->ar_force_sync = 0; osc_process_ar()
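
osc_process_ar() is a small error latch: the first nonzero rc is remembered, the object is forced into synchronous mode, and a watermark xid is recorded; sync mode is only relaxed once a request at or past that watermark completes. A standalone sketch under those assumptions (the types and the xid source are stand-ins):

    #include <stdio.h>

    struct async_rc { int rc; int force_sync; unsigned long long min_xid; };

    static unsigned long long next_xid = 100;     /* fake xid source */

    static void process_ar(struct async_rc *ar, unsigned long long xid, int rc)
    {
        if (rc) {
            if (!ar->rc)
                ar->rc = rc;                      /* latch first error */
            ar->force_sync = 1;
            ar->min_xid = next_xid;               /* stay sync until here */
        } else if (ar->force_sync && xid >= ar->min_xid) {
            ar->force_sync = 0;                   /* watermark passed */
        }
    }

    int main(void)
    {
        struct async_rc ar = { 0 };
        process_ar(&ar, 42, -5);                  /* failure latches sync */
        process_ar(&ar, 100, 0);                  /* success past watermark */
        printf("rc=%d force_sync=%d\n", ar.rc, ar.force_sync);
        return 0;
    }
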
/linux-4.1.27/drivers/clk/sunxi/
H A Dclk-a10-hosc.c4 * Emilio López <emilio@elopez.com.ar>
H A Dclk-a20-gmac.c3 * Emilio López <emilio@elopez.com.ar>
H A Dclk-usb.c4 * Emilio López <emilio@elopez.com.ar>
H A Dclk-factors.c2 * Copyright (C) 2013 Emilio López <emilio@elopez.com.ar>
H A Dclk-mod0.c4 * Emilio López <emilio@elopez.com.ar>
/linux-4.1.27/arch/s390/include/asm/
H A Dnmi.h49 __u32 ar : 1; /* 33 access register validity */ member in struct:mci
H A Datomic.h51 #define __ATOMIC_ADD "ar"
/linux-4.1.27/sound/isa/msnd/
H A Dmsnd_pinnacle_mixer.c163 #define update_potm(d, s, ar) \
171 if (snd_msnd_send_word(dev, 0, 0, ar) == 0) \
175 #define update_pot(d, s, ar) \
181 if (snd_msnd_send_word(dev, 0, 0, ar) == 0) \
/linux-4.1.27/fs/
H A Dlibfs.c687 struct simple_transaction_argresp *ar = file->private_data; simple_transaction_set() local
692 * The barrier ensures that ar->size will really remain zero until simple_transaction_set()
693 * ar->data is ready for reading. simple_transaction_set()
696 ar->size = n; simple_transaction_set()
702 struct simple_transaction_argresp *ar; simple_transaction_get() local
708 ar = (struct simple_transaction_argresp *)get_zeroed_page(GFP_KERNEL); simple_transaction_get()
709 if (!ar) simple_transaction_get()
717 free_page((unsigned long)ar); simple_transaction_get()
721 file->private_data = ar; simple_transaction_get()
725 if (copy_from_user(ar->data, buf, size)) simple_transaction_get()
728 return ar->data; simple_transaction_get()
734 struct simple_transaction_argresp *ar = file->private_data; simple_transaction_read() local
736 if (!ar) simple_transaction_read()
738 return simple_read_from_buffer(buf, size, pos, ar->data, ar->size); simple_transaction_read()
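
simple_transaction_set() publishes the reply by filling ar->data first and only then setting ar->size behind a barrier, so a reader that observes a nonzero size is guaranteed to observe the matching data. A sketch of that publish order using C11 release/acquire atomics in place of the kernel's barrier (the struct name and sizes are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <string.h>

    struct argresp {
        _Atomic size_t size;        /* stays 0 until the reply is ready */
        char data[32];
    };

    static void transaction_set(struct argresp *ar, const char *reply)
    {
        size_t n = strlen(reply);
        memcpy(ar->data, reply, n);                    /* 1: payload first */
        atomic_store_explicit(&ar->size, n,
                              memory_order_release);   /* 2: then size */
    }

    static size_t transaction_read(struct argresp *ar, char *buf, size_t cap)
    {
        size_t n = atomic_load_explicit(&ar->size, memory_order_acquire);
        if (n == 0 || n > cap)
            return 0;                                  /* not ready */
        memcpy(buf, ar->data, n);
        return n;
    }

    int main(void)
    {
        struct argresp ar = { 0 };
        char buf[32];
        transaction_set(&ar, "ok");
        printf("read %zu bytes\n", transaction_read(&ar, buf, sizeof(buf)));
        return 0;
    }
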
/linux-4.1.27/drivers/media/pci/saa7164/
H A Dsaa7164-api.c251 struct tmComResEncVideoInputAspectRatio ar; saa7164_api_get_encoder() local
294 ar.width = 0; saa7164_api_get_encoder()
295 ar.height = 0; saa7164_api_get_encoder()
298 sizeof(struct tmComResEncVideoInputAspectRatio), &ar); saa7164_api_get_encoder()
319 ar.width, ar.height); saa7164_api_get_encoder()
327 struct tmComResEncVideoInputAspectRatio ar; saa7164_api_set_aspect_ratio() local
335 ar.width = 1; saa7164_api_set_aspect_ratio()
336 ar.height = 1; saa7164_api_set_aspect_ratio()
339 ar.width = 4; saa7164_api_set_aspect_ratio()
340 ar.height = 3; saa7164_api_set_aspect_ratio()
343 ar.width = 16; saa7164_api_set_aspect_ratio()
344 ar.height = 9; saa7164_api_set_aspect_ratio()
347 ar.width = 221; saa7164_api_set_aspect_ratio()
348 ar.height = 100; saa7164_api_set_aspect_ratio()
356 ar.width, ar.height); saa7164_api_set_aspect_ratio()
361 sizeof(struct tmComResEncVideoInputAspectRatio), &ar); saa7164_api_set_aspect_ratio()
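
saa7164_api_set_aspect_ratio() maps a control selection onto the width:height pairs visible above: 1:1, 4:3, 16:9 and 2.21:1. Recast as a standalone lookup (the enum names are illustrative):

    #include <stdio.h>

    struct aspect { unsigned width, height; };

    enum ratio_sel { RATIO_1_1, RATIO_4_3, RATIO_16_9, RATIO_221_100 };

    /* Same width/height table as saa7164_api_set_aspect_ratio(). */
    static struct aspect select_aspect(enum ratio_sel sel)
    {
        switch (sel) {
        case RATIO_4_3:     return (struct aspect){ 4, 3 };
        case RATIO_16_9:    return (struct aspect){ 16, 9 };
        case RATIO_221_100: return (struct aspect){ 221, 100 };
        case RATIO_1_1:
        default:            return (struct aspect){ 1, 1 };
        }
    }

    int main(void)
    {
        struct aspect a = select_aspect(RATIO_16_9);
        printf("aspect %u:%u\n", a.width, a.height);
        return 0;
    }
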
/linux-4.1.27/include/trace/events/
H A Dext4.h748 TP_PROTO(struct ext4_allocation_request *ar),
750 TP_ARGS(ar),
766 __entry->dev = ar->inode->i_sb->s_dev;
767 __entry->ino = ar->inode->i_ino;
768 __entry->len = ar->len;
769 __entry->logical = ar->logical;
770 __entry->goal = ar->goal;
771 __entry->lleft = ar->lleft;
772 __entry->lright = ar->lright;
773 __entry->pleft = ar->pleft;
774 __entry->pright = ar->pright;
775 __entry->flags = ar->flags;
788 TP_PROTO(struct ext4_allocation_request *ar, unsigned long long block),
790 TP_ARGS(ar, block),
807 __entry->dev = ar->inode->i_sb->s_dev;
808 __entry->ino = ar->inode->i_ino;
810 __entry->len = ar->len;
811 __entry->logical = ar->logical;
812 __entry->goal = ar->goal;
813 __entry->lleft = ar->lleft;
814 __entry->lright = ar->lright;
815 __entry->pleft = ar->pleft;
816 __entry->pright = ar->pright;
817 __entry->flags = ar->flags;
/linux-4.1.27/drivers/staging/lustre/lustre/libcfs/
H A Dnidstrings.c743 struct addrrange *ar; free_addrranges() local
745 ar = list_entry(list->next, struct addrrange, ar_link); free_addrranges()
747 cfs_expr_list_free_list(&ar->ar_numaddr_ranges); free_addrranges()
748 list_del(&ar->ar_link); free_addrranges()
749 LIBCFS_FREE(ar, sizeof(struct addrrange)); free_addrranges()
826 struct addrrange *ar; cfs_match_nid() local
835 list_for_each_entry(ar, &nr->nr_addrranges, ar_link) list_for_each_entry()
837 &ar->ar_numaddr_ranges)) list_for_each_entry()
/linux-4.1.27/drivers/video/fbdev/core/
H A Dsvgalib.c383 u16 am, an, ar; svga_compute_pll() local
388 ar = pll->r_max; svga_compute_pll()
389 f_vco = f_wanted << ar; svga_compute_pll()
392 if ((f_vco >> ar) != f_wanted) svga_compute_pll()
398 while ((ar > pll->r_min) && (f_vco > pll->f_vco_max)) { svga_compute_pll()
399 ar--; svga_compute_pll()
410 *r = ar; svga_compute_pll()
433 pr_debug("fb%d: found frequency: %d kHz (VCO %d kHz)\n", node, (int) (f_current >> ar), (int) f_current); svga_compute_pll()
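
svga_compute_pll() starts from the largest post-divider r_max, where f_vco = f_wanted << r is greatest, rejects the request if the shift overflowed, and then walks r downward while the VCO target exceeds f_vco_max. A simplified standalone sketch of that r-search (the limit values are made up and the m/n divider search is omitted):

    #include <stdint.h>
    #include <stdio.h>

    struct pll_limits { uint16_t r_min, r_max; uint32_t f_vco_max; };

    static int compute_r(const struct pll_limits *pll, uint32_t f_wanted,
                         uint16_t *r, uint32_t *f_vco)
    {
        uint16_t ar = pll->r_max;
        uint32_t vco = f_wanted << ar;

        if ((vco >> ar) != f_wanted)
            return -1;                    /* shift overflowed 32 bits */

        while (ar > pll->r_min && vco > pll->f_vco_max) {
            ar--;                         /* smaller post-divider ... */
            vco >>= 1;                    /* ... halves the VCO target */
        }
        *r = ar;
        *f_vco = vco;
        return 0;
    }

    int main(void)
    {
        struct pll_limits pll = { .r_min = 0, .r_max = 5,
                                  .f_vco_max = 300000 };
        uint16_t r; uint32_t f_vco;
        if (compute_r(&pll, 25000, &r, &f_vco) == 0)  /* 25 MHz in kHz */
            printf("r=%u f_vco=%u kHz\n", r, f_vco);
        return 0;
    }
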
/linux-4.1.27/arch/s390/kernel/
H A Dsclp.S233 ar %r9,%r6
236 ar %r9,%r6
239 ar %r9,%r6
241 ar %r7,%r6 # update current mto address
H A Dnmi.c175 if (!mci->ar) { s390_revalidate_registers()
H A Dhead.S128 ar %r2,%r0
/linux-4.1.27/tools/testing/selftests/x86/
H A Dsigreturn.c370 uint32_t valid = 0, ar; cs_bitness() local
371 asm ("lar %[cs], %[ar]\n\t" cs_bitness()
375 : [ar] "=r" (ar), [valid] "+rm" (valid) cs_bitness()
381 bool db = (ar & (1 << 22)); cs_bitness()
382 bool l = (ar & (1 << 21)); cs_bitness()
384 if (!(ar & (1<<11))) cs_bitness()
/linux-4.1.27/lib/raid6/test/
H A DMakefile11 AR = ar
/linux-4.1.27/tools/lib/lockdep/
H A DMakefile19 $(call allow-override,AR,$(CROSS_COMPILE)ar)
/linux-4.1.27/drivers/scsi/esas2r/
H A Desas2r_main.c972 struct esas2r_request *ar = *abort_request; esas2r_check_active_queue() local
989 ar = esas2r_alloc_request(a); list_for_each_safe()
990 if (ar == NULL) { list_for_each_safe()
1002 ar->sense_len = 0; list_for_each_safe()
1003 ar->vrq->scsi.length = 0; list_for_each_safe()
1004 ar->target_id = rq->target_id; list_for_each_safe()
1005 ar->vrq->scsi.flags |= cpu_to_le32( list_for_each_safe()
1008 memset(ar->vrq->scsi.cdb, 0, list_for_each_safe()
1009 sizeof(ar->vrq->scsi.cdb)); list_for_each_safe()
1011 ar->vrq->scsi.flags |= cpu_to_le32( list_for_each_safe()
1013 ar->vrq->scsi.u.abort_handle = list_for_each_safe()
/linux-4.1.27/drivers/staging/comedi/drivers/
H A Dadq12b.c22 Author: jeremy theler <thelerg@ib.cnea.gov.ar>
60 written by jeremy theler <thelerg@ib.cnea.gov.ar>
H A Dpcl816.c4 Author: Juan Grigera <juan@grigera.com.ar>
14 Author: Juan Grigera <juan@grigera.com.ar>
/linux-4.1.27/arch/ia64/include/asm/native/
H A Dpvchk_inst.h58 * mov ar.eflag =
59 * mov = ar.eflag
H A Dinst.h81 (pred) mov reg = ar.itc \
/linux-4.1.27/net/mac80211/
H A Drc80211_minstrel.c270 struct ieee80211_tx_rate *ar = info->status.rates; minstrel_tx_status() local
277 if (ar[i].idx < 0) minstrel_tx_status()
280 ndx = rix_to_ndx(mi, ar[i].idx); minstrel_tx_status()
284 mi->r[ndx].stats.attempts += ar[i].count; minstrel_tx_status()
286 if ((i != IEEE80211_TX_MAX_RATES - 1) && (ar[i + 1].idx < 0)) minstrel_tx_status()
H A Drc80211_minstrel_ht.c704 struct ieee80211_tx_rate *ar = info->status.rates; minstrel_ht_tx_status() local
737 last = !minstrel_ht_txstat_valid(mp, &ar[0]); minstrel_ht_tx_status()
740 !minstrel_ht_txstat_valid(mp, &ar[i + 1]); minstrel_ht_tx_status()
742 rate = minstrel_ht_get_stats(mp, mi, &ar[i]); minstrel_ht_tx_status()
747 rate->attempts += ar[i].count * info->status.ampdu_len; minstrel_ht_tx_status()
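
Both minstrel status handlers walk info->status.rates[] until the idx < 0 sentinel and fold each entry's count into the per-rate attempt statistics. A self-contained sketch of that sentinel-terminated walk (the types and sample data are stand-ins):

    #include <stdio.h>

    #define TX_MAX_RATES 4

    struct tx_rate { int idx; unsigned count; };

    /* idx < 0 marks the end of the tried-rates list, as in
     * minstrel_tx_status() above. */
    static void count_attempts(const struct tx_rate *ar, unsigned *attempts,
                               int nrates)
    {
        for (int i = 0; i < TX_MAX_RATES; i++) {
            if (ar[i].idx < 0)
                break;                      /* sentinel: no more rates */
            if (ar[i].idx < nrates)
                attempts[ar[i].idx] += ar[i].count;
        }
    }

    int main(void)
    {
        struct tx_rate status[TX_MAX_RATES] = {
            { 7, 2 }, { 3, 1 }, { -1, 0 }, { -1, 0 },
        };
        unsigned attempts[8] = { 0 };
        count_attempts(status, attempts, 8);
        printf("rate7=%u rate3=%u\n", attempts[7], attempts[3]);
        return 0;
    }
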
/linux-4.1.27/arch/x86/include/asm/
H A Dvmx.h390 #define AR_DPL(ar) (((ar) >> AR_DPL_SHIFT) & 3)
/linux-4.1.27/arch/x86/kvm/
H A Dvmx.c523 u32 ar; member in struct:vcpu_vmx::__anon3174::kvm_save_segment
1558 u32 *p = &vmx->segment_cache.seg[seg].ar; vmx_read_guest_seg_ar()
3696 u32 ar; vmx_get_segment() local
3710 ar = vmx_read_guest_seg_ar(vmx, seg); vmx_get_segment()
3711 var->unusable = (ar >> 16) & 1; vmx_get_segment()
3712 var->type = ar & 15; vmx_get_segment()
3713 var->s = (ar >> 4) & 1; vmx_get_segment()
3714 var->dpl = (ar >> 5) & 3; vmx_get_segment()
3723 var->avl = (ar >> 12) & 1; vmx_get_segment()
3724 var->l = (ar >> 13) & 1; vmx_get_segment()
3725 var->db = (ar >> 14) & 1; vmx_get_segment()
3726 var->g = (ar >> 15) & 1; vmx_get_segment()
3747 int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS); vmx_get_cpl() local
3748 return AR_DPL(ar); vmx_get_cpl()
3754 u32 ar; vmx_segment_access_rights() local
3757 ar = 1 << 16; vmx_segment_access_rights()
3759 ar = var->type & 15; vmx_segment_access_rights()
3760 ar |= (var->s & 1) << 4; vmx_segment_access_rights()
3761 ar |= (var->dpl & 3) << 5; vmx_segment_access_rights()
3762 ar |= (var->present & 1) << 7; vmx_segment_access_rights()
3763 ar |= (var->avl & 1) << 12; vmx_segment_access_rights()
3764 ar |= (var->l & 1) << 13; vmx_segment_access_rights()
3765 ar |= (var->db & 1) << 14; vmx_segment_access_rights()
3766 ar |= (var->g & 1) << 15; vmx_segment_access_rights()
3769 return ar; vmx_segment_access_rights()
3815 u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS); vmx_get_cs_db_l_bits() local
3817 *db = (ar >> 14) & 1; vmx_get_cs_db_l_bits()
3818 *l = (ar >> 13) & 1; vmx_get_cs_db_l_bits()
3848 u32 ar; rmode_segment_valid() local
3854 ar = vmx_segment_access_rights(&var); rmode_segment_valid()
3860 if (ar != 0xf3) rmode_segment_valid()
4119 unsigned int ar; seg_setup() local
4124 ar = 0x93; seg_setup()
4126 ar |= 0x08; /* code segment */ seg_setup()
4128 vmcs_write32(sf->ar_bytes, ar); seg_setup()
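
vmx_segment_access_rights() and the decode in vmx_get_segment() are exact inverses over the 32-bit access-rights word: type in bits 3:0, S bit 4, DPL bits 6:5, P bit 7, AVL bit 12, L bit 13, D/B bit 14, G bit 15, unusable bit 16. A standalone round-trip sketch (the struct is a stand-in for struct kvm_segment's fields):

    #include <stdint.h>
    #include <stdio.h>

    struct seg {
        unsigned type:4, s:1, dpl:2, present:1;
        unsigned avl:1, l:1, db:1, g:1, unusable:1;
    };

    /* Packing, as in vmx_segment_access_rights() above. */
    static uint32_t seg_pack_ar(const struct seg *v)
    {
        if (v->unusable)
            return 1u << 16;              /* "unusable" short-circuits */
        return (v->type) | (v->s << 4) | (v->dpl << 5) |
               (v->present << 7) | (v->avl << 12) | (v->l << 13) |
               (v->db << 14) | (v->g << 15);
    }

    /* Unpacking, as in vmx_get_segment() above. */
    static struct seg seg_unpack_ar(uint32_t ar)
    {
        struct seg v = {
            .unusable = (ar >> 16) & 1,
            .type = ar & 15, .s = (ar >> 4) & 1, .dpl = (ar >> 5) & 3,
            .present = (ar >> 7) & 1,
            .avl = (ar >> 12) & 1, .l = (ar >> 13) & 1,
            .db = (ar >> 14) & 1, .g = (ar >> 15) & 1,
        };
        return v;
    }

    int main(void)
    {
        struct seg cs = { .type = 11, .s = 1, .dpl = 0,
                          .present = 1, .l = 1 };
        uint32_t ar = seg_pack_ar(&cs);
        struct seg back = seg_unpack_ar(ar);
        printf("ar=0x%x type=%u l=%u\n", ar, back.type, back.l);
        return 0;
    }
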
/linux-4.1.27/arch/ia64/mm/
H A Dcontig.c116 * Update ar.k3. This move ensures that percpu for_each_possible_cpu()
/linux-4.1.27/tools/lib/traceevent/
H A DMakefile25 $(call allow-override,AR,$(CROSS_COMPILE)ar)
/linux-4.1.27/drivers/net/wireless/
H A Dmac80211_hwsim.c2133 struct mac80211_hwsim_data *ar = hw->priv; mac80211_hwsim_get_et_stats() local
2136 data[i++] = ar->tx_pkts; mac80211_hwsim_get_et_stats()
2137 data[i++] = ar->tx_bytes; mac80211_hwsim_get_et_stats()
2138 data[i++] = ar->rx_pkts; mac80211_hwsim_get_et_stats()
2139 data[i++] = ar->rx_bytes; mac80211_hwsim_get_et_stats()
2140 data[i++] = ar->tx_dropped; mac80211_hwsim_get_et_stats()
2141 data[i++] = ar->tx_failed; mac80211_hwsim_get_et_stats()
2142 data[i++] = ar->ps; mac80211_hwsim_get_et_stats()
2143 data[i++] = ar->group; mac80211_hwsim_get_et_stats()
2144 data[i++] = ar->power_level; mac80211_hwsim_get_et_stats()
/linux-4.1.27/net/netfilter/
H A Dnf_nat_sip.c3 * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar>
27 MODULE_AUTHOR("Christian Hentschel <chentschel@arnet.com.ar>");
/linux-4.1.27/drivers/gpu/drm/ast/
H A Dast_drv.h276 u8 ar[20]; member in struct:ast_vbios_stdtable
/linux-4.1.27/arch/powerpc/
H A DMakefile21 CROSS32AR := $(CROSS32_COMPILE)ar
/linux-4.1.27/sound/oss/
H A Dmsnd_pinnacle.c406 #define update_potm(d,s,ar) \
413 if (msnd_send_word(&dev, 0, 0, ar) == 0) \
416 #define update_pot(d,s,ar) \
421 if (msnd_send_word(&dev, 0, 0, ar) == 0) \
/linux-4.1.27/tools/power/cpupower/
H A DMakefile96 AR = $(CROSS)ar
/linux-4.1.27/drivers/gpu/drm/mgag200/
H A Dmgag200_reg.h188 /* ar register at zero */
/linux-4.1.27/drivers/video/fbdev/matrox/
H A Dmatroxfb_accel.c37 * "Pablo Bianucci" <pbian@pccp.com.ar>
/linux-4.1.27/drivers/media/dvb-frontends/
H A Dves1x93.c311 * reliably, while the SIGNAL and CARRIER bits ar sometimes wrong. ves1x93_read_status()
/linux-4.1.27/net/netfilter/ipvs/
H A Dip_vs_app.c17 * Author: Juan Jose Ciarlante, <jjciarla@raiz.uncu.edu.ar>
/linux-4.1.27/drivers/scsi/megaraid/
H A Dmegaraid_sas_fp.c135 u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map) MR_ArPdGet() argument
137 return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]); MR_ArPdGet()
