1
2/*
3 * This file is part of wlcore
4 *
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#include <linux/module.h>
25#include <linux/firmware.h>
26#include <linux/etherdevice.h>
27#include <linux/vmalloc.h>
28#include <linux/interrupt.h>
29#include <linux/irq.h>
30
31#include "wlcore.h"
32#include "debug.h"
33#include "wl12xx_80211.h"
34#include "io.h"
35#include "tx.h"
36#include "ps.h"
37#include "init.h"
38#include "debugfs.h"
39#include "testmode.h"
40#include "vendor_cmd.h"
41#include "scan.h"
42#include "hw_ops.h"
43#include "sysfs.h"
44
45#define WL1271_BOOT_RETRIES 3
46
47static char *fwlog_param;
48static int fwlog_mem_blocks = -1;
49static int bug_on_recovery = -1;
50static int no_recovery     = -1;
51
52static void __wl1271_op_remove_interface(struct wl1271 *wl,
53					 struct ieee80211_vif *vif,
54					 bool reset_tx_queues);
55static void wlcore_op_stop_locked(struct wl1271 *wl);
56static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
57
58static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
59{
60	int ret;
61
62	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
63		return -EINVAL;
64
65	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
66		return 0;
67
68	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
69		return 0;
70
71	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
72	if (ret < 0)
73		return ret;
74
75	wl1271_info("Association completed.");
76	return 0;
77}
78
79static void wl1271_reg_notify(struct wiphy *wiphy,
80			      struct regulatory_request *request)
81{
82	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
83	struct wl1271 *wl = hw->priv;
84
85	/* copy the current dfs region */
86	if (request)
87		wl->dfs_region = request->dfs_region;
88
89	wlcore_regdomain_config(wl);
90}
91
92static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
93				   bool enable)
94{
95	int ret = 0;
96
97	/* wl->mutex must be held when calling this */
98	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
99	if (ret < 0)
100		goto out;
101
102	if (enable)
103		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
104	else
105		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
106out:
107	return ret;
108}
109
110/*
111 * This function is called when the rx_streaming interval
112 * has been changed or rx_streaming should be disabled
113 */
114int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
115{
116	int ret = 0;
117	int period = wl->conf.rx_streaming.interval;
118
119	/* don't reconfigure if rx_streaming is disabled */
120	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
121		goto out;
122
123	/* reconfigure/disable according to new streaming_period */
124	if (period &&
125	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
126	    (wl->conf.rx_streaming.always ||
127	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
128		ret = wl1271_set_rx_streaming(wl, wlvif, true);
129	else {
130		ret = wl1271_set_rx_streaming(wl, wlvif, false);
131		/* don't cancel_work_sync since we might deadlock */
132		del_timer_sync(&wlvif->rx_streaming_timer);
133	}
134out:
135	return ret;
136}
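
/*
 * Roughly speaking, the decision made above is: keep RX streaming enabled
 * only while the configured interval is non-zero, the STA is associated,
 * and either rx_streaming.always is set or Soft Gemini (BT coexistence) is
 * active.  A condensed sketch of that check (illustrative only, using the
 * same flags as above, not new logic):
 *
 *	bool keep = period &&
 *		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
 *		    (wl->conf.rx_streaming.always ||
 *		     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags));
 */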
137
138static void wl1271_rx_streaming_enable_work(struct work_struct *work)
139{
140	int ret;
141	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
142						rx_streaming_enable_work);
143	struct wl1271 *wl = wlvif->wl;
144
145	mutex_lock(&wl->mutex);
146
147	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
148	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
149	    (!wl->conf.rx_streaming.always &&
150	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
151		goto out;
152
153	if (!wl->conf.rx_streaming.interval)
154		goto out;
155
156	ret = wl1271_ps_elp_wakeup(wl);
157	if (ret < 0)
158		goto out;
159
160	ret = wl1271_set_rx_streaming(wl, wlvif, true);
161	if (ret < 0)
162		goto out_sleep;
163
164	/* stop it after some time of inactivity */
165	mod_timer(&wlvif->rx_streaming_timer,
166		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
167
168out_sleep:
169	wl1271_ps_elp_sleep(wl);
170out:
171	mutex_unlock(&wl->mutex);
172}
173
174static void wl1271_rx_streaming_disable_work(struct work_struct *work)
175{
176	int ret;
177	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
178						rx_streaming_disable_work);
179	struct wl1271 *wl = wlvif->wl;
180
181	mutex_lock(&wl->mutex);
182
183	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
184		goto out;
185
186	ret = wl1271_ps_elp_wakeup(wl);
187	if (ret < 0)
188		goto out;
189
190	ret = wl1271_set_rx_streaming(wl, wlvif, false);
191	if (ret)
192		goto out_sleep;
193
194out_sleep:
195	wl1271_ps_elp_sleep(wl);
196out:
197	mutex_unlock(&wl->mutex);
198}
199
200static void wl1271_rx_streaming_timer(unsigned long data)
201{
202	struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
203	struct wl1271 *wl = wlvif->wl;
204	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
205}
206
207/* wl->mutex must be taken */
208void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
209{
210	/* if the watchdog is not armed, don't do anything */
211	if (wl->tx_allocated_blocks == 0)
212		return;
213
214	cancel_delayed_work(&wl->tx_watchdog_work);
215	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
216		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
217}
218
219static void wlcore_rc_update_work(struct work_struct *work)
220{
221	int ret;
222	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
223						rc_update_work);
224	struct wl1271 *wl = wlvif->wl;
225
226	mutex_lock(&wl->mutex);
227
228	if (unlikely(wl->state != WLCORE_STATE_ON))
229		goto out;
230
231	ret = wl1271_ps_elp_wakeup(wl);
232	if (ret < 0)
233		goto out;
234
235	wlcore_hw_sta_rc_update(wl, wlvif);
236
237	wl1271_ps_elp_sleep(wl);
238out:
239	mutex_unlock(&wl->mutex);
240}
241
242static void wl12xx_tx_watchdog_work(struct work_struct *work)
243{
244	struct delayed_work *dwork;
245	struct wl1271 *wl;
246
247	dwork = container_of(work, struct delayed_work, work);
248	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
249
250	mutex_lock(&wl->mutex);
251
252	if (unlikely(wl->state != WLCORE_STATE_ON))
253		goto out;
254
255	/* Tx went out in the meantime - everything is ok */
256	if (unlikely(wl->tx_allocated_blocks == 0))
257		goto out;
258
259	/*
260	 * if a ROC is in progress, we might not have any Tx for a long
261	 * time (e.g. pending Tx on the non-ROC channels)
262	 */
263	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
264		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
265			     wl->conf.tx.tx_watchdog_timeout);
266		wl12xx_rearm_tx_watchdog_locked(wl);
267		goto out;
268	}
269
270	/*
271	 * if a scan is in progress, we might not have any Tx for a long
272	 * time
273	 */
274	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
275		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
276			     wl->conf.tx.tx_watchdog_timeout);
277		wl12xx_rearm_tx_watchdog_locked(wl);
278		goto out;
279	}
280
281	/*
282	 * AP might cache a frame for a long time for a sleeping station,
283	 * so rearm the timer if there's an AP interface with stations. If
284	 * Tx is genuinely stuck we will eventually discover it when all
285	 * stations are removed due to inactivity.
286	 */
287	if (wl->active_sta_count) {
288		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
289			     "%d stations",
290			      wl->conf.tx.tx_watchdog_timeout,
291			      wl->active_sta_count);
292		wl12xx_rearm_tx_watchdog_locked(wl);
293		goto out;
294	}
295
296	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
297		     wl->conf.tx.tx_watchdog_timeout);
298	wl12xx_queue_recovery_work(wl);
299
300out:
301	mutex_unlock(&wl->mutex);
302}
303
304static void wlcore_adjust_conf(struct wl1271 *wl)
305{
306	/* Adjust settings according to optional module parameters */
307
308	/* Firmware Logger params */
309	if (fwlog_mem_blocks != -1) {
310		if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
311		    fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
312			wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
313		} else {
314			wl1271_error(
315				"Illegal fwlog_mem_blocks=%d using default %d",
316				fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
317		}
318	}
319
320	if (fwlog_param) {
321		if (!strcmp(fwlog_param, "continuous")) {
322			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
323		} else if (!strcmp(fwlog_param, "ondemand")) {
324			wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
325		} else if (!strcmp(fwlog_param, "dbgpins")) {
326			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
327			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
328		} else if (!strcmp(fwlog_param, "disable")) {
329			wl->conf.fwlog.mem_blocks = 0;
330			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
331		} else {
332			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
333		}
334	}
335
336	if (bug_on_recovery != -1)
337		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
338
339	if (no_recovery != -1)
340		wl->conf.recovery.no_recovery = (u8) no_recovery;
341}
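
/*
 * For reference, the overrides handled above come from optional module
 * parameters, so they can be set at load time.  A purely illustrative
 * example (the user-visible parameter names are whatever the
 * module_param*() registrations for these variables declare):
 *
 *	modprobe wlcore fwlog=continuous fwlog_mem_blocks=2 no_recovery=1
 *
 * Values left at their defaults (-1 or NULL) keep the settings from the
 * platform conf structure untouched.
 */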
342
343static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
344					struct wl12xx_vif *wlvif,
345					u8 hlid, u8 tx_pkts)
346{
347	bool fw_ps;
348
349	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
350
351	/*
352	 * Wake up from high-level PS if the STA is asleep with too few
353	 * packets in FW or if the STA is awake.
354	 */
355	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
356		wl12xx_ps_link_end(wl, wlvif, hlid);
357
358	/*
359	 * Start high-level PS if the STA is asleep with enough blocks in FW.
360	 * Make an exception if this is the only connected link. In this
361	 * case FW-memory congestion is less of a problem.
362	 * Note that a single connected STA means 2*ap_count + 1 active links,
363	 * since we must account for the global and broadcast AP links
364	 * for each AP. The "fw_ps" check assures us the other link is a STA
365	 * connected to the AP. Otherwise the FW would not set the PSM bit.
366	 */
367	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
368		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
369		wl12xx_ps_link_start(wl, wlvif, hlid, true);
370}
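
/*
 * Worked example of the link accounting above (illustrative numbers):
 * with a single AP role (ap_count == 1) the AP itself owns a global link
 * and a broadcast link, so the first connected STA brings
 * active_link_count to 3 == ap_count * 2 + 1.  High-level PS is therefore
 * only started once a second peer link exists (active_link_count > 3),
 * i.e. when FW memory congestion can actually become a problem.
 */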
371
372static void wl12xx_irq_update_links_status(struct wl1271 *wl,
373					   struct wl12xx_vif *wlvif,
374					   struct wl_fw_status *status)
375{
376	unsigned long cur_fw_ps_map;
377	u8 hlid;
378
379	cur_fw_ps_map = status->link_ps_bitmap;
380	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
381		wl1271_debug(DEBUG_PSM,
382			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
383			     wl->ap_fw_ps_map, cur_fw_ps_map,
384			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
385
386		wl->ap_fw_ps_map = cur_fw_ps_map;
387	}
388
389	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
390		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
391					    wl->links[hlid].allocated_pkts);
392}
393
394static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
395{
396	struct wl12xx_vif *wlvif;
397	struct timespec ts;
398	u32 old_tx_blk_count = wl->tx_blocks_available;
399	int avail, freed_blocks;
400	int i;
401	int ret;
402	struct wl1271_link *lnk;
403
404	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
405				   wl->raw_fw_status,
406				   wl->fw_status_len, false);
407	if (ret < 0)
408		return ret;
409
410	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
411
412	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
413		     "drv_rx_counter = %d, tx_results_counter = %d)",
414		     status->intr,
415		     status->fw_rx_counter,
416		     status->drv_rx_counter,
417		     status->tx_results_counter);
418
419	for (i = 0; i < NUM_TX_QUEUES; i++) {
420		/* prevent wrap-around in freed-packets counter */
421		wl->tx_allocated_pkts[i] -=
422				(status->counters.tx_released_pkts[i] -
423				wl->tx_pkts_freed[i]) & 0xff;
424
425		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
426	}
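
	/*
	 * Example of the "& 0xff" wrap-around handling above and in the
	 * per-link loop below (illustrative values): if the last snapshot
	 * of an 8-bit FW counter was 250 and the new value is 3, then
	 * (3 - 250) & 0xff == 9, i.e. nine packets were freed even though
	 * the raw counter wrapped.
	 */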
427
428
429	for_each_set_bit(i, wl->links_map, wl->num_links) {
430		u8 diff;
431		lnk = &wl->links[i];
432
433		/* prevent wrap-around in freed-packets counter */
434		diff = (status->counters.tx_lnk_free_pkts[i] -
435		       lnk->prev_freed_pkts) & 0xff;
436
437		if (diff == 0)
438			continue;
439
440		lnk->allocated_pkts -= diff;
441		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
442
443		/* accumulate the prev_freed_pkts counter */
444		lnk->total_freed_pkts += diff;
445	}
446
447	/* prevent wrap-around in total blocks counter */
448	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
449		freed_blocks = status->total_released_blks -
450			       wl->tx_blocks_freed;
451	else
452		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
453			       status->total_released_blks;
454
455	wl->tx_blocks_freed = status->total_released_blks;
456
457	wl->tx_allocated_blocks -= freed_blocks;
458
459	/*
460	 * If the FW freed some blocks:
461	 * re-arm the timer if we still have allocated blocks (Tx is not
462	 * stuck); otherwise, cancel the timer (no Tx currently).
463	 */
464	if (freed_blocks) {
465		if (wl->tx_allocated_blocks)
466			wl12xx_rearm_tx_watchdog_locked(wl);
467		else
468			cancel_delayed_work(&wl->tx_watchdog_work);
469	}
470
471	avail = status->tx_total - wl->tx_allocated_blocks;
472
473	/*
474	 * The FW might change the total number of TX memblocks before
475	 * we get a notification about blocks being released. Thus, the
476	 * available blocks calculation might yield a temporary result
477	 * which is lower than the actual available blocks. Keeping in
478	 * mind that only blocks that were allocated can be moved from
479	 * TX to RX, tx_blocks_available should never decrease here.
480	 */
481	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
482				      avail);
483
484	/* if more blocks are available now, tx work can be scheduled */
485	if (wl->tx_blocks_available > old_tx_blk_count)
486		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
487
488	/* for AP, update the number of allocated TX blocks per link and PS status */
489	wl12xx_for_each_wlvif_ap(wl, wlvif) {
490		wl12xx_irq_update_links_status(wl, wlvif, status);
491	}
492
493	/* update the host-chipset time offset */
494	getnstimeofday(&ts);
495	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
496		(s64)(status->fw_localtime);
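
	/*
	 * Note: the ">> 10" above is a cheap approximation of a
	 * nanoseconds-to-microseconds conversion (dividing by 1024 instead
	 * of 1000); the resulting offset is presumably only used for
	 * coarse TX timestamps relative to fw_localtime, where the ~2.4%
	 * error does not matter.
	 */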
497
498	wl->fw_fast_lnk_map = status->link_fast_bitmap;
499
500	return 0;
501}
502
503static void wl1271_flush_deferred_work(struct wl1271 *wl)
504{
505	struct sk_buff *skb;
506
507	/* Pass all received frames to the network stack */
508	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
509		ieee80211_rx_ni(wl->hw, skb);
510
511	/* Return sent skbs to the network stack */
512	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
513		ieee80211_tx_status_ni(wl->hw, skb);
514}
515
516static void wl1271_netstack_work(struct work_struct *work)
517{
518	struct wl1271 *wl =
519		container_of(work, struct wl1271, netstack_work);
520
521	do {
522		wl1271_flush_deferred_work(wl);
523	} while (skb_queue_len(&wl->deferred_rx_queue));
524}
525
526#define WL1271_IRQ_MAX_LOOPS 256
527
528static int wlcore_irq_locked(struct wl1271 *wl)
529{
530	int ret = 0;
531	u32 intr;
532	int loopcount = WL1271_IRQ_MAX_LOOPS;
533	bool done = false;
534	unsigned int defer_count;
535	unsigned long flags;
536
537	/*
538	 * If an edge-triggered interrupt must be used, we cannot iterate
539	 * more than once without introducing race conditions with the hardirq.
540	 */
541	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
542		loopcount = 1;
543
544	wl1271_debug(DEBUG_IRQ, "IRQ work");
545
546	if (unlikely(wl->state != WLCORE_STATE_ON))
547		goto out;
548
549	ret = wl1271_ps_elp_wakeup(wl);
550	if (ret < 0)
551		goto out;
552
553	while (!done && loopcount--) {
554		/*
555		 * In order to avoid a race with the hardirq, clear the flag
556		 * before acknowledging the chip. Since the mutex is held,
557		 * wl1271_ps_elp_wakeup cannot be called concurrently.
558		 */
559		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
560		smp_mb__after_atomic();
561
562		ret = wlcore_fw_status(wl, wl->fw_status);
563		if (ret < 0)
564			goto out;
565
566		wlcore_hw_tx_immediate_compl(wl);
567
568		intr = wl->fw_status->intr;
569		intr &= WLCORE_ALL_INTR_MASK;
570		if (!intr) {
571			done = true;
572			continue;
573		}
574
575		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
576			wl1271_error("HW watchdog interrupt received! starting recovery.");
577			wl->watchdog_recovery = true;
578			ret = -EIO;
579
580			/* restarting the chip. ignore any other interrupt. */
581			goto out;
582		}
583
584		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
585			wl1271_error("SW watchdog interrupt received! "
586				     "starting recovery.");
587			wl->watchdog_recovery = true;
588			ret = -EIO;
589
590			/* restarting the chip. ignore any other interrupt. */
591			goto out;
592		}
593
594		if (likely(intr & WL1271_ACX_INTR_DATA)) {
595			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
596
597			ret = wlcore_rx(wl, wl->fw_status);
598			if (ret < 0)
599				goto out;
600
601			/* Check if any tx blocks were freed */
602			spin_lock_irqsave(&wl->wl_lock, flags);
603			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
604			    wl1271_tx_total_queue_count(wl) > 0) {
605				spin_unlock_irqrestore(&wl->wl_lock, flags);
606				/*
607				 * In order to avoid starvation of the TX path,
608				 * call the work function directly.
609				 */
610				ret = wlcore_tx_work_locked(wl);
611				if (ret < 0)
612					goto out;
613			} else {
614				spin_unlock_irqrestore(&wl->wl_lock, flags);
615			}
616
617			/* check for tx results */
618			ret = wlcore_hw_tx_delayed_compl(wl);
619			if (ret < 0)
620				goto out;
621
622			/* Make sure the deferred queues don't get too long */
623			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
624				      skb_queue_len(&wl->deferred_rx_queue);
625			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
626				wl1271_flush_deferred_work(wl);
627		}
628
629		if (intr & WL1271_ACX_INTR_EVENT_A) {
630			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
631			ret = wl1271_event_handle(wl, 0);
632			if (ret < 0)
633				goto out;
634		}
635
636		if (intr & WL1271_ACX_INTR_EVENT_B) {
637			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
638			ret = wl1271_event_handle(wl, 1);
639			if (ret < 0)
640				goto out;
641		}
642
643		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
644			wl1271_debug(DEBUG_IRQ,
645				     "WL1271_ACX_INTR_INIT_COMPLETE");
646
647		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
648			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
649	}
650
651	wl1271_ps_elp_sleep(wl);
652
653out:
654	return ret;
655}
656
657static irqreturn_t wlcore_irq(int irq, void *cookie)
658{
659	int ret;
660	unsigned long flags;
661	struct wl1271 *wl = cookie;
662
663	/* complete the ELP completion */
664	spin_lock_irqsave(&wl->wl_lock, flags);
665	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
666	if (wl->elp_compl) {
667		complete(wl->elp_compl);
668		wl->elp_compl = NULL;
669	}
670
671	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
672		/* don't enqueue a work item right now. mark it as pending */
673		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
674		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
675		disable_irq_nosync(wl->irq);
676		pm_wakeup_event(wl->dev, 0);
677		spin_unlock_irqrestore(&wl->wl_lock, flags);
678		return IRQ_HANDLED;
679	}
680	spin_unlock_irqrestore(&wl->wl_lock, flags);
681
682	/* TX might be handled here, avoid redundant work */
683	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
684	cancel_work_sync(&wl->tx_work);
685
686	mutex_lock(&wl->mutex);
687
688	ret = wlcore_irq_locked(wl);
689	if (ret)
690		wl12xx_queue_recovery_work(wl);
691
692	spin_lock_irqsave(&wl->wl_lock, flags);
693	/* In case TX was not handled here, queue TX work */
694	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
695	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
696	    wl1271_tx_total_queue_count(wl) > 0)
697		ieee80211_queue_work(wl->hw, &wl->tx_work);
698	spin_unlock_irqrestore(&wl->wl_lock, flags);
699
700	mutex_unlock(&wl->mutex);
701
702	return IRQ_HANDLED;
703}
704
705struct vif_counter_data {
706	u8 counter;
707
708	struct ieee80211_vif *cur_vif;
709	bool cur_vif_running;
710};
711
712static void wl12xx_vif_count_iter(void *data, u8 *mac,
713				  struct ieee80211_vif *vif)
714{
715	struct vif_counter_data *counter = data;
716
717	counter->counter++;
718	if (counter->cur_vif == vif)
719		counter->cur_vif_running = true;
720}
721
722/* caller must not hold wl->mutex, as it might deadlock */
723static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
724			       struct ieee80211_vif *cur_vif,
725			       struct vif_counter_data *data)
726{
727	memset(data, 0, sizeof(*data));
728	data->cur_vif = cur_vif;
729
730	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
731					    wl12xx_vif_count_iter, data);
732}
733
734static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
735{
736	const struct firmware *fw;
737	const char *fw_name;
738	enum wl12xx_fw_type fw_type;
739	int ret;
740
741	if (plt) {
742		fw_type = WL12XX_FW_TYPE_PLT;
743		fw_name = wl->plt_fw_name;
744	} else {
745		/*
746		 * we can't call wl12xx_get_vif_count() here because
747		 * wl->mutex is taken, so use the cached last_vif_count value
748		 */
749		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
750			fw_type = WL12XX_FW_TYPE_MULTI;
751			fw_name = wl->mr_fw_name;
752		} else {
753			fw_type = WL12XX_FW_TYPE_NORMAL;
754			fw_name = wl->sr_fw_name;
755		}
756	}
757
758	if (wl->fw_type == fw_type)
759		return 0;
760
761	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
762
763	ret = request_firmware(&fw, fw_name, wl->dev);
764
765	if (ret < 0) {
766		wl1271_error("could not get firmware %s: %d", fw_name, ret);
767		return ret;
768	}
769
770	if (fw->size % 4) {
771		wl1271_error("firmware size is not a multiple of 32 bits: %zu",
772			     fw->size);
773		ret = -EILSEQ;
774		goto out;
775	}
776
777	vfree(wl->fw);
778	wl->fw_type = WL12XX_FW_TYPE_NONE;
779	wl->fw_len = fw->size;
780	wl->fw = vmalloc(wl->fw_len);
781
782	if (!wl->fw) {
783		wl1271_error("could not allocate memory for the firmware");
784		ret = -ENOMEM;
785		goto out;
786	}
787
788	memcpy(wl->fw, fw->data, wl->fw_len);
789	ret = 0;
790	wl->fw_type = fw_type;
791out:
792	release_firmware(fw);
793
794	return ret;
795}
796
797void wl12xx_queue_recovery_work(struct wl1271 *wl)
798{
799	/* Avoid a recursive recovery */
800	if (wl->state == WLCORE_STATE_ON) {
801		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
802				  &wl->flags));
803
804		wl->state = WLCORE_STATE_RESTARTING;
805		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
806		wl1271_ps_elp_wakeup(wl);
807		wlcore_disable_interrupts_nosync(wl);
808		ieee80211_queue_work(wl->hw, &wl->recovery_work);
809	}
810}
811
812size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
813{
814	size_t len;
815
816	/* Make sure we have enough room */
817	len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
818
819	/* Fill the FW log file, consumed by the sysfs fwlog entry */
820	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
821	wl->fwlog_size += len;
822
823	return len;
824}
825
826static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
827{
828	struct wlcore_partition_set part, old_part;
829	u32 addr;
830	u32 offset;
831	u32 end_of_log;
832	u8 *block;
833	int ret;
834
835	if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
836	    (wl->conf.fwlog.mem_blocks == 0))
837		return;
838
839	wl1271_info("Reading FW panic log");
840
841	block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
842	if (!block)
843		return;
844
845	/*
846	 * Make sure the chip is awake and the logger isn't active.
847	 * Do not send a stop fwlog command if the fw is hung or if
848	 * dbgpins are used (due to some fw bug).
849	 */
850	if (wl1271_ps_elp_wakeup(wl))
851		goto out;
852	if (!wl->watchdog_recovery &&
853	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
854		wl12xx_cmd_stop_fwlog(wl);
855
856	/* Read the first memory block address */
857	ret = wlcore_fw_status(wl, wl->fw_status);
858	if (ret < 0)
859		goto out;
860
861	addr = wl->fw_status->log_start_addr;
862	if (!addr)
863		goto out;
864
865	if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
866		offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
867		end_of_log = wl->fwlog_end;
868	} else {
869		offset = sizeof(addr);
870		end_of_log = addr;
871	}
872
873	old_part = wl->curr_part;
874	memset(&part, 0, sizeof(part));
875
876	/* Traverse the memory blocks linked list */
877	do {
878		part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
879		part.mem.size  = PAGE_SIZE;
880
881		ret = wlcore_set_partition(wl, &part);
882		if (ret < 0) {
883			wl1271_error("%s: set_partition start=0x%X size=%d",
884				__func__, part.mem.start, part.mem.size);
885			goto out;
886		}
887
888		memset(block, 0, wl->fw_mem_block_size);
889		ret = wlcore_read_hwaddr(wl, addr, block,
890					wl->fw_mem_block_size, false);
891
892		if (ret < 0)
893			goto out;
894
895		/*
896		 * Memory blocks are linked to one another. The first 4 bytes
897		 * of each memory block hold the hardware address of the next
898		 * one. The last memory block points back to the first one in
899		 * on-demand mode and holds the value 0x2000000 in continuous mode.
900		 */
901		addr = le32_to_cpup((__le32 *)block);
902
903		if (!wl12xx_copy_fwlog(wl, block + offset,
904					wl->fw_mem_block_size - offset))
905			break;
906	} while (addr && (addr != end_of_log));
907
908	wake_up_interruptible(&wl->fwlog_waitq);
909
910out:
911	kfree(block);
912	wlcore_set_partition(wl, &old_part);
913}
914
915static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
916				   u8 hlid, struct ieee80211_sta *sta)
917{
918	struct wl1271_station *wl_sta;
919	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
920
921	wl_sta = (void *)sta->drv_priv;
922	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
923
924	/*
925	 * increment the initial seq number on recovery to account for
926	 * transmitted packets that we haven't yet got in the FW status
927	 */
928	if (wlvif->encryption_type == KEY_GEM)
929		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
930
931	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
932		wl_sta->total_freed_pkts += sqn_recovery_padding;
933}
934
935static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
936					struct wl12xx_vif *wlvif,
937					u8 hlid, const u8 *addr)
938{
939	struct ieee80211_sta *sta;
940	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
941
942	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
943		    is_zero_ether_addr(addr)))
944		return;
945
946	rcu_read_lock();
947	sta = ieee80211_find_sta(vif, addr);
948	if (sta)
949		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
950	rcu_read_unlock();
951}
952
953static void wlcore_print_recovery(struct wl1271 *wl)
954{
955	u32 pc = 0;
956	u32 hint_sts = 0;
957	int ret;
958
959	wl1271_info("Hardware recovery in progress. FW ver: %s",
960		    wl->chip.fw_ver_str);
961
962	/* change partitions momentarily so we can read the FW pc */
963	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
964	if (ret < 0)
965		return;
966
967	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
968	if (ret < 0)
969		return;
970
971	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
972	if (ret < 0)
973		return;
974
975	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
976				pc, hint_sts, ++wl->recovery_count);
977
978	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
979}
980
981
982static void wl1271_recovery_work(struct work_struct *work)
983{
984	struct wl1271 *wl =
985		container_of(work, struct wl1271, recovery_work);
986	struct wl12xx_vif *wlvif;
987	struct ieee80211_vif *vif;
988
989	mutex_lock(&wl->mutex);
990
991	if (wl->state == WLCORE_STATE_OFF || wl->plt)
992		goto out_unlock;
993
994	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
995		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
996			wl12xx_read_fwlog_panic(wl);
997		wlcore_print_recovery(wl);
998	}
999
1000	BUG_ON(wl->conf.recovery.bug_on_recovery &&
1001	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
1002
1003	if (wl->conf.recovery.no_recovery) {
1004		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
1005		goto out_unlock;
1006	}
1007
1008	/* Prevent spurious TX during FW restart */
1009	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1010
1011	/* reboot the chipset */
1012	while (!list_empty(&wl->wlvif_list)) {
1013		wlvif = list_first_entry(&wl->wlvif_list,
1014				       struct wl12xx_vif, list);
1015		vif = wl12xx_wlvif_to_vif(wlvif);
1016
1017		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
1018		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
1019			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
1020						    vif->bss_conf.bssid);
1021		}
1022
1023		__wl1271_op_remove_interface(wl, vif, false);
1024	}
1025
1026	wlcore_op_stop_locked(wl);
1027
1028	ieee80211_restart_hw(wl->hw);
1029
1030	/*
1031	 * It's safe to enable TX now - the queues are stopped after a request
1032	 * to restart the HW.
1033	 */
1034	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1035
1036out_unlock:
1037	wl->watchdog_recovery = false;
1038	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
1039	mutex_unlock(&wl->mutex);
1040}
1041
1042static int wlcore_fw_wakeup(struct wl1271 *wl)
1043{
1044	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
1045}
1046
1047static int wl1271_setup(struct wl1271 *wl)
1048{
1049	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1050	if (!wl->raw_fw_status)
1051		goto err;
1052
1053	wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1054	if (!wl->fw_status)
1055		goto err;
1056
1057	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1058	if (!wl->tx_res_if)
1059		goto err;
1060
1061	return 0;
1062err:
1063	kfree(wl->fw_status);
1064	kfree(wl->raw_fw_status);
1065	return -ENOMEM;
1066}
1067
1068static int wl12xx_set_power_on(struct wl1271 *wl)
1069{
1070	int ret;
1071
1072	msleep(WL1271_PRE_POWER_ON_SLEEP);
1073	ret = wl1271_power_on(wl);
1074	if (ret < 0)
1075		goto out;
1076	msleep(WL1271_POWER_ON_SLEEP);
1077	wl1271_io_reset(wl);
1078	wl1271_io_init(wl);
1079
1080	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1081	if (ret < 0)
1082		goto fail;
1083
1084	/* ELP module wake up */
1085	ret = wlcore_fw_wakeup(wl);
1086	if (ret < 0)
1087		goto fail;
1088
1089out:
1090	return ret;
1091
1092fail:
1093	wl1271_power_off(wl);
1094	return ret;
1095}
1096
1097static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1098{
1099	int ret = 0;
1100
1101	ret = wl12xx_set_power_on(wl);
1102	if (ret < 0)
1103		goto out;
1104
1105	/*
1106	 * For wl127x based devices we could use the default block
1107	 * size (512 bytes), but due to a bug in the sdio driver, we
1108	 * need to set it explicitly after the chip is powered on.  To
1109	 * simplify the code and since the performance impact is
1110	 * negligible, we use the same block size for all different
1111	 * chip types.
1112	 *
1113	 * Check if the bus supports blocksize alignment and, if it
1114	 * doesn't, make sure we don't have the quirk.
1115	 */
1116	if (!wl1271_set_block_size(wl))
1117		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1118
1119	/* TODO: make sure the lower driver has set things up correctly */
1120
1121	ret = wl1271_setup(wl);
1122	if (ret < 0)
1123		goto out;
1124
1125	ret = wl12xx_fetch_firmware(wl, plt);
1126	if (ret < 0)
1127		goto out;
1128
1129out:
1130	return ret;
1131}
1132
1133int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1134{
1135	int retries = WL1271_BOOT_RETRIES;
1136	struct wiphy *wiphy = wl->hw->wiphy;
1137
1138	static const char* const PLT_MODE[] = {
1139		"PLT_OFF",
1140		"PLT_ON",
1141		"PLT_FEM_DETECT",
1142		"PLT_CHIP_AWAKE"
1143	};
1144
1145	int ret;
1146
1147	mutex_lock(&wl->mutex);
1148
1149	wl1271_notice("power up");
1150
1151	if (wl->state != WLCORE_STATE_OFF) {
1152		wl1271_error("cannot go into PLT state because not "
1153			     "in off state: %d", wl->state);
1154		ret = -EBUSY;
1155		goto out;
1156	}
1157
1158	/* Indicate to lower levels that we are now in PLT mode */
1159	wl->plt = true;
1160	wl->plt_mode = plt_mode;
1161
1162	while (retries) {
1163		retries--;
1164		ret = wl12xx_chip_wakeup(wl, true);
1165		if (ret < 0)
1166			goto power_off;
1167
1168		if (plt_mode != PLT_CHIP_AWAKE) {
1169			ret = wl->ops->plt_init(wl);
1170			if (ret < 0)
1171				goto power_off;
1172		}
1173
1174		wl->state = WLCORE_STATE_ON;
1175		wl1271_notice("firmware booted in PLT mode %s (%s)",
1176			      PLT_MODE[plt_mode],
1177			      wl->chip.fw_ver_str);
1178
1179		/* update hw/fw version info in wiphy struct */
1180		wiphy->hw_version = wl->chip.id;
1181		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1182			sizeof(wiphy->fw_version));
1183
1184		goto out;
1185
1186power_off:
1187		wl1271_power_off(wl);
1188	}
1189
1190	wl->plt = false;
1191	wl->plt_mode = PLT_OFF;
1192
1193	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1194		     WL1271_BOOT_RETRIES);
1195out:
1196	mutex_unlock(&wl->mutex);
1197
1198	return ret;
1199}
1200
1201int wl1271_plt_stop(struct wl1271 *wl)
1202{
1203	int ret = 0;
1204
1205	wl1271_notice("power down");
1206
1207	/*
1208	 * Interrupts must be disabled before setting the state to OFF.
1209	 * Otherwise, the interrupt handler might be called and exit without
1210	 * reading the interrupt status.
1211	 */
1212	wlcore_disable_interrupts(wl);
1213	mutex_lock(&wl->mutex);
1214	if (!wl->plt) {
1215		mutex_unlock(&wl->mutex);
1216
1217		/*
1218		 * This will not necessarily enable interrupts as interrupts
1219		 * may have been disabled when op_stop was called. It will,
1220		 * however, balance the above call to disable_interrupts().
1221		 */
1222		wlcore_enable_interrupts(wl);
1223
1224		wl1271_error("cannot power down because not in PLT "
1225			     "state: %d", wl->state);
1226		ret = -EBUSY;
1227		goto out;
1228	}
1229
1230	mutex_unlock(&wl->mutex);
1231
1232	wl1271_flush_deferred_work(wl);
1233	cancel_work_sync(&wl->netstack_work);
1234	cancel_work_sync(&wl->recovery_work);
1235	cancel_delayed_work_sync(&wl->elp_work);
1236	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1237
1238	mutex_lock(&wl->mutex);
1239	wl1271_power_off(wl);
1240	wl->flags = 0;
1241	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1242	wl->state = WLCORE_STATE_OFF;
1243	wl->plt = false;
1244	wl->plt_mode = PLT_OFF;
1245	wl->rx_counter = 0;
1246	mutex_unlock(&wl->mutex);
1247
1248out:
1249	return ret;
1250}
1251
1252static void wl1271_op_tx(struct ieee80211_hw *hw,
1253			 struct ieee80211_tx_control *control,
1254			 struct sk_buff *skb)
1255{
1256	struct wl1271 *wl = hw->priv;
1257	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1258	struct ieee80211_vif *vif = info->control.vif;
1259	struct wl12xx_vif *wlvif = NULL;
1260	unsigned long flags;
1261	int q, mapping;
1262	u8 hlid;
1263
1264	if (!vif) {
1265		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1266		ieee80211_free_txskb(hw, skb);
1267		return;
1268	}
1269
1270	wlvif = wl12xx_vif_to_data(vif);
1271	mapping = skb_get_queue_mapping(skb);
1272	q = wl1271_tx_get_queue(mapping);
1273
1274	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1275
1276	spin_lock_irqsave(&wl->wl_lock, flags);
1277
1278	/*
1279	 * drop the packet if the link is invalid or the queue is stopped
1280	 * for any reason but watermark. Watermark is a "soft"-stop so we
1281	 * allow these packets through.
1282	 */
1283	if (hlid == WL12XX_INVALID_LINK_ID ||
1284	    (!test_bit(hlid, wlvif->links_map)) ||
1285	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1286	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1287			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1288		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1289		ieee80211_free_txskb(hw, skb);
1290		goto out;
1291	}
1292
1293	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1294		     hlid, q, skb->len);
1295	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1296
1297	wl->tx_queue_count[q]++;
1298	wlvif->tx_queue_count[q]++;
1299
1300	/*
1301	 * The workqueue is slow to process the tx_queue, so we need to stop
1302	 * the queue here; otherwise it will grow too long.
1303	 */
1304	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1305	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1306					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1307		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1308		wlcore_stop_queue_locked(wl, wlvif, q,
1309					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1310	}
1311
1312	/*
1313	 * The chip-specific setup must run before the first TX packet -
1314	 * before that, the tx_work will not be initialized!
1315	 */
1316
1317	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1318	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1319		ieee80211_queue_work(wl->hw, &wl->tx_work);
1320
1321out:
1322	spin_unlock_irqrestore(&wl->wl_lock, flags);
1323}
1324
1325int wl1271_tx_dummy_packet(struct wl1271 *wl)
1326{
1327	unsigned long flags;
1328	int q;
1329
1330	/* no need to queue a new dummy packet if one is already pending */
1331	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1332		return 0;
1333
1334	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1335
1336	spin_lock_irqsave(&wl->wl_lock, flags);
1337	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1338	wl->tx_queue_count[q]++;
1339	spin_unlock_irqrestore(&wl->wl_lock, flags);
1340
1341	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1342	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1343		return wlcore_tx_work_locked(wl);
1344
1345	/*
1346	 * If the FW TX is busy, TX work will be scheduled by the threaded
1347	 * interrupt handler function
1348	 */
1349	return 0;
1350}
1351
1352/*
1353 * The size of the dummy packet should be at least 1400 bytes. However, in
1354 * order to minimize the number of bus transactions, aligning it to 512-byte
1355 * boundaries could be beneficial, performance-wise.
1356 */
1357#define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
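
/*
 * With the values above, ALIGN(1400, 512) evaluates to 1536 bytes; the
 * dummy frame built below is therefore a zeroed (non-QoS) null-data
 * template whose payload fills 1536 bytes minus the TX HW descriptor and
 * the 3-address 802.11 header.
 */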
1358
1359static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1360{
1361	struct sk_buff *skb;
1362	struct ieee80211_hdr_3addr *hdr;
1363	unsigned int dummy_packet_size;
1364
1365	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1366			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1367
1368	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1369	if (!skb) {
1370		wl1271_warning("Failed to allocate a dummy packet skb");
1371		return NULL;
1372	}
1373
1374	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1375
1376	hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1377	memset(hdr, 0, sizeof(*hdr));
1378	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1379					 IEEE80211_STYPE_NULLFUNC |
1380					 IEEE80211_FCTL_TODS);
1381
1382	memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1383
1384	/* Dummy packets require the TID to be management */
1385	skb->priority = WL1271_TID_MGMT;
1386
1387	/* Initialize all fields that might be used */
1388	skb_set_queue_mapping(skb, 0);
1389	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1390
1391	return skb;
1392}
1393
1394
1395#ifdef CONFIG_PM
1396static int
1397wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1398{
1399	int num_fields = 0, in_field = 0, fields_size = 0;
1400	int i, pattern_len = 0;
1401
1402	if (!p->mask) {
1403		wl1271_warning("No mask in WoWLAN pattern");
1404		return -EINVAL;
1405	}
1406
1407	/*
1408	 * The pattern is broken up into segments of bytes at different offsets
1409	 * that need to be checked by the FW filter. Each segment is called
1410	 * a field in the FW API. We verify that the total number of fields
1411	 * required for this pattern won't exceed FW limits (8)
1412	 * as well as the total fields buffer won't exceed the FW limit.
1413	 * Note that if there's a pattern which crosses Ethernet/IP header
1414	 * boundary a new field is required.
1415	 */
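	/*
	 * Hypothetical example: assuming WL1271_RX_FILTER_ETH_HEADER_SIZE
	 * is 14, a mask selecting pattern bytes 12..15 produces two fields,
	 * bytes 12..13 (Ethernet-header relative) and bytes 14..15
	 * (IP-header relative), because a single field may not span the
	 * header boundary.
	 */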
1416	for (i = 0; i < p->pattern_len; i++) {
1417		if (test_bit(i, (unsigned long *)p->mask)) {
1418			if (!in_field) {
1419				in_field = 1;
1420				pattern_len = 1;
1421			} else {
1422				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1423					num_fields++;
1424					fields_size += pattern_len +
1425						RX_FILTER_FIELD_OVERHEAD;
1426					pattern_len = 1;
1427				} else
1428					pattern_len++;
1429			}
1430		} else {
1431			if (in_field) {
1432				in_field = 0;
1433				fields_size += pattern_len +
1434					RX_FILTER_FIELD_OVERHEAD;
1435				num_fields++;
1436			}
1437		}
1438	}
1439
1440	if (in_field) {
1441		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1442		num_fields++;
1443	}
1444
1445	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1446		wl1271_warning("RX Filter too complex. Too many segments");
1447		return -EINVAL;
1448	}
1449
1450	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1451		wl1271_warning("RX filter pattern is too big");
1452		return -E2BIG;
1453	}
1454
1455	return 0;
1456}
1457
1458struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1459{
1460	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1461}
1462
1463void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1464{
1465	int i;
1466
1467	if (filter == NULL)
1468		return;
1469
1470	for (i = 0; i < filter->num_fields; i++)
1471		kfree(filter->fields[i].pattern);
1472
1473	kfree(filter);
1474}
1475
1476int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1477				 u16 offset, u8 flags,
1478				 const u8 *pattern, u8 len)
1479{
1480	struct wl12xx_rx_filter_field *field;
1481
1482	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1483		wl1271_warning("Max fields per RX filter. can't alloc another");
1484		return -EINVAL;
1485	}
1486
1487	field = &filter->fields[filter->num_fields];
1488
1489	field->pattern = kzalloc(len, GFP_KERNEL);
1490	if (!field->pattern) {
1491		wl1271_warning("Failed to allocate RX filter pattern");
1492		return -ENOMEM;
1493	}
1494
1495	filter->num_fields++;
1496
1497	field->offset = cpu_to_le16(offset);
1498	field->flags = flags;
1499	field->len = len;
1500	memcpy(field->pattern, pattern, len);
1501
1502	return 0;
1503}
1504
1505int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1506{
1507	int i, fields_size = 0;
1508
1509	for (i = 0; i < filter->num_fields; i++)
1510		fields_size += filter->fields[i].len +
1511			sizeof(struct wl12xx_rx_filter_field) -
1512			sizeof(u8 *);
1513
1514	return fields_size;
1515}
1516
1517void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1518				    u8 *buf)
1519{
1520	int i;
1521	struct wl12xx_rx_filter_field *field;
1522
1523	for (i = 0; i < filter->num_fields; i++) {
1524		field = (struct wl12xx_rx_filter_field *)buf;
1525
1526		field->offset = filter->fields[i].offset;
1527		field->flags = filter->fields[i].flags;
1528		field->len = filter->fields[i].len;
1529
1530		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1531		buf += sizeof(struct wl12xx_rx_filter_field) -
1532			sizeof(u8 *) + field->len;
1533	}
1534}
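
/*
 * The serialized layout produced above stores each field header with the
 * pattern bytes written inline, starting where the pattern pointer would
 * normally sit.  For example (sizes illustrative), a field carrying a
 * 2-byte pattern consumes
 * sizeof(struct wl12xx_rx_filter_field) - sizeof(u8 *) + 2 bytes of the
 * output buffer, which matches what wl1271_rx_filter_get_fields_size()
 * accounts for.
 */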
1535
1536/*
1537 * Allocates an RX filter and returns it through f.
1538 * It must be freed using wl1271_rx_filter_free().
1539 */
1540static int
1541wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1542					   struct wl12xx_rx_filter **f)
1543{
1544	int i, j, ret = 0;
1545	struct wl12xx_rx_filter *filter;
1546	u16 offset;
1547	u8 flags, len;
1548
1549	filter = wl1271_rx_filter_alloc();
1550	if (!filter) {
1551		wl1271_warning("Failed to alloc rx filter");
1552		ret = -ENOMEM;
1553		goto err;
1554	}
1555
1556	i = 0;
1557	while (i < p->pattern_len) {
1558		if (!test_bit(i, (unsigned long *)p->mask)) {
1559			i++;
1560			continue;
1561		}
1562
1563		for (j = i; j < p->pattern_len; j++) {
1564			if (!test_bit(j, (unsigned long *)p->mask))
1565				break;
1566
1567			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1568			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1569				break;
1570		}
1571
1572		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1573			offset = i;
1574			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1575		} else {
1576			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1577			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1578		}
1579
1580		len = j - i;
1581
1582		ret = wl1271_rx_filter_alloc_field(filter,
1583						   offset,
1584						   flags,
1585						   &p->pattern[i], len);
1586		if (ret)
1587			goto err;
1588
1589		i = j;
1590	}
1591
1592	filter->action = FILTER_SIGNAL;
1593
1594	*f = filter;
1595	return 0;
1596
1597err:
1598	wl1271_rx_filter_free(filter);
1599	*f = NULL;
1600
1601	return ret;
1602}
1603
1604static int wl1271_configure_wowlan(struct wl1271 *wl,
1605				   struct cfg80211_wowlan *wow)
1606{
1607	int i, ret;
1608
1609	if (!wow || wow->any || !wow->n_patterns) {
1610		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1611							  FILTER_SIGNAL);
1612		if (ret)
1613			goto out;
1614
1615		ret = wl1271_rx_filter_clear_all(wl);
1616		if (ret)
1617			goto out;
1618
1619		return 0;
1620	}
1621
1622	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1623		return -EINVAL;
1624
1625	/* Validate all incoming patterns before clearing current FW state */
1626	for (i = 0; i < wow->n_patterns; i++) {
1627		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1628		if (ret) {
1629			wl1271_warning("Bad wowlan pattern %d", i);
1630			return ret;
1631		}
1632	}
1633
1634	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1635	if (ret)
1636		goto out;
1637
1638	ret = wl1271_rx_filter_clear_all(wl);
1639	if (ret)
1640		goto out;
1641
1642	/* Translate WoWLAN patterns into filters */
1643	for (i = 0; i < wow->n_patterns; i++) {
1644		struct cfg80211_pkt_pattern *p;
1645		struct wl12xx_rx_filter *filter = NULL;
1646
1647		p = &wow->patterns[i];
1648
1649		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1650		if (ret) {
1651			wl1271_warning("Failed to create an RX filter from "
1652				       "wowlan pattern %d", i);
1653			goto out;
1654		}
1655
1656		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1657
1658		wl1271_rx_filter_free(filter);
1659		if (ret)
1660			goto out;
1661	}
1662
1663	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1664
1665out:
1666	return ret;
1667}
1668
1669static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1670					struct wl12xx_vif *wlvif,
1671					struct cfg80211_wowlan *wow)
1672{
1673	int ret = 0;
1674
1675	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1676		goto out;
1677
1678	ret = wl1271_configure_wowlan(wl, wow);
1679	if (ret < 0)
1680		goto out;
1681
1682	if ((wl->conf.conn.suspend_wake_up_event ==
1683	     wl->conf.conn.wake_up_event) &&
1684	    (wl->conf.conn.suspend_listen_interval ==
1685	     wl->conf.conn.listen_interval))
1686		goto out;
1687
1688	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1689				    wl->conf.conn.suspend_wake_up_event,
1690				    wl->conf.conn.suspend_listen_interval);
1691
1692	if (ret < 0)
1693		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1694out:
1695	return ret;
1696
1697}
1698
1699static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1700					struct wl12xx_vif *wlvif,
1701					struct cfg80211_wowlan *wow)
1702{
1703	int ret = 0;
1704
1705	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1706		goto out;
1707
1708	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1709	if (ret < 0)
1710		goto out;
1711
1712	ret = wl1271_configure_wowlan(wl, wow);
1713	if (ret < 0)
1714		goto out;
1715
1716out:
1717	return ret;
1718
1719}
1720
1721static int wl1271_configure_suspend(struct wl1271 *wl,
1722				    struct wl12xx_vif *wlvif,
1723				    struct cfg80211_wowlan *wow)
1724{
1725	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1726		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1727	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1728		return wl1271_configure_suspend_ap(wl, wlvif, wow);
1729	return 0;
1730}
1731
1732static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1733{
1734	int ret = 0;
1735	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1736	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1737
1738	if ((!is_ap) && (!is_sta))
1739		return;
1740
1741	if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1742	    (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1743		return;
1744
1745	wl1271_configure_wowlan(wl, NULL);
1746
1747	if (is_sta) {
1748		if ((wl->conf.conn.suspend_wake_up_event ==
1749		     wl->conf.conn.wake_up_event) &&
1750		    (wl->conf.conn.suspend_listen_interval ==
1751		     wl->conf.conn.listen_interval))
1752			return;
1753
1754		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1755				    wl->conf.conn.wake_up_event,
1756				    wl->conf.conn.listen_interval);
1757
1758		if (ret < 0)
1759			wl1271_error("resume: wake up conditions failed: %d",
1760				     ret);
1761
1762	} else if (is_ap) {
1763		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1764	}
1765}
1766
1767static int wl1271_op_suspend(struct ieee80211_hw *hw,
1768			    struct cfg80211_wowlan *wow)
1769{
1770	struct wl1271 *wl = hw->priv;
1771	struct wl12xx_vif *wlvif;
1772	int ret;
1773
1774	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1775	WARN_ON(!wow);
1776
1777	/* we want to perform the recovery before suspending */
1778	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1779		wl1271_warning("postponing suspend to perform recovery");
1780		return -EBUSY;
1781	}
1782
1783	wl1271_tx_flush(wl);
1784
1785	mutex_lock(&wl->mutex);
1786
1787	ret = wl1271_ps_elp_wakeup(wl);
1788	if (ret < 0) {
1789		mutex_unlock(&wl->mutex);
1790		return ret;
1791	}
1792
1793	wl->wow_enabled = true;
1794	wl12xx_for_each_wlvif(wl, wlvif) {
1795		ret = wl1271_configure_suspend(wl, wlvif, wow);
1796		if (ret < 0) {
1797			mutex_unlock(&wl->mutex);
1798			wl1271_warning("couldn't prepare device to suspend");
1799			return ret;
1800		}
1801	}
1802
1803	/* disable fast link flow control notifications from FW */
1804	ret = wlcore_hw_interrupt_notify(wl, false);
1805	if (ret < 0)
1806		goto out_sleep;
1807
1808	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1809	ret = wlcore_hw_rx_ba_filter(wl,
1810				     !!wl->conf.conn.suspend_rx_ba_activity);
1811	if (ret < 0)
1812		goto out_sleep;
1813
1814out_sleep:
1815	wl1271_ps_elp_sleep(wl);
1816	mutex_unlock(&wl->mutex);
1817
1818	if (ret < 0) {
1819		wl1271_warning("couldn't prepare device to suspend");
1820		return ret;
1821	}
1822
1823	/* flush any remaining work */
1824	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1825
1826	/*
1827	 * disable and re-enable interrupts in order to flush
1828	 * the threaded_irq
1829	 */
1830	wlcore_disable_interrupts(wl);
1831
1832	/*
1833	 * set suspended flag to avoid triggering a new threaded_irq
1834	 * work. no need for spinlock as interrupts are disabled.
1835	 */
1836	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1837
1838	wlcore_enable_interrupts(wl);
1839	flush_work(&wl->tx_work);
1840	flush_delayed_work(&wl->elp_work);
1841
1842	/*
1843	 * Cancel the watchdog even if above tx_flush failed. We will detect
1844	 * it on resume anyway.
1845	 */
1846	cancel_delayed_work(&wl->tx_watchdog_work);
1847
1848	return 0;
1849}
1850
1851static int wl1271_op_resume(struct ieee80211_hw *hw)
1852{
1853	struct wl1271 *wl = hw->priv;
1854	struct wl12xx_vif *wlvif;
1855	unsigned long flags;
1856	bool run_irq_work = false, pending_recovery;
1857	int ret;
1858
1859	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1860		     wl->wow_enabled);
1861	WARN_ON(!wl->wow_enabled);
1862
1863	/*
1864	 * re-enable irq_work enqueuing, and call irq_work directly if
1865	 * there is pending work.
1866	 */
1867	spin_lock_irqsave(&wl->wl_lock, flags);
1868	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1869	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1870		run_irq_work = true;
1871	spin_unlock_irqrestore(&wl->wl_lock, flags);
1872
1873	mutex_lock(&wl->mutex);
1874
1875	/* test the recovery flag before calling any SDIO functions */
1876	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1877				    &wl->flags);
1878
1879	if (run_irq_work) {
1880		wl1271_debug(DEBUG_MAC80211,
1881			     "run postponed irq_work directly");
1882
1883		/* don't talk to the HW if recovery is pending */
1884		if (!pending_recovery) {
1885			ret = wlcore_irq_locked(wl);
1886			if (ret)
1887				wl12xx_queue_recovery_work(wl);
1888		}
1889
1890		wlcore_enable_interrupts(wl);
1891	}
1892
1893	if (pending_recovery) {
1894		wl1271_warning("queuing forgotten recovery on resume");
1895		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1896		goto out_sleep;
1897	}
1898
1899	ret = wl1271_ps_elp_wakeup(wl);
1900	if (ret < 0)
1901		goto out;
1902
1903	wl12xx_for_each_wlvif(wl, wlvif) {
1904		wl1271_configure_resume(wl, wlvif);
1905	}
1906
1907	ret = wlcore_hw_interrupt_notify(wl, true);
1908	if (ret < 0)
1909		goto out_sleep;
1910
1911	/* turn off the RX BA filter that may have been enabled on suspend */
1912	ret = wlcore_hw_rx_ba_filter(wl, false);
1913	if (ret < 0)
1914		goto out_sleep;
1915
1916out_sleep:
1917	wl1271_ps_elp_sleep(wl);
1918
1919out:
1920	wl->wow_enabled = false;
1921
1922	/*
1923	 * Set a flag to re-init the watchdog on the first Tx after resume.
1924	 * That way we avoid possible conditions where Tx-complete interrupts
1925	 * fail to arrive and we perform a spurious recovery.
1926	 */
1927	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1928	mutex_unlock(&wl->mutex);
1929
1930	return 0;
1931}
1932#endif
1933
1934static int wl1271_op_start(struct ieee80211_hw *hw)
1935{
1936	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1937
1938	/*
1939	 * We have to delay the booting of the hardware because
1940	 * we need to know the local MAC address before downloading and
1941	 * initializing the firmware. The MAC address cannot be changed
1942	 * after boot, and without the proper MAC address, the firmware
1943	 * will not function properly.
1944	 *
1945	 * The MAC address is first known when the corresponding interface
1946	 * is added. That is where we will initialize the hardware.
1947	 */
1948
1949	return 0;
1950}
1951
1952static void wlcore_op_stop_locked(struct wl1271 *wl)
1953{
1954	int i;
1955
1956	if (wl->state == WLCORE_STATE_OFF) {
1957		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1958					&wl->flags))
1959			wlcore_enable_interrupts(wl);
1960
1961		return;
1962	}
1963
1964	/*
1965	 * this must be before the cancel_work calls below, so that the work
1966	 * functions don't perform further work.
1967	 */
1968	wl->state = WLCORE_STATE_OFF;
1969
1970	/*
1971	 * Use the nosync variant to disable interrupts, so the mutex could be
1972	 * held while doing so without deadlocking.
1973	 */
1974	wlcore_disable_interrupts_nosync(wl);
1975
1976	mutex_unlock(&wl->mutex);
1977
1978	wlcore_synchronize_interrupts(wl);
1979	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1980		cancel_work_sync(&wl->recovery_work);
1981	wl1271_flush_deferred_work(wl);
1982	cancel_delayed_work_sync(&wl->scan_complete_work);
1983	cancel_work_sync(&wl->netstack_work);
1984	cancel_work_sync(&wl->tx_work);
1985	cancel_delayed_work_sync(&wl->elp_work);
1986	cancel_delayed_work_sync(&wl->tx_watchdog_work);
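
	/*
	 * The mutex was dropped above before the cancel_*_sync() calls on
	 * purpose: most of the cancelled work functions take wl->mutex
	 * themselves, so cancelling them synchronously while holding it
	 * could deadlock.  It is re-taken below once everything has been
	 * flushed.
	 */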
1987
1988	/* let's notify MAC80211 about the remaining pending TX frames */
1989	mutex_lock(&wl->mutex);
1990	wl12xx_tx_reset(wl);
1991
1992	wl1271_power_off(wl);
1993	/*
1994	 * In case a recovery was scheduled, interrupts were disabled to avoid
1995	 * an interrupt storm. Now that the power is down, it is safe to
1996	 * re-enable interrupts to balance the disable depth
1997	 */
1998	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1999		wlcore_enable_interrupts(wl);
2000
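	/* reset the driver state back to its defaults */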
2001	wl->band = IEEE80211_BAND_2GHZ;
2002
2003	wl->rx_counter = 0;
2004	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
2005	wl->channel_type = NL80211_CHAN_NO_HT;
2006	wl->tx_blocks_available = 0;
2007	wl->tx_allocated_blocks = 0;
2008	wl->tx_results_count = 0;
2009	wl->tx_packets_count = 0;
2010	wl->time_offset = 0;
2011	wl->ap_fw_ps_map = 0;
2012	wl->ap_ps_map = 0;
2013	wl->sleep_auth = WL1271_PSM_ILLEGAL;
2014	memset(wl->roles_map, 0, sizeof(wl->roles_map));
2015	memset(wl->links_map, 0, sizeof(wl->links_map));
2016	memset(wl->roc_map, 0, sizeof(wl->roc_map));
2017	memset(wl->session_ids, 0, sizeof(wl->session_ids));
2018	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
2019	wl->active_sta_count = 0;
2020	wl->active_link_count = 0;
2021
2022	/* The system link is always allocated */
2023	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
2024	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
2025	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
2026
2027	/*
2028	 * this is performed after the cancel_work calls and the associated
2029	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
2030	 * get executed before all these vars have been reset.
2031	 */
2032	wl->flags = 0;
2033
2034	wl->tx_blocks_freed = 0;
2035
2036	for (i = 0; i < NUM_TX_QUEUES; i++) {
2037		wl->tx_pkts_freed[i] = 0;
2038		wl->tx_allocated_pkts[i] = 0;
2039	}
2040
2041	wl1271_debugfs_reset(wl);
2042
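	/* free the fw status, tx-result and target memory-map buffers */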
2043	kfree(wl->raw_fw_status);
2044	wl->raw_fw_status = NULL;
2045	kfree(wl->fw_status);
2046	wl->fw_status = NULL;
2047	kfree(wl->tx_res_if);
2048	wl->tx_res_if = NULL;
2049	kfree(wl->target_mem_map);
2050	wl->target_mem_map = NULL;
2051
2052	/*
	 * FW channels must be re-calibrated after recovery, so save the
	 * current reg-domain channel configuration and clear it.
2055	 */
2056	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2057	       sizeof(wl->reg_ch_conf_pending));
2058	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2059}
2060
2061static void wlcore_op_stop(struct ieee80211_hw *hw)
2062{
2063	struct wl1271 *wl = hw->priv;
2064
2065	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2066
2067	mutex_lock(&wl->mutex);
2068
2069	wlcore_op_stop_locked(wl);
2070
2071	mutex_unlock(&wl->mutex);
2072}
2073
2074static void wlcore_channel_switch_work(struct work_struct *work)
2075{
2076	struct delayed_work *dwork;
2077	struct wl1271 *wl;
2078	struct ieee80211_vif *vif;
2079	struct wl12xx_vif *wlvif;
2080	int ret;
2081
2082	dwork = container_of(work, struct delayed_work, work);
2083	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2084	wl = wlvif->wl;
2085
2086	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2087
2088	mutex_lock(&wl->mutex);
2089
2090	if (unlikely(wl->state != WLCORE_STATE_ON))
2091		goto out;
2092
2093	/* check the channel switch is still ongoing */
2094	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2095		goto out;
2096
2097	vif = wl12xx_wlvif_to_vif(wlvif);
2098	ieee80211_chswitch_done(vif, false);
2099
2100	ret = wl1271_ps_elp_wakeup(wl);
2101	if (ret < 0)
2102		goto out;
2103
2104	wl12xx_cmd_stop_channel_switch(wl, wlvif);
2105
2106	wl1271_ps_elp_sleep(wl);
2107out:
2108	mutex_unlock(&wl->mutex);
2109}
2110
2111static void wlcore_connection_loss_work(struct work_struct *work)
2112{
2113	struct delayed_work *dwork;
2114	struct wl1271 *wl;
2115	struct ieee80211_vif *vif;
2116	struct wl12xx_vif *wlvif;
2117
2118	dwork = container_of(work, struct delayed_work, work);
2119	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2120	wl = wlvif->wl;
2121
2122	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2123
2124	mutex_lock(&wl->mutex);
2125
2126	if (unlikely(wl->state != WLCORE_STATE_ON))
2127		goto out;
2128
2129	/* Call mac80211 connection loss */
2130	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2131		goto out;
2132
2133	vif = wl12xx_wlvif_to_vif(wlvif);
2134	ieee80211_connection_loss(vif);
2135out:
2136	mutex_unlock(&wl->mutex);
2137}
2138
2139static void wlcore_pending_auth_complete_work(struct work_struct *work)
2140{
2141	struct delayed_work *dwork;
2142	struct wl1271 *wl;
2143	struct wl12xx_vif *wlvif;
2144	unsigned long time_spare;
2145	int ret;
2146
2147	dwork = container_of(work, struct delayed_work, work);
2148	wlvif = container_of(dwork, struct wl12xx_vif,
2149			     pending_auth_complete_work);
2150	wl = wlvif->wl;
2151
2152	mutex_lock(&wl->mutex);
2153
2154	if (unlikely(wl->state != WLCORE_STATE_ON))
2155		goto out;
2156
2157	/*
2158	 * Make sure a second really passed since the last auth reply. Maybe
2159	 * a second auth reply arrived while we were stuck on the mutex.
2160	 * Check for a little less than the timeout to protect from scheduler
2161	 * irregularities.
2162	 */
2163	time_spare = jiffies +
2164			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2165	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2166		goto out;
2167
2168	ret = wl1271_ps_elp_wakeup(wl);
2169	if (ret < 0)
2170		goto out;
2171
2172	/* cancel the ROC if active */
2173	wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2174
2175	wl1271_ps_elp_sleep(wl);
2176out:
2177	mutex_unlock(&wl->mutex);
2178}
2179
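/*
 * Reserve a free entry in the rate-policies bitmap, or return -EBUSY if
 * all of them are already in use.
 */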
2180static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2181{
2182	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2183					WL12XX_MAX_RATE_POLICIES);
2184	if (policy >= WL12XX_MAX_RATE_POLICIES)
2185		return -EBUSY;
2186
2187	__set_bit(policy, wl->rate_policies_map);
2188	*idx = policy;
2189	return 0;
2190}
2191
2192static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2193{
2194	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2195		return;
2196
2197	__clear_bit(*idx, wl->rate_policies_map);
2198	*idx = WL12XX_MAX_RATE_POLICIES;
2199}
2200
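/*
 * Reserve a free keep-alive (klv) template slot, or return -EBUSY if all
 * of them are already in use.
 */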
2201static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2202{
2203	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2204					WLCORE_MAX_KLV_TEMPLATES);
2205	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2206		return -EBUSY;
2207
2208	__set_bit(policy, wl->klv_templates_map);
2209	*idx = policy;
2210	return 0;
2211}
2212
2213static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2214{
2215	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2216		return;
2217
2218	__clear_bit(*idx, wl->klv_templates_map);
2219	*idx = WLCORE_MAX_KLV_TEMPLATES;
2220}
2221
2222static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2223{
2224	switch (wlvif->bss_type) {
2225	case BSS_TYPE_AP_BSS:
2226		if (wlvif->p2p)
2227			return WL1271_ROLE_P2P_GO;
2228		else
2229			return WL1271_ROLE_AP;
2230
2231	case BSS_TYPE_STA_BSS:
2232		if (wlvif->p2p)
2233			return WL1271_ROLE_P2P_CL;
2234		else
2235			return WL1271_ROLE_STA;
2236
2237	case BSS_TYPE_IBSS:
2238		return WL1271_ROLE_IBSS;
2239
2240	default:
2241		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2242	}
2243	return WL12XX_INVALID_ROLE_TYPE;
2244}
2245
2246static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2247{
2248	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2249	int i;
2250
2251	/* clear everything but the persistent data */
2252	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2253
2254	switch (ieee80211_vif_type_p2p(vif)) {
2255	case NL80211_IFTYPE_P2P_CLIENT:
2256		wlvif->p2p = 1;
2257		/* fall-through */
2258	case NL80211_IFTYPE_STATION:
2259		wlvif->bss_type = BSS_TYPE_STA_BSS;
2260		break;
2261	case NL80211_IFTYPE_ADHOC:
2262		wlvif->bss_type = BSS_TYPE_IBSS;
2263		break;
2264	case NL80211_IFTYPE_P2P_GO:
2265		wlvif->p2p = 1;
2266		/* fall-through */
2267	case NL80211_IFTYPE_AP:
2268		wlvif->bss_type = BSS_TYPE_AP_BSS;
2269		break;
2270	default:
2271		wlvif->bss_type = MAX_BSS_TYPE;
2272		return -EOPNOTSUPP;
2273	}
2274
2275	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2276	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2277	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2278
2279	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2280	    wlvif->bss_type == BSS_TYPE_IBSS) {
2281		/* init sta/ibss data */
2282		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2283		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2284		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2285		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2286		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2287		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2288		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2289		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2290	} else {
2291		/* init ap data */
2292		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2293		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2294		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2295		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2296		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2297			wl12xx_allocate_rate_policy(wl,
2298						&wlvif->ap.ucast_rate_idx[i]);
2299		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2300		/*
2301		 * TODO: check if basic_rate shouldn't be
2302		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2303		 * instead (the same thing for STA above).
		 */
2305		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2306		/* TODO: this seems to be used only for STA, check it */
2307		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2308	}
2309
2310	wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2311	wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2312	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2313
2314	/*
2315	 * mac80211 configures some values globally, while we treat them
2316	 * per-interface. thus, on init, we have to copy them from wl
2317	 */
2318	wlvif->band = wl->band;
2319	wlvif->channel = wl->channel;
2320	wlvif->power_level = wl->power_level;
2321	wlvif->channel_type = wl->channel_type;
2322
2323	INIT_WORK(&wlvif->rx_streaming_enable_work,
2324		  wl1271_rx_streaming_enable_work);
2325	INIT_WORK(&wlvif->rx_streaming_disable_work,
2326		  wl1271_rx_streaming_disable_work);
2327	INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2328	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2329			  wlcore_channel_switch_work);
2330	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2331			  wlcore_connection_loss_work);
2332	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2333			  wlcore_pending_auth_complete_work);
2334	INIT_LIST_HEAD(&wlvif->list);
2335
2336	setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2337		    (unsigned long) wlvif);
2338	return 0;
2339}
2340
2341static int wl12xx_init_fw(struct wl1271 *wl)
2342{
2343	int retries = WL1271_BOOT_RETRIES;
2344	bool booted = false;
2345	struct wiphy *wiphy = wl->hw->wiphy;
2346	int ret;
2347
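	/* retry the full boot sequence up to WL1271_BOOT_RETRIES times */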
2348	while (retries) {
2349		retries--;
2350		ret = wl12xx_chip_wakeup(wl, false);
2351		if (ret < 0)
2352			goto power_off;
2353
2354		ret = wl->ops->boot(wl);
2355		if (ret < 0)
2356			goto power_off;
2357
2358		ret = wl1271_hw_init(wl);
2359		if (ret < 0)
2360			goto irq_disable;
2361
2362		booted = true;
2363		break;
2364
2365irq_disable:
2366		mutex_unlock(&wl->mutex);
2367		/* Unlocking the mutex in the middle of handling is
2368		   inherently unsafe. In this case we deem it safe to do,
2369		   because we need to let any possibly pending IRQ out of
2370		   the system (and while we are WLCORE_STATE_OFF the IRQ
2371		   work function will not do anything.) Also, any other
2372		   possible concurrent operations will fail due to the
2373		   current state, hence the wl1271 struct should be safe. */
2374		wlcore_disable_interrupts(wl);
2375		wl1271_flush_deferred_work(wl);
2376		cancel_work_sync(&wl->netstack_work);
2377		mutex_lock(&wl->mutex);
2378power_off:
2379		wl1271_power_off(wl);
2380	}
2381
2382	if (!booted) {
2383		wl1271_error("firmware boot failed despite %d retries",
2384			     WL1271_BOOT_RETRIES);
2385		goto out;
2386	}
2387
2388	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2389
2390	/* update hw/fw version info in wiphy struct */
2391	wiphy->hw_version = wl->chip.id;
2392	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2393		sizeof(wiphy->fw_version));
2394
2395	/*
2396	 * Now we know if 11a is supported (info from the NVS), so disable
2397	 * 11a channels if not supported
2398	 */
2399	if (!wl->enable_11a)
2400		wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2401
2402	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2403		     wl->enable_11a ? "" : "not ");
2404
2405	wl->state = WLCORE_STATE_ON;
2406out:
2407	return ret;
2408}
2409
2410static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2411{
2412	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2413}
2414
2415/*
2416 * Check whether a fw switch (i.e. moving from one loaded
2417 * fw to another) is needed. This function is also responsible
2418 * for updating wl->last_vif_count, so it must be called before
2419 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2420 * will be used).
2421 */
2422static bool wl12xx_need_fw_change(struct wl1271 *wl,
2423				  struct vif_counter_data vif_counter_data,
2424				  bool add)
2425{
2426	enum wl12xx_fw_type current_fw = wl->fw_type;
2427	u8 vif_count = vif_counter_data.counter;
2428
2429	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2430		return false;
2431
2432	/* increase the vif count if this is a new vif */
2433	if (add && !vif_counter_data.cur_vif_running)
2434		vif_count++;
2435
2436	wl->last_vif_count = vif_count;
2437
2438	/* no need for fw change if the device is OFF */
2439	if (wl->state == WLCORE_STATE_OFF)
2440		return false;
2441
2442	/* no need for fw change if a single fw is used */
2443	if (!wl->mr_fw_name)
2444		return false;
2445
2446	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2447		return true;
2448	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2449		return true;
2450
2451	return false;
2452}
2453
2454/*
2455 * Enter "forced psm". Make sure the sta is in psm against the ap,
 * so that the fw switch is less likely to end in a disconnection.
2457 */
2458static void wl12xx_force_active_psm(struct wl1271 *wl)
2459{
2460	struct wl12xx_vif *wlvif;
2461
2462	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2463		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2464	}
2465}
2466
2467struct wlcore_hw_queue_iter_data {
2468	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2469	/* current vif */
2470	struct ieee80211_vif *vif;
2471	/* is the current vif among those iterated */
2472	bool cur_running;
2473};
2474
2475static void wlcore_hw_queue_iter(void *data, u8 *mac,
2476				 struct ieee80211_vif *vif)
2477{
2478	struct wlcore_hw_queue_iter_data *iter_data = data;
2479
2480	if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2481		return;
2482
2483	if (iter_data->cur_running || vif == iter_data->vif) {
2484		iter_data->cur_running = true;
2485		return;
2486	}
2487
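	/* another running vif owns this hw queue base; mark it as taken */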
2488	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2489}
2490
2491static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2492					 struct wl12xx_vif *wlvif)
2493{
2494	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2495	struct wlcore_hw_queue_iter_data iter_data = {};
2496	int i, q_base;
2497
2498	iter_data.vif = vif;
2499
2500	/* mark all bits taken by active interfaces */
2501	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2502					IEEE80211_IFACE_ITER_RESUME_ALL,
2503					wlcore_hw_queue_iter, &iter_data);
2504
2505	/* the current vif is already running in mac80211 (resume/recovery) */
2506	if (iter_data.cur_running) {
2507		wlvif->hw_queue_base = vif->hw_queue[0];
2508		wl1271_debug(DEBUG_MAC80211,
2509			     "using pre-allocated hw queue base %d",
2510			     wlvif->hw_queue_base);
2511
		/* the interface might have changed type */
2513		goto adjust_cab_queue;
2514	}
2515
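	/* take the first hw queue base slot not used by any other vif */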
2516	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2517				     WLCORE_NUM_MAC_ADDRESSES);
2518	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2519		return -EBUSY;
2520
2521	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2522	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2523		     wlvif->hw_queue_base);
2524
2525	for (i = 0; i < NUM_TX_QUEUES; i++) {
2526		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2527		/* register hw queues in mac80211 */
2528		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2529	}
2530
2531adjust_cab_queue:
2532	/* the last places are reserved for cab queues per interface */
2533	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2534		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2535				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2536	else
2537		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2538
2539	return 0;
2540}
2541
2542static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2543				   struct ieee80211_vif *vif)
2544{
2545	struct wl1271 *wl = hw->priv;
2546	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2547	struct vif_counter_data vif_count;
2548	int ret = 0;
2549	u8 role_type;
2550
2551	if (wl->plt) {
2552		wl1271_error("Adding Interface not allowed while in PLT mode");
2553		return -EBUSY;
2554	}
2555
2556	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2557			     IEEE80211_VIF_SUPPORTS_UAPSD |
2558			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2559
2560	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2561		     ieee80211_vif_type_p2p(vif), vif->addr);
2562
2563	wl12xx_get_vif_count(hw, vif, &vif_count);
2564
2565	mutex_lock(&wl->mutex);
2566	ret = wl1271_ps_elp_wakeup(wl);
2567	if (ret < 0)
2568		goto out_unlock;
2569
2570	/*
	 * in some corner-case HW recovery scenarios it's possible to
2572	 * get here before __wl1271_op_remove_interface is complete, so
2573	 * opt out if that is the case.
2574	 */
2575	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2576	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2577		ret = -EBUSY;
2578		goto out;
2579	}
2580
2581
2582	ret = wl12xx_init_vif_data(wl, vif);
2583	if (ret < 0)
2584		goto out;
2585
2586	wlvif->wl = wl;
2587	role_type = wl12xx_get_role_type(wl, wlvif);
2588	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2589		ret = -EINVAL;
2590		goto out;
2591	}
2592
2593	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2594	if (ret < 0)
2595		goto out;
2596
2597	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2598		wl12xx_force_active_psm(wl);
2599		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2600		mutex_unlock(&wl->mutex);
2601		wl1271_recovery_work(&wl->recovery_work);
2602		return 0;
2603	}
2604
2605	/*
	 * TODO: once the nvs issue is solved, move this block to start(),
	 * and make sure the driver is ON at that point.
2608	 */
2609	if (wl->state == WLCORE_STATE_OFF) {
2610		/*
2611		 * we still need this in order to configure the fw
2612		 * while uploading the nvs
2613		 */
2614		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2615
2616		ret = wl12xx_init_fw(wl);
2617		if (ret < 0)
2618			goto out;
2619	}
2620
2621	ret = wl12xx_cmd_role_enable(wl, vif->addr,
2622				     role_type, &wlvif->role_id);
2623	if (ret < 0)
2624		goto out;
2625
2626	ret = wl1271_init_vif_specific(wl, vif);
2627	if (ret < 0)
2628		goto out;
2629
2630	list_add(&wlvif->list, &wl->wlvif_list);
2631	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2632
2633	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2634		wl->ap_count++;
2635	else
2636		wl->sta_count++;
2637out:
2638	wl1271_ps_elp_sleep(wl);
2639out_unlock:
2640	mutex_unlock(&wl->mutex);
2641
2642	return ret;
2643}
2644
2645static void __wl1271_op_remove_interface(struct wl1271 *wl,
2646					 struct ieee80211_vif *vif,
2647					 bool reset_tx_queues)
2648{
2649	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2650	int i, ret;
2651	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2652
2653	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2654
2655	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2656		return;
2657
2658	/* because of hardware recovery, we may get here twice */
2659	if (wl->state == WLCORE_STATE_OFF)
2660		return;
2661
2662	wl1271_info("down");
2663
2664	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2665	    wl->scan_wlvif == wlvif) {
2666		/*
2667		 * Rearm the tx watchdog just before idling scan. This
2668		 * prevents just-finished scans from triggering the watchdog
2669		 */
2670		wl12xx_rearm_tx_watchdog_locked(wl);
2671
2672		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2673		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2674		wl->scan_wlvif = NULL;
2675		wl->scan.req = NULL;
2676		ieee80211_scan_completed(wl->hw, true);
2677	}
2678
2679	if (wl->sched_vif == wlvif)
2680		wl->sched_vif = NULL;
2681
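	/* if a ROC was active on this vif, report it as expired */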
2682	if (wl->roc_vif == vif) {
2683		wl->roc_vif = NULL;
2684		ieee80211_remain_on_channel_expired(wl->hw);
2685	}
2686
2687	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2688		/* disable active roles */
2689		ret = wl1271_ps_elp_wakeup(wl);
2690		if (ret < 0)
2691			goto deinit;
2692
2693		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2694		    wlvif->bss_type == BSS_TYPE_IBSS) {
2695			if (wl12xx_dev_role_started(wlvif))
2696				wl12xx_stop_dev(wl, wlvif);
2697		}
2698
2699		ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2700		if (ret < 0)
2701			goto deinit;
2702
2703		wl1271_ps_elp_sleep(wl);
2704	}
2705deinit:
2706	wl12xx_tx_reset_wlvif(wl, wlvif);
2707
2708	/* clear all hlids (except system_hlid) */
2709	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2710
2711	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2712	    wlvif->bss_type == BSS_TYPE_IBSS) {
2713		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2714		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2715		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2716		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2717		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2718	} else {
2719		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2720		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2721		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2722		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2723		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2724			wl12xx_free_rate_policy(wl,
2725						&wlvif->ap.ucast_rate_idx[i]);
2726		wl1271_free_ap_keys(wl, wlvif);
2727	}
2728
2729	dev_kfree_skb(wlvif->probereq);
2730	wlvif->probereq = NULL;
2731	if (wl->last_wlvif == wlvif)
2732		wl->last_wlvif = NULL;
2733	list_del(&wlvif->list);
2734	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2735	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2736	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2737
2738	if (is_ap)
2739		wl->ap_count--;
2740	else
2741		wl->sta_count--;
2742
2743	/*
	 * If this was the last AP and stations remain, configure the sleep
	 * auth according to the STA. Don't do this on unintended recovery.
2746	 */
2747	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2748	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2749		goto unlock;
2750
2751	if (wl->ap_count == 0 && is_ap) {
2752		/* mask ap events */
2753		wl->event_mask &= ~wl->ap_event_mask;
2754		wl1271_event_unmask(wl);
2755	}
2756
2757	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2758		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2759		/* Configure for power according to debugfs */
2760		if (sta_auth != WL1271_PSM_ILLEGAL)
2761			wl1271_acx_sleep_auth(wl, sta_auth);
2762		/* Configure for ELP power saving */
2763		else
2764			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2765	}
2766
2767unlock:
2768	mutex_unlock(&wl->mutex);
2769
2770	del_timer_sync(&wlvif->rx_streaming_timer);
2771	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2772	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2773	cancel_work_sync(&wlvif->rc_update_work);
2774	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2775	cancel_delayed_work_sync(&wlvif->channel_switch_work);
2776	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2777
2778	mutex_lock(&wl->mutex);
2779}
2780
2781static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2782				       struct ieee80211_vif *vif)
2783{
2784	struct wl1271 *wl = hw->priv;
2785	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2786	struct wl12xx_vif *iter;
2787	struct vif_counter_data vif_count;
2788
2789	wl12xx_get_vif_count(hw, vif, &vif_count);
2790	mutex_lock(&wl->mutex);
2791
2792	if (wl->state == WLCORE_STATE_OFF ||
2793	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2794		goto out;
2795
2796	/*
2797	 * wl->vif can be null here if someone shuts down the interface
2798	 * just when hardware recovery has been started.
2799	 */
2800	wl12xx_for_each_wlvif(wl, iter) {
2801		if (iter != wlvif)
2802			continue;
2803
2804		__wl1271_op_remove_interface(wl, vif, true);
2805		break;
2806	}
2807	WARN_ON(iter != wlvif);
2808	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2809		wl12xx_force_active_psm(wl);
2810		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2811		wl12xx_queue_recovery_work(wl);
2812	}
2813out:
2814	mutex_unlock(&wl->mutex);
2815}
2816
2817static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2818				      struct ieee80211_vif *vif,
2819				      enum nl80211_iftype new_type, bool p2p)
2820{
2821	struct wl1271 *wl = hw->priv;
2822	int ret;
2823
2824	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2825	wl1271_op_remove_interface(hw, vif);
2826
2827	vif->type = new_type;
2828	vif->p2p = p2p;
2829	ret = wl1271_op_add_interface(hw, vif);
2830
2831	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2832	return ret;
2833}
2834
2835static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2836{
2837	int ret;
2838	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2839
2840	/*
	 * One of the side effects of the JOIN command is that it clears
2842	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2843	 * to a WPA/WPA2 access point will therefore kill the data-path.
2844	 * Currently the only valid scenario for JOIN during association
2845	 * is on roaming, in which case we will also be given new keys.
2846	 * Keep the below message for now, unless it starts bothering
2847	 * users who really like to roam a lot :)
2848	 */
2849	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2850		wl1271_info("JOIN while associated.");
2851
2852	/* clear encryption type */
2853	wlvif->encryption_type = KEY_NONE;
2854
2855	if (is_ibss)
2856		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2857	else {
2858		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2859			/*
2860			 * TODO: this is an ugly workaround for wl12xx fw
2861			 * bug - we are not able to tx/rx after the first
2862			 * start_sta, so make dummy start+stop calls,
2863			 * and then call start_sta again.
2864			 * this should be fixed in the fw.
2865			 */
2866			wl12xx_cmd_role_start_sta(wl, wlvif);
2867			wl12xx_cmd_role_stop_sta(wl, wlvif);
2868		}
2869
2870		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2871	}
2872
2873	return ret;
2874}
2875
2876static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2877			    int offset)
2878{
2879	u8 ssid_len;
2880	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2881					 skb->len - offset);
2882
2883	if (!ptr) {
2884		wl1271_error("No SSID in IEs!");
2885		return -ENOENT;
2886	}
2887
2888	ssid_len = ptr[1];
2889	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2890		wl1271_error("SSID is too long!");
2891		return -EINVAL;
2892	}
2893
2894	wlvif->ssid_len = ssid_len;
2895	memcpy(wlvif->ssid, ptr+2, ssid_len);
2896	return 0;
2897}
2898
2899static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2900{
2901	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2902	struct sk_buff *skb;
2903	int ieoffset;
2904
2905	/* we currently only support setting the ssid from the ap probe req */
2906	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2907		return -EINVAL;
2908
2909	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2910	if (!skb)
2911		return -EINVAL;
2912
2913	ieoffset = offsetof(struct ieee80211_mgmt,
2914			    u.probe_req.variable);
2915	wl1271_ssid_set(wlvif, skb, ieoffset);
2916	dev_kfree_skb(skb);
2917
2918	return 0;
2919}
2920
2921static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2922			    struct ieee80211_bss_conf *bss_conf,
2923			    u32 sta_rate_set)
2924{
2925	int ieoffset;
2926	int ret;
2927
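	/* cache the association parameters reported by mac80211 */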
2928	wlvif->aid = bss_conf->aid;
2929	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2930	wlvif->beacon_int = bss_conf->beacon_int;
2931	wlvif->wmm_enabled = bss_conf->qos;
2932
2933	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2934
2935	/*
2936	 * with wl1271, we don't need to update the
2937	 * beacon_int and dtim_period, because the firmware
	 * updates them by itself when the first beacon is
2939	 * received after a join.
2940	 */
2941	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2942	if (ret < 0)
2943		return ret;
2944
2945	/*
2946	 * Get a template for hardware connection maintenance
2947	 */
2948	dev_kfree_skb(wlvif->probereq);
2949	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2950							wlvif,
2951							NULL);
2952	ieoffset = offsetof(struct ieee80211_mgmt,
2953			    u.probe_req.variable);
2954	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2955
2956	/* enable the connection monitoring feature */
2957	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2958	if (ret < 0)
2959		return ret;
2960
2961	/*
	 * The join command disables keep-alive mode, shuts down its process,
	 * and also clears the template config, so we must reset it all after
2964	 * the join. The acx_aid starts the keep-alive process, and the order
2965	 * of the commands below is relevant.
2966	 */
2967	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2968	if (ret < 0)
2969		return ret;
2970
2971	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2972	if (ret < 0)
2973		return ret;
2974
2975	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2976	if (ret < 0)
2977		return ret;
2978
2979	ret = wl1271_acx_keep_alive_config(wl, wlvif,
2980					   wlvif->sta.klv_template_id,
2981					   ACX_KEEP_ALIVE_TPL_VALID);
2982	if (ret < 0)
2983		return ret;
2984
2985	/*
2986	 * The default fw psm configuration is AUTO, while mac80211 default
2987	 * setting is off (ACTIVE), so sync the fw with the correct value.
2988	 */
2989	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2990	if (ret < 0)
2991		return ret;
2992
2993	if (sta_rate_set) {
2994		wlvif->rate_set =
2995			wl1271_tx_enabled_rates_get(wl,
2996						    sta_rate_set,
2997						    wlvif->band);
2998		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2999		if (ret < 0)
3000			return ret;
3001	}
3002
3003	return ret;
3004}
3005
3006static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3007{
3008	int ret;
3009	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3010
3011	/* make sure we are connected (sta) joined */
3012	if (sta &&
3013	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		return 0;
3015
3016	/* make sure we are joined (ibss) */
3017	if (!sta &&
3018	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
		return 0;
3020
3021	if (sta) {
3022		/* use defaults when not associated */
3023		wlvif->aid = 0;
3024
3025		/* free probe-request template */
3026		dev_kfree_skb(wlvif->probereq);
3027		wlvif->probereq = NULL;
3028
3029		/* disable connection monitor features */
3030		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3031		if (ret < 0)
3032			return ret;
3033
3034		/* Disable the keep-alive feature */
3035		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3036		if (ret < 0)
3037			return ret;
3038
3039		/* disable beacon filtering */
3040		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3041		if (ret < 0)
3042			return ret;
3043	}
3044
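	/* abort an ongoing channel switch, if there is one */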
3045	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3046		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3047
3048		wl12xx_cmd_stop_channel_switch(wl, wlvif);
3049		ieee80211_chswitch_done(vif, false);
3050		cancel_delayed_work(&wlvif->channel_switch_work);
3051	}
3052
3053	/* invalidate keep-alive template */
3054	wl1271_acx_keep_alive_config(wl, wlvif,
3055				     wlvif->sta.klv_template_id,
3056				     ACX_KEEP_ALIVE_TPL_INVALID);
3057
3058	return 0;
3059}
3060
3061static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3062{
3063	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3064	wlvif->rate_set = wlvif->basic_rate_set;
3065}
3066
3067static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3068				   bool idle)
3069{
3070	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3071
3072	if (idle == cur_idle)
3073		return;
3074
3075	if (idle) {
3076		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3077	} else {
3078		/* The current firmware only supports sched_scan in idle */
3079		if (wl->sched_vif == wlvif)
3080			wl->ops->sched_scan_stop(wl, wlvif);
3081
3082		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3083	}
3084}
3085
3086static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3087			     struct ieee80211_conf *conf, u32 changed)
3088{
3089	int ret;
3090
3091	if (conf->power_level != wlvif->power_level) {
3092		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3093		if (ret < 0)
3094			return ret;
3095
3096		wlvif->power_level = conf->power_level;
3097	}
3098
3099	return 0;
3100}
3101
3102static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3103{
3104	struct wl1271 *wl = hw->priv;
3105	struct wl12xx_vif *wlvif;
3106	struct ieee80211_conf *conf = &hw->conf;
3107	int ret = 0;
3108
3109	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3110		     " changed 0x%x",
3111		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3112		     conf->power_level,
3113		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3114			 changed);
3115
3116	mutex_lock(&wl->mutex);
3117
3118	if (changed & IEEE80211_CONF_CHANGE_POWER)
3119		wl->power_level = conf->power_level;
3120
3121	if (unlikely(wl->state != WLCORE_STATE_ON))
3122		goto out;
3123
3124	ret = wl1271_ps_elp_wakeup(wl);
3125	if (ret < 0)
3126		goto out;
3127
3128	/* configure each interface */
3129	wl12xx_for_each_wlvif(wl, wlvif) {
3130		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3131		if (ret < 0)
3132			goto out_sleep;
3133	}
3134
3135out_sleep:
3136	wl1271_ps_elp_sleep(wl);
3137
3138out:
3139	mutex_unlock(&wl->mutex);
3140
3141	return ret;
3142}
3143
3144struct wl1271_filter_params {
3145	bool enabled;
3146	int mc_list_length;
3147	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3148};
3149
3150static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3151				       struct netdev_hw_addr_list *mc_list)
3152{
3153	struct wl1271_filter_params *fp;
3154	struct netdev_hw_addr *ha;
3155
3156	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3157	if (!fp) {
3158		wl1271_error("Out of memory setting filters.");
3159		return 0;
3160	}
3161
3162	/* update multicast filtering parameters */
3163	fp->mc_list_length = 0;
3164	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3165		fp->enabled = false;
3166	} else {
3167		fp->enabled = true;
3168		netdev_hw_addr_list_for_each(ha, mc_list) {
3169			memcpy(fp->mc_list[fp->mc_list_length],
3170					ha->addr, ETH_ALEN);
3171			fp->mc_list_length++;
3172		}
3173	}
3174
3175	return (u64)(unsigned long)fp;
3176}
3177
3178#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
3179				  FIF_ALLMULTI | \
3180				  FIF_FCSFAIL | \
3181				  FIF_BCN_PRBRESP_PROMISC | \
3182				  FIF_CONTROL | \
3183				  FIF_OTHER_BSS)
3184
3185static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3186				       unsigned int changed,
3187				       unsigned int *total, u64 multicast)
3188{
3189	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3190	struct wl1271 *wl = hw->priv;
3191	struct wl12xx_vif *wlvif;
3192
3193	int ret;
3194
3195	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3196		     " total %x", changed, *total);
3197
3198	mutex_lock(&wl->mutex);
3199
3200	*total &= WL1271_SUPPORTED_FILTERS;
3201	changed &= WL1271_SUPPORTED_FILTERS;
3202
3203	if (unlikely(wl->state != WLCORE_STATE_ON))
3204		goto out;
3205
3206	ret = wl1271_ps_elp_wakeup(wl);
3207	if (ret < 0)
3208		goto out;
3209
3210	wl12xx_for_each_wlvif(wl, wlvif) {
3211		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3212			if (*total & FIF_ALLMULTI)
3213				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3214								   false,
3215								   NULL, 0);
3216			else if (fp)
3217				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3218							fp->enabled,
3219							fp->mc_list,
3220							fp->mc_list_length);
3221			if (ret < 0)
3222				goto out_sleep;
3223		}
3224	}
3225
3226	/*
3227	 * the fw doesn't provide an api to configure the filters. instead,
	 * the filter configuration is based on the active roles / ROC
3229	 * state.
3230	 */
3231
3232out_sleep:
3233	wl1271_ps_elp_sleep(wl);
3234
3235out:
3236	mutex_unlock(&wl->mutex);
3237	kfree(fp);
3238}
3239
3240static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3241				u8 id, u8 key_type, u8 key_size,
3242				const u8 *key, u8 hlid, u32 tx_seq_32,
3243				u16 tx_seq_16)
3244{
3245	struct wl1271_ap_key *ap_key;
3246	int i;
3247
3248	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3249
3250	if (key_size > MAX_KEY_SIZE)
3251		return -EINVAL;
3252
3253	/*
3254	 * Find next free entry in ap_keys. Also check we are not replacing
3255	 * an existing key.
3256	 */
3257	for (i = 0; i < MAX_NUM_KEYS; i++) {
3258		if (wlvif->ap.recorded_keys[i] == NULL)
3259			break;
3260
3261		if (wlvif->ap.recorded_keys[i]->id == id) {
3262			wl1271_warning("trying to record key replacement");
3263			return -EINVAL;
3264		}
3265	}
3266
3267	if (i == MAX_NUM_KEYS)
3268		return -EBUSY;
3269
3270	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3271	if (!ap_key)
3272		return -ENOMEM;
3273
3274	ap_key->id = id;
3275	ap_key->key_type = key_type;
3276	ap_key->key_size = key_size;
3277	memcpy(ap_key->key, key, key_size);
3278	ap_key->hlid = hlid;
3279	ap_key->tx_seq_32 = tx_seq_32;
3280	ap_key->tx_seq_16 = tx_seq_16;
3281
3282	wlvif->ap.recorded_keys[i] = ap_key;
3283	return 0;
3284}
3285
3286static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3287{
3288	int i;
3289
3290	for (i = 0; i < MAX_NUM_KEYS; i++) {
3291		kfree(wlvif->ap.recorded_keys[i]);
3292		wlvif->ap.recorded_keys[i] = NULL;
3293	}
3294}
3295
3296static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3297{
3298	int i, ret = 0;
3299	struct wl1271_ap_key *key;
3300	bool wep_key_added = false;
3301
3302	for (i = 0; i < MAX_NUM_KEYS; i++) {
3303		u8 hlid;
3304		if (wlvif->ap.recorded_keys[i] == NULL)
3305			break;
3306
3307		key = wlvif->ap.recorded_keys[i];
3308		hlid = key->hlid;
3309		if (hlid == WL12XX_INVALID_LINK_ID)
3310			hlid = wlvif->ap.bcast_hlid;
3311
3312		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3313					    key->id, key->key_type,
3314					    key->key_size, key->key,
3315					    hlid, key->tx_seq_32,
3316					    key->tx_seq_16);
3317		if (ret < 0)
3318			goto out;
3319
3320		if (key->key_type == KEY_WEP)
3321			wep_key_added = true;
3322	}
3323
3324	if (wep_key_added) {
3325		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3326						     wlvif->ap.bcast_hlid);
3327		if (ret < 0)
3328			goto out;
3329	}
3330
3331out:
3332	wl1271_free_ap_keys(wl, wlvif);
3333	return ret;
3334}
3335
3336static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3337		       u16 action, u8 id, u8 key_type,
3338		       u8 key_size, const u8 *key, u32 tx_seq_32,
3339		       u16 tx_seq_16, struct ieee80211_sta *sta)
3340{
3341	int ret;
3342	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3343
3344	if (is_ap) {
3345		struct wl1271_station *wl_sta;
3346		u8 hlid;
3347
3348		if (sta) {
3349			wl_sta = (struct wl1271_station *)sta->drv_priv;
3350			hlid = wl_sta->hlid;
3351		} else {
3352			hlid = wlvif->ap.bcast_hlid;
3353		}
3354
3355		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3356			/*
3357			 * We do not support removing keys after AP shutdown.
3358			 * Pretend we do to make mac80211 happy.
3359			 */
3360			if (action != KEY_ADD_OR_REPLACE)
3361				return 0;
3362
3363			ret = wl1271_record_ap_key(wl, wlvif, id,
3364					     key_type, key_size,
3365					     key, hlid, tx_seq_32,
3366					     tx_seq_16);
3367		} else {
3368			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3369					     id, key_type, key_size,
3370					     key, hlid, tx_seq_32,
3371					     tx_seq_16);
3372		}
3373
3374		if (ret < 0)
3375			return ret;
3376	} else {
3377		const u8 *addr;
3378		static const u8 bcast_addr[ETH_ALEN] = {
3379			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3380		};
3381
3382		addr = sta ? sta->addr : bcast_addr;
3383
3384		if (is_zero_ether_addr(addr)) {
			/* We don't support TX-only encryption */
3386			return -EOPNOTSUPP;
3387		}
3388
		/* The wl1271 does not allow removing unicast keys - they
		   will be cleared automatically on the next CMD_JOIN. Ignore
		   the request silently, as we don't want mac80211 to emit
		   an error message. */
3393		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3394			return 0;
3395
3396		/* don't remove key if hlid was already deleted */
3397		if (action == KEY_REMOVE &&
3398		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3399			return 0;
3400
3401		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3402					     id, key_type, key_size,
3403					     key, addr, tx_seq_32,
3404					     tx_seq_16);
3405		if (ret < 0)
3406			return ret;
3407
3408	}
3409
3410	return 0;
3411}
3412
3413static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3414			     struct ieee80211_vif *vif,
3415			     struct ieee80211_sta *sta,
3416			     struct ieee80211_key_conf *key_conf)
3417{
3418	struct wl1271 *wl = hw->priv;
3419	int ret;
3420	bool might_change_spare =
3421		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3422		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3423
3424	if (might_change_spare) {
3425		/*
3426		 * stop the queues and flush to ensure the next packets are
3427		 * in sync with FW spare block accounting
3428		 */
3429		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3430		wl1271_tx_flush(wl);
3431	}
3432
3433	mutex_lock(&wl->mutex);
3434
3435	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3436		ret = -EAGAIN;
3437		goto out_wake_queues;
3438	}
3439
3440	ret = wl1271_ps_elp_wakeup(wl);
3441	if (ret < 0)
3442		goto out_wake_queues;
3443
3444	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3445
3446	wl1271_ps_elp_sleep(wl);
3447
3448out_wake_queues:
3449	if (might_change_spare)
3450		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3451
3452	mutex_unlock(&wl->mutex);
3453
3454	return ret;
3455}
3456
3457int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3458		   struct ieee80211_vif *vif,
3459		   struct ieee80211_sta *sta,
3460		   struct ieee80211_key_conf *key_conf)
3461{
3462	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3463	int ret;
3464	u32 tx_seq_32 = 0;
3465	u16 tx_seq_16 = 0;
3466	u8 key_type;
3467	u8 hlid;
3468
3469	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3470
3471	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3472	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3473		     key_conf->cipher, key_conf->keyidx,
3474		     key_conf->keylen, key_conf->flags);
3475	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3476
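	/*
	 * Pick the link the key belongs to: the station (or broadcast) link
	 * for AP, and the sta link otherwise.
	 */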
	if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
		if (sta) {
			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
			hlid = wl_sta->hlid;
		} else {
			hlid = wlvif->ap.bcast_hlid;
		}
	} else {
		hlid = wlvif->sta.hlid;
	}
3486
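	/*
	 * Derive the initial tx sequence numbers from the link's total
	 * freed-packets counter.
	 */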
3487	if (hlid != WL12XX_INVALID_LINK_ID) {
3488		u64 tx_seq = wl->links[hlid].total_freed_pkts;
3489		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3490		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3491	}
3492
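	/* map the mac80211 cipher suite to the fw key type */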
3493	switch (key_conf->cipher) {
3494	case WLAN_CIPHER_SUITE_WEP40:
3495	case WLAN_CIPHER_SUITE_WEP104:
3496		key_type = KEY_WEP;
3497
3498		key_conf->hw_key_idx = key_conf->keyidx;
3499		break;
3500	case WLAN_CIPHER_SUITE_TKIP:
3501		key_type = KEY_TKIP;
3502		key_conf->hw_key_idx = key_conf->keyidx;
3503		break;
3504	case WLAN_CIPHER_SUITE_CCMP:
3505		key_type = KEY_AES;
3506		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3507		break;
3508	case WL1271_CIPHER_SUITE_GEM:
3509		key_type = KEY_GEM;
3510		break;
3511	default:
3512		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3513
3514		return -EOPNOTSUPP;
3515	}
3516
3517	switch (cmd) {
3518	case SET_KEY:
3519		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3520				 key_conf->keyidx, key_type,
3521				 key_conf->keylen, key_conf->key,
3522				 tx_seq_32, tx_seq_16, sta);
3523		if (ret < 0) {
3524			wl1271_error("Could not add or replace key");
3525			return ret;
3526		}
3527
3528		/*
3529		 * reconfiguring arp response if the unicast (or common)
3530		 * encryption key type was changed
3531		 */
3532		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3533		    (sta || key_type == KEY_WEP) &&
3534		    wlvif->encryption_type != key_type) {
3535			wlvif->encryption_type = key_type;
3536			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3537			if (ret < 0) {
3538				wl1271_warning("build arp rsp failed: %d", ret);
3539				return ret;
3540			}
3541		}
3542		break;
3543
3544	case DISABLE_KEY:
3545		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3546				     key_conf->keyidx, key_type,
3547				     key_conf->keylen, key_conf->key,
3548				     0, 0, sta);
3549		if (ret < 0) {
3550			wl1271_error("Could not remove key");
3551			return ret;
3552		}
3553		break;
3554
3555	default:
3556		wl1271_error("Unsupported key cmd 0x%x", cmd);
3557		return -EOPNOTSUPP;
3558	}
3559
3560	return ret;
3561}
3562EXPORT_SYMBOL_GPL(wlcore_set_key);
3563
3564static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3565					  struct ieee80211_vif *vif,
3566					  int key_idx)
3567{
3568	struct wl1271 *wl = hw->priv;
3569	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3570	int ret;
3571
3572	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3573		     key_idx);
3574
3575	/* we don't handle unsetting of default key */
3576	if (key_idx == -1)
3577		return;
3578
3579	mutex_lock(&wl->mutex);
3580
3581	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3582		ret = -EAGAIN;
3583		goto out_unlock;
3584	}
3585
3586	ret = wl1271_ps_elp_wakeup(wl);
3587	if (ret < 0)
3588		goto out_unlock;
3589
3590	wlvif->default_key = key_idx;
3591
3592	/* the default WEP key needs to be configured at least once */
3593	if (wlvif->encryption_type == KEY_WEP) {
3594		ret = wl12xx_cmd_set_default_wep_key(wl,
3595				key_idx,
3596				wlvif->sta.hlid);
3597		if (ret < 0)
3598			goto out_sleep;
3599	}
3600
3601out_sleep:
3602	wl1271_ps_elp_sleep(wl);
3603
3604out_unlock:
3605	mutex_unlock(&wl->mutex);
3606}
3607
3608void wlcore_regdomain_config(struct wl1271 *wl)
3609{
3610	int ret;
3611
3612	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3613		return;
3614
3615	mutex_lock(&wl->mutex);
3616
3617	if (unlikely(wl->state != WLCORE_STATE_ON))
3618		goto out;
3619
3620	ret = wl1271_ps_elp_wakeup(wl);
3621	if (ret < 0)
3622		goto out;
3623
3624	ret = wlcore_cmd_regdomain_config_locked(wl);
3625	if (ret < 0) {
3626		wl12xx_queue_recovery_work(wl);
3627		goto out;
3628	}
3629
3630	wl1271_ps_elp_sleep(wl);
3631out:
3632	mutex_unlock(&wl->mutex);
3633}
3634
3635static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3636			     struct ieee80211_vif *vif,
3637			     struct ieee80211_scan_request *hw_req)
3638{
3639	struct cfg80211_scan_request *req = &hw_req->req;
3640	struct wl1271 *wl = hw->priv;
3641	int ret;
3642	u8 *ssid = NULL;
3643	size_t len = 0;
3644
3645	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3646
3647	if (req->n_ssids) {
3648		ssid = req->ssids[0].ssid;
3649		len = req->ssids[0].ssid_len;
3650	}
3651
3652	mutex_lock(&wl->mutex);
3653
3654	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3655		/*
3656		 * We cannot return -EBUSY here because cfg80211 will expect
3657		 * a call to ieee80211_scan_completed if we do - in this case
3658		 * there won't be any call.
3659		 */
3660		ret = -EAGAIN;
3661		goto out;
3662	}
3663
3664	ret = wl1271_ps_elp_wakeup(wl);
3665	if (ret < 0)
3666		goto out;
3667
3668	/* fail if there is any role in ROC */
3669	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3670		/* don't allow scanning right now */
3671		ret = -EBUSY;
3672		goto out_sleep;
3673	}
3674
3675	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3676out_sleep:
3677	wl1271_ps_elp_sleep(wl);
3678out:
3679	mutex_unlock(&wl->mutex);
3680
3681	return ret;
3682}
3683
3684static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3685				     struct ieee80211_vif *vif)
3686{
3687	struct wl1271 *wl = hw->priv;
3688	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3689	int ret;
3690
3691	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3692
3693	mutex_lock(&wl->mutex);
3694
3695	if (unlikely(wl->state != WLCORE_STATE_ON))
3696		goto out;
3697
3698	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3699		goto out;
3700
3701	ret = wl1271_ps_elp_wakeup(wl);
3702	if (ret < 0)
3703		goto out;
3704
3705	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3706		ret = wl->ops->scan_stop(wl, wlvif);
3707		if (ret < 0)
3708			goto out_sleep;
3709	}
3710
3711	/*
3712	 * Rearm the tx watchdog just before idling scan. This
3713	 * prevents just-finished scans from triggering the watchdog
3714	 */
3715	wl12xx_rearm_tx_watchdog_locked(wl);
3716
3717	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3718	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3719	wl->scan_wlvif = NULL;
3720	wl->scan.req = NULL;
3721	ieee80211_scan_completed(wl->hw, true);
3722
3723out_sleep:
3724	wl1271_ps_elp_sleep(wl);
3725out:
3726	mutex_unlock(&wl->mutex);
3727
3728	cancel_delayed_work_sync(&wl->scan_complete_work);
3729}
3730
3731static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3732				      struct ieee80211_vif *vif,
3733				      struct cfg80211_sched_scan_request *req,
3734				      struct ieee80211_scan_ies *ies)
3735{
3736	struct wl1271 *wl = hw->priv;
3737	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3738	int ret;
3739
3740	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3741
3742	mutex_lock(&wl->mutex);
3743
3744	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3745		ret = -EAGAIN;
3746		goto out;
3747	}
3748
3749	ret = wl1271_ps_elp_wakeup(wl);
3750	if (ret < 0)
3751		goto out;
3752
3753	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3754	if (ret < 0)
3755		goto out_sleep;
3756
3757	wl->sched_vif = wlvif;
3758
3759out_sleep:
3760	wl1271_ps_elp_sleep(wl);
3761out:
3762	mutex_unlock(&wl->mutex);
3763	return ret;
3764}
3765
3766static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3767				     struct ieee80211_vif *vif)
3768{
3769	struct wl1271 *wl = hw->priv;
3770	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3771	int ret;
3772
3773	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3774
3775	mutex_lock(&wl->mutex);
3776
3777	if (unlikely(wl->state != WLCORE_STATE_ON))
3778		goto out;
3779
3780	ret = wl1271_ps_elp_wakeup(wl);
3781	if (ret < 0)
3782		goto out;
3783
3784	wl->ops->sched_scan_stop(wl, wlvif);
3785
3786	wl1271_ps_elp_sleep(wl);
3787out:
3788	mutex_unlock(&wl->mutex);
3789
3790	return 0;
3791}
3792
3793static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3794{
3795	struct wl1271 *wl = hw->priv;
3796	int ret = 0;
3797
3798	mutex_lock(&wl->mutex);
3799
3800	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3801		ret = -EAGAIN;
3802		goto out;
3803	}
3804
3805	ret = wl1271_ps_elp_wakeup(wl);
3806	if (ret < 0)
3807		goto out;
3808
3809	ret = wl1271_acx_frag_threshold(wl, value);
3810	if (ret < 0)
3811		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3812
3813	wl1271_ps_elp_sleep(wl);
3814
3815out:
3816	mutex_unlock(&wl->mutex);
3817
3818	return ret;
3819}
3820
3821static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3822{
3823	struct wl1271 *wl = hw->priv;
3824	struct wl12xx_vif *wlvif;
3825	int ret = 0;
3826
3827	mutex_lock(&wl->mutex);
3828
3829	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3830		ret = -EAGAIN;
3831		goto out;
3832	}
3833
3834	ret = wl1271_ps_elp_wakeup(wl);
3835	if (ret < 0)
3836		goto out;
3837
3838	wl12xx_for_each_wlvif(wl, wlvif) {
3839		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3840		if (ret < 0)
3841			wl1271_warning("set rts threshold failed: %d", ret);
3842	}
3843	wl1271_ps_elp_sleep(wl);
3844
3845out:
3846	mutex_unlock(&wl->mutex);
3847
3848	return ret;
3849}
3850
3851static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3852{
3853	int len;
3854	const u8 *next, *end = skb->data + skb->len;
3855	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3856					skb->len - ieoffset);
3857	if (!ie)
3858		return;
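	/* splice the IE out of the frame and trim the skb accordingly */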
3859	len = ie[1] + 2;
3860	next = ie + len;
3861	memmove(ie, next, end - next);
3862	skb_trim(skb, skb->len - len);
3863}
3864
3865static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3866					    unsigned int oui, u8 oui_type,
3867					    int ieoffset)
3868{
3869	int len;
3870	const u8 *next, *end = skb->data + skb->len;
3871	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3872					       skb->data + ieoffset,
3873					       skb->len - ieoffset);
3874	if (!ie)
3875		return;
3876	len = ie[1] + 2;
3877	next = ie + len;
3878	memmove(ie, next, end - next);
3879	skb_trim(skb, skb->len - len);
3880}
3881
3882static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3883					 struct ieee80211_vif *vif)
3884{
3885	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3886	struct sk_buff *skb;
3887	int ret;
3888
3889	skb = ieee80211_proberesp_get(wl->hw, vif);
3890	if (!skb)
3891		return -EOPNOTSUPP;
3892
3893	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3894				      CMD_TEMPL_AP_PROBE_RESPONSE,
3895				      skb->data,
3896				      skb->len, 0,
3897				      rates);
3898	dev_kfree_skb(skb);
3899
3900	if (ret < 0)
3901		goto out;
3902
3903	wl1271_debug(DEBUG_AP, "probe response updated");
3904	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3905
3906out:
3907	return ret;
3908}
3909
3910static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3911					     struct ieee80211_vif *vif,
3912					     u8 *probe_rsp_data,
3913					     size_t probe_rsp_len,
3914					     u32 rates)
3915{
3916	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3917	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3918	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3919	int ssid_ie_offset, ie_offset, templ_len;
3920	const u8 *ptr;
3921
3922	/* no need to change probe response if the SSID is set correctly */
3923	if (wlvif->ssid_len > 0)
3924		return wl1271_cmd_template_set(wl, wlvif->role_id,
3925					       CMD_TEMPL_AP_PROBE_RESPONSE,
3926					       probe_rsp_data,
3927					       probe_rsp_len, 0,
3928					       rates);
3929
3930	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3931		wl1271_error("probe_rsp template too big");
3932		return -EINVAL;
3933	}
3934
3935	/* start searching from IE offset */
3936	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3937
3938	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3939			       probe_rsp_len - ie_offset);
3940	if (!ptr) {
3941		wl1271_error("No SSID in beacon!");
3942		return -EINVAL;
3943	}
3944
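	/* note where the SSID IE starts and skip past it in the source data */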
3945	ssid_ie_offset = ptr - probe_rsp_data;
3946	ptr += (ptr[1] + 2);
3947
3948	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3949
3950	/* insert SSID from bss_conf */
3951	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3952	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3953	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3954	       bss_conf->ssid, bss_conf->ssid_len);
3955	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3956
3957	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3958	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
3959	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3960
3961	return wl1271_cmd_template_set(wl, wlvif->role_id,
3962				       CMD_TEMPL_AP_PROBE_RESPONSE,
3963				       probe_rsp_templ,
3964				       templ_len, 0,
3965				       rates);
3966}
3967
3968static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3969				       struct ieee80211_vif *vif,
3970				       struct ieee80211_bss_conf *bss_conf,
3971				       u32 changed)
3972{
3973	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3974	int ret = 0;
3975
3976	if (changed & BSS_CHANGED_ERP_SLOT) {
3977		if (bss_conf->use_short_slot)
3978			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3979		else
3980			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3981		if (ret < 0) {
3982			wl1271_warning("Set slot time failed %d", ret);
3983			goto out;
3984		}
3985	}
3986
3987	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3988		if (bss_conf->use_short_preamble)
3989			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3990		else
3991			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3992	}
3993
3994	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3995		if (bss_conf->use_cts_prot)
3996			ret = wl1271_acx_cts_protect(wl, wlvif,
3997						     CTSPROTECT_ENABLE);
3998		else
3999			ret = wl1271_acx_cts_protect(wl, wlvif,
4000						     CTSPROTECT_DISABLE);
4001		if (ret < 0) {
4002			wl1271_warning("Set ctsprotect failed %d", ret);
4003			goto out;
4004		}
4005	}
4006
4007out:
4008	return ret;
4009}
4010
4011static int wlcore_set_beacon_template(struct wl1271 *wl,
4012				      struct ieee80211_vif *vif,
4013				      bool is_ap)
4014{
4015	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4016	struct ieee80211_hdr *hdr;
4017	u32 min_rate;
4018	int ret;
4019	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4020	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4021	u16 tmpl_id;
4022
4023	if (!beacon) {
4024		ret = -EINVAL;
4025		goto out;
4026	}
4027
4028	wl1271_debug(DEBUG_MASTER, "beacon updated");
4029
4030	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4031	if (ret < 0) {
4032		dev_kfree_skb(beacon);
4033		goto out;
4034	}
4035	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4036	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4037		CMD_TEMPL_BEACON;
4038	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4039				      beacon->data,
4040				      beacon->len, 0,
4041				      min_rate);
4042	if (ret < 0) {
4043		dev_kfree_skb(beacon);
4044		goto out;
4045	}
4046
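	/* WMM is considered enabled if the beacon carries the Microsoft WMM vendor IE */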
4047	wlvif->wmm_enabled =
4048		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4049					WLAN_OUI_TYPE_MICROSOFT_WMM,
4050					beacon->data + ieoffset,
4051					beacon->len - ieoffset);
4052
	/*
	 * If userspace has already set a probe response template
	 * explicitly, don't derive one from the beacon data.
	 */
4057	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4058		goto end_bcn;
4059
4060	/* remove TIM ie from probe response */
4061	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4062
	/*
	 * Remove the P2P IE from the probe response: the FW only
	 * responds to probe requests that don't include the P2P IE.
	 * Probe requests that do include it are passed up and
	 * answered by the supplicant, since the spec forbids
	 * including the P2P IE when responding to probe requests
	 * that didn't include it.
	 */
4071	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4072				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4073
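	/* turn the beacon into a probe response by rewriting the frame control */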
4074	hdr = (struct ieee80211_hdr *) beacon->data;
4075	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4076					 IEEE80211_STYPE_PROBE_RESP);
4077	if (is_ap)
4078		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4079							   beacon->data,
4080							   beacon->len,
4081							   min_rate);
4082	else
4083		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4084					      CMD_TEMPL_PROBE_RESPONSE,
4085					      beacon->data,
4086					      beacon->len, 0,
4087					      min_rate);
4088end_bcn:
4089	dev_kfree_skb(beacon);
4090	if (ret < 0)
4091		goto out;
4092
4093out:
4094	return ret;
4095}
4096
4097static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4098					  struct ieee80211_vif *vif,
4099					  struct ieee80211_bss_conf *bss_conf,
4100					  u32 changed)
4101{
4102	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4103	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4104	int ret = 0;
4105
4106	if (changed & BSS_CHANGED_BEACON_INT) {
4107		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4108			bss_conf->beacon_int);
4109
4110		wlvif->beacon_int = bss_conf->beacon_int;
4111	}
4112
4113	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4114		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4115
4116		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4117	}
4118
4119	if (changed & BSS_CHANGED_BEACON) {
4120		ret = wlcore_set_beacon_template(wl, vif, is_ap);
4121		if (ret < 0)
4122			goto out;
4123
4124		if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4125				       &wlvif->flags)) {
4126			ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4127			if (ret < 0)
4128				goto out;
4129		}
4130	}
4131out:
4132	if (ret != 0)
4133		wl1271_error("beacon info change failed: %d", ret);
4134	return ret;
4135}
4136
4137/* AP mode changes */
4138static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4139				       struct ieee80211_vif *vif,
4140				       struct ieee80211_bss_conf *bss_conf,
4141				       u32 changed)
4142{
4143	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4144	int ret = 0;
4145
4146	if (changed & BSS_CHANGED_BASIC_RATES) {
4147		u32 rates = bss_conf->basic_rates;
4148
4149		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4150								 wlvif->band);
4151		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4152							wlvif->basic_rate_set);
4153
4154		ret = wl1271_init_ap_rates(wl, wlvif);
4155		if (ret < 0) {
4156			wl1271_error("AP rate policy change failed %d", ret);
4157			goto out;
4158		}
4159
4160		ret = wl1271_ap_init_templates(wl, vif);
4161		if (ret < 0)
4162			goto out;
4163
4164		ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
4165		if (ret < 0)
4166			goto out;
4167
4168		ret = wlcore_set_beacon_template(wl, vif, true);
4169		if (ret < 0)
4170			goto out;
4171	}
4172
4173	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4174	if (ret < 0)
4175		goto out;
4176
4177	if (changed & BSS_CHANGED_BEACON_ENABLED) {
4178		if (bss_conf->enable_beacon) {
4179			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4180				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4181				if (ret < 0)
4182					goto out;
4183
4184				ret = wl1271_ap_init_hwenc(wl, wlvif);
4185				if (ret < 0)
4186					goto out;
4187
4188				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4189				wl1271_debug(DEBUG_AP, "started AP");
4190			}
4191		} else {
4192			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
				/*
				 * The AP may still be in ROC because we have
				 * just sent an auth reply; cancel it before
				 * stopping the role.
				 */
4197				if (test_bit(wlvif->role_id, wl->roc_map))
4198					wl12xx_croc(wl, wlvif->role_id);
4199
4200				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4201				if (ret < 0)
4202					goto out;
4203
4204				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4205				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4206					  &wlvif->flags);
4207				wl1271_debug(DEBUG_AP, "stopped AP");
4208			}
4209		}
4210	}
4211
4212	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4213	if (ret < 0)
4214		goto out;
4215
4216	/* Handle HT information change */
4217	if ((changed & BSS_CHANGED_HT) &&
4218	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4219		ret = wl1271_acx_set_ht_information(wl, wlvif,
4220					bss_conf->ht_operation_mode);
4221		if (ret < 0) {
4222			wl1271_warning("Set ht information failed %d", ret);
4223			goto out;
4224		}
4225	}
4226
4227out:
4228	return;
4229}
4230
4231static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4232			    struct ieee80211_bss_conf *bss_conf,
4233			    u32 sta_rate_set)
4234{
4235	u32 rates;
4236	int ret;
4237
4238	wl1271_debug(DEBUG_MAC80211,
4239	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4240	     bss_conf->bssid, bss_conf->aid,
4241	     bss_conf->beacon_int,
4242	     bss_conf->basic_rates, sta_rate_set);
4243
4244	wlvif->beacon_int = bss_conf->beacon_int;
4245	rates = bss_conf->basic_rates;
4246	wlvif->basic_rate_set =
4247		wl1271_tx_enabled_rates_get(wl, rates,
4248					    wlvif->band);
4249	wlvif->basic_rate =
4250		wl1271_tx_min_rate_get(wl,
4251				       wlvif->basic_rate_set);
4252
4253	if (sta_rate_set)
4254		wlvif->rate_set =
4255			wl1271_tx_enabled_rates_get(wl,
4256						sta_rate_set,
4257						wlvif->band);
4258
4259	/* we only support sched_scan while not connected */
4260	if (wl->sched_vif == wlvif)
4261		wl->ops->sched_scan_stop(wl, wlvif);
4262
4263	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4264	if (ret < 0)
4265		return ret;
4266
4267	ret = wl12xx_cmd_build_null_data(wl, wlvif);
4268	if (ret < 0)
4269		return ret;
4270
4271	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4272	if (ret < 0)
4273		return ret;
4274
4275	wlcore_set_ssid(wl, wlvif);
4276
4277	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4278
4279	return 0;
4280}
4281
4282static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4283{
4284	int ret;
4285
4286	/* revert back to minimum rates for the current band */
4287	wl1271_set_band_rate(wl, wlvif);
4288	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4289
4290	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4291	if (ret < 0)
4292		return ret;
4293
4294	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4295	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4296		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4297		if (ret < 0)
4298			return ret;
4299	}
4300
4301	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4302	return 0;
4303}
4304/* STA/IBSS mode changes */
4305static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4306					struct ieee80211_vif *vif,
4307					struct ieee80211_bss_conf *bss_conf,
4308					u32 changed)
4309{
4310	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4311	bool do_join = false;
4312	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4313	bool ibss_joined = false;
4314	u32 sta_rate_set = 0;
4315	int ret;
4316	struct ieee80211_sta *sta;
4317	bool sta_exists = false;
4318	struct ieee80211_sta_ht_cap sta_ht_cap;
4319
4320	if (is_ibss) {
4321		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4322						     changed);
4323		if (ret < 0)
4324			goto out;
4325	}
4326
4327	if (changed & BSS_CHANGED_IBSS) {
4328		if (bss_conf->ibss_joined) {
4329			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4330			ibss_joined = true;
4331		} else {
4332			wlcore_unset_assoc(wl, wlvif);
4333			wl12xx_cmd_role_stop_sta(wl, wlvif);
4334		}
4335	}
4336
4337	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4338		do_join = true;
4339
4340	/* Need to update the SSID (for filtering etc) */
4341	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4342		do_join = true;
4343
4344	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4345		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4346			     bss_conf->enable_beacon ? "enabled" : "disabled");
4347
4348		do_join = true;
4349	}
4350
4351	if (changed & BSS_CHANGED_IDLE && !is_ibss)
4352		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4353
4354	if (changed & BSS_CHANGED_CQM) {
4355		bool enable = false;
4356		if (bss_conf->cqm_rssi_thold)
4357			enable = true;
4358		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4359						  bss_conf->cqm_rssi_thold,
4360						  bss_conf->cqm_rssi_hyst);
4361		if (ret < 0)
4362			goto out;
4363		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4364	}
4365
4366	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4367		       BSS_CHANGED_ASSOC)) {
4368		rcu_read_lock();
4369		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4370		if (sta) {
4371			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4372
4373			/* save the supp_rates of the ap */
4374			sta_rate_set = sta->supp_rates[wlvif->band];
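			/* fold the HT/MIMO MCS bitmaps in above the legacy rates */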
4375			if (sta->ht_cap.ht_supported)
4376				sta_rate_set |=
4377					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4378					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4379			sta_ht_cap = sta->ht_cap;
4380			sta_exists = true;
4381		}
4382
4383		rcu_read_unlock();
4384	}
4385
4386	if (changed & BSS_CHANGED_BSSID) {
4387		if (!is_zero_ether_addr(bss_conf->bssid)) {
4388			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4389					       sta_rate_set);
4390			if (ret < 0)
4391				goto out;
4392
4393			/* Need to update the BSSID (for filtering etc) */
4394			do_join = true;
4395		} else {
4396			ret = wlcore_clear_bssid(wl, wlvif);
4397			if (ret < 0)
4398				goto out;
4399		}
4400	}
4401
4402	if (changed & BSS_CHANGED_IBSS) {
4403		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4404			     bss_conf->ibss_joined);
4405
4406		if (bss_conf->ibss_joined) {
4407			u32 rates = bss_conf->basic_rates;
4408			wlvif->basic_rate_set =
4409				wl1271_tx_enabled_rates_get(wl, rates,
4410							    wlvif->band);
4411			wlvif->basic_rate =
4412				wl1271_tx_min_rate_get(wl,
4413						       wlvif->basic_rate_set);
4414
4415			/* by default, use 11b + OFDM rates */
4416			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4417			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4418			if (ret < 0)
4419				goto out;
4420		}
4421	}
4422
4423	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4424		/* enable beacon filtering */
4425		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4426		if (ret < 0)
4427			goto out;
4428	}
4429
4430	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4431	if (ret < 0)
4432		goto out;
4433
4434	if (do_join) {
4435		ret = wlcore_join(wl, wlvif);
4436		if (ret < 0) {
4437			wl1271_warning("cmd join failed %d", ret);
4438			goto out;
4439		}
4440	}
4441
4442	if (changed & BSS_CHANGED_ASSOC) {
4443		if (bss_conf->assoc) {
4444			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4445					       sta_rate_set);
4446			if (ret < 0)
4447				goto out;
4448
4449			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4450				wl12xx_set_authorized(wl, wlvif);
4451		} else {
4452			wlcore_unset_assoc(wl, wlvif);
4453		}
4454	}
4455
4456	if (changed & BSS_CHANGED_PS) {
4457		if ((bss_conf->ps) &&
4458		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4459		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4460			int ps_mode;
4461			char *ps_mode_str;
4462
4463			if (wl->conf.conn.forced_ps) {
4464				ps_mode = STATION_POWER_SAVE_MODE;
4465				ps_mode_str = "forced";
4466			} else {
4467				ps_mode = STATION_AUTO_PS_MODE;
4468				ps_mode_str = "auto";
4469			}
4470
4471			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4472
4473			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4474			if (ret < 0)
4475				wl1271_warning("enter %s ps failed %d",
4476					       ps_mode_str, ret);
4477		} else if (!bss_conf->ps &&
4478			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4479			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4480
4481			ret = wl1271_ps_set_mode(wl, wlvif,
4482						 STATION_ACTIVE_MODE);
4483			if (ret < 0)
4484				wl1271_warning("exit auto ps failed %d", ret);
4485		}
4486	}
4487
4488	/* Handle new association with HT. Do this after join. */
4489	if (sta_exists) {
4490		bool enabled =
4491			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4492
4493		ret = wlcore_hw_set_peer_cap(wl,
4494					     &sta_ht_cap,
4495					     enabled,
4496					     wlvif->rate_set,
4497					     wlvif->sta.hlid);
4498		if (ret < 0) {
4499			wl1271_warning("Set ht cap failed %d", ret);
4500			goto out;
4502		}
4503
4504		if (enabled) {
4505			ret = wl1271_acx_set_ht_information(wl, wlvif,
4506						bss_conf->ht_operation_mode);
4507			if (ret < 0) {
4508				wl1271_warning("Set ht information failed %d",
4509					       ret);
4510				goto out;
4511			}
4512		}
4513	}
4514
	/* Handle ARP filtering. Done after join. */
4516	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4517	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4518		__be32 addr = bss_conf->arp_addr_list[0];
4519		wlvif->sta.qos = bss_conf->qos;
4520		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4521
4522		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4523			wlvif->ip_addr = addr;
			/*
			 * The template should only need to be configured upon
			 * association. However, the correct IP doesn't always
			 * end up in the transmitted frames, so reconfigure the
			 * template on every IP change.
			 */
4530			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4531			if (ret < 0) {
4532				wl1271_warning("build arp rsp failed: %d", ret);
4533				goto out;
4534			}
4535
4536			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4537				(ACX_ARP_FILTER_ARP_FILTERING |
4538				 ACX_ARP_FILTER_AUTO_ARP),
4539				addr);
4540		} else {
4541			wlvif->ip_addr = 0;
4542			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4543		}
4544
4545		if (ret < 0)
4546			goto out;
4547	}
4548
4549out:
4550	return;
4551}
4552
4553static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4554				       struct ieee80211_vif *vif,
4555				       struct ieee80211_bss_conf *bss_conf,
4556				       u32 changed)
4557{
4558	struct wl1271 *wl = hw->priv;
4559	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4560	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4561	int ret;
4562
4563	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4564		     wlvif->role_id, (int)changed);
4565
4566	/*
4567	 * make sure to cancel pending disconnections if our association
4568	 * state changed
4569	 */
4570	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4571		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4572
4573	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4574	    !bss_conf->enable_beacon)
4575		wl1271_tx_flush(wl);
4576
4577	mutex_lock(&wl->mutex);
4578
4579	if (unlikely(wl->state != WLCORE_STATE_ON))
4580		goto out;
4581
4582	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4583		goto out;
4584
4585	ret = wl1271_ps_elp_wakeup(wl);
4586	if (ret < 0)
4587		goto out;
4588
4589	if ((changed & BSS_CHANGED_TXPOWER) &&
4590	    bss_conf->txpower != wlvif->power_level) {
4591
4592		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4593		if (ret < 0)
4594			goto out;
4595
4596		wlvif->power_level = bss_conf->txpower;
4597	}
4598
4599	if (is_ap)
4600		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4601	else
4602		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4603
4604	wl1271_ps_elp_sleep(wl);
4605
4606out:
4607	mutex_unlock(&wl->mutex);
4608}
4609
4610static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4611				 struct ieee80211_chanctx_conf *ctx)
4612{
4613	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4614		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4615		     cfg80211_get_chandef_type(&ctx->def));
4616	return 0;
4617}
4618
4619static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4620				     struct ieee80211_chanctx_conf *ctx)
4621{
4622	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4623		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4624		     cfg80211_get_chandef_type(&ctx->def));
4625}
4626
4627static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4628				     struct ieee80211_chanctx_conf *ctx,
4629				     u32 changed)
4630{
4631	struct wl1271 *wl = hw->priv;
4632	struct wl12xx_vif *wlvif;
4633	int ret;
4634	int channel = ieee80211_frequency_to_channel(
4635		ctx->def.chan->center_freq);
4636
4637	wl1271_debug(DEBUG_MAC80211,
4638		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4639		     channel, cfg80211_get_chandef_type(&ctx->def), changed);
4640
4641	mutex_lock(&wl->mutex);
4642
4643	ret = wl1271_ps_elp_wakeup(wl);
4644	if (ret < 0)
4645		goto out;
4646
4647	wl12xx_for_each_wlvif(wl, wlvif) {
4648		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4649
4650		rcu_read_lock();
4651		if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4652			rcu_read_unlock();
4653			continue;
4654		}
4655		rcu_read_unlock();
4656
4657		/* start radar if needed */
4658		if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4659		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
4660		    ctx->radar_enabled && !wlvif->radar_enabled &&
4661		    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4662			wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4663			wlcore_hw_set_cac(wl, wlvif, true);
4664			wlvif->radar_enabled = true;
4665		}
4666	}
4667
4668	wl1271_ps_elp_sleep(wl);
4669out:
4670	mutex_unlock(&wl->mutex);
4671}
4672
4673static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4674					struct ieee80211_vif *vif,
4675					struct ieee80211_chanctx_conf *ctx)
4676{
4677	struct wl1271 *wl = hw->priv;
4678	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4679	int channel = ieee80211_frequency_to_channel(
4680		ctx->def.chan->center_freq);
4681	int ret = -EINVAL;
4682
4683	wl1271_debug(DEBUG_MAC80211,
4684		     "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4685		     wlvif->role_id, channel,
4686		     cfg80211_get_chandef_type(&ctx->def),
4687		     ctx->radar_enabled, ctx->def.chan->dfs_state);
4688
4689	mutex_lock(&wl->mutex);
4690
4691	if (unlikely(wl->state != WLCORE_STATE_ON))
4692		goto out;
4693
4694	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4695		goto out;
4696
4697	ret = wl1271_ps_elp_wakeup(wl);
4698	if (ret < 0)
4699		goto out;
4700
4701	wlvif->band = ctx->def.chan->band;
4702	wlvif->channel = channel;
4703	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4704
4705	/* update default rates according to the band */
4706	wl1271_set_band_rate(wl, wlvif);
4707
4708	if (ctx->radar_enabled &&
4709	    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4710		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4711		wlcore_hw_set_cac(wl, wlvif, true);
4712		wlvif->radar_enabled = true;
4713	}
4714
4715	wl1271_ps_elp_sleep(wl);
4716out:
4717	mutex_unlock(&wl->mutex);
4718
4719	return 0;
4720}
4721
4722static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4723					   struct ieee80211_vif *vif,
4724					   struct ieee80211_chanctx_conf *ctx)
4725{
4726	struct wl1271 *wl = hw->priv;
4727	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4728	int ret;
4729
4730	wl1271_debug(DEBUG_MAC80211,
4731		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4732		     wlvif->role_id,
4733		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4734		     cfg80211_get_chandef_type(&ctx->def));
4735
4736	wl1271_tx_flush(wl);
4737
4738	mutex_lock(&wl->mutex);
4739
4740	if (unlikely(wl->state != WLCORE_STATE_ON))
4741		goto out;
4742
4743	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4744		goto out;
4745
4746	ret = wl1271_ps_elp_wakeup(wl);
4747	if (ret < 0)
4748		goto out;
4749
4750	if (wlvif->radar_enabled) {
4751		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4752		wlcore_hw_set_cac(wl, wlvif, false);
4753		wlvif->radar_enabled = false;
4754	}
4755
4756	wl1271_ps_elp_sleep(wl);
4757out:
4758	mutex_unlock(&wl->mutex);
4759}
4760
4761static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4762				    struct wl12xx_vif *wlvif,
4763				    struct ieee80211_chanctx_conf *new_ctx)
4764{
4765	int channel = ieee80211_frequency_to_channel(
4766		new_ctx->def.chan->center_freq);
4767
4768	wl1271_debug(DEBUG_MAC80211,
4769		     "switch vif (role %d) %d -> %d chan_type: %d",
4770		     wlvif->role_id, wlvif->channel, channel,
4771		     cfg80211_get_chandef_type(&new_ctx->def));
4772
4773	if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4774		return 0;
4775
4776	WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4777
4778	if (wlvif->radar_enabled) {
4779		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4780		wlcore_hw_set_cac(wl, wlvif, false);
4781		wlvif->radar_enabled = false;
4782	}
4783
4784	wlvif->band = new_ctx->def.chan->band;
4785	wlvif->channel = channel;
4786	wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4787
4788	/* start radar if needed */
4789	if (new_ctx->radar_enabled) {
4790		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4791		wlcore_hw_set_cac(wl, wlvif, true);
4792		wlvif->radar_enabled = true;
4793	}
4794
4795	return 0;
4796}
4797
4798static int
4799wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4800			     struct ieee80211_vif_chanctx_switch *vifs,
4801			     int n_vifs,
4802			     enum ieee80211_chanctx_switch_mode mode)
4803{
4804	struct wl1271 *wl = hw->priv;
4805	int i, ret;
4806
4807	wl1271_debug(DEBUG_MAC80211,
4808		     "mac80211 switch chanctx n_vifs %d mode %d",
4809		     n_vifs, mode);
4810
4811	mutex_lock(&wl->mutex);
4812
4813	ret = wl1271_ps_elp_wakeup(wl);
4814	if (ret < 0)
4815		goto out;
4816
4817	for (i = 0; i < n_vifs; i++) {
4818		struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4819
4820		ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4821		if (ret)
4822			goto out_sleep;
4823	}
4824out_sleep:
4825	wl1271_ps_elp_sleep(wl);
4826out:
4827	mutex_unlock(&wl->mutex);
4828
4829	return 0;
4830}
4831
4832static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4833			     struct ieee80211_vif *vif, u16 queue,
4834			     const struct ieee80211_tx_queue_params *params)
4835{
4836	struct wl1271 *wl = hw->priv;
4837	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4838	u8 ps_scheme;
4839	int ret = 0;
4840
4841	mutex_lock(&wl->mutex);
4842
4843	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4844
4845	if (params->uapsd)
4846		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4847	else
4848		ps_scheme = CONF_PS_SCHEME_LEGACY;
4849
4850	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4851		goto out;
4852
4853	ret = wl1271_ps_elp_wakeup(wl);
4854	if (ret < 0)
4855		goto out;
4856
	/*
	 * mac80211 configures the TXOP in units of 32us; the firmware
	 * expects microseconds, hence the << 5 below.
	 */
4861	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4862				params->cw_min, params->cw_max,
4863				params->aifs, params->txop << 5);
4864	if (ret < 0)
4865		goto out_sleep;
4866
4867	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4868				 CONF_CHANNEL_TYPE_EDCF,
4869				 wl1271_tx_get_queue(queue),
4870				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4871				 0, 0);
4872
4873out_sleep:
4874	wl1271_ps_elp_sleep(wl);
4875
4876out:
4877	mutex_unlock(&wl->mutex);
4878
4879	return ret;
4880}
4881
4882static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4883			     struct ieee80211_vif *vif)
4884{
4886	struct wl1271 *wl = hw->priv;
4887	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4888	u64 mactime = ULLONG_MAX;
4889	int ret;
4890
4891	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4892
4893	mutex_lock(&wl->mutex);
4894
4895	if (unlikely(wl->state != WLCORE_STATE_ON))
4896		goto out;
4897
4898	ret = wl1271_ps_elp_wakeup(wl);
4899	if (ret < 0)
4900		goto out;
4901
4902	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4903	if (ret < 0)
4904		goto out_sleep;
4905
4906out_sleep:
4907	wl1271_ps_elp_sleep(wl);
4908
4909out:
4910	mutex_unlock(&wl->mutex);
4911	return mactime;
4912}
4913
4914static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4915				struct survey_info *survey)
4916{
4917	struct ieee80211_conf *conf = &hw->conf;
4918
4919	if (idx != 0)
4920		return -ENOENT;
4921
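	/* only the current operating channel is reported, with no statistics */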
4922	survey->channel = conf->chandef.chan;
4923	survey->filled = 0;
4924	return 0;
4925}
4926
4927static int wl1271_allocate_sta(struct wl1271 *wl,
4928			     struct wl12xx_vif *wlvif,
4929			     struct ieee80211_sta *sta)
4930{
4931	struct wl1271_station *wl_sta;
4932	int ret;
4933
4935	if (wl->active_sta_count >= wl->max_ap_stations) {
		wl1271_warning("could not allocate HLID - too many stations");
4937		return -EBUSY;
4938	}
4939
4940	wl_sta = (struct wl1271_station *)sta->drv_priv;
4941	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4942	if (ret < 0) {
4943		wl1271_warning("could not allocate HLID - too many links");
4944		return -EBUSY;
4945	}
4946
4947	/* use the previous security seq, if this is a recovery/resume */
4948	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4949
4950	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4951	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4952	wl->active_sta_count++;
4953	return 0;
4954}
4955
4956void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4957{
4958	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4959		return;
4960
4961	clear_bit(hlid, wlvif->ap.sta_hlid_map);
4962	__clear_bit(hlid, &wl->ap_ps_map);
4963	__clear_bit(hlid, &wl->ap_fw_ps_map);
4964
	/*
	 * save the last used PN in the private part of ieee80211_sta,
	 * in case of recovery/suspend
	 */
4969	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
4970
4971	wl12xx_free_link(wl, wlvif, &hlid);
4972	wl->active_sta_count--;
4973
4974	/*
4975	 * rearm the tx watchdog when the last STA is freed - give the FW a
4976	 * chance to return STA-buffered packets before complaining.
4977	 */
4978	if (wl->active_sta_count == 0)
4979		wl12xx_rearm_tx_watchdog_locked(wl);
4980}
4981
4982static int wl12xx_sta_add(struct wl1271 *wl,
4983			  struct wl12xx_vif *wlvif,
4984			  struct ieee80211_sta *sta)
4985{
4986	struct wl1271_station *wl_sta;
4987	int ret = 0;
4988	u8 hlid;
4989
4990	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4991
4992	ret = wl1271_allocate_sta(wl, wlvif, sta);
4993	if (ret < 0)
4994		return ret;
4995
4996	wl_sta = (struct wl1271_station *)sta->drv_priv;
4997	hlid = wl_sta->hlid;
4998
4999	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5000	if (ret < 0)
5001		wl1271_free_sta(wl, wlvif, hlid);
5002
5003	return ret;
5004}
5005
5006static int wl12xx_sta_remove(struct wl1271 *wl,
5007			     struct wl12xx_vif *wlvif,
5008			     struct ieee80211_sta *sta)
5009{
5010	struct wl1271_station *wl_sta;
5011	int ret = 0, id;
5012
5013	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5014
5015	wl_sta = (struct wl1271_station *)sta->drv_priv;
5016	id = wl_sta->hlid;
5017	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5018		return -EINVAL;
5019
5020	ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5021	if (ret < 0)
5022		return ret;
5023
5024	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5025	return ret;
5026}
5027
5028static void wlcore_roc_if_possible(struct wl1271 *wl,
5029				   struct wl12xx_vif *wlvif)
5030{
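	/* bail out if some role is already on-channel - only one ROC at a time */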
5031	if (find_first_bit(wl->roc_map,
5032			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5033		return;
5034
5035	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5036		return;
5037
5038	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5039}
5040
/*
 * When wl_sta is NULL, treat this call as if it came from a
 * pending auth reply.
 * wl->mutex must be held and the FW must be awake when this
 * function is called.
 */
5047void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5048			      struct wl1271_station *wl_sta, bool in_conn)
5049{
5050	if (in_conn) {
5051		if (WARN_ON(wl_sta && wl_sta->in_connection))
5052			return;
5053
5054		if (!wlvif->ap_pending_auth_reply &&
5055		    !wlvif->inconn_count)
5056			wlcore_roc_if_possible(wl, wlvif);
5057
5058		if (wl_sta) {
5059			wl_sta->in_connection = true;
5060			wlvif->inconn_count++;
5061		} else {
5062			wlvif->ap_pending_auth_reply = true;
5063		}
5064	} else {
5065		if (wl_sta && !wl_sta->in_connection)
5066			return;
5067
5068		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5069			return;
5070
5071		if (WARN_ON(wl_sta && !wlvif->inconn_count))
5072			return;
5073
5074		if (wl_sta) {
5075			wl_sta->in_connection = false;
5076			wlvif->inconn_count--;
5077		} else {
5078			wlvif->ap_pending_auth_reply = false;
5079		}
5080
5081		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5082		    test_bit(wlvif->role_id, wl->roc_map))
5083			wl12xx_croc(wl, wlvif->role_id);
5084	}
5085}
5086
5087static int wl12xx_update_sta_state(struct wl1271 *wl,
5088				   struct wl12xx_vif *wlvif,
5089				   struct ieee80211_sta *sta,
5090				   enum ieee80211_sta_state old_state,
5091				   enum ieee80211_sta_state new_state)
5092{
5093	struct wl1271_station *wl_sta;
5094	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5095	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5096	int ret;
5097
5098	wl_sta = (struct wl1271_station *)sta->drv_priv;
5099
5100	/* Add station (AP mode) */
5101	if (is_ap &&
5102	    old_state == IEEE80211_STA_NOTEXIST &&
5103	    new_state == IEEE80211_STA_NONE) {
5104		ret = wl12xx_sta_add(wl, wlvif, sta);
5105		if (ret)
5106			return ret;
5107
5108		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5109	}
5110
5111	/* Remove station (AP mode) */
5112	if (is_ap &&
5113	    old_state == IEEE80211_STA_NONE &&
5114	    new_state == IEEE80211_STA_NOTEXIST) {
5115		/* must not fail */
5116		wl12xx_sta_remove(wl, wlvif, sta);
5117
5118		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5119	}
5120
5121	/* Authorize station (AP mode) */
5122	if (is_ap &&
5123	    new_state == IEEE80211_STA_AUTHORIZED) {
5124		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5125		if (ret < 0)
5126			return ret;
5127
5128		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5129						     wl_sta->hlid);
5130		if (ret)
5131			return ret;
5132
5133		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5134	}
5135
5136	/* Authorize station */
5137	if (is_sta &&
5138	    new_state == IEEE80211_STA_AUTHORIZED) {
5139		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5140		ret = wl12xx_set_authorized(wl, wlvif);
5141		if (ret)
5142			return ret;
5143	}
5144
5145	if (is_sta &&
5146	    old_state == IEEE80211_STA_AUTHORIZED &&
5147	    new_state == IEEE80211_STA_ASSOC) {
5148		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5149		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5150	}
5151
5152	/* save seq number on disassoc (suspend) */
5153	if (is_sta &&
5154	    old_state == IEEE80211_STA_ASSOC &&
5155	    new_state == IEEE80211_STA_AUTH) {
5156		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5157		wlvif->total_freed_pkts = 0;
5158	}
5159
5160	/* restore seq number on assoc (resume) */
5161	if (is_sta &&
5162	    old_state == IEEE80211_STA_AUTH &&
5163	    new_state == IEEE80211_STA_ASSOC) {
5164		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5165	}
5166
5167	/* clear ROCs on failure or authorization */
5168	if (is_sta &&
5169	    (new_state == IEEE80211_STA_AUTHORIZED ||
5170	     new_state == IEEE80211_STA_NOTEXIST)) {
5171		if (test_bit(wlvif->role_id, wl->roc_map))
5172			wl12xx_croc(wl, wlvif->role_id);
5173	}
5174
5175	if (is_sta &&
5176	    old_state == IEEE80211_STA_NOTEXIST &&
5177	    new_state == IEEE80211_STA_NONE) {
5178		if (find_first_bit(wl->roc_map,
5179				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5180			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5181			wl12xx_roc(wl, wlvif, wlvif->role_id,
5182				   wlvif->band, wlvif->channel);
5183		}
5184	}
5185	return 0;
5186}
5187
5188static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5189			       struct ieee80211_vif *vif,
5190			       struct ieee80211_sta *sta,
5191			       enum ieee80211_sta_state old_state,
5192			       enum ieee80211_sta_state new_state)
5193{
5194	struct wl1271 *wl = hw->priv;
5195	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5196	int ret;
5197
5198	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5199		     sta->aid, old_state, new_state);
5200
5201	mutex_lock(&wl->mutex);
5202
5203	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5204		ret = -EBUSY;
5205		goto out;
5206	}
5207
5208	ret = wl1271_ps_elp_wakeup(wl);
5209	if (ret < 0)
5210		goto out;
5211
5212	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5213
5214	wl1271_ps_elp_sleep(wl);
5215out:
5216	mutex_unlock(&wl->mutex);
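	/* never report failure for state downgrades (teardown paths) */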
5217	if (new_state < old_state)
5218		return 0;
5219	return ret;
5220}
5221
5222static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5223				  struct ieee80211_vif *vif,
5224				  enum ieee80211_ampdu_mlme_action action,
5225				  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
5226				  u8 buf_size)
5227{
5228	struct wl1271 *wl = hw->priv;
5229	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5230	int ret;
5231	u8 hlid, *ba_bitmap;
5232
5233	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5234		     tid);
5235
	/* sanity check - the fields in the FW are only 8 bits wide */
5237	if (WARN_ON(tid > 0xFF))
5238		return -ENOTSUPP;
5239
5240	mutex_lock(&wl->mutex);
5241
5242	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5243		ret = -EAGAIN;
5244		goto out;
5245	}
5246
5247	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5248		hlid = wlvif->sta.hlid;
5249	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5250		struct wl1271_station *wl_sta;
5251
5252		wl_sta = (struct wl1271_station *)sta->drv_priv;
5253		hlid = wl_sta->hlid;
5254	} else {
5255		ret = -EINVAL;
5256		goto out;
5257	}
5258
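	/* per-link bitmap of TIDs with an active RX BA session */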
5259	ba_bitmap = &wl->links[hlid].ba_bitmap;
5260
5261	ret = wl1271_ps_elp_wakeup(wl);
5262	if (ret < 0)
5263		goto out;
5264
5265	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5266		     tid, action);
5267
5268	switch (action) {
5269	case IEEE80211_AMPDU_RX_START:
5270		if (!wlvif->ba_support || !wlvif->ba_allowed) {
5271			ret = -ENOTSUPP;
5272			break;
5273		}
5274
5275		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5276			ret = -EBUSY;
5277			wl1271_error("exceeded max RX BA sessions");
5278			break;
5279		}
5280
5281		if (*ba_bitmap & BIT(tid)) {
5282			ret = -EINVAL;
5283			wl1271_error("cannot enable RX BA session on active "
5284				     "tid: %d", tid);
5285			break;
5286		}
5287
5288		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5289							 hlid);
5290		if (!ret) {
5291			*ba_bitmap |= BIT(tid);
5292			wl->ba_rx_session_count++;
5293		}
5294		break;
5295
5296	case IEEE80211_AMPDU_RX_STOP:
5297		if (!(*ba_bitmap & BIT(tid))) {
5298			/*
5299			 * this happens on reconfig - so only output a debug
5300			 * message for now, and don't fail the function.
5301			 */
5302			wl1271_debug(DEBUG_MAC80211,
5303				     "no active RX BA session on tid: %d",
5304				     tid);
5305			ret = 0;
5306			break;
5307		}
5308
5309		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5310							 hlid);
5311		if (!ret) {
5312			*ba_bitmap &= ~BIT(tid);
5313			wl->ba_rx_session_count--;
5314		}
5315		break;
5316
	/*
	 * BA initiator (TX) sessions are managed by the FW on its own,
	 * so all TX AMPDU actions intentionally fall through to -EINVAL.
	 */
5321	case IEEE80211_AMPDU_TX_START:
5322	case IEEE80211_AMPDU_TX_STOP_CONT:
5323	case IEEE80211_AMPDU_TX_STOP_FLUSH:
5324	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5325	case IEEE80211_AMPDU_TX_OPERATIONAL:
5326		ret = -EINVAL;
5327		break;
5328
5329	default:
5330		wl1271_error("Incorrect ampdu action id=%x\n", action);
5331		ret = -EINVAL;
5332	}
5333
5334	wl1271_ps_elp_sleep(wl);
5335
5336out:
5337	mutex_unlock(&wl->mutex);
5338
5339	return ret;
5340}
5341
5342static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5343				   struct ieee80211_vif *vif,
5344				   const struct cfg80211_bitrate_mask *mask)
5345{
5346	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5347	struct wl1271 *wl = hw->priv;
5348	int i, ret = 0;
5349
5350	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5351		mask->control[NL80211_BAND_2GHZ].legacy,
5352		mask->control[NL80211_BAND_5GHZ].legacy);
5353
5354	mutex_lock(&wl->mutex);
5355
5356	for (i = 0; i < WLCORE_NUM_BANDS; i++)
5357		wlvif->bitrate_masks[i] =
5358			wl1271_tx_enabled_rates_get(wl,
5359						    mask->control[i].legacy,
5360						    i);
5361
5362	if (unlikely(wl->state != WLCORE_STATE_ON))
5363		goto out;
5364
5365	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5366	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5367
5368		ret = wl1271_ps_elp_wakeup(wl);
5369		if (ret < 0)
5370			goto out;
5371
5372		wl1271_set_band_rate(wl, wlvif);
5373		wlvif->basic_rate =
5374			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5375		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5376
5377		wl1271_ps_elp_sleep(wl);
5378	}
5379out:
5380	mutex_unlock(&wl->mutex);
5381
5382	return ret;
5383}
5384
5385static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5386				     struct ieee80211_vif *vif,
5387				     struct ieee80211_channel_switch *ch_switch)
5388{
5389	struct wl1271 *wl = hw->priv;
5390	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5391	int ret;
5392
5393	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5394
5395	wl1271_tx_flush(wl);
5396
5397	mutex_lock(&wl->mutex);
5398
5399	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5400		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5401			ieee80211_chswitch_done(vif, false);
5402		goto out;
5403	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5404		goto out;
5405	}
5406
5407	ret = wl1271_ps_elp_wakeup(wl);
5408	if (ret < 0)
5409		goto out;
5410
5411	/* TODO: change mac80211 to pass vif as param */
5412
5413	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5414		unsigned long delay_usec;
5415
5416		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5417		if (ret)
5418			goto out_sleep;
5419
5420		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5421
5422		/* indicate failure 5 seconds after channel switch time */
5423		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5424			ch_switch->count;
5425		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5426					     usecs_to_jiffies(delay_usec) +
5427					     msecs_to_jiffies(5000));
5428	}
5429
5430out_sleep:
5431	wl1271_ps_elp_sleep(wl);
5432
5433out:
5434	mutex_unlock(&wl->mutex);
5435}
5436
5437static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5438					struct wl12xx_vif *wlvif,
5439					u8 eid)
5440{
5441	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5442	struct sk_buff *beacon =
5443		ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5444
5445	if (!beacon)
5446		return NULL;
5447
5448	return cfg80211_find_ie(eid,
5449				beacon->data + ieoffset,
5450				beacon->len - ieoffset);
5451}
5452
5453static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5454				u8 *csa_count)
5455{
5456	const u8 *ie;
5457	const struct ieee80211_channel_sw_ie *ie_csa;
5458
5459	ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5460	if (!ie)
5461		return -EINVAL;
5462
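	/* skip the two-byte IE header (ID + length) to reach the CSA payload */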
5463	ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5464	*csa_count = ie_csa->count;
5465
5466	return 0;
5467}
5468
5469static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5470					    struct ieee80211_vif *vif,
5471					    struct cfg80211_chan_def *chandef)
5472{
5473	struct wl1271 *wl = hw->priv;
5474	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5475	struct ieee80211_channel_switch ch_switch = {
5476		.block_tx = true,
5477		.chandef = *chandef,
5478	};
5479	int ret;
5480
5481	wl1271_debug(DEBUG_MAC80211,
5482		     "mac80211 channel switch beacon (role %d)",
5483		     wlvif->role_id);
5484
5485	ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5486	if (ret < 0) {
5487		wl1271_error("error getting beacon (for CSA counter)");
5488		return;
5489	}
5490
5491	mutex_lock(&wl->mutex);
5492
5493	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5494		ret = -EBUSY;
5495		goto out;
5496	}
5497
5498	ret = wl1271_ps_elp_wakeup(wl);
5499	if (ret < 0)
5500		goto out;
5501
5502	ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5503	if (ret)
5504		goto out_sleep;
5505
5506	set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5507
5508out_sleep:
5509	wl1271_ps_elp_sleep(wl);
5510out:
5511	mutex_unlock(&wl->mutex);
5512}
5513
5514static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5515			    u32 queues, bool drop)
5516{
5517	struct wl1271 *wl = hw->priv;
5518
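	/* the queues and drop arguments are ignored - flush everything */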
5519	wl1271_tx_flush(wl);
5520}
5521
5522static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5523				       struct ieee80211_vif *vif,
5524				       struct ieee80211_channel *chan,
5525				       int duration,
5526				       enum ieee80211_roc_type type)
5527{
5528	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5529	struct wl1271 *wl = hw->priv;
5530	int channel, ret = 0;
5531
5532	channel = ieee80211_frequency_to_channel(chan->center_freq);
5533
5534	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5535		     channel, wlvif->role_id);
5536
5537	mutex_lock(&wl->mutex);
5538
5539	if (unlikely(wl->state != WLCORE_STATE_ON))
5540		goto out;
5541
5542	/* return EBUSY if we can't ROC right now */
5543	if (WARN_ON(wl->roc_vif ||
5544		    find_first_bit(wl->roc_map,
5545				   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
5546		ret = -EBUSY;
5547		goto out;
5548	}
5549
5550	ret = wl1271_ps_elp_wakeup(wl);
5551	if (ret < 0)
5552		goto out;
5553
5554	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5555	if (ret < 0)
5556		goto out_sleep;
5557
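	/* remember the ROC vif and schedule completion after the requested duration */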
5558	wl->roc_vif = vif;
5559	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5560				     msecs_to_jiffies(duration));
5561out_sleep:
5562	wl1271_ps_elp_sleep(wl);
5563out:
5564	mutex_unlock(&wl->mutex);
5565	return ret;
5566}
5567
5568static int __wlcore_roc_completed(struct wl1271 *wl)
5569{
5570	struct wl12xx_vif *wlvif;
5571	int ret;
5572
5573	/* already completed */
5574	if (unlikely(!wl->roc_vif))
5575		return 0;
5576
5577	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5578
5579	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5580		return -EBUSY;
5581
5582	ret = wl12xx_stop_dev(wl, wlvif);
5583	if (ret < 0)
5584		return ret;
5585
5586	wl->roc_vif = NULL;
5587
5588	return 0;
5589}
5590
5591static int wlcore_roc_completed(struct wl1271 *wl)
5592{
5593	int ret;
5594
5595	wl1271_debug(DEBUG_MAC80211, "roc complete");
5596
5597	mutex_lock(&wl->mutex);
5598
5599	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5600		ret = -EBUSY;
5601		goto out;
5602	}
5603
5604	ret = wl1271_ps_elp_wakeup(wl);
5605	if (ret < 0)
5606		goto out;
5607
5608	ret = __wlcore_roc_completed(wl);
5609
5610	wl1271_ps_elp_sleep(wl);
5611out:
5612	mutex_unlock(&wl->mutex);
5613
5614	return ret;
5615}
5616
5617static void wlcore_roc_complete_work(struct work_struct *work)
5618{
5619	struct delayed_work *dwork;
5620	struct wl1271 *wl;
5621	int ret;
5622
5623	dwork = container_of(work, struct delayed_work, work);
5624	wl = container_of(dwork, struct wl1271, roc_complete_work);
5625
5626	ret = wlcore_roc_completed(wl);
5627	if (!ret)
5628		ieee80211_remain_on_channel_expired(wl->hw);
5629}
5630
5631static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5632{
5633	struct wl1271 *wl = hw->priv;
5634
5635	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5636
5637	/* TODO: per-vif */
5638	wl1271_tx_flush(wl);
5639
5640	/*
5641	 * we can't just flush_work here, because it might deadlock
5642	 * (as we might get called from the same workqueue)
5643	 */
5644	cancel_delayed_work_sync(&wl->roc_complete_work);
5645	wlcore_roc_completed(wl);
5646
5647	return 0;
5648}
5649
5650static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5651				    struct ieee80211_vif *vif,
5652				    struct ieee80211_sta *sta,
5653				    u32 changed)
5654{
5655	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5656
5657	wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5658
5659	if (!(changed & IEEE80211_RC_BW_CHANGED))
5660		return;
5661
5662	/* this callback is atomic, so schedule a new work */
5663	wlvif->rc_update_bw = sta->bandwidth;
5664	ieee80211_queue_work(hw, &wlvif->rc_update_work);
5665}
5666
5667static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5668				     struct ieee80211_vif *vif,
5669				     struct ieee80211_sta *sta,
5670				     struct station_info *sinfo)
5671{
5672	struct wl1271 *wl = hw->priv;
5673	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5674	s8 rssi_dbm;
5675	int ret;
5676
5677	wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5678
5679	mutex_lock(&wl->mutex);
5680
5681	if (unlikely(wl->state != WLCORE_STATE_ON))
5682		goto out;
5683
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;
5687
5688	ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5689	if (ret < 0)
5690		goto out_sleep;
5691
5692	sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
5693	sinfo->signal = rssi_dbm;
5694
5695out_sleep:
5696	wl1271_ps_elp_sleep(wl);
5697
5698out:
5699	mutex_unlock(&wl->mutex);
5700}
5701
5702static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5703{
5704	struct wl1271 *wl = hw->priv;
5705	bool ret = false;
5706
5707	mutex_lock(&wl->mutex);
5708
5709	if (unlikely(wl->state != WLCORE_STATE_ON))
5710		goto out;
5711
5712	/* packets are considered pending if in the TX queue or the FW */
5713	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5714out:
5715	mutex_unlock(&wl->mutex);
5716
5717	return ret;
5718}
5719
5720/* can't be const, mac80211 writes to this */
5721static struct ieee80211_rate wl1271_rates[] = {
5722	{ .bitrate = 10,
5723	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5724	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5725	{ .bitrate = 20,
5726	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5727	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5728	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5729	{ .bitrate = 55,
5730	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5731	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5732	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5733	{ .bitrate = 110,
5734	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5735	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5736	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5737	{ .bitrate = 60,
5738	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5739	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5740	{ .bitrate = 90,
5741	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5742	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5743	{ .bitrate = 120,
5744	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5745	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5746	{ .bitrate = 180,
5747	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5748	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5749	{ .bitrate = 240,
5750	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5751	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5752	{ .bitrate = 360,
5753	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5754	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5755	{ .bitrate = 480,
5756	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5757	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5758	{ .bitrate = 540,
5759	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5760	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5761};
5762
5763/* can't be const, mac80211 writes to this */
5764static struct ieee80211_channel wl1271_channels[] = {
5765	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5766	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5767	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5768	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5769	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5770	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5771	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5772	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5773	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5774	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5775	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5776	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5777	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5778	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5779};
5780
5781/* can't be const, mac80211 writes to this */
5782static struct ieee80211_supported_band wl1271_band_2ghz = {
5783	.channels = wl1271_channels,
5784	.n_channels = ARRAY_SIZE(wl1271_channels),
5785	.bitrates = wl1271_rates,
5786	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5787};
5788
5789/* 5 GHz data rates for WL1273 */
5790static struct ieee80211_rate wl1271_rates_5ghz[] = {
5791	{ .bitrate = 60,
5792	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5793	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5794	{ .bitrate = 90,
5795	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5796	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5797	{ .bitrate = 120,
5798	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5799	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5800	{ .bitrate = 180,
5801	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5802	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5803	{ .bitrate = 240,
5804	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5805	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5806	{ .bitrate = 360,
5807	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5808	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5809	{ .bitrate = 480,
5810	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5811	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5812	{ .bitrate = 540,
5813	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5814	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5815};
5816
5817/* 5 GHz band channels for WL1273 */
5818static struct ieee80211_channel wl1271_channels_5ghz[] = {
5819	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5820	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5821	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5822	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5823	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5824	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5825	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5826	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5827	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5828	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5829	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5830	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5831	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5832	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5833	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5834	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5835	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5836	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5837	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5838	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5839	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5840	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5841	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5842	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5843	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5844	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5845	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5846	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5847	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5848	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5849	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5850};

static struct ieee80211_supported_band wl1271_band_5ghz = {
	.channels = wl1271_channels_5ghz,
	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
	.bitrates = wl1271_rates_5ghz,
	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
};

static const struct ieee80211_ops wl1271_ops = {
	.start = wl1271_op_start,
	.stop = wlcore_op_stop,
	.add_interface = wl1271_op_add_interface,
	.remove_interface = wl1271_op_remove_interface,
	.change_interface = wl12xx_op_change_interface,
#ifdef CONFIG_PM
	.suspend = wl1271_op_suspend,
	.resume = wl1271_op_resume,
#endif
	.config = wl1271_op_config,
	.prepare_multicast = wl1271_op_prepare_multicast,
	.configure_filter = wl1271_op_configure_filter,
	.tx = wl1271_op_tx,
	.set_key = wlcore_op_set_key,
	.hw_scan = wl1271_op_hw_scan,
	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
	.sched_scan_start = wl1271_op_sched_scan_start,
	.sched_scan_stop = wl1271_op_sched_scan_stop,
	.bss_info_changed = wl1271_op_bss_info_changed,
	.set_frag_threshold = wl1271_op_set_frag_threshold,
	.set_rts_threshold = wl1271_op_set_rts_threshold,
	.conf_tx = wl1271_op_conf_tx,
	.get_tsf = wl1271_op_get_tsf,
	.get_survey = wl1271_op_get_survey,
	.sta_state = wl12xx_op_sta_state,
	.ampdu_action = wl1271_op_ampdu_action,
	.tx_frames_pending = wl1271_tx_frames_pending,
	.set_bitrate_mask = wl12xx_set_bitrate_mask,
	.set_default_unicast_key = wl1271_op_set_default_key_idx,
	.channel_switch = wl12xx_op_channel_switch,
	.channel_switch_beacon = wlcore_op_channel_switch_beacon,
	.flush = wlcore_op_flush,
	.remain_on_channel = wlcore_op_remain_on_channel,
	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
	.add_chanctx = wlcore_op_add_chanctx,
	.remove_chanctx = wlcore_op_remove_chanctx,
	.change_chanctx = wlcore_op_change_chanctx,
	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
	.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
	.sta_rc_update = wlcore_op_sta_rc_update,
	.sta_statistics = wlcore_op_sta_statistics,
	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};

u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
{
	u8 idx;

	BUG_ON(band >= 2);

	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
		wl1271_error("Illegal RX rate from HW: %d", rate);
		return 0;
	}

	idx = wl->band_rate_to_idx[band][rate];
	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
		wl1271_error("Unsupported RX rate from HW: %d", rate);
		return 0;
	}

	return idx;
}

static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
{
	int i;

	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
		     oui, nic);

	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
		wl1271_warning("NIC part of the MAC address wraps around!");

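	/*
	 * Worked example (hypothetical values): oui 0x080028 and nic
	 * 0x000001 yield the address 08:00:28:00:00:01; each further
	 * address increments only the NIC part (08:00:28:00:00:02, ...).
	 */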
	for (i = 0; i < wl->num_mac_addr; i++) {
		wl->addresses[i].addr[0] = (u8)(oui >> 16);
		wl->addresses[i].addr[1] = (u8)(oui >> 8);
		wl->addresses[i].addr[2] = (u8) oui;
		wl->addresses[i].addr[3] = (u8)(nic >> 16);
		wl->addresses[i].addr[4] = (u8)(nic >> 8);
		wl->addresses[i].addr[5] = (u8) nic;
		nic++;
	}

	/* at most, we may be one address short */
	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);

	/*
	 * turn on the LAA bit in the first address and use it as
	 * the last address.
	 */
	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
		memcpy(&wl->addresses[idx], &wl->addresses[0],
		       sizeof(wl->addresses[0]));
		/* set the locally administered (LAA) bit, 0x02 in octet 0 */
		wl->addresses[idx].addr[0] |= BIT(1);
	}

	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
	wl->hw->wiphy->addresses = wl->addresses;
}

static int wl12xx_get_hw_info(struct wl1271 *wl)
{
	int ret;

	ret = wl12xx_set_power_on(wl);
	if (ret < 0)
		return ret;

	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
	if (ret < 0)
		goto out;

	wl->fuse_oui_addr = 0;
	wl->fuse_nic_addr = 0;

	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
	if (ret < 0)
		goto out;

	if (wl->ops->get_mac)
		ret = wl->ops->get_mac(wl);

out:
	wl1271_power_off(wl);
	return ret;
}

static int wl1271_register_hw(struct wl1271 *wl)
{
	int ret;
	u32 oui_addr = 0, nic_addr = 0;

	if (wl->mac80211_registered)
		return 0;

	if (wl->nvs_len >= 12) {
		/* NOTE: in order to simplify the casting, we assume that
		 * the wl->nvs->nvs element sits at the very beginning of
		 * the wl->nvs structure.
		 */
		u8 *nvs_ptr = (u8 *)wl->nvs;

		oui_addr =
			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
		nic_addr =
			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
	}

	/* if the MAC address is zeroed in the NVS, derive it from the fuse */
	if (oui_addr == 0 && nic_addr == 0) {
		oui_addr = wl->fuse_oui_addr;
		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
		nic_addr = wl->fuse_nic_addr + 1;
	}

	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);

	ret = ieee80211_register_hw(wl->hw);
	if (ret < 0) {
		wl1271_error("unable to register mac80211 hw: %d", ret);
		goto out;
	}

	wl->mac80211_registered = true;

	wl1271_debugfs_init(wl);

	wl1271_notice("loaded");

out:
	return ret;
}

static void wl1271_unregister_hw(struct wl1271 *wl)
{
	if (wl->plt)
		wl1271_plt_stop(wl);

	ieee80211_unregister_hw(wl->hw);
	wl->mac80211_registered = false;
}

static int wl1271_init_ieee80211(struct wl1271 *wl)
{
	int i;
	static const u32 cipher_suites[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,
		WL1271_CIPHER_SUITE_GEM,
	};

	/* The tx descriptor buffer */
	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);

	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;

	/* units of beacon interval */
	/* FIXME: find a proper value */
	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;

	wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
		IEEE80211_HW_SUPPORTS_PS |
		IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
		IEEE80211_HW_HAS_RATE_CONTROL |
		IEEE80211_HW_CONNECTION_MONITOR |
		IEEE80211_HW_REPORTS_TX_ACK_STATUS |
		IEEE80211_HW_SPECTRUM_MGMT |
		IEEE80211_HW_AP_LINK_PS |
		IEEE80211_HW_AMPDU_AGGREGATION |
		IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
		IEEE80211_HW_QUEUE_CONTROL |
		IEEE80211_HW_CHANCTX_STA_CSA;

	wl->hw->wiphy->cipher_suites = cipher_suites;
	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);

	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
	wl->hw->wiphy->max_scan_ssids = 1;
	wl->hw->wiphy->max_sched_scan_ssids = 16;
	wl->hw->wiphy->max_match_sets = 16;
	/*
	 * The maximum length of the IEs in a scanning probe request
	 * template is the maximum possible template size minus the
	 * IEEE 802.11 header of the template.
	 */
	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
			sizeof(struct ieee80211_header);

	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
		sizeof(struct ieee80211_header);

	wl->hw->wiphy->max_remain_on_channel_duration = 30000;

	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
				WIPHY_FLAG_SUPPORTS_SCHED_SCAN |
				WIPHY_FLAG_HAS_CHANNEL_SWITCH;

	/* make sure all our channels fit in the scanned_ch bitmask */
	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
		     ARRAY_SIZE(wl1271_channels_5ghz) >
		     WL1271_MAX_CHANNELS);
	/*
	 * clear channel flags from the previous usage
	 * and restore max_power & max_antenna_gain values.
	 */
	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
		wl1271_band_2ghz.channels[i].flags = 0;
		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
	}

	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
		wl1271_band_5ghz.channels[i].flags = 0;
		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
	}

	/*
	 * We keep local copies of the band structs because we need to
	 * modify them on a per-device basis.
	 */
	memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
	       sizeof(wl1271_band_2ghz));
	memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
	       &wl->ht_cap[IEEE80211_BAND_2GHZ],
	       sizeof(*wl->ht_cap));
	memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
	       sizeof(wl1271_band_5ghz));
	memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
	       &wl->ht_cap[IEEE80211_BAND_5GHZ],
	       sizeof(*wl->ht_cap));

	wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
		&wl->bands[IEEE80211_BAND_2GHZ];
	wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
		&wl->bands[IEEE80211_BAND_5GHZ];

	/*
	 * allow 4 queues per mac address we support +
	 * 1 cab queue per mac + one global offchannel Tx queue
	 */
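	/*
	 * For example, if NUM_TX_QUEUES is 4 and WLCORE_NUM_MAC_ADDRESSES
	 * is 3 (their usual wlcore values), this evaluates to
	 * (4 + 1) * 3 + 1 = 16 hardware queues.
	 */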
	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;

	/* the last queue is the offchannel queue */
	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
	wl->hw->max_rates = 1;

	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;

	/* the FW answers probe-requests in AP-mode */
	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
	wl->hw->wiphy->probe_resp_offload =
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;

	/* allowed interface combinations */
	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;

	/* register vendor commands */
	wlcore_set_vendor_commands(wl->hw->wiphy);

	SET_IEEE80211_DEV(wl->hw, wl->dev);

	wl->hw->sta_data_size = sizeof(struct wl1271_station);
	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);

	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;

	return 0;
}

struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
				     u32 mbox_size)
{
	struct ieee80211_hw *hw;
	struct wl1271 *wl;
	int i, j, ret;
	unsigned int order;

	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
	if (!hw) {
		wl1271_error("could not alloc ieee80211_hw");
		ret = -ENOMEM;
		goto err_hw_alloc;
	}

	wl = hw->priv;
	memset(wl, 0, sizeof(*wl));

	wl->priv = kzalloc(priv_size, GFP_KERNEL);
	if (!wl->priv) {
		wl1271_error("could not alloc wl priv");
		ret = -ENOMEM;
		goto err_priv_alloc;
	}

	INIT_LIST_HEAD(&wl->wlvif_list);

	wl->hw = hw;

	/*
	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
	 * We don't allocate any additional resources here, so that's fine.
	 */
	for (i = 0; i < NUM_TX_QUEUES; i++)
		for (j = 0; j < WLCORE_MAX_LINKS; j++)
			skb_queue_head_init(&wl->links[j].tx_queue[i]);

	skb_queue_head_init(&wl->deferred_rx_queue);
	skb_queue_head_init(&wl->deferred_tx_queue);

	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
	INIT_WORK(&wl->tx_work, wl1271_tx_work);
	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);

	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
	if (!wl->freezable_wq) {
		ret = -ENOMEM;
		goto err_hw;
	}

	wl->channel = 0;
	wl->rx_counter = 0;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->band = IEEE80211_BAND_2GHZ;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->flags = 0;
	wl->sg_enabled = true;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	wl->recovery_count = 0;
	wl->hw_pg_ver = -1;
	wl->ap_ps_map = 0;
	wl->ap_fw_ps_map = 0;
	wl->quirks = 0;
	wl->system_hlid = WL12XX_SYSTEM_HLID;
	wl->active_sta_count = 0;
	wl->active_link_count = 0;
	wl->fwlog_size = 0;
	init_waitqueue_head(&wl->fwlog_waitq);

	/* The system link is always allocated */
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
	for (i = 0; i < wl->num_tx_desc; i++)
		wl->tx_frames[i] = NULL;

	spin_lock_init(&wl->wl_lock);

	wl->state = WLCORE_STATE_OFF;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	mutex_init(&wl->mutex);
	mutex_init(&wl->flush_mutex);
	init_completion(&wl->nvs_loading_complete);

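	/*
	 * The aggregation buffer is rounded up to whole pages; e.g. a
	 * 64 KiB request on a 4 KiB-page system needs order 4 (16 pages).
	 */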
	order = get_order(aggr_buf_size);
	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
	if (!wl->aggr_buf) {
		ret = -ENOMEM;
		goto err_wq;
	}
	wl->aggr_buf_size = aggr_buf_size;

	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
	if (!wl->dummy_packet) {
		ret = -ENOMEM;
		goto err_aggr;
	}

	/* Allocate one page for the FW log */
	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
	if (!wl->fwlog) {
		ret = -ENOMEM;
		goto err_dummy_packet;
	}

	wl->mbox_size = mbox_size;
	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
	if (!wl->mbox) {
		ret = -ENOMEM;
		goto err_fwlog;
	}

	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
	if (!wl->buffer_32) {
		ret = -ENOMEM;
		goto err_mbox;
	}

	return hw;

err_mbox:
	kfree(wl->mbox);

err_fwlog:
	free_page((unsigned long)wl->fwlog);

err_dummy_packet:
	dev_kfree_skb(wl->dummy_packet);

err_aggr:
	free_pages((unsigned long)wl->aggr_buf, order);

err_wq:
	destroy_workqueue(wl->freezable_wq);

err_hw:
	wl1271_debugfs_exit(wl);
	kfree(wl->priv);

err_priv_alloc:
	ieee80211_free_hw(hw);

err_hw_alloc:

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(wlcore_alloc_hw);

int wlcore_free_hw(struct wl1271 *wl)
{
	/* Unblock any fwlog readers */
	mutex_lock(&wl->mutex);
	wl->fwlog_size = -1;
	wake_up_interruptible_all(&wl->fwlog_waitq);
	mutex_unlock(&wl->mutex);

	wlcore_sysfs_free(wl);

	kfree(wl->buffer_32);
	kfree(wl->mbox);
	free_page((unsigned long)wl->fwlog);
	dev_kfree_skb(wl->dummy_packet);
	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));

	wl1271_debugfs_exit(wl);

	vfree(wl->fw);
	wl->fw = NULL;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	kfree(wl->nvs);
	wl->nvs = NULL;

	kfree(wl->raw_fw_status);
	kfree(wl->fw_status);
	kfree(wl->tx_res_if);
	destroy_workqueue(wl->freezable_wq);

	kfree(wl->priv);
	ieee80211_free_hw(wl->hw);

	return 0;
}
EXPORT_SYMBOL_GPL(wlcore_free_hw);

#ifdef CONFIG_PM
static const struct wiphy_wowlan_support wlcore_wowlan_support = {
	.flags = WIPHY_WOWLAN_ANY,
	.n_patterns = WL1271_MAX_RX_FILTERS,
	.pattern_min_len = 1,
	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
};
#endif

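/*
 * Dummy primary handler: the real work happens in the threaded handler
 * (wlcore_irq); this only tells the core to wake that thread.
 */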
static irqreturn_t wlcore_hardirq(int irq, void *cookie)
{
	return IRQ_WAKE_THREAD;
}

static void wlcore_nvs_cb(const struct firmware *fw, void *context)
{
	struct wl1271 *wl = context;
	struct platform_device *pdev = wl->pdev;
	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
	struct resource *res;

	int ret;
	irq_handler_t hardirq_fn = NULL;

	if (fw) {
		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
		if (!wl->nvs) {
			wl1271_error("Could not allocate nvs data");
			goto out;
		}
		wl->nvs_len = fw->size;
	} else {
		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
			     WL12XX_NVS_NAME);
		wl->nvs = NULL;
		wl->nvs_len = 0;
	}

	ret = wl->ops->setup(wl);
	if (ret < 0)
		goto out_free_nvs;

	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);

	/* adjust some runtime configuration parameters */
	wlcore_adjust_conf(wl);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		wl1271_error("Could not get IRQ resource");
		goto out_free_nvs;
	}

	wl->irq = res->start;
	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
	wl->if_ops = pdev_data->if_ops;

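	/*
	 * For edge-triggered interrupts install the dummy primary handler
	 * above, so that edges are not missed while the line would
	 * otherwise stay masked; level-triggered lines can simply use a
	 * oneshot threaded IRQ.
	 */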
	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
		hardirq_fn = wlcore_hardirq;
	else
		wl->irq_flags |= IRQF_ONESHOT;

	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
				   wl->irq_flags, pdev->name, wl);
	if (ret < 0) {
		wl1271_error("request_irq() failed: %d", ret);
		goto out_free_nvs;
	}

#ifdef CONFIG_PM
	ret = enable_irq_wake(wl->irq);
	if (!ret) {
		wl->irq_wake_enabled = true;
		device_init_wakeup(wl->dev, 1);
		if (pdev_data->pwr_in_suspend)
			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
	}
#endif
	disable_irq(wl->irq);

	ret = wl12xx_get_hw_info(wl);
	if (ret < 0) {
		wl1271_error("couldn't get hw info");
		goto out_irq;
	}

	ret = wl->ops->identify_chip(wl);
	if (ret < 0)
		goto out_irq;

	ret = wl1271_init_ieee80211(wl);
	if (ret)
		goto out_irq;

	ret = wl1271_register_hw(wl);
	if (ret)
		goto out_irq;

	ret = wlcore_sysfs_init(wl);
	if (ret)
		goto out_unreg;

	wl->initialized = true;
	goto out;

out_unreg:
	wl1271_unregister_hw(wl);

out_irq:
	free_irq(wl->irq, wl);

out_free_nvs:
	kfree(wl->nvs);

out:
	release_firmware(fw);
	complete_all(&wl->nvs_loading_complete);
}

int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
{
	int ret;

	if (!wl->ops || !wl->ptable)
		return -EINVAL;

	wl->dev = &pdev->dev;
	wl->pdev = pdev;
	platform_set_drvdata(pdev, wl);

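	/*
	 * The NVS file is requested asynchronously; initialization
	 * continues in wlcore_nvs_cb() once the firmware loader calls back.
	 */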
	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				      WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
				      wl, wlcore_nvs_cb);
	if (ret < 0) {
		wl1271_error("request_firmware_nowait failed: %d", ret);
		complete_all(&wl->nvs_loading_complete);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(wlcore_probe);

int wlcore_remove(struct platform_device *pdev)
{
	struct wl1271 *wl = platform_get_drvdata(pdev);

	wait_for_completion(&wl->nvs_loading_complete);
	if (!wl->initialized)
		return 0;

	if (wl->irq_wake_enabled) {
		device_init_wakeup(wl->dev, 0);
		disable_irq_wake(wl->irq);
	}
	wl1271_unregister_hw(wl);
	free_irq(wl->irq, wl);
	wlcore_free_hw(wl);

	return 0;
}
EXPORT_SYMBOL_GPL(wlcore_remove);

u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");

module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
		 "FW logger options: continuous, ondemand, dbgpins or disable");

module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");

module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");

module_param(no_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
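
/*
 * Example (hypothetical values): the parameters above can be set at load
 * time, e.g. "modprobe wlcore debug_level=0x3 fwlog=continuous", or via
 * /sys/module/wlcore/parameters/ for the writable ones.
 */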

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL12XX_NVS_NAME);