/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>

#include "core.h"
#include "bus.h"
#include "debug.h"
#include "fwil_types.h"
#include "p2p.h"
#include "cfg80211.h"
#include "fwil.h"
#include "fwsignal.h"
#include "feature.h"
#include "proto.h"
#include "pcie.h"

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
MODULE_LICENSE("Dual BSD/GPL");

#define MAX_WAIT_FOR_8021X_TX		50	/* msecs */

/* AMPDU rx reordering definitions */
#define BRCMF_RXREORDER_FLOWID_OFFSET		0
#define BRCMF_RXREORDER_MAXIDX_OFFSET		2
#define BRCMF_RXREORDER_FLAGS_OFFSET		4
#define BRCMF_RXREORDER_CURIDX_OFFSET		6
#define BRCMF_RXREORDER_EXPIDX_OFFSET		8

#define BRCMF_RXREORDER_DEL_FLOW		0x01
#define BRCMF_RXREORDER_FLUSH_ALL		0x02
#define BRCMF_RXREORDER_CURIDX_VALID		0x04
#define BRCMF_RXREORDER_EXPIDX_VALID		0x08
#define BRCMF_RXREORDER_NEW_HOLE		0x10

/* Error bits */
int brcmf_msg_level;
module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug, "level of debug output");

/* P2P0 enable */
static int brcmf_p2p_enable;
#ifdef CONFIG_BRCMDBG
module_param_named(p2pon, brcmf_p2p_enable, int, 0);
MODULE_PARM_DESC(p2pon, "enable p2p management functionality");
#endif

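/* Return the net_device name for interface index @ifidx, or a placeholder
 * string when the index is out of range, the interface slot is empty, or no
 * net_device has been attached yet.
 */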
char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
{
	if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
		brcmf_err("ifidx %d out of range\n", ifidx);
		return "<if_bad>";
	}

	if (drvr->iflist[ifidx] == NULL) {
		brcmf_err("null i/f %d\n", ifidx);
		return "<if_null>";
	}

	if (drvr->iflist[ifidx]->ndev)
		return drvr->iflist[ifidx]->ndev->name;

	return "<if_none>";
}

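/* Work handler that pushes the host multicast configuration to the firmware:
 * the multicast address list ("mcast_list"), the allmulti setting and the
 * promiscuous mode flag. Scheduled via ifp->multicast_work.
 */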
static void _brcmf_set_multicast_list(struct work_struct *work)
{
	struct brcmf_if *ifp;
	struct net_device *ndev;
	struct netdev_hw_addr *ha;
	u32 cmd_value, cnt;
	__le32 cnt_le;
	char *buf, *bufp;
	u32 buflen;
	s32 err;

	ifp = container_of(work, struct brcmf_if, multicast_work);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	ndev = ifp->ndev;

	/* Determine initial value of allmulti flag */
	cmd_value = (ndev->flags & IFF_ALLMULTI) ? true : false;

	/* Send down the multicast list first. */
	cnt = netdev_mc_count(ndev);
	buflen = sizeof(cnt) + (cnt * ETH_ALEN);
	buf = kmalloc(buflen, GFP_ATOMIC);
	if (!buf)
		return;
	bufp = buf;

	cnt_le = cpu_to_le32(cnt);
	memcpy(bufp, &cnt_le, sizeof(cnt_le));
	bufp += sizeof(cnt_le);

	netdev_for_each_mc_addr(ha, ndev) {
		if (!cnt)
			break;
		memcpy(bufp, ha->addr, ETH_ALEN);
		bufp += ETH_ALEN;
		cnt--;
	}

	err = brcmf_fil_iovar_data_set(ifp, "mcast_list", buf, buflen);
	if (err < 0) {
		brcmf_err("Setting mcast_list failed, %d\n", err);
		cmd_value = cnt ? true : cmd_value;
	}

	kfree(buf);

	/*
	 * Now send the allmulti setting.  This is based on the setting in the
	 * net_device flags, but might be modified above to be turned on if we
	 * were trying to set some addresses and dongle rejected it...
	 */
	err = brcmf_fil_iovar_int_set(ifp, "allmulti", cmd_value);
	if (err < 0)
		brcmf_err("Setting allmulti failed, %d\n", err);

	/* Finally, pick up the PROMISC flag */
	cmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value);
	if (err < 0)
		brcmf_err("Setting BRCMF_C_SET_PROMISC failed, %d\n",
			  err);
}

static void
_brcmf_set_mac_address(struct work_struct *work)
{
	struct brcmf_if *ifp;
	s32 err;

	ifp = container_of(work, struct brcmf_if, setmacaddr_work);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
				       ETH_ALEN);
	if (err < 0) {
		brcmf_err("Setting cur_etheraddr failed, %d\n", err);
	} else {
		brcmf_dbg(TRACE, "MAC address updated to %pM\n",
			  ifp->mac_addr);
		memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
	}
}

static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
{
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct sockaddr *sa = (struct sockaddr *)addr;

	memcpy(&ifp->mac_addr, sa->sa_data, ETH_ALEN);
	schedule_work(&ifp->setmacaddr_work);
	return 0;
}

static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);

	schedule_work(&ifp->multicast_work);
}

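/* .ndo_start_xmit handler for the primary interface. Drops the frame when
 * the bus is down or the interface is gone, reallocates headroom for the
 * bus/protocol header when needed, tracks pending 802.1X (EAPOL) frames and
 * hands the packet to the firmware-signalling layer for transmission.
 */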
static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
					   struct net_device *ndev)
{
	int ret;
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct brcmf_pub *drvr = ifp->drvr;
	struct ethhdr *eh = (struct ethhdr *)(skb->data);

	brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);

	/* Can the device send data? */
	if (drvr->bus_if->state != BRCMF_BUS_UP) {
		brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state);
		netif_stop_queue(ndev);
		dev_kfree_skb(skb);
		ret = -ENODEV;
		goto done;
	}

	if (!drvr->iflist[ifp->bssidx]) {
		brcmf_err("bad ifidx %d\n", ifp->bssidx);
		netif_stop_queue(ndev);
		dev_kfree_skb(skb);
		ret = -ENODEV;
		goto done;
	}

	/* Make sure there's enough room for any header */
	if (skb_headroom(skb) < drvr->hdrlen) {
		struct sk_buff *skb2;

		brcmf_dbg(INFO, "%s: insufficient headroom\n",
			  brcmf_ifname(drvr, ifp->bssidx));
		drvr->bus_if->tx_realloc++;
		skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
		dev_kfree_skb(skb);
		skb = skb2;
		if (skb == NULL) {
			brcmf_err("%s: skb_realloc_headroom failed\n",
				  brcmf_ifname(drvr, ifp->bssidx));
			ret = -ENOMEM;
			goto done;
		}
	}

	/* validate length for ether packet */
	if (skb->len < sizeof(*eh)) {
		ret = -EINVAL;
		dev_kfree_skb(skb);
		goto done;
	}

	if (eh->h_proto == htons(ETH_P_PAE))
		atomic_inc(&ifp->pend_8021x_cnt);

	ret = brcmf_fws_process_skb(ifp, skb);

done:
	if (ret) {
		ifp->stats.tx_dropped++;
	} else {
		ifp->stats.tx_packets++;
		ifp->stats.tx_bytes += skb->len;
	}

	/* Return ok: we always eat the packet */
	return NETDEV_TX_OK;
}

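/* Stop or wake the netif queue of a single interface for the given reason
 * bit. The queue is only woken again once all stop reasons have cleared.
 */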
void brcmf_txflowblock_if(struct brcmf_if *ifp,
			  enum brcmf_netif_stop_reason reason, bool state)
{
	unsigned long flags;

	if (!ifp || !ifp->ndev)
		return;

	brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
		  ifp->bssidx, ifp->netif_stop, reason, state);

	spin_lock_irqsave(&ifp->netif_stop_lock, flags);
	if (state) {
		if (!ifp->netif_stop)
			netif_stop_queue(ifp->ndev);
		ifp->netif_stop |= reason;
	} else {
		ifp->netif_stop &= ~reason;
		if (!ifp->netif_stop)
			netif_wake_queue(ifp->ndev);
	}
	spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
}

void brcmf_txflowblock(struct device *dev, bool state)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	brcmf_dbg(TRACE, "Enter\n");

	brcmf_fws_bus_blocked(drvr, state);
}

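/* Deliver a received frame to the network stack. Firmware event packets are
 * extracted first; frames arriving while the interface is down are dropped.
 */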
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
{
	skb->dev = ifp->ndev;
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (skb->pkt_type == PACKET_MULTICAST)
		ifp->stats.multicast++;

	/* Process special event packets */
	brcmf_fweh_process_skb(ifp->drvr, skb);

	if (!(ifp->ndev->flags & IFF_UP)) {
		brcmu_pkt_buf_free_skb(skb);
		return;
	}

	ifp->stats.rx_bytes += skb->len;
	ifp->stats.rx_packets++;

	brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
	if (in_interrupt())
		netif_rx(skb);
	else
		/* If the receive is not processed inside an ISR,
		 * the softirqd must be woken explicitly to service
		 * the NET_RX_SOFTIRQ.  This is handled by netif_rx_ni().
		 */
		netif_rx_ni(skb);
}

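/* Collect packets buffered in the circular slot array of a reorder flow,
 * from index @start up to (but not including) @end, into @skb_list; when
 * @start equals @end the entire ring is drained.
 */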
static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
					 u8 start, u8 end,
					 struct sk_buff_head *skb_list)
{
	/* initialize return list */
	__skb_queue_head_init(skb_list);

	if (rfi->pend_pkts == 0) {
		brcmf_dbg(INFO, "no packets in reorder queue\n");
		return;
	}

	do {
		if (rfi->pktslots[start]) {
			__skb_queue_tail(skb_list, rfi->pktslots[start]);
			rfi->pktslots[start] = NULL;
		}
		start++;
		if (start > rfi->max_idx)
			start = 0;
	} while (start != end);
	rfi->pend_pkts -= skb_queue_len(skb_list);
}

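/* Handle AMPDU rx reordering metadata attached to a received packet. The
 * reorder_data bytes carry the flow id, maximum slot index, flags and the
 * current/expected sequence indices (see the BRCMF_RXREORDER_*_OFFSET
 * defines above). Depending on the flags this creates or deletes a reorder
 * flow, buffers the packet in its slot, or flushes in-order packets out to
 * brcmf_netif_rx().
 */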
static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
					 struct sk_buff *pkt)
{
	u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
	struct brcmf_ampdu_rx_reorder *rfi;
	struct sk_buff_head reorder_list;
	struct sk_buff *pnext;
	u8 flags;
	u32 buf_size;

	flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
	flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];

	/* validate flags and flow id */
	if (flags == 0xFF) {
		brcmf_err("invalid flags...so ignore this packet\n");
		brcmf_netif_rx(ifp, pkt);
		return;
	}

	rfi = ifp->drvr->reorder_flows[flow_id];
	if (flags & BRCMF_RXREORDER_DEL_FLOW) {
		brcmf_dbg(INFO, "flow-%d: delete\n",
			  flow_id);

		if (rfi == NULL) {
			brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
				  flow_id);
			brcmf_netif_rx(ifp, pkt);
			return;
		}

		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
					     &reorder_list);
		/* add the last packet */
		__skb_queue_tail(&reorder_list, pkt);
		kfree(rfi);
		ifp->drvr->reorder_flows[flow_id] = NULL;
		goto netif_rx;
	}
	/* from here on we need a flow reorder instance */
	if (rfi == NULL) {
		buf_size = sizeof(*rfi);
		max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];

		buf_size += (max_idx + 1) * sizeof(pkt);

		/* allocate space for flow reorder info */
		brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
			  flow_id, max_idx);
		rfi = kzalloc(buf_size, GFP_ATOMIC);
		if (rfi == NULL) {
			brcmf_err("failed to alloc buffer\n");
			brcmf_netif_rx(ifp, pkt);
			return;
		}

		ifp->drvr->reorder_flows[flow_id] = rfi;
		rfi->pktslots = (struct sk_buff **)(rfi + 1);
		rfi->max_idx = max_idx;
	}
	if (flags & BRCMF_RXREORDER_NEW_HOLE) {
		if (rfi->pend_pkts) {
			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
						     rfi->exp_idx,
						     &reorder_list);
			WARN_ON(rfi->pend_pkts);
		} else {
			__skb_queue_head_init(&reorder_list);
		}
		rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
		rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
		rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
		rfi->pktslots[rfi->cur_idx] = pkt;
		rfi->pend_pkts++;
		brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
			  flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
	} else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
		cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

		if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
			/* still in the current hole */
			/* enqueue the current on the buffer chain */
			if (rfi->pktslots[cur_idx] != NULL) {
				brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
				rfi->pktslots[cur_idx] = NULL;
			}
			rfi->pktslots[cur_idx] = pkt;
			rfi->pend_pkts++;
			rfi->cur_idx = cur_idx;
			brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

			/* can return now as there is no reorder
			 * list to process.
			 */
			return;
		}
		if (rfi->exp_idx == cur_idx) {
			if (rfi->pktslots[cur_idx] != NULL) {
				brcmf_dbg(INFO, "error buffer pending..free it\n");
				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
				rfi->pktslots[cur_idx] = NULL;
			}
			rfi->pktslots[cur_idx] = pkt;
			rfi->pend_pkts++;

			/* got the expected one. flush from current to expected
			 * and update expected
			 */
			brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

			rfi->cur_idx = cur_idx;
			rfi->exp_idx = exp_idx;

			brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
						     &reorder_list);
			brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
				  flow_id, skb_queue_len(&reorder_list),
				  rfi->pend_pkts);
		} else {
			brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
				  flow_id, flags, rfi->cur_idx, rfi->exp_idx,
				  cur_idx, exp_idx);
			if (flags & BRCMF_RXREORDER_FLUSH_ALL)
				end_idx = rfi->exp_idx;
			else
				end_idx = exp_idx;

			/* flush pkts first */
			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
						     &reorder_list);

			if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
				__skb_queue_tail(&reorder_list, pkt);
			} else {
				rfi->pktslots[cur_idx] = pkt;
				rfi->pend_pkts++;
			}
			rfi->exp_idx = exp_idx;
			rfi->cur_idx = cur_idx;
		}
	} else {
		/* explicit window move updating the expected index */
		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

		brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
			  flow_id, flags, rfi->exp_idx, exp_idx);
		if (flags & BRCMF_RXREORDER_FLUSH_ALL)
			end_idx = rfi->exp_idx;
		else
			end_idx = exp_idx;

		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
					     &reorder_list);
		__skb_queue_tail(&reorder_list, pkt);
		/* set the new expected idx */
		rfi->exp_idx = exp_idx;
	}
netif_rx:
	skb_queue_walk_safe(&reorder_list, pkt, pnext) {
		__skb_unlink(pkt, &reorder_list);
		brcmf_netif_rx(ifp, pkt);
	}
}

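/* Bus receive entry point: strip the protocol header to find the interface,
 * then either run the frame through AMPDU rx reordering (when reorder data
 * is attached to the skb) or pass it straight to brcmf_netif_rx().
 */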
void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
{
	struct brcmf_if *ifp;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_skb_reorder_data *rd;
	u8 ifidx;
	int ret;

	brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);

	/* process and remove protocol-specific header */
	ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
	ifp = drvr->iflist[ifidx];

	if (ret || !ifp || !ifp->ndev) {
		if ((ret != -ENODATA) && ifp)
			ifp->stats.rx_errors++;
		brcmu_pkt_buf_free_skb(skb);
		return;
	}

	rd = (struct brcmf_skb_reorder_data *)skb->cb;
	if (rd->reorder)
		brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
	else
		brcmf_netif_rx(ifp, skb);
}

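/* Complete transmission of a frame: account pending 802.1X frames, update
 * error statistics on failure and free the packet buffer.
 */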
void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
		      bool success)
{
	struct brcmf_if *ifp;
	struct ethhdr *eh;
	u16 type;

	ifp = drvr->iflist[ifidx];
	if (!ifp)
		goto done;

	eh = (struct ethhdr *)(txp->data);
	type = ntohs(eh->h_proto);

	if (type == ETH_P_PAE) {
		atomic_dec(&ifp->pend_8021x_cnt);
		if (waitqueue_active(&ifp->pend_8021x_wait))
			wake_up(&ifp->pend_8021x_wait);
	}

	if (!success)
		ifp->stats.tx_errors++;
done:
	brcmu_pkt_buf_free_skb(txp);
}

void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	u8 ifidx;

	/* await txstatus signal for firmware if active */
	if (brcmf_fws_fc_active(drvr->fws)) {
		if (!success)
			brcmf_fws_bustxfail(drvr->fws, txp);
	} else {
		if (brcmf_proto_hdrpull(drvr, false, &ifidx, txp))
			brcmu_pkt_buf_free_skb(txp);
		else
			brcmf_txfinalize(drvr, txp, ifidx, success);
	}
}

static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	return &ifp->stats;
}

static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
				    struct ethtool_drvinfo *info)
{
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct brcmf_pub *drvr = ifp->drvr;
	char drev[BRCMU_DOTREV_LEN] = "n/a";

	if (drvr->revinfo.result == 0)
		brcmu_dotrev_str(drvr->revinfo.driverrev, drev);
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, drev, sizeof(info->version));
	strlcpy(info->fw_version, drvr->fwver, sizeof(info->fw_version));
	strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
		sizeof(info->bus_info));
}

static const struct ethtool_ops brcmf_ethtool_ops = {
	.get_drvinfo = brcmf_ethtool_get_drvinfo,
};

static int brcmf_netdev_stop(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	brcmf_cfg80211_down(ndev);

	/* Set state and stop OS transmissions */
	netif_stop_queue(ndev);

	return 0;
}

static int brcmf_netdev_open(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct brcmf_pub *drvr = ifp->drvr;
	struct brcmf_bus *bus_if = drvr->bus_if;
	u32 toe_ol;

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	/* If bus is not ready, can't continue */
	if (bus_if->state != BRCMF_BUS_UP) {
		brcmf_err("failed bus is not ready\n");
		return -EAGAIN;
	}

	atomic_set(&ifp->pend_8021x_cnt, 0);

	/* Get current TOE mode from dongle */
	if (brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0
	    && (toe_ol & TOE_TX_CSUM_OL) != 0)
		ndev->features |= NETIF_F_IP_CSUM;
	else
		ndev->features &= ~NETIF_F_IP_CSUM;

	if (brcmf_cfg80211_up(ndev)) {
		brcmf_err("failed to bring up cfg80211\n");
		return -EIO;
	}

	/* Allow transmit calls */
	netif_start_queue(ndev);
	return 0;
}

static const struct net_device_ops brcmf_netdev_ops_pri = {
	.ndo_open = brcmf_netdev_open,
	.ndo_stop = brcmf_netdev_stop,
	.ndo_get_stats = brcmf_netdev_get_stats,
	.ndo_start_xmit = brcmf_netdev_start_xmit,
	.ndo_set_mac_address = brcmf_netdev_set_mac_address,
	.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
};

int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
{
	struct brcmf_pub *drvr = ifp->drvr;
	struct net_device *ndev;
	s32 err;

	brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
		  ifp->mac_addr);
	ndev = ifp->ndev;

	/* set appropriate operations */
	ndev->netdev_ops = &brcmf_netdev_ops_pri;

	ndev->hard_header_len += drvr->hdrlen;
	ndev->ethtool_ops = &brcmf_ethtool_ops;

	drvr->rxsz = ndev->mtu + ndev->hard_header_len +
			      drvr->hdrlen;

	/* set the mac address */
	memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);

	INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
	INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);

	if (rtnl_locked)
		err = register_netdevice(ndev);
	else
		err = register_netdev(ndev);
	if (err != 0) {
		brcmf_err("couldn't register the net device\n");
		goto fail;
	}

	brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);

	ndev->destructor = brcmf_cfg80211_free_netdev;
	return 0;

fail:
	drvr->iflist[ifp->bssidx] = NULL;
	ndev->netdev_ops = NULL;
	free_netdev(ndev);
	return -EBADE;
}

static int brcmf_net_p2p_open(struct net_device *ndev)
{
	brcmf_dbg(TRACE, "Enter\n");

	return brcmf_cfg80211_up(ndev);
}

static int brcmf_net_p2p_stop(struct net_device *ndev)
{
	brcmf_dbg(TRACE, "Enter\n");

	return brcmf_cfg80211_down(ndev);
}

static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
					    struct net_device *ndev)
{
	if (skb)
		dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

static const struct net_device_ops brcmf_netdev_ops_p2p = {
	.ndo_open = brcmf_net_p2p_open,
	.ndo_stop = brcmf_net_p2p_stop,
	.ndo_start_xmit = brcmf_net_p2p_start_xmit
};

static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
{
	struct net_device *ndev;

	brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
		  ifp->mac_addr);
	ndev = ifp->ndev;

	ndev->netdev_ops = &brcmf_netdev_ops_p2p;

	/* set the mac address */
	memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);

	if (register_netdev(ndev) != 0) {
		brcmf_err("couldn't register the p2p net device\n");
		goto fail;
	}

	brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);

	return 0;

fail:
	ifp->drvr->iflist[ifp->bssidx] = NULL;
	ndev->netdev_ops = NULL;
	free_netdev(ndev);
	return -EBADE;
}

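/* Allocate and register a driver interface structure for the given bss/if
 * index pair. For the non-netdev P2P device a bare brcmf_if is allocated;
 * otherwise a net_device with an embedded brcmf_if is created. Any stale
 * interface occupying the slot is torn down first.
 */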
struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
			      char *name, u8 *mac_addr)
{
	struct brcmf_if *ifp;
	struct net_device *ndev;

	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifidx);

	ifp = drvr->iflist[bssidx];
	/*
	 * Delete the existing interface before overwriting it
	 * in case we missed the BRCMF_E_IF_DEL event.
	 */
	if (ifp) {
		brcmf_err("ERROR: netdev:%s already exists\n",
			  ifp->ndev->name);
		if (ifidx) {
			netif_stop_queue(ifp->ndev);
			unregister_netdev(ifp->ndev);
			free_netdev(ifp->ndev);
			drvr->iflist[bssidx] = NULL;
		} else {
			brcmf_err("ignore IF event\n");
			return ERR_PTR(-EINVAL);
		}
	}

	if (!brcmf_p2p_enable && bssidx == 1) {
		/* this is P2P_DEVICE interface */
		brcmf_dbg(INFO, "allocate non-netdev interface\n");
		ifp = kzalloc(sizeof(*ifp), GFP_KERNEL);
		if (!ifp)
			return ERR_PTR(-ENOMEM);
	} else {
		brcmf_dbg(INFO, "allocate netdev interface\n");
		/* Allocate netdev, including space for private structure */
		ndev = alloc_netdev(sizeof(*ifp), name, NET_NAME_UNKNOWN,
				    ether_setup);
		if (!ndev)
			return ERR_PTR(-ENOMEM);

		ifp = netdev_priv(ndev);
		ifp->ndev = ndev;
	}

	ifp->drvr = drvr;
	drvr->iflist[bssidx] = ifp;
	ifp->ifidx = ifidx;
	ifp->bssidx = bssidx;

	init_waitqueue_head(&ifp->pend_8021x_wait);
	spin_lock_init(&ifp->netif_stop_lock);

	if (mac_addr != NULL)
		memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);

	brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n",
		  current->pid, name, ifp->mac_addr);

	return ifp;
}

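/* Tear down the interface at the given bss index: stop its queue, cancel
 * pending work and unregister the net_device (which frees it), or free the
 * bare brcmf_if when no net_device was attached.
 */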
static void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
{
	struct brcmf_if *ifp;

	ifp = drvr->iflist[bssidx];
	drvr->iflist[bssidx] = NULL;
	if (!ifp) {
		brcmf_err("Null interface, idx=%d\n", bssidx);
		return;
	}
	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx);
	if (ifp->ndev) {
		if (bssidx == 0) {
			if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
				rtnl_lock();
				brcmf_netdev_stop(ifp->ndev);
				rtnl_unlock();
			}
		} else {
			netif_stop_queue(ifp->ndev);
		}

		if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
			cancel_work_sync(&ifp->setmacaddr_work);
			cancel_work_sync(&ifp->multicast_work);
		}
		/* unregister will take care of freeing it */
		unregister_netdev(ifp->ndev);
	} else {
		kfree(ifp);
	}
}

void brcmf_remove_interface(struct brcmf_pub *drvr, u32 bssidx)
{
	if (drvr->iflist[bssidx]) {
		brcmf_fws_del_interface(drvr->iflist[bssidx]);
		brcmf_del_if(drvr, bssidx);
	}
}

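/* Find the next free bsscfg index, starting at 2 since index 0 holds the
 * primary interface and index 1 the P2P interface (see brcmf_bus_start()).
 * Returns -ENOMEM when no interface slot is free.
 */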
int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr)
{
	int ifidx;
	int bsscfgidx;
	bool available;
	int highest;

	available = false;
	bsscfgidx = 2;
	highest = 2;
	for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++) {
		if (drvr->iflist[ifidx]) {
			if (drvr->iflist[ifidx]->bssidx == bsscfgidx)
				bsscfgidx = highest + 1;
			else if (drvr->iflist[ifidx]->bssidx > highest)
				highest = drvr->iflist[ifidx]->bssidx;
		} else {
			available = true;
		}
	}

	return available ? bsscfgidx : -ENOMEM;
}

int brcmf_attach(struct device *dev)
{
	struct brcmf_pub *drvr = NULL;
	int ret = 0;

	brcmf_dbg(TRACE, "Enter\n");

	/* Allocate primary brcmf_info */
	drvr = kzalloc(sizeof(struct brcmf_pub), GFP_ATOMIC);
	if (!drvr)
		return -ENOMEM;

	mutex_init(&drvr->proto_block);

	/* Link to bus module */
	drvr->hdrlen = 0;
	drvr->bus_if = dev_get_drvdata(dev);
	drvr->bus_if->drvr = drvr;

	/* create device debugfs folder */
	brcmf_debugfs_attach(drvr);

	/* Attach and link in the protocol */
	ret = brcmf_proto_attach(drvr);
	if (ret != 0) {
		brcmf_err("brcmf_prot_attach failed\n");
		goto fail;
	}

	/* attach firmware event handler */
	brcmf_fweh_attach(drvr);

	return ret;

fail:
	brcmf_detach(dev);

	return ret;
}

static int brcmf_revinfo_read(struct seq_file *s, void *data)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(s->private);
	struct brcmf_rev_info *ri = &bus_if->drvr->revinfo;
	char drev[BRCMU_DOTREV_LEN];
	char brev[BRCMU_BOARDREV_LEN];

	seq_printf(s, "vendorid: 0x%04x\n", ri->vendorid);
	seq_printf(s, "deviceid: 0x%04x\n", ri->deviceid);
	seq_printf(s, "radiorev: %s\n", brcmu_dotrev_str(ri->radiorev, drev));
	seq_printf(s, "chipnum: %u (%x)\n", ri->chipnum, ri->chipnum);
	seq_printf(s, "chiprev: %u\n", ri->chiprev);
	seq_printf(s, "chippkg: %u\n", ri->chippkg);
	seq_printf(s, "corerev: %u\n", ri->corerev);
	seq_printf(s, "boardid: 0x%04x\n", ri->boardid);
	seq_printf(s, "boardvendor: 0x%04x\n", ri->boardvendor);
	seq_printf(s, "boardrev: %s\n", brcmu_boardrev_str(ri->boardrev, brev));
	seq_printf(s, "driverrev: %s\n", brcmu_dotrev_str(ri->driverrev, drev));
	seq_printf(s, "ucoderev: %u\n", ri->ucoderev);
	seq_printf(s, "bus: %u\n", ri->bus);
	seq_printf(s, "phytype: %u\n", ri->phytype);
	seq_printf(s, "phyrev: %u\n", ri->phyrev);
	seq_printf(s, "anarev: %u\n", ri->anarev);
	seq_printf(s, "nvramrev: %08x\n", ri->nvramrev);

	return 0;
}

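/* Called by the bus layer once the device is operational: creates the
 * primary (and optionally P2P) interface, marks the bus up, runs the
 * preinit firmware commands, and brings up feature detection, firmware
 * signalling, cfg80211 and event handling before registering the netdev.
 * On failure everything set up so far is unwound.
 */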
int brcmf_bus_start(struct device *dev)
{
	int ret = -1;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_if *ifp;
	struct brcmf_if *p2p_ifp;

	brcmf_dbg(TRACE, "\n");

	/* add primary networking interface */
	ifp = brcmf_add_if(drvr, 0, 0, "wlan%d", NULL);
	if (IS_ERR(ifp))
		return PTR_ERR(ifp);

	if (brcmf_p2p_enable)
		p2p_ifp = brcmf_add_if(drvr, 1, 0, "p2p%d", NULL);
	else
		p2p_ifp = NULL;
	if (IS_ERR(p2p_ifp))
		p2p_ifp = NULL;

	/* signal bus ready */
	brcmf_bus_change_state(bus_if, BRCMF_BUS_UP);

	/* Bus is ready, do any initialization */
	ret = brcmf_c_preinit_dcmds(ifp);
	if (ret < 0)
		goto fail;

	brcmf_debugfs_add_entry(drvr, "revinfo", brcmf_revinfo_read);

	/* assure we have chipid before feature attach */
	if (!bus_if->chip) {
		bus_if->chip = drvr->revinfo.chipnum;
		bus_if->chiprev = drvr->revinfo.chiprev;
		brcmf_dbg(INFO, "firmware revinfo: chip %x (%d) rev %d\n",
			  bus_if->chip, bus_if->chip, bus_if->chiprev);
	}
	brcmf_feat_attach(drvr);

	ret = brcmf_fws_init(drvr);
	if (ret < 0)
		goto fail;

	brcmf_fws_add_interface(ifp);

	drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev);
	if (drvr->config == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = brcmf_fweh_activate_events(ifp);
	if (ret < 0)
		goto fail;

	ret = brcmf_net_attach(ifp, false);
fail:
	if (ret < 0) {
		brcmf_err("failed: %d\n", ret);
		brcmf_cfg80211_detach(drvr->config);
		if (drvr->fws) {
			brcmf_fws_del_interface(ifp);
			brcmf_fws_deinit(drvr);
		}
		if (drvr->iflist[0]) {
			free_netdev(ifp->ndev);
			drvr->iflist[0] = NULL;
		}
		if (p2p_ifp) {
			free_netdev(p2p_ifp->ndev);
			drvr->iflist[1] = NULL;
		}
		return ret;
	}
	if ((brcmf_p2p_enable) && (p2p_ifp))
		if (brcmf_net_p2p_attach(p2p_ifp) < 0)
			brcmf_p2p_enable = 0;

	return 0;
}

void brcmf_bus_add_txhdrlen(struct device *dev, uint len)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	if (drvr) {
		drvr->hdrlen += len;
	}
}

static void brcmf_bus_detach(struct brcmf_pub *drvr)
{
	brcmf_dbg(TRACE, "Enter\n");

	if (drvr) {
		/* Stop the bus module */
		brcmf_bus_stop(drvr->bus_if);
	}
}

void brcmf_dev_reset(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	if (drvr == NULL)
		return;

	if (drvr->iflist[0])
		brcmf_fil_cmd_int_set(drvr->iflist[0], BRCMF_C_TERMINATED, 1);
}

void brcmf_detach(struct device *dev)
{
	s32 i;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	brcmf_dbg(TRACE, "Enter\n");

	if (drvr == NULL)
		return;

	/* stop firmware event handling */
	brcmf_fweh_detach(drvr);

	brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN);

	/* make sure primary interface removed last */
	for (i = BRCMF_MAX_IFS - 1; i > -1; i--)
		brcmf_remove_interface(drvr, i);

	brcmf_cfg80211_detach(drvr->config);

	brcmf_fws_deinit(drvr);

	brcmf_bus_detach(drvr);

	brcmf_proto_detach(drvr);

	brcmf_debugfs_detach(drvr);
	bus_if->drvr = NULL;
	kfree(drvr);
}

s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_if *ifp = bus_if->drvr->iflist[0];

	return brcmf_fil_iovar_data_set(ifp, name, data, len);
}

static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
{
	return atomic_read(&ifp->pend_8021x_cnt);
}

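/* Wait up to MAX_WAIT_FOR_8021X_TX ms for all pending 802.1X frames to be
 * transmitted. Returns 0 when the pending count reached zero, non-zero on
 * timeout.
 */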
int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp)
{
	int err;

	err = wait_event_timeout(ifp->pend_8021x_wait,
				 !brcmf_get_pend_8021x_cnt(ifp),
				 msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX));

	WARN_ON(!err);

	return !err;
}

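/* Record the new bus state and, when the bus comes up, wake any netif
 * queues that were stopped while it was down.
 */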
void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state)
{
	struct brcmf_pub *drvr = bus->drvr;
	struct net_device *ndev;
	int ifidx;

	brcmf_dbg(TRACE, "%d -> %d\n", bus->state, state);
	bus->state = state;

	if (state == BRCMF_BUS_UP) {
		for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++) {
			if ((drvr->iflist[ifidx]) &&
			    (drvr->iflist[ifidx]->ndev)) {
				ndev = drvr->iflist[ifidx]->ndev;
				if (netif_queue_stopped(ndev))
					netif_wake_queue(ndev);
			}
		}
	}
}

static void brcmf_driver_register(struct work_struct *work)
{
#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_register();
#endif
#ifdef CONFIG_BRCMFMAC_USB
	brcmf_usb_register();
#endif
#ifdef CONFIG_BRCMFMAC_PCIE
	brcmf_pcie_register();
#endif
}
static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);

static int __init brcmfmac_module_init(void)
{
	brcmf_debugfs_init();
#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_init();
#endif
	if (!schedule_work(&brcmf_driver_work))
		return -EBUSY;

	return 0;
}

static void __exit brcmfmac_module_exit(void)
{
	cancel_work_sync(&brcmf_driver_work);

#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_exit();
#endif
#ifdef CONFIG_BRCMFMAC_USB
	brcmf_usb_exit();
#endif
#ifdef CONFIG_BRCMFMAC_PCIE
	brcmf_pcie_exit();
#endif
	brcmf_debugfs_exit();
}

module_init(brcmfmac_module_init);
module_exit(brcmfmac_module_exit);