/* Copyright (c) 2014 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*******************************************************************************
 * Communicates with the dongle by using dcmd codes.
 * For certain dcmd codes, the dongle interprets string data from the host.
 ******************************************************************************/

#include <linux/types.h>
#include <linux/netdevice.h>

#include <brcmu_utils.h>
#include <brcmu_wifi.h>

#include "core.h"
#include "debug.h"
#include "proto.h"
#include "msgbuf.h"
#include "commonring.h"
#include "flowring.h"
#include "bus.h"
#include "tracepoint.h"


#define MSGBUF_IOCTL_RESP_TIMEOUT		2000

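/* Message types exchanged with the dongle over the msgbuf rings */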
#define MSGBUF_TYPE_GEN_STATUS			0x1
#define MSGBUF_TYPE_RING_STATUS			0x2
#define MSGBUF_TYPE_FLOW_RING_CREATE		0x3
#define MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT	0x4
#define MSGBUF_TYPE_FLOW_RING_DELETE		0x5
#define MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT	0x6
#define MSGBUF_TYPE_FLOW_RING_FLUSH		0x7
#define MSGBUF_TYPE_FLOW_RING_FLUSH_CMPLT	0x8
#define MSGBUF_TYPE_IOCTLPTR_REQ		0x9
#define MSGBUF_TYPE_IOCTLPTR_REQ_ACK		0xA
#define MSGBUF_TYPE_IOCTLRESP_BUF_POST		0xB
#define MSGBUF_TYPE_IOCTL_CMPLT			0xC
#define MSGBUF_TYPE_EVENT_BUF_POST		0xD
#define MSGBUF_TYPE_WL_EVENT			0xE
#define MSGBUF_TYPE_TX_POST			0xF
#define MSGBUF_TYPE_TX_STATUS			0x10
#define MSGBUF_TYPE_RXBUF_POST			0x11
#define MSGBUF_TYPE_RX_CMPLT			0x12
#define MSGBUF_TYPE_LPBK_DMAXFER		0x13
#define MSGBUF_TYPE_LPBK_DMAXFER_CMPLT		0x14

#define NR_TX_PKTIDS				2048
#define NR_RX_PKTIDS				1024

#define BRCMF_IOCTL_REQ_PKTID			0xFFFE

#define BRCMF_MSGBUF_MAX_PKT_SIZE		2048
#define BRCMF_MSGBUF_RXBUFPOST_THRESHOLD	32
#define BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST	8
#define BRCMF_MSGBUF_MAX_EVENTBUF_POST		8

#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3	0x01
#define BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT	5

#define BRCMF_MSGBUF_TX_FLUSH_CNT1		32
#define BRCMF_MSGBUF_TX_FLUSH_CNT2		96

#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS	64
#define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS	32

struct msgbuf_common_hdr {
	u8				msgtype;
	u8				ifidx;
	u8				flags;
	u8				rsvd0;
	__le32				request_id;
};

struct msgbuf_buf_addr {
	__le32				low_addr;
	__le32				high_addr;
};

struct msgbuf_ioctl_req_hdr {
	struct msgbuf_common_hdr	msg;
	__le32				cmd;
	__le16				trans_id;
	__le16				input_buf_len;
	__le16				output_buf_len;
	__le16				rsvd0[3];
	struct msgbuf_buf_addr		req_buf_addr;
	__le32				rsvd1[2];
};

struct msgbuf_tx_msghdr {
	struct msgbuf_common_hdr	msg;
	u8				txhdr[ETH_HLEN];
	u8				flags;
	u8				seg_cnt;
	struct msgbuf_buf_addr		metadata_buf_addr;
	struct msgbuf_buf_addr		data_buf_addr;
	__le16				metadata_buf_len;
	__le16				data_len;
	__le32				rsvd0;
};

struct msgbuf_rx_bufpost {
	struct msgbuf_common_hdr	msg;
	__le16				metadata_buf_len;
	__le16				data_buf_len;
	__le32				rsvd0;
	struct msgbuf_buf_addr		metadata_buf_addr;
	struct msgbuf_buf_addr		data_buf_addr;
};

struct msgbuf_rx_ioctl_resp_or_event {
	struct msgbuf_common_hdr	msg;
	__le16				host_buf_len;
	__le16				rsvd0[3];
	struct msgbuf_buf_addr		host_buf_addr;
	__le32				rsvd1[4];
};

struct msgbuf_completion_hdr {
	__le16				status;
	__le16				flow_ring_id;
};

struct msgbuf_rx_event {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				event_data_len;
	__le16				seqnum;
	__le16				rsvd0[4];
};

struct msgbuf_ioctl_resp_hdr {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				resp_len;
	__le16				trans_id;
	__le32				cmd;
	__le32				rsvd0;
};

struct msgbuf_tx_status {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				metadata_len;
	__le16				tx_status;
};

struct msgbuf_rx_complete {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				metadata_len;
	__le16				data_len;
	__le16				data_offset;
	__le16				flags;
	__le32				rx_status_0;
	__le32				rx_status_1;
	__le32				rsvd0;
};

struct msgbuf_tx_flowring_create_req {
	struct msgbuf_common_hdr	msg;
	u8				da[ETH_ALEN];
	u8				sa[ETH_ALEN];
	u8				tid;
	u8				if_flags;
	__le16				flow_ring_id;
	u8				tc;
	u8				priority;
	__le16				int_vector;
	__le16				max_items;
	__le16				len_item;
	struct msgbuf_buf_addr		flow_ring_addr;
};

struct msgbuf_tx_flowring_delete_req {
	struct msgbuf_common_hdr	msg;
	__le16				flow_ring_id;
	__le16				reason;
	__le32				rsvd0[7];
};

struct msgbuf_flowring_create_resp {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le32				rsvd0[3];
};

struct msgbuf_flowring_delete_resp {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le32				rsvd0[3];
};

struct msgbuf_flowring_flush_resp {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le32				rsvd0[3];
};

struct brcmf_msgbuf_work_item {
	struct list_head queue;
	u32 flowid;
	int ifidx;
	u8 sa[ETH_ALEN];
	u8 da[ETH_ALEN];
};

struct brcmf_msgbuf {
	struct brcmf_pub *drvr;

	struct brcmf_commonring **commonrings;
	struct brcmf_commonring **flowrings;
	dma_addr_t *flowring_dma_handle;
	u16 nrof_flowrings;

	u16 rx_dataoffset;
	u32 max_rxbufpost;
	u16 rx_metadata_offset;
	u32 rxbufpost;

	u32 max_ioctlrespbuf;
	u32 cur_ioctlrespbuf;
	u32 max_eventbuf;
	u32 cur_eventbuf;

	void *ioctbuf;
	dma_addr_t ioctbuf_handle;
	u32 ioctbuf_phys_hi;
	u32 ioctbuf_phys_lo;
	int ioctl_resp_status;
	u32 ioctl_resp_ret_len;
	u32 ioctl_resp_pktid;

	u16 data_seq_no;
	u16 ioctl_seq_no;
	u32 reqid;
	wait_queue_head_t ioctl_resp_wait;
	bool ctl_completed;

	struct brcmf_msgbuf_pktids *tx_pktids;
	struct brcmf_msgbuf_pktids *rx_pktids;
	struct brcmf_flowring *flow;

	struct workqueue_struct *txflow_wq;
	struct work_struct txflow_work;
	unsigned long *flow_map;
	unsigned long *txstatus_done_map;

	struct work_struct flowring_work;
	spinlock_t flowring_work_lock;
	struct list_head work_queue;
};

struct brcmf_msgbuf_pktid {
	atomic_t  allocated;
	u16 data_offset;
	struct sk_buff *skb;
	dma_addr_t physaddr;
};

struct brcmf_msgbuf_pktids {
	u32 array_size;
	u32 last_allocated_idx;
	enum dma_data_direction direction;
	struct brcmf_msgbuf_pktid *array;
};


/* DMA flushing still needs an implementation for MIPS and ARM platforms
 * and should be moved into a utility header. Note that this is not real
 * cache flushing: the buffers live in uncached memory, so normally only
 * write buffers have to be drained, though this may differ per platform.
 */
#define brcmf_dma_flush(addr, len)
#define brcmf_dma_invalidate_cache(addr, len)


static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf);


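/* brcmf_msgbuf_init_pktids() - allocate a packet id table used to match
 * DMA-mapped skbs against the request_id coming back from the dongle.
 */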
static struct brcmf_msgbuf_pktids *
brcmf_msgbuf_init_pktids(u32 nr_array_entries,
			 enum dma_data_direction direction)
{
	struct brcmf_msgbuf_pktid *array;
	struct brcmf_msgbuf_pktids *pktids;

	array = kcalloc(nr_array_entries, sizeof(*array), GFP_KERNEL);
	if (!array)
		return NULL;

	pktids = kzalloc(sizeof(*pktids), GFP_KERNEL);
	if (!pktids) {
		kfree(array);
		return NULL;
	}
	pktids->array = array;
	pktids->array_size = nr_array_entries;
	/* direction is used for every dma_(un)map_single() on this table */
	pktids->direction = direction;

	return pktids;
}


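/* brcmf_msgbuf_alloc_pktid() - DMA-map @skb and claim a free slot in the
 * pktid table, scanning circularly from the last allocated index. Returns
 * -ENOMEM when the mapping fails or when all ids are in use.
 */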
static int
brcmf_msgbuf_alloc_pktid(struct device *dev,
			 struct brcmf_msgbuf_pktids *pktids,
			 struct sk_buff *skb, u16 data_offset,
			 dma_addr_t *physaddr, u32 *idx)
{
	struct brcmf_msgbuf_pktid *array;
	u32 count;

	array = pktids->array;

	*physaddr = dma_map_single(dev, skb->data + data_offset,
				   skb->len - data_offset, pktids->direction);

	if (dma_mapping_error(dev, *physaddr)) {
		brcmf_err("dma_map_single failed !!\n");
		return -ENOMEM;
	}

	*idx = pktids->last_allocated_idx;

	count = 0;
	do {
		(*idx)++;
		if (*idx == pktids->array_size)
			*idx = 0;
		if (array[*idx].allocated.counter == 0)
			if (atomic_cmpxchg(&array[*idx].allocated, 0, 1) == 0)
				break;
		count++;
	} while (count < pktids->array_size);

	if (count == pktids->array_size)
		return -ENOMEM;

	array[*idx].data_offset = data_offset;
	array[*idx].physaddr = *physaddr;
	array[*idx].skb = skb;

	pktids->last_allocated_idx = *idx;

	return 0;
}


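/* brcmf_msgbuf_get_pktid() - look up and release the pktid table entry for
 * @idx, unmap the DMA buffer and hand the skb back to the caller. Returns
 * NULL for an out-of-range or unused id.
 */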
static struct sk_buff *
brcmf_msgbuf_get_pktid(struct device *dev, struct brcmf_msgbuf_pktids *pktids,
		       u32 idx)
{
	struct brcmf_msgbuf_pktid *pktid;
	struct sk_buff *skb;

	if (idx >= pktids->array_size) {
		brcmf_err("Invalid packet id %d (max %d)\n", idx,
			  pktids->array_size);
		return NULL;
	}
	if (pktids->array[idx].allocated.counter) {
		pktid = &pktids->array[idx];
		dma_unmap_single(dev, pktid->physaddr,
				 pktid->skb->len - pktid->data_offset,
				 pktids->direction);
		skb = pktid->skb;
		pktid->allocated.counter = 0;
		return skb;
	} else {
		brcmf_err("Invalid packet id %d (not in use)\n", idx);
	}

	return NULL;
}


static void
brcmf_msgbuf_release_array(struct device *dev,
			   struct brcmf_msgbuf_pktids *pktids)
{
	struct brcmf_msgbuf_pktid *array;
	struct brcmf_msgbuf_pktid *pktid;
	u32 count;

	array = pktids->array;
	count = 0;
	do {
		if (array[count].allocated.counter) {
			pktid = &array[count];
			dma_unmap_single(dev, pktid->physaddr,
					 pktid->skb->len - pktid->data_offset,
					 pktids->direction);
			brcmu_pkt_buf_free_skb(pktid->skb);
		}
		count++;
	} while (count < pktids->array_size);

	kfree(array);
	kfree(pktids);
}


static void brcmf_msgbuf_release_pktids(struct brcmf_msgbuf *msgbuf)
{
	if (msgbuf->rx_pktids)
		brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
					   msgbuf->rx_pktids);
	if (msgbuf->tx_pktids)
		brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
					   msgbuf->tx_pktids);
}


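/* brcmf_msgbuf_tx_ioctl() - submit an ioctl request to the dongle. The
 * request parameters go on the control submit ring; the payload itself is
 * copied into the preallocated DMA-coherent ioctbuf that the request
 * points to.
 */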
static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx,
				 uint cmd, void *buf, uint len)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct brcmf_commonring *commonring;
	struct msgbuf_ioctl_req_hdr *request;
	u16 buf_len;
	void *ret_ptr;
	int err;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
	if (!ret_ptr) {
		brcmf_err("Failed to reserve space in commonring\n");
		brcmf_commonring_unlock(commonring);
		return -ENOMEM;
	}

	msgbuf->reqid++;

	request = (struct msgbuf_ioctl_req_hdr *)ret_ptr;
	request->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
	request->msg.ifidx = (u8)ifidx;
	request->msg.flags = 0;
	request->msg.request_id = cpu_to_le32(BRCMF_IOCTL_REQ_PKTID);
	request->cmd = cpu_to_le32(cmd);
	request->output_buf_len = cpu_to_le16(len);
	request->trans_id = cpu_to_le16(msgbuf->reqid);

	buf_len = min_t(u16, len, BRCMF_TX_IOCTL_MAX_MSG_SIZE);
	request->input_buf_len = cpu_to_le16(buf_len);
	request->req_buf_addr.high_addr = cpu_to_le32(msgbuf->ioctbuf_phys_hi);
	request->req_buf_addr.low_addr = cpu_to_le32(msgbuf->ioctbuf_phys_lo);
	if (buf)
		memcpy(msgbuf->ioctbuf, buf, buf_len);
	else
		memset(msgbuf->ioctbuf, 0, buf_len);
	brcmf_dma_flush(msgbuf->ioctbuf, buf_len);

	err = brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);

	return err;
}


static int brcmf_msgbuf_ioctl_resp_wait(struct brcmf_msgbuf *msgbuf)
{
	return wait_event_timeout(msgbuf->ioctl_resp_wait,
				  msgbuf->ctl_completed,
				  msecs_to_jiffies(MSGBUF_IOCTL_RESP_TIMEOUT));
}


static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf)
{
	msgbuf->ctl_completed = true;
	if (waitqueue_active(&msgbuf->ioctl_resp_wait))
		wake_up(&msgbuf->ioctl_resp_wait);
}


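/* brcmf_msgbuf_query_dcmd() - send an ioctl and block until the completion
 * arrives or MSGBUF_IOCTL_RESP_TIMEOUT expires, then copy at most @len
 * bytes of the response back into @buf.
 */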
static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
				   uint cmd, void *buf, uint len)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct sk_buff *skb = NULL;
	int timeout;
	int err;

	brcmf_dbg(MSGBUF, "ifidx=%d, cmd=%d, len=%d\n", ifidx, cmd, len);
	msgbuf->ctl_completed = false;
	err = brcmf_msgbuf_tx_ioctl(drvr, ifidx, cmd, buf, len);
	if (err)
		return err;

	timeout = brcmf_msgbuf_ioctl_resp_wait(msgbuf);
	if (!timeout) {
		brcmf_err("Timeout on response for query command\n");
		return -EIO;
	}

	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->rx_pktids,
				     msgbuf->ioctl_resp_pktid);
	if (msgbuf->ioctl_resp_ret_len != 0) {
		if (!skb)
			return -EBADF;

		memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
				       len : msgbuf->ioctl_resp_ret_len);
	}
	brcmu_pkt_buf_free_skb(skb);

	return msgbuf->ioctl_resp_status;
}


static int brcmf_msgbuf_set_dcmd(struct brcmf_pub *drvr, int ifidx,
				 uint cmd, void *buf, uint len)
{
	return brcmf_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len);
}


static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
				u8 *ifidx, struct sk_buff *skb)
{
	return -ENODEV;
}


static void
brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf *msgbuf, u16 flowid)
{
	u32 dma_sz;
	void *dma_buf;

	brcmf_dbg(MSGBUF, "Removing flowring %d\n", flowid);

	dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
	dma_buf = msgbuf->flowrings[flowid]->buf_addr;
	dma_free_coherent(msgbuf->drvr->bus_if->dev, dma_sz, dma_buf,
			  msgbuf->flowring_dma_handle[flowid]);

	brcmf_flowring_delete(msgbuf->flow, flowid);
}


static struct brcmf_msgbuf_work_item *
brcmf_msgbuf_dequeue_work(struct brcmf_msgbuf *msgbuf)
{
	struct brcmf_msgbuf_work_item *work = NULL;
	ulong flags;

	spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
	if (!list_empty(&msgbuf->work_queue)) {
		work = list_first_entry(&msgbuf->work_queue,
					struct brcmf_msgbuf_work_item, queue);
		list_del(&work->queue);
	}
	spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);

	return work;
}


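/* brcmf_msgbuf_flowring_create_worker() - allocate the DMA ring memory for
 * a new flowring and send a flow ring create request to the dongle. Note
 * that flowring ids on the wire are offset by
 * BRCMF_NROF_H2D_COMMON_MSGRINGS.
 */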
static u32
brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf,
				    struct brcmf_msgbuf_work_item *work)
{
	struct msgbuf_tx_flowring_create_req *create;
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	u32 flowid;
	void *dma_buf;
	u32 dma_sz;
	u64 address;
	int err;

	flowid = work->flowid;
	dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
	dma_buf = dma_alloc_coherent(msgbuf->drvr->bus_if->dev, dma_sz,
				     &msgbuf->flowring_dma_handle[flowid],
				     GFP_KERNEL);
	if (!dma_buf) {
		brcmf_err("dma_alloc_coherent failed\n");
		brcmf_flowring_delete(msgbuf->flow, flowid);
		return BRCMF_FLOWRING_INVALID_ID;
	}

	brcmf_commonring_config(msgbuf->flowrings[flowid],
				BRCMF_H2D_TXFLOWRING_MAX_ITEM,
				BRCMF_H2D_TXFLOWRING_ITEMSIZE, dma_buf);

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
	if (!ret_ptr) {
		brcmf_err("Failed to reserve space in commonring\n");
		brcmf_commonring_unlock(commonring);
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return BRCMF_FLOWRING_INVALID_ID;
	}

	create = (struct msgbuf_tx_flowring_create_req *)ret_ptr;
	create->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
	create->msg.ifidx = work->ifidx;
	create->msg.request_id = 0;
	create->tid = brcmf_flowring_tid(msgbuf->flow, flowid);
	create->flow_ring_id = cpu_to_le16(flowid +
					   BRCMF_NROF_H2D_COMMON_MSGRINGS);
	memcpy(create->sa, work->sa, ETH_ALEN);
	memcpy(create->da, work->da, ETH_ALEN);
	address = (u64)msgbuf->flowring_dma_handle[flowid];
	create->flow_ring_addr.high_addr = cpu_to_le32(address >> 32);
	create->flow_ring_addr.low_addr = cpu_to_le32(address & 0xffffffff);
	create->max_items = cpu_to_le16(BRCMF_H2D_TXFLOWRING_MAX_ITEM);
	create->len_item = cpu_to_le16(BRCMF_H2D_TXFLOWRING_ITEMSIZE);

	brcmf_dbg(MSGBUF, "Send Flow Create Req flow ID %d for peer %pM prio %d ifindex %d\n",
		  flowid, work->da, create->tid, work->ifidx);

	err = brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);
	if (err) {
		brcmf_err("Failed to write commonring\n");
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return BRCMF_FLOWRING_INVALID_ID;
	}

	return flowid;
}


static void brcmf_msgbuf_flowring_worker(struct work_struct *work)
{
	struct brcmf_msgbuf *msgbuf;
	struct brcmf_msgbuf_work_item *create;

	msgbuf = container_of(work, struct brcmf_msgbuf, flowring_work);

	while ((create = brcmf_msgbuf_dequeue_work(msgbuf))) {
		brcmf_msgbuf_flowring_create_worker(msgbuf, create);
		kfree(create);
	}
}


static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
					struct sk_buff *skb)
{
	struct brcmf_msgbuf_work_item *create;
	struct ethhdr *eh = (struct ethhdr *)(skb->data);
	u32 flowid;
	ulong flags;

	create = kzalloc(sizeof(*create), GFP_ATOMIC);
	if (create == NULL)
		return BRCMF_FLOWRING_INVALID_ID;

	flowid = brcmf_flowring_create(msgbuf->flow, eh->h_dest,
				       skb->priority, ifidx);
	if (flowid == BRCMF_FLOWRING_INVALID_ID) {
		kfree(create);
		return flowid;
	}

	create->flowid = flowid;
	create->ifidx = ifidx;
	memcpy(create->sa, eh->h_source, ETH_ALEN);
	memcpy(create->da, eh->h_dest, ETH_ALEN);

	spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
	list_add_tail(&create->queue, &msgbuf->work_queue);
	spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);
	schedule_work(&msgbuf->flowring_work);

	return flowid;
}


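/* brcmf_msgbuf_txflow() - drain queued tx packets for @flowid into its
 * flowring. Doorbell writes are batched: the ring is committed after the
 * first BRCMF_MSGBUF_TX_FLUSH_CNT1 packets, every
 * BRCMF_MSGBUF_TX_FLUSH_CNT2 thereafter, and once more for any remainder.
 */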
static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid)
{
	struct brcmf_flowring *flow = msgbuf->flow;
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	u32 count;
	struct sk_buff *skb;
	dma_addr_t physaddr;
	u32 pktid;
	struct msgbuf_tx_msghdr *tx_msghdr;
	u64 address;

	commonring = msgbuf->flowrings[flowid];
	if (!brcmf_commonring_write_available(commonring))
		return;

	brcmf_commonring_lock(commonring);

	count = BRCMF_MSGBUF_TX_FLUSH_CNT2 - BRCMF_MSGBUF_TX_FLUSH_CNT1;
	while (brcmf_flowring_qlen(flow, flowid)) {
		skb = brcmf_flowring_dequeue(flow, flowid);
		if (skb == NULL) {
			brcmf_err("No SKB, but qlen %d\n",
				  brcmf_flowring_qlen(flow, flowid));
			break;
		}
		skb_orphan(skb);
		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
					     msgbuf->tx_pktids, skb, ETH_HLEN,
					     &physaddr, &pktid)) {
			brcmf_flowring_reinsert(flow, flowid, skb);
			brcmf_err("No PKTID available !!\n");
			break;
		}
		ret_ptr = brcmf_commonring_reserve_for_write(commonring);
		if (!ret_ptr) {
			brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
					       msgbuf->tx_pktids, pktid);
			brcmf_flowring_reinsert(flow, flowid, skb);
			break;
		}
		count++;

		tx_msghdr = (struct msgbuf_tx_msghdr *)ret_ptr;

		tx_msghdr->msg.msgtype = MSGBUF_TYPE_TX_POST;
		tx_msghdr->msg.request_id = cpu_to_le32(pktid);
		tx_msghdr->msg.ifidx = brcmf_flowring_ifidx_get(flow, flowid);
		tx_msghdr->flags = BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3;
		tx_msghdr->flags |= (skb->priority & 0x07) <<
				    BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
		tx_msghdr->seg_cnt = 1;
		memcpy(tx_msghdr->txhdr, skb->data, ETH_HLEN);
		tx_msghdr->data_len = cpu_to_le16(skb->len - ETH_HLEN);
		address = (u64)physaddr;
		tx_msghdr->data_buf_addr.high_addr = cpu_to_le32(address >> 32);
		tx_msghdr->data_buf_addr.low_addr =
			cpu_to_le32(address & 0xffffffff);
		tx_msghdr->metadata_buf_len = 0;
		tx_msghdr->metadata_buf_addr.high_addr = 0;
		tx_msghdr->metadata_buf_addr.low_addr = 0;
		atomic_inc(&commonring->outstanding_tx);
		if (count >= BRCMF_MSGBUF_TX_FLUSH_CNT2) {
			brcmf_commonring_write_complete(commonring);
			count = 0;
		}
	}
	if (count)
		brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);
}


static void brcmf_msgbuf_txflow_worker(struct work_struct *worker)
{
	struct brcmf_msgbuf *msgbuf;
	u32 flowid;

	msgbuf = container_of(worker, struct brcmf_msgbuf, txflow_work);
	for_each_set_bit(flowid, msgbuf->flow_map, msgbuf->nrof_flowrings) {
		clear_bit(flowid, msgbuf->flow_map);
		brcmf_msgbuf_txflow(msgbuf, flowid);
	}
}


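/* brcmf_msgbuf_schedule_txdata() - mark @flowid pending in flow_map and
 * kick the txflow workqueue. Unless @force is set, scheduling is skipped
 * while BRCMF_MSGBUF_DELAY_TXWORKER_THRS or more tx completions are still
 * outstanding; brcmf_proto_msgbuf_rx_trigger() reschedules it later.
 */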
static int brcmf_msgbuf_schedule_txdata(struct brcmf_msgbuf *msgbuf, u32 flowid,
					bool force)
{
	struct brcmf_commonring *commonring;

	set_bit(flowid, msgbuf->flow_map);
	commonring = msgbuf->flowrings[flowid];
	if ((force) || (atomic_read(&commonring->outstanding_tx) <
			BRCMF_MSGBUF_DELAY_TXWORKER_THRS))
		queue_work(msgbuf->txflow_wq, &msgbuf->txflow_work);

	return 0;
}


static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx,
			       u8 offset, struct sk_buff *skb)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct brcmf_flowring *flow = msgbuf->flow;
	struct ethhdr *eh = (struct ethhdr *)(skb->data);
	u32 flowid;

	flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
	if (flowid == BRCMF_FLOWRING_INVALID_ID) {
		flowid = brcmf_msgbuf_flowring_create(msgbuf, ifidx, skb);
		if (flowid == BRCMF_FLOWRING_INVALID_ID)
			return -ENOMEM;
	}
	brcmf_flowring_enqueue(flow, flowid, skb);
	brcmf_msgbuf_schedule_txdata(msgbuf, flowid, false);

	return 0;
}


static void
brcmf_msgbuf_configure_addr_mode(struct brcmf_pub *drvr, int ifidx,
				 enum proto_addr_mode addr_mode)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

	brcmf_flowring_configure_addr_mode(msgbuf->flow, ifidx, addr_mode);
}


static void
brcmf_msgbuf_delete_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

	brcmf_flowring_delete_peer(msgbuf->flow, ifidx, peer);
}


static void
brcmf_msgbuf_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

	brcmf_flowring_add_tdls_peer(msgbuf->flow, ifidx, peer);
}


static void
brcmf_msgbuf_process_ioctl_complete(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct msgbuf_ioctl_resp_hdr *ioctl_resp;

	ioctl_resp = (struct msgbuf_ioctl_resp_hdr *)buf;

	msgbuf->ioctl_resp_status =
			(s16)le16_to_cpu(ioctl_resp->compl_hdr.status);
	msgbuf->ioctl_resp_ret_len = le16_to_cpu(ioctl_resp->resp_len);
	msgbuf->ioctl_resp_pktid = le32_to_cpu(ioctl_resp->msg.request_id);

	brcmf_msgbuf_ioctl_resp_wake(msgbuf);

	if (msgbuf->cur_ioctlrespbuf)
		msgbuf->cur_ioctlrespbuf--;
	brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);
}


static void
brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct brcmf_commonring *commonring;
	struct msgbuf_tx_status *tx_status;
	u32 idx;
	struct sk_buff *skb;
	u16 flowid;

	tx_status = (struct msgbuf_tx_status *)buf;
	idx = le32_to_cpu(tx_status->msg.request_id);
	flowid = le16_to_cpu(tx_status->compl_hdr.flow_ring_id);
	flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->tx_pktids, idx);
	if (!skb)
		return;

	set_bit(flowid, msgbuf->txstatus_done_map);
	commonring = msgbuf->flowrings[flowid];
	atomic_dec(&commonring->outstanding_tx);

	brcmf_txfinalize(msgbuf->drvr, skb, tx_status->msg.ifidx, true);
}


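/* brcmf_msgbuf_rxbuf_data_post() - post up to @count fresh receive buffers
 * on the rxpost submit ring. Each skb is DMA-mapped and assigned a pktid;
 * when metadata is configured the buffer is split into a metadata and a
 * data region. Returns the number of buffers actually posted.
 */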
static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
{
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	struct sk_buff *skb;
	u16 alloced;
	u32 pktlen;
	dma_addr_t physaddr;
	struct msgbuf_rx_bufpost *rx_bufpost;
	u64 address;
	u32 pktid;
	u32 i;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
	ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
							      count,
							      &alloced);
	if (!ret_ptr) {
		brcmf_dbg(MSGBUF, "Failed to reserve space in commonring\n");
		return 0;
	}

	for (i = 0; i < alloced; i++) {
		rx_bufpost = (struct msgbuf_rx_bufpost *)ret_ptr;
		memset(rx_bufpost, 0, sizeof(*rx_bufpost));

		skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);

		if (skb == NULL) {
			brcmf_err("Failed to alloc SKB\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}

		pktlen = skb->len;
		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
					     msgbuf->rx_pktids, skb, 0,
					     &physaddr, &pktid)) {
			dev_kfree_skb_any(skb);
			brcmf_err("No PKTID available !!\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}

		if (msgbuf->rx_metadata_offset) {
			address = (u64)physaddr;
			rx_bufpost->metadata_buf_len =
				cpu_to_le16(msgbuf->rx_metadata_offset);
			rx_bufpost->metadata_buf_addr.high_addr =
				cpu_to_le32(address >> 32);
			rx_bufpost->metadata_buf_addr.low_addr =
				cpu_to_le32(address & 0xffffffff);

			skb_pull(skb, msgbuf->rx_metadata_offset);
			pktlen = skb->len;
			physaddr += msgbuf->rx_metadata_offset;
		}
		rx_bufpost->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
		rx_bufpost->msg.request_id = cpu_to_le32(pktid);

		address = (u64)physaddr;
		rx_bufpost->data_buf_len = cpu_to_le16((u16)pktlen);
		rx_bufpost->data_buf_addr.high_addr =
			cpu_to_le32(address >> 32);
		rx_bufpost->data_buf_addr.low_addr =
			cpu_to_le32(address & 0xffffffff);

		ret_ptr += brcmf_commonring_len_item(commonring);
	}

	if (i)
		brcmf_commonring_write_complete(commonring);

	return i;
}


static void
brcmf_msgbuf_rxbuf_data_fill(struct brcmf_msgbuf *msgbuf)
{
	u32 fillbufs;
	u32 retcount;

	fillbufs = msgbuf->max_rxbufpost - msgbuf->rxbufpost;

	while (fillbufs) {
		retcount = brcmf_msgbuf_rxbuf_data_post(msgbuf, fillbufs);
		if (!retcount)
			break;
		msgbuf->rxbufpost += retcount;
		fillbufs -= retcount;
	}
}


static void
brcmf_msgbuf_update_rxbufpost_count(struct brcmf_msgbuf *msgbuf, u16 rxcnt)
{
	msgbuf->rxbufpost -= rxcnt;
	if (msgbuf->rxbufpost <= (msgbuf->max_rxbufpost -
				  BRCMF_MSGBUF_RXBUFPOST_THRESHOLD))
		brcmf_msgbuf_rxbuf_data_fill(msgbuf);
}


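/* brcmf_msgbuf_rxbuf_ctrl_post() - post receive buffers on the control
 * submit ring, used either for firmware events (@event_buf true) or for
 * ioctl responses. Returns the number of buffers actually posted.
 */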
static u32
brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf,
			     u32 count)
{
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	struct sk_buff *skb;
	u16 alloced;
	u32 pktlen;
	dma_addr_t physaddr;
	struct msgbuf_rx_ioctl_resp_or_event *rx_bufpost;
	u64 address;
	u32 pktid;
	u32 i;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
							      count,
							      &alloced);
	if (!ret_ptr) {
		brcmf_err("Failed to reserve space in commonring\n");
		brcmf_commonring_unlock(commonring);
		return 0;
	}

	for (i = 0; i < alloced; i++) {
		rx_bufpost = (struct msgbuf_rx_ioctl_resp_or_event *)ret_ptr;
		memset(rx_bufpost, 0, sizeof(*rx_bufpost));

		skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);

		if (skb == NULL) {
			brcmf_err("Failed to alloc SKB\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}

		pktlen = skb->len;
		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
					     msgbuf->rx_pktids, skb, 0,
					     &physaddr, &pktid)) {
			dev_kfree_skb_any(skb);
			brcmf_err("No PKTID available !!\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}
		if (event_buf)
			rx_bufpost->msg.msgtype = MSGBUF_TYPE_EVENT_BUF_POST;
		else
			rx_bufpost->msg.msgtype =
				MSGBUF_TYPE_IOCTLRESP_BUF_POST;
		rx_bufpost->msg.request_id = cpu_to_le32(pktid);

		address = (u64)physaddr;
		rx_bufpost->host_buf_len = cpu_to_le16((u16)pktlen);
		rx_bufpost->host_buf_addr.high_addr =
			cpu_to_le32(address >> 32);
		rx_bufpost->host_buf_addr.low_addr =
			cpu_to_le32(address & 0xffffffff);

		ret_ptr += brcmf_commonring_len_item(commonring);
	}

	if (i)
		brcmf_commonring_write_complete(commonring);

	brcmf_commonring_unlock(commonring);

	return i;
}


static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf)
{
	u32 count;

	count = msgbuf->max_ioctlrespbuf - msgbuf->cur_ioctlrespbuf;
	count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, false, count);
	msgbuf->cur_ioctlrespbuf += count;
}


static void brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf *msgbuf)
{
	u32 count;

	count = msgbuf->max_eventbuf - msgbuf->cur_eventbuf;
	count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, true, count);
	msgbuf->cur_eventbuf += count;
}


static void
brcmf_msgbuf_rx_skb(struct brcmf_msgbuf *msgbuf, struct sk_buff *skb,
		    u8 ifidx)
{
	struct brcmf_if *ifp;

	/* The ifidx is the index used to find the matching netdev/ifp. For
	 * events this is easy because they carry the bssidx, which maps
	 * 1-on-1 to the netdev/ifp. Data frames carry the ifidx instead;
	 * since bssidx 1 is used for p2p0, on which no data can be received
	 * or transmitted, the bssidx is ifidx + 1 whenever ifidx > 0.
	 */
	if (ifidx)
		(ifidx)++;
	ifp = msgbuf->drvr->iflist[ifidx];
	if (!ifp || !ifp->ndev) {
		brcmf_err("Received pkt for invalid ifidx %d\n", ifidx);
		brcmu_pkt_buf_free_skb(skb);
		return;
	}
	brcmf_netif_rx(ifp, skb);
}


static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct msgbuf_rx_event *event;
	u32 idx;
	u16 buflen;
	struct sk_buff *skb;

	event = (struct msgbuf_rx_event *)buf;
	idx = le32_to_cpu(event->msg.request_id);
	buflen = le16_to_cpu(event->event_data_len);

	if (msgbuf->cur_eventbuf)
		msgbuf->cur_eventbuf--;
	brcmf_msgbuf_rxbuf_event_post(msgbuf);

	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->rx_pktids, idx);
	if (!skb)
		return;

	if (msgbuf->rx_dataoffset)
		skb_pull(skb, msgbuf->rx_dataoffset);

	skb_trim(skb, buflen);

	brcmf_msgbuf_rx_skb(msgbuf, skb, event->msg.ifidx);
}


static void
brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct msgbuf_rx_complete *rx_complete;
	struct sk_buff *skb;
	u16 data_offset;
	u16 buflen;
	u32 idx;

	brcmf_msgbuf_update_rxbufpost_count(msgbuf, 1);

	rx_complete = (struct msgbuf_rx_complete *)buf;
	data_offset = le16_to_cpu(rx_complete->data_offset);
	buflen = le16_to_cpu(rx_complete->data_len);
	idx = le32_to_cpu(rx_complete->msg.request_id);

	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->rx_pktids, idx);
	if (!skb)
		return;

	if (data_offset)
		skb_pull(skb, data_offset);
	else if (msgbuf->rx_dataoffset)
		skb_pull(skb, msgbuf->rx_dataoffset);

	skb_trim(skb, buflen);

	brcmf_msgbuf_rx_skb(msgbuf, skb, rx_complete->msg.ifidx);
}


static void
brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf *msgbuf,
					       void *buf)
{
	struct msgbuf_flowring_create_resp *flowring_create_resp;
	u16 status;
	u16 flowid;

	flowring_create_resp = (struct msgbuf_flowring_create_resp *)buf;

	flowid = le16_to_cpu(flowring_create_resp->compl_hdr.flow_ring_id);
	flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
	status = le16_to_cpu(flowring_create_resp->compl_hdr.status);

	if (status) {
		brcmf_err("Flowring creation failed, code %d\n", status);
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return;
	}
	brcmf_dbg(MSGBUF, "Flowring %d Create response status %d\n", flowid,
		  status);

	brcmf_flowring_open(msgbuf->flow, flowid);

	brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
}


static void
brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf,
					       void *buf)
{
	struct msgbuf_flowring_delete_resp *flowring_delete_resp;
	u16 status;
	u16 flowid;

	flowring_delete_resp = (struct msgbuf_flowring_delete_resp *)buf;

	flowid = le16_to_cpu(flowring_delete_resp->compl_hdr.flow_ring_id);
	flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
	status = le16_to_cpu(flowring_delete_resp->compl_hdr.status);

	if (status) {
		brcmf_err("Flowring deletion failed, code %d\n", status);
		brcmf_flowring_delete(msgbuf->flow, flowid);
		return;
	}
	brcmf_dbg(MSGBUF, "Flowring %d Delete response status %d\n", flowid,
		  status);

	brcmf_msgbuf_remove_flowring(msgbuf, flowid);
}


static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct msgbuf_common_hdr *msg;

	msg = (struct msgbuf_common_hdr *)buf;
	switch (msg->msgtype) {
	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT\n");
		brcmf_msgbuf_process_flow_ring_create_response(msgbuf, buf);
		break;
	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT\n");
		brcmf_msgbuf_process_flow_ring_delete_response(msgbuf, buf);
		break;
	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTLPTR_REQ_ACK\n");
		break;
	case MSGBUF_TYPE_IOCTL_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTL_CMPLT\n");
		brcmf_msgbuf_process_ioctl_complete(msgbuf, buf);
		break;
	case MSGBUF_TYPE_WL_EVENT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_WL_EVENT\n");
		brcmf_msgbuf_process_event(msgbuf, buf);
		break;
	case MSGBUF_TYPE_TX_STATUS:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_TX_STATUS\n");
		brcmf_msgbuf_process_txstatus(msgbuf, buf);
		break;
	case MSGBUF_TYPE_RX_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RX_CMPLT\n");
		brcmf_msgbuf_process_rx_complete(msgbuf, buf);
		break;
	default:
		brcmf_err("Unsupported msgtype %d\n", msg->msgtype);
		break;
	}
}


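/* brcmf_msgbuf_process_rx() - walk all available items in a device-to-host
 * ring, dispatch each one on its msgtype, and go round again when the read
 * pointer has wrapped to the start of the ring.
 */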
static void brcmf_msgbuf_process_rx(struct brcmf_msgbuf *msgbuf,
				    struct brcmf_commonring *commonring)
{
	void *buf;
	u16 count;

again:
	buf = brcmf_commonring_get_read_ptr(commonring, &count);
	if (buf == NULL)
		return;

	while (count) {
		brcmf_msgbuf_process_msgtype(msgbuf,
					     buf + msgbuf->rx_dataoffset);
		buf += brcmf_commonring_len_item(commonring);
		count--;
	}
	brcmf_commonring_read_complete(commonring);

	if (commonring->r_ptr == 0)
		goto again;
}


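/* brcmf_proto_msgbuf_rx_trigger() - called by the bus layer when the dongle
 * signals new device-to-host work. Processes the rx, tx and control
 * complete rings, then reschedules tx flow work for flowrings that still
 * have packets queued.
 */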
int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct brcmf_commonring *commonring;
	void *buf;
	u32 flowid;
	int qlen;

	buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
	brcmf_msgbuf_process_rx(msgbuf, buf);
	buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
	brcmf_msgbuf_process_rx(msgbuf, buf);
	buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
	brcmf_msgbuf_process_rx(msgbuf, buf);

	for_each_set_bit(flowid, msgbuf->txstatus_done_map,
			 msgbuf->nrof_flowrings) {
		clear_bit(flowid, msgbuf->txstatus_done_map);
		commonring = msgbuf->flowrings[flowid];
		qlen = brcmf_flowring_qlen(msgbuf->flow, flowid);
		if ((qlen > BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) ||
		    ((qlen) && (atomic_read(&commonring->outstanding_tx) <
				BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS)))
			brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
	}

	return 0;
}


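/* brcmf_msgbuf_delete_flowring() - ask the dongle to delete a flowring. The
 * ring itself is freed when the delete response arrives; if the request
 * cannot even be submitted, the ring is removed immediately.
 */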
void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct msgbuf_tx_flowring_delete_req *delete;
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	u8 ifidx;
	int err;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
	if (!ret_ptr) {
		brcmf_err("FW unaware, flowring will be removed !!\n");
		brcmf_commonring_unlock(commonring);
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return;
	}

	delete = (struct msgbuf_tx_flowring_delete_req *)ret_ptr;

	ifidx = brcmf_flowring_ifidx_get(msgbuf->flow, flowid);

	delete->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
	delete->msg.ifidx = ifidx;
	delete->msg.request_id = 0;

	delete->flow_ring_id = cpu_to_le16(flowid +
					   BRCMF_NROF_H2D_COMMON_MSGRINGS);
	delete->reason = 0;

	brcmf_dbg(MSGBUF, "Send Flow Delete Req flow ID %d, ifindex %d\n",
		  flowid, ifidx);

	err = brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);
	if (err) {
		brcmf_err("Failed to submit RING_DELETE, flowring will be removed\n");
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
	}
}


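/* brcmf_proto_msgbuf_attach() - allocate the msgbuf protocol state, hook up
 * the proto ops, and pre-post rx data, event and ioctl response buffers so
 * the dongle can deliver traffic immediately.
 */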
int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
{
	struct brcmf_bus_msgbuf *if_msgbuf;
	struct brcmf_msgbuf *msgbuf;
	u64 address;
	u32 count;

	if_msgbuf = drvr->bus_if->msgbuf;
	msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL);
	if (!msgbuf)
		goto fail;

	msgbuf->txflow_wq = create_singlethread_workqueue("msgbuf_txflow");
	if (msgbuf->txflow_wq == NULL) {
		brcmf_err("workqueue creation failed\n");
		goto fail;
	}
	INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker);
	count = BITS_TO_LONGS(if_msgbuf->nrof_flowrings);
	count = count * sizeof(unsigned long);
	msgbuf->flow_map = kzalloc(count, GFP_KERNEL);
	if (!msgbuf->flow_map)
		goto fail;

	msgbuf->txstatus_done_map = kzalloc(count, GFP_KERNEL);
	if (!msgbuf->txstatus_done_map)
		goto fail;

	msgbuf->drvr = drvr;
	msgbuf->ioctbuf = dma_alloc_coherent(drvr->bus_if->dev,
					     BRCMF_TX_IOCTL_MAX_MSG_SIZE,
					     &msgbuf->ioctbuf_handle,
					     GFP_KERNEL);
	if (!msgbuf->ioctbuf)
		goto fail;
	address = (u64)msgbuf->ioctbuf_handle;
	msgbuf->ioctbuf_phys_hi = address >> 32;
	msgbuf->ioctbuf_phys_lo = address & 0xffffffff;

	drvr->proto->hdrpull = brcmf_msgbuf_hdrpull;
	drvr->proto->query_dcmd = brcmf_msgbuf_query_dcmd;
	drvr->proto->set_dcmd = brcmf_msgbuf_set_dcmd;
	drvr->proto->txdata = brcmf_msgbuf_txdata;
	drvr->proto->configure_addr_mode = brcmf_msgbuf_configure_addr_mode;
	drvr->proto->delete_peer = brcmf_msgbuf_delete_peer;
	drvr->proto->add_tdls_peer = brcmf_msgbuf_add_tdls_peer;
	drvr->proto->pd = msgbuf;

	init_waitqueue_head(&msgbuf->ioctl_resp_wait);

	msgbuf->commonrings =
		(struct brcmf_commonring **)if_msgbuf->commonrings;
	msgbuf->flowrings = (struct brcmf_commonring **)if_msgbuf->flowrings;
	msgbuf->nrof_flowrings = if_msgbuf->nrof_flowrings;
	msgbuf->flowring_dma_handle = kzalloc(msgbuf->nrof_flowrings *
		sizeof(*msgbuf->flowring_dma_handle), GFP_KERNEL);
	if (!msgbuf->flowring_dma_handle)
		goto fail;

	msgbuf->rx_dataoffset = if_msgbuf->rx_dataoffset;
	msgbuf->max_rxbufpost = if_msgbuf->max_rxbufpost;

	msgbuf->max_ioctlrespbuf = BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST;
	msgbuf->max_eventbuf = BRCMF_MSGBUF_MAX_EVENTBUF_POST;

	msgbuf->tx_pktids = brcmf_msgbuf_init_pktids(NR_TX_PKTIDS,
						     DMA_TO_DEVICE);
	if (!msgbuf->tx_pktids)
		goto fail;
	msgbuf->rx_pktids = brcmf_msgbuf_init_pktids(NR_RX_PKTIDS,
						     DMA_FROM_DEVICE);
	if (!msgbuf->rx_pktids)
		goto fail;

	msgbuf->flow = brcmf_flowring_attach(drvr->bus_if->dev,
					     if_msgbuf->nrof_flowrings);
	if (!msgbuf->flow)
		goto fail;

	brcmf_dbg(MSGBUF, "Feeding buffers, rx data %d, rx event %d, rx ioctl resp %d\n",
		  msgbuf->max_rxbufpost, msgbuf->max_eventbuf,
		  msgbuf->max_ioctlrespbuf);
	count = 0;
	do {
		brcmf_msgbuf_rxbuf_data_fill(msgbuf);
		if (msgbuf->max_rxbufpost != msgbuf->rxbufpost)
			msleep(10);
		else
			break;
		count++;
	} while (count < 10);
	brcmf_msgbuf_rxbuf_event_post(msgbuf);
	brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);

	INIT_WORK(&msgbuf->flowring_work, brcmf_msgbuf_flowring_worker);
	spin_lock_init(&msgbuf->flowring_work_lock);
	INIT_LIST_HEAD(&msgbuf->work_queue);

	return 0;

fail:
	if (msgbuf) {
		kfree(msgbuf->flow_map);
		kfree(msgbuf->txstatus_done_map);
		brcmf_msgbuf_release_pktids(msgbuf);
		kfree(msgbuf->flowring_dma_handle);
		if (msgbuf->ioctbuf)
			dma_free_coherent(drvr->bus_if->dev,
					  BRCMF_TX_IOCTL_MAX_MSG_SIZE,
					  msgbuf->ioctbuf,
					  msgbuf->ioctbuf_handle);
		kfree(msgbuf);
	}
	return -ENOMEM;
}


void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr)
{
	struct brcmf_msgbuf *msgbuf;
	struct brcmf_msgbuf_work_item *work;

	brcmf_dbg(TRACE, "Enter\n");
	if (drvr->proto->pd) {
		msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
		cancel_work_sync(&msgbuf->flowring_work);
		while (!list_empty(&msgbuf->work_queue)) {
			work = list_first_entry(&msgbuf->work_queue,
						struct brcmf_msgbuf_work_item,
						queue);
			list_del(&work->queue);
			kfree(work);
		}
		kfree(msgbuf->flow_map);
		kfree(msgbuf->txstatus_done_map);
		if (msgbuf->txflow_wq)
			destroy_workqueue(msgbuf->txflow_wq);

		brcmf_flowring_detach(msgbuf->flow);
		dma_free_coherent(drvr->bus_if->dev,
				  BRCMF_TX_IOCTL_MAX_MSG_SIZE,
				  msgbuf->ioctbuf, msgbuf->ioctbuf_handle);
		brcmf_msgbuf_release_pktids(msgbuf);
		kfree(msgbuf->flowring_dma_handle);
		kfree(msgbuf);
		drvr->proto->pd = NULL;
	}
}