/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include "fnic_io.h"
#include "fnic.h"

const char *fnic_state_str[] = {
	[FNIC_IN_FC_MODE] =           "FNIC_IN_FC_MODE",
	[FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
	[FNIC_IN_ETH_MODE] =          "FNIC_IN_ETH_MODE",
	[FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
};

static const char *fnic_ioreq_state_str[] = {
	[FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
	[FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
	[FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
	[FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
	[FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
};

static const char *fcpio_status_str[] =  {
	[FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
	[FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
	[FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
	[FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
	[FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
	[FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
	[FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
	[FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
	[FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
	[FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
	[FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
	[FCPIO_FW_ERR] = "FCPIO_FW_ERR",
	[FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
	[FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
	[FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
	[FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
	[FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
	[FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
	[FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
};

const char *fnic_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
		return "unknown";

	return fnic_state_str[state];
}

static const char *fnic_ioreq_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
	    !fnic_ioreq_state_str[state])
		return "unknown";

	return fnic_ioreq_state_str[state];
}

static const char *fnic_fcpio_status_to_str(unsigned int status)
{
	if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
		return "unknown";

	return fcpio_status_str[status];
}

static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);

static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
					    struct scsi_cmnd *sc)
{
	u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);

	return &fnic->io_req_lock[hash];
}

static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
					    int tag)
{
	return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
}
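
/*
 * A worked example of the lock hashing above (a sketch; the actual
 * FNIC_IO_LOCKS value lives in fnic.h and is only assumed here to be a
 * power of two, say 64): with FNIC_IO_LOCKS == 64, tag 5 and tag 69
 * both hash to (tag & 63) == 5 and therefore share io_req_lock[5]. The
 * power-of-two size is what lets the modulo be done with a bitwise AND.
 */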

/*
 * Unmap the data buffer and sense buffer for an io_req,
 * also unmap and free the device-private scatter/gather list.
 */
static void fnic_release_ioreq_buf(struct fnic *fnic,
				   struct fnic_io_req *io_req,
				   struct scsi_cmnd *sc)
{
	if (io_req->sgl_list_pa)
		pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
				 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
				 PCI_DMA_TODEVICE);
	scsi_dma_unmap(sc);

	if (io_req->sgl_cnt)
		mempool_free(io_req->sgl_list_alloc,
			     fnic->io_sgl_pool[io_req->sgl_type]);
	if (io_req->sense_buf_pa)
		pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
				 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
}

/* Free up Copy Wq descriptors. Called with copy_wq lock held */
static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
{
	/* if no Ack received from firmware, then nothing to clean */
	if (!fnic->fw_ack_recd[0])
		return 1;

	/*
	 * Update desc_available count based on number of freed descriptors
	 * Account for wraparound
	 */
	if (wq->to_clean_index <= fnic->fw_ack_index[0])
		wq->ring.desc_avail += (fnic->fw_ack_index[0]
					- wq->to_clean_index + 1);
	else
		wq->ring.desc_avail += (wq->ring.desc_count
					- wq->to_clean_index
					+ fnic->fw_ack_index[0] + 1);

	/*
	 * just bump clean index to ack_index+1 accounting for wraparound
	 * this will essentially free up all descriptors between
	 * to_clean_index and fw_ack_index, both inclusive
	 */
	wq->to_clean_index =
		(fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;

	/* we have processed the acks received so far */
	fnic->fw_ack_recd[0] = 0;
	return 0;
}
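
/*
 * A worked example of the accounting above (a sketch with an assumed
 * ring size): say desc_count == 8, to_clean_index == 6 and the firmware
 * has acked through fw_ack_index[0] == 1. The ack index has wrapped, so
 * the second branch applies and desc_avail grows by 8 - 6 + 1 + 1 == 4,
 * i.e. descriptors 6, 7, 0 and 1 are reclaimed. to_clean_index then
 * becomes (1 + 1) % 8 == 2, the next descriptor awaiting an ack.
 */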


/**
 * __fnic_set_state_flags
 * Sets/Clears bits in fnic's state_flags
 **/
void
__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
			unsigned long clearbits)
{
	struct Scsi_Host *host = fnic->lport->host;
	int sh_locked = spin_is_locked(host->host_lock);
	unsigned long flags = 0;

	if (!sh_locked)
		spin_lock_irqsave(host->host_lock, flags);

	if (clearbits)
		fnic->state_flags &= ~st_flags;
	else
		fnic->state_flags |= st_flags;

	if (!sh_locked)
		spin_unlock_irqrestore(host->host_lock, flags);

	return;
}
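
/*
 * Callers normally reach this through the fnic_set_state_flags() and
 * fnic_clear_state_flags() wrappers (assumed to be defined in fnic.h),
 * which pass clearbits as 0 and non-zero respectively; see the uses in
 * fnic_fw_reset_handler() below.
 */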


/*
 * fnic_fw_reset_handler
 * Routine to send reset msg to fw
 */
int fnic_fw_reset_handler(struct fnic *fnic)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	int ret = 0;
	unsigned long flags;

	/* indicate fwreset to io path */
	fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);

	skb_queue_purge(&fnic->frame_queue);
	skb_queue_purge(&fnic->tx_queue);

	/* wait for io cmpl */
	while (atomic_read(&fnic->in_flight))
		schedule_timeout(msecs_to_jiffies(1));

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq))
		ret = -EAGAIN;
	else {
		fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
		atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
		if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
			  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
			atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
				atomic64_read(
				  &fnic->fnic_stats.fw_stats.active_fw_reqs));
	}

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);

	if (!ret) {
		atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Issued fw reset\n");
	} else {
		fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Failed to issue fw reset\n");
	}

	return ret;
}


/*
 * fnic_flogi_reg_handler
 * Routine to send flogi register msg to fw
 */
int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	enum fcpio_flogi_reg_format_type format;
	struct fc_lport *lp = fnic->lport;
	u8 gw_mac[ETH_ALEN];
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		ret = -EAGAIN;
		goto flogi_reg_ioreq_end;
	}

	if (fnic->ctlr.map_dest) {
		memset(gw_mac, 0xff, ETH_ALEN);
		format = FCPIO_FLOGI_REG_DEF_DEST;
	} else {
		memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
		format = FCPIO_FLOGI_REG_GW_DEST;
	}

	if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
		fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
						fc_id, gw_mac,
						fnic->data_src_addr,
						lp->r_a_tov, lp->e_d_tov);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
			      fc_id, fnic->data_src_addr, gw_mac);
	} else {
		fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
						  format, fc_id, gw_mac);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "FLOGI reg issued fcid %x map %d dest %pM\n",
			      fc_id, fnic->ctlr.map_dest, gw_mac);
	}

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

flogi_reg_ioreq_end:
	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	return ret;
}

/*
 * fnic_queue_wq_copy_desc
 * Routine to enqueue a wq copy desc
 */
static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
					  struct vnic_wq_copy *wq,
					  struct fnic_io_req *io_req,
					  struct scsi_cmnd *sc,
					  int sg_count)
{
	struct scatterlist *sg;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct host_sg_desc *desc;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	unsigned int i;
	unsigned long intr_flags;
	int flags;
	u8 exch_flags;
	struct scsi_lun fc_lun;

	if (sg_count) {
		/* For each SGE, create a device desc entry */
		desc = io_req->sgl_list;
		for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
			desc->addr = cpu_to_le64(sg_dma_address(sg));
			desc->len = cpu_to_le32(sg_dma_len(sg));
			desc->_resvd = 0;
			desc++;
		}

		io_req->sgl_list_pa = pci_map_single
			(fnic->pdev,
			 io_req->sgl_list,
			 sizeof(io_req->sgl_list[0]) * sg_count,
			 PCI_DMA_TODEVICE);
	}

	io_req->sense_buf_pa = pci_map_single(fnic->pdev,
					      sc->sense_buffer,
					      SCSI_SENSE_BUFFERSIZE,
					      PCI_DMA_FROMDEVICE);

	int_to_scsilun(sc->device->lun, &fc_lun);

	/* Enqueue the descriptor in the Copy WQ */
	spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
			  "fnic_queue_wq_copy_desc failure - no descriptors\n");
		atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	flags = 0;
	if (sc->sc_data_direction == DMA_FROM_DEVICE)
		flags = FCPIO_ICMND_RDDATA;
	else if (sc->sc_data_direction == DMA_TO_DEVICE)
		flags = FCPIO_ICMND_WRDATA;

	exch_flags = 0;
	if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
	    (rp->flags & FC_RP_FLAGS_RETRY))
		exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;

	fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
					 0, exch_flags, io_req->sgl_cnt,
					 SCSI_SENSE_BUFFERSIZE,
					 io_req->sgl_list_pa,
					 io_req->sense_buf_pa,
					 0, /* scsi cmd ref, always 0 */
					 FCPIO_ICMND_PTA_SIMPLE,
					 	/* scsi pri and tag */
					 flags,	/* command flags */
					 sc->cmnd, sc->cmd_len,
					 scsi_bufflen(sc),
					 fc_lun.scsi_lun, io_req->port_id,
					 rport->maxframe_size, rp->r_a_tov,
					 rp->e_d_tov);

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
	return 0;
}

/*
 * fnic_queuecommand
 * Routine to send a scsi cdb
 * Called with host_lock held and interrupts disabled.
 */
static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
	struct fc_lport *lp = shost_priv(sc->device->host);
	struct fc_rport *rport;
	struct fnic_io_req *io_req = NULL;
	struct fnic *fnic = lport_priv(lp);
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct vnic_wq_copy *wq;
	int ret;
	u64 cmd_trace;
	int sg_count = 0;
	unsigned long flags = 0;
	unsigned long ptr;
	struct fc_rport_priv *rdata;
	spinlock_t *io_lock = NULL;
	int io_lock_acquired = 0;

	if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
		return SCSI_MLQUEUE_HOST_BUSY;

	rport = starget_to_rport(scsi_target(sc->device));
	ret = fc_remote_port_chkready(rport);
	if (ret) {
		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		sc->result = ret;
		done(sc);
		return 0;
	}

	rdata = lp->tt.rport_lookup(lp, rport->port_id);
	if (!rdata || (rdata->rp_state == RPORT_ST_DELETE)) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"returning IO as rport is removed\n");
		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		sc->result = DID_NO_CONNECT << 16;
		done(sc);
		return 0;
	}

	if (lp->state != LPORT_ST_READY || !(lp->link_up))
		return SCSI_MLQUEUE_HOST_BUSY;

	atomic_inc(&fnic->in_flight);

	/*
	 * Release host lock, use driver resource specific locks from here.
	 * Don't re-enable interrupts in case they were disabled prior to the
	 * caller disabling them.
	 */
	spin_unlock(lp->host->host_lock);
	CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
	CMD_FLAGS(sc) = FNIC_NO_FLAGS;

	/* Get a new io_req for this SCSI IO */
	io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.alloc_failures);
		ret = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	memset(io_req, 0, sizeof(*io_req));

	/* Map the data buffer */
	sg_count = scsi_dma_map(sc);
	if (sg_count < 0) {
		FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
			  sc->request->tag, sc, 0, sc->cmnd[0],
			  sg_count, CMD_STATE(sc));
		ret = SCSI_MLQUEUE_HOST_BUSY;
		mempool_free(io_req, fnic->io_req_pool);
		goto out;
	}

	/* Determine the type of scatter/gather list we need */
	io_req->sgl_cnt = sg_count;
	io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
	if (sg_count > FNIC_DFLT_SG_DESC_CNT)
		io_req->sgl_type = FNIC_SGL_CACHE_MAX;

	if (sg_count) {
		io_req->sgl_list =
			mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
				      GFP_ATOMIC);
		if (!io_req->sgl_list) {
			atomic64_inc(&fnic_stats->io_stats.alloc_failures);
			ret = SCSI_MLQUEUE_HOST_BUSY;
			scsi_dma_unmap(sc);
			mempool_free(io_req, fnic->io_req_pool);
			goto out;
		}

		/* Cache sgl list allocated address before alignment */
		io_req->sgl_list_alloc = io_req->sgl_list;
		ptr = (unsigned long) io_req->sgl_list;
		if (ptr % FNIC_SG_DESC_ALIGN) {
			io_req->sgl_list = (struct host_sg_desc *)
				(((unsigned long) ptr
				  + FNIC_SG_DESC_ALIGN - 1)
				 & ~(FNIC_SG_DESC_ALIGN - 1));
		}
	}
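
	/*
	 * The expression above is the usual align-up idiom. A sketch with
	 * an assumed FNIC_SG_DESC_ALIGN of 16: an allocation at 0x1004
	 * becomes (0x1004 + 15) & ~15 == 0x1010, while an already aligned
	 * 0x1010 maps to itself. The pre-alignment pointer is kept in
	 * sgl_list_alloc so the mempool is later given back the true
	 * allocation address.
	 */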

	/*
	 * Will acquire lock before setting to IO initialized.
	 */

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);

	/* initialize rest of io_req */
	io_lock_acquired = 1;
	io_req->port_id = rport->port_id;
	io_req->start_time = jiffies;
	CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
	CMD_SP(sc) = (char *)io_req;
	CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
	sc->scsi_done = done;

	/* create copy wq desc and enqueue it */
	wq = &fnic->wq_copy[0];
	ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
	if (ret) {
		/*
		 * In case another thread cancelled the request,
		 * refetch the pointer under the lock.
		 */
		FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
			  sc->request->tag, sc, 0, 0, 0,
			  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		CMD_SP(sc) = NULL;
		CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
		spin_unlock_irqrestore(io_lock, flags);
		if (io_req) {
			fnic_release_ioreq_buf(fnic, io_req, sc);
			mempool_free(io_req, fnic->io_req_pool);
		}
		atomic_dec(&fnic->in_flight);
		/* acquire host lock before returning to SCSI */
		spin_lock(lp->host->host_lock);
		return ret;
	} else {
		atomic64_inc(&fnic_stats->io_stats.active_ios);
		atomic64_inc(&fnic_stats->io_stats.num_ios);
		if (atomic64_read(&fnic_stats->io_stats.active_ios) >
			  atomic64_read(&fnic_stats->io_stats.max_active_ios))
			atomic64_set(&fnic_stats->io_stats.max_active_ios,
			     atomic64_read(&fnic_stats->io_stats.active_ios));

		/* REVISIT: Use per IO lock in the final code */
		CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
	}
out:
	cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
			(u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
			(u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
			sc->cmnd[5]);

	FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
		  sc->request->tag, sc, io_req,
		  sg_count, cmd_trace,
		  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
	/* the io lock is held here only if the IO was actually issued */
	if (io_lock_acquired)
		spin_unlock_irqrestore(io_lock, flags);

	atomic_dec(&fnic->in_flight);
	/* acquire host lock before returning to SCSI */
	spin_lock(lp->host->host_lock);
	return ret;
}

DEF_SCSI_QCMD(fnic_queuecommand)
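
/*
 * DEF_SCSI_QCMD() (a SCSI midlayer helper macro) generates the actual
 * fnic_queuecommand() entry point: a wrapper that takes the host lock
 * with interrupts disabled and then calls fnic_queuecommand_lck()
 * above, which is why the _lck variant is documented as being entered
 * with the host_lock held.
 */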

/*
 * fnic_fcpio_fw_reset_cmpl_handler
 * Routine to handle fw reset completion
 */
static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
					    struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	int ret = 0;
	unsigned long flags;
	struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

	atomic64_inc(&reset_stats->fw_reset_completions);

	/* Clean up all outstanding io requests */
	fnic_cleanup_io(fnic, SCSI_NO_TAG);

	atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
	atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	/* fnic should be in FC_TRANS_ETH_MODE */
	if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
		/* Check status of reset completion */
		if (!hdr_status) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "reset cmpl success\n");
			/* Ready to send flogi out */
			fnic->state = FNIC_IN_ETH_MODE;
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG,
				      fnic->lport->host,
				      "fnic fw_reset: failed %s\n",
				      fnic_fcpio_status_to_str(hdr_status));

			/*
			 * Unable to change to eth mode, cannot send out flogi
			 * Change state to fc mode, so that subsequent Flogi
			 * requests from libFC will cause more attempts to
			 * reset the firmware. Free the cached flogi
			 */
			fnic->state = FNIC_IN_FC_MODE;
			atomic64_inc(&reset_stats->fw_reset_failures);
			ret = -1;
		}
	} else {
		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->lport->host,
			      "Unexpected state %s while processing"
			      " reset cmpl\n", fnic_state_to_str(fnic->state));
		atomic64_inc(&reset_stats->fw_reset_failures);
		ret = -1;
	}

	/* Thread removing device blocks till firmware reset is complete */
	if (fnic->remove_wait)
		complete(fnic->remove_wait);

	/*
	 * If fnic is being removed, or fw reset failed
	 * free the flogi frame. Else, send it out
	 */
	if (fnic->remove_wait || ret) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		skb_queue_purge(&fnic->tx_queue);
		goto reset_cmpl_handler_end;
	}

	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	fnic_flush_tx(fnic);

 reset_cmpl_handler_end:
	fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);

	return ret;
}

/*
 * fnic_fcpio_flogi_reg_cmpl_handler
 * Routine to handle flogi register completion
 */
static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
					     struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	int ret = 0;
	unsigned long flags;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

	/* Update fnic state based on status of flogi reg completion */
	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {

		/* Check flogi registration completion status */
		if (!hdr_status) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "flogi reg succeeded\n");
			fnic->state = FNIC_IN_FC_MODE;
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG,
				      fnic->lport->host,
				      "fnic flogi reg: failed %s\n",
				      fnic_fcpio_status_to_str(hdr_status));
			fnic->state = FNIC_IN_ETH_MODE;
			ret = -1;
		}
	} else {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Unexpected fnic state %s while"
			      " processing flogi reg completion\n",
			      fnic_state_to_str(fnic->state));
		ret = -1;
	}

	if (!ret) {
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			goto reg_cmpl_handler_end;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fnic_flush_tx(fnic);
		queue_work(fnic_event_queue, &fnic->frame_work);
	} else {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	}

reg_cmpl_handler_end:
	return ret;
}

static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
					u16 request_out)
{
	if (wq->to_clean_index <= wq->to_use_index) {
		/* out of range, stale request_out index */
		if (request_out < wq->to_clean_index ||
		    request_out >= wq->to_use_index)
			return 0;
	} else {
		/* out of range, stale request_out index */
		if (request_out < wq->to_clean_index &&
		    request_out >= wq->to_use_index)
			return 0;
	}
	/* request_out index is in range */
	return 1;
}
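
/*
 * A sketch of the range check above for a wrapped ring: with
 * to_clean_index == 6 and to_use_index == 2 the outstanding region is
 * { 6, 7, 0, 1 }, so a request_out of 7 or 0 is accepted, while 3
 * (already cleaned) or 2 (not yet posted) is rejected as stale.
 */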


/*
 * Mark that ack received and store the Ack index. If there are multiple
 * acks received before Tx thread cleans it up, the latest value will be
 * used which is correct behavior. This state should be in the copy Wq
 * instead of in the fnic
 */
static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
					  unsigned int cq_index,
					  struct fcpio_fw_req *desc)
{
	struct vnic_wq_copy *wq;
	u16 request_out = desc->u.ack.request_out;
	unsigned long flags;
	u64 *ox_id_tag = (u64 *)(void *)desc;

	/* mark the ack state */
	wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
	if (is_ack_index_in_range(wq, request_out)) {
		fnic->fw_ack_index[0] = request_out;
		fnic->fw_ack_recd[0] = 1;
	} else
		atomic64_inc(
			&fnic->fnic_stats.misc_stats.ack_index_out_of_range);

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	FNIC_TRACE(fnic_fcpio_ack_handler,
		  fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
		  ox_id_tag[4], ox_id_tag[5]);
}

/*
 * fnic_fcpio_icmnd_cmpl_handler
 * Routine to handle icmnd completions
 */
static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
					 struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	u32 id;
	u64 xfer_len = 0;
	struct fcpio_icmnd_cmpl *icmnd_cmpl;
	struct fnic_io_req *io_req;
	struct scsi_cmnd *sc;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned long flags;
	spinlock_t *io_lock;
	u64 cmd_trace;
	unsigned long start_time;

	/* Decode the cmpl description to get the io_req id */
	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
	fcpio_tag_id_dec(&tag, &id);
	icmnd_cmpl = &desc->u.icmnd_cmpl;

	if (id >= fnic->fnic_max_tag_id) {
		shost_printk(KERN_ERR, fnic->lport->host,
			"Tag out of range tag %x hdr status = %s\n",
			     id, fnic_fcpio_status_to_str(hdr_status));
		return;
	}

	sc = scsi_host_find_tag(fnic->lport->host, id);
	WARN_ON_ONCE(!sc);
	if (!sc) {
		atomic64_inc(&fnic_stats->io_stats.sc_null);
		shost_printk(KERN_ERR, fnic->lport->host,
			  "icmnd_cmpl sc is null - "
			  "hdr status = %s tag = 0x%x desc = 0x%p\n",
			  fnic_fcpio_status_to_str(hdr_status), id, desc);
		FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
			  fnic->lport->host->host_no, id,
			  ((u64)icmnd_cmpl->_resvd0[1] << 16 |
			  (u64)icmnd_cmpl->_resvd0[0]),
			  ((u64)hdr_status << 16 |
			  (u64)icmnd_cmpl->scsi_status << 8 |
			  (u64)icmnd_cmpl->flags), desc,
			  (u64)icmnd_cmpl->residual, 0);
		return;
	}

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	WARN_ON_ONCE(!io_req);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
		CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
		spin_unlock_irqrestore(io_lock, flags);
		shost_printk(KERN_ERR, fnic->lport->host,
			  "icmnd_cmpl io_req is null - "
			  "hdr status = %s tag = 0x%x sc 0x%p\n",
			  fnic_fcpio_status_to_str(hdr_status), id, sc);
		return;
	}
	start_time = io_req->start_time;

	/* firmware completed the io */
	io_req->io_completed = 1;

	/*
	 * If SCSI-ML has already issued an abort on this command,
	 * ignore completion of the IO. The abts path will clean it up
	 */
	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
		switch (hdr_status) {
		case FCPIO_SUCCESS:
			CMD_FLAGS(sc) |= FNIC_IO_DONE;
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				  "icmnd_cmpl ABTS pending hdr status = %s "
				  "sc  0x%p scsi_status %x  residual %d\n",
				  fnic_fcpio_status_to_str(hdr_status), sc,
				  icmnd_cmpl->scsi_status,
				  icmnd_cmpl->residual);
			break;
		case FCPIO_ABORTED:
			CMD_FLAGS(sc) |= FNIC_IO_ABORTED;
			break;
		default:
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
					  "icmnd_cmpl abts pending "
					  "hdr status = %s tag = 0x%x sc = 0x%p\n",
					  fnic_fcpio_status_to_str(hdr_status),
					  id, sc);
			break;
		}
		return;
	}

	/* Mark the IO as complete */
	CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;

	icmnd_cmpl = &desc->u.icmnd_cmpl;

	switch (hdr_status) {
	case FCPIO_SUCCESS:
		sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
		xfer_len = scsi_bufflen(sc);
		scsi_set_resid(sc, icmnd_cmpl->residual);

		if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
			xfer_len -= icmnd_cmpl->residual;

		if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
			atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
		break;

	case FCPIO_TIMEOUT:          /* request was timed out */
		atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
		sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_ABORTED:          /* request was aborted */
		atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
		atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
		scsi_set_resid(sc, icmnd_cmpl->residual);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_OUT_OF_RESOURCE:  /* out of resources to complete request */
		atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
		sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_IO_NOT_FOUND:     /* requested I/O was not found */
		atomic64_inc(&fnic_stats->io_stats.io_not_found);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_SGL_INVALID:      /* request was aborted due to sgl error */
		atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_FW_ERR:           /* request was terminated due fw error */
		atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_MSS_INVALID:      /* request was aborted due to mss error */
		atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_INVALID_HEADER:   /* header contains invalid data */
	case FCPIO_INVALID_PARAM:    /* some parameter in request invalid */
	case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
	default:
		shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
			     fnic_fcpio_status_to_str(hdr_status));
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;
	}

	if (hdr_status != FCPIO_SUCCESS) {
		atomic64_inc(&fnic_stats->io_stats.io_failures);
		shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
			     fnic_fcpio_status_to_str(hdr_status));
	}
	/* Break link with the SCSI command */
	CMD_SP(sc) = NULL;
	CMD_FLAGS(sc) |= FNIC_IO_DONE;

	spin_unlock_irqrestore(io_lock, flags);

	fnic_release_ioreq_buf(fnic, io_req, sc);

	mempool_free(io_req, fnic->io_req_pool);

	cmd_trace = ((u64)hdr_status << 56) |
		  (u64)icmnd_cmpl->scsi_status << 48 |
		  (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
		  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		  (u64)sc->cmnd[4] << 8 | sc->cmnd[5];

	FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
		  sc->device->host->host_no, id, sc,
		  ((u64)icmnd_cmpl->_resvd0[1] << 56 |
		  (u64)icmnd_cmpl->_resvd0[0] << 48 |
		  jiffies_to_msecs(jiffies - start_time)),
		  desc, cmd_trace,
		  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		fnic->lport->host_stats.fcp_input_requests++;
		fnic->fcp_input_bytes += xfer_len;
	} else if (sc->sc_data_direction == DMA_TO_DEVICE) {
		fnic->lport->host_stats.fcp_output_requests++;
		fnic->fcp_output_bytes += xfer_len;
	} else
		fnic->lport->host_stats.fcp_control_requests++;

	atomic64_dec(&fnic_stats->io_stats.active_ios);
	if (atomic64_read(&fnic->io_cmpl_skip))
		atomic64_dec(&fnic->io_cmpl_skip);
	else
		atomic64_inc(&fnic_stats->io_stats.io_completions);

	/* Call SCSI completion function to complete the IO */
	if (sc->scsi_done)
		sc->scsi_done(sc);
}

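/*
 * A note on the tag encoding decoded below (the bit definitions are
 * assumed to come from fnic.h): the low bits of an itmf tag carry the
 * SCSI command tag (extracted with FNIC_TAG_MASK), while the high flag
 * bits FNIC_TAG_ABORT and FNIC_TAG_DEV_RST mark the completion as an
 * abort, a device reset, or, with both bits set, the abort of a device
 * reset.
 */
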
/* fnic_fcpio_itmf_cmpl_handler
 * Routine to handle itmf completions
 */
static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
					struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	u32 id;
	struct scsi_cmnd *sc;
	struct fnic_io_req *io_req;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	unsigned long flags;
	spinlock_t *io_lock;
	unsigned long start_time;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
	fcpio_tag_id_dec(&tag, &id);

	if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
		shost_printk(KERN_ERR, fnic->lport->host,
		"Tag out of range tag %x hdr status = %s\n",
		id, fnic_fcpio_status_to_str(hdr_status));
		return;
	}

	sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
	WARN_ON_ONCE(!sc);
	if (!sc) {
		atomic64_inc(&fnic_stats->io_stats.sc_null);
		shost_printk(KERN_ERR, fnic->lport->host,
			  "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
			  fnic_fcpio_status_to_str(hdr_status), id);
		return;
	}
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	WARN_ON_ONCE(!io_req);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
		shost_printk(KERN_ERR, fnic->lport->host,
			  "itmf_cmpl io_req is null - "
			  "hdr status = %s tag = 0x%x sc 0x%p\n",
			  fnic_fcpio_status_to_str(hdr_status), id, sc);
		return;
	}
	start_time = io_req->start_time;

	if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
		/* Abort and terminate completion of device reset req */
		/* REVISIT : Add asserts about various flags */
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "dev reset abts cmpl recd. id %x status %s\n",
			      id, fnic_fcpio_status_to_str(hdr_status));
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
		CMD_ABTS_STATUS(sc) = hdr_status;
		CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
		if (io_req->abts_done)
			complete(io_req->abts_done);
		spin_unlock_irqrestore(io_lock, flags);
	} else if (id & FNIC_TAG_ABORT) {
		/* Completion of abort cmd */
		switch (hdr_status) {
		case FCPIO_SUCCESS:
			break;
		case FCPIO_TIMEOUT:
			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_fw_timeouts);
			else
				atomic64_inc(
					&term_stats->terminate_fw_timeouts);
			break;
		case FCPIO_IO_NOT_FOUND:
			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_io_not_found);
			else
				atomic64_inc(
					&term_stats->terminate_io_not_found);
			break;
		default:
			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_failures);
			else
				atomic64_inc(
					&term_stats->terminate_failures);
			break;
		}
		if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
			/* This is a late completion. Ignore it */
			spin_unlock_irqrestore(io_lock, flags);
			return;
		}
		CMD_ABTS_STATUS(sc) = hdr_status;
		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;

		atomic64_dec(&fnic_stats->io_stats.active_ios);
		if (atomic64_read(&fnic->io_cmpl_skip))
			atomic64_dec(&fnic->io_cmpl_skip);
		else
			atomic64_inc(&fnic_stats->io_stats.io_completions);

		if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
			atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);

		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "abts cmpl recd. id %d status %s\n",
			      (int)(id & FNIC_TAG_MASK),
			      fnic_fcpio_status_to_str(hdr_status));

		/*
		 * If scsi_eh thread is blocked waiting for abts to complete,
		 * signal completion to it. IO will be cleaned in the thread
		 * else clean it in this context
		 */
		if (io_req->abts_done) {
			complete(io_req->abts_done);
			spin_unlock_irqrestore(io_lock, flags);
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "abts cmpl, completing IO\n");
			CMD_SP(sc) = NULL;
			sc->result = (DID_ERROR << 16);

			spin_unlock_irqrestore(io_lock, flags);

			fnic_release_ioreq_buf(fnic, io_req, sc);
			mempool_free(io_req, fnic->io_req_pool);
			if (sc->scsi_done) {
				FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
					sc->device->host->host_no, id,
					sc,
					jiffies_to_msecs(jiffies - start_time),
					desc,
					(((u64)hdr_status << 40) |
					(u64)sc->cmnd[0] << 32 |
					(u64)sc->cmnd[2] << 24 |
					(u64)sc->cmnd[3] << 16 |
					(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
					(((u64)CMD_FLAGS(sc) << 32) |
					CMD_STATE(sc)));
				sc->scsi_done(sc);
			}
		}

	} else if (id & FNIC_TAG_DEV_RST) {
		/* Completion of device reset */
		CMD_LR_STATUS(sc) = hdr_status;
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
				  sc->device->host->host_no, id, sc,
				  jiffies_to_msecs(jiffies - start_time),
				  desc, 0,
				  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				"Terminate pending "
				"dev reset cmpl recd. id %d status %s\n",
				(int)(id & FNIC_TAG_MASK),
				fnic_fcpio_status_to_str(hdr_status));
			return;
		}
		if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
			/* Need to wait for terminate completion */
			spin_unlock_irqrestore(io_lock, flags);
			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
				  sc->device->host->host_no, id, sc,
				  jiffies_to_msecs(jiffies - start_time),
				  desc, 0,
				  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				"dev reset cmpl recd after time out. "
				"id %d status %s\n",
				(int)(id & FNIC_TAG_MASK),
				fnic_fcpio_status_to_str(hdr_status));
			return;
		}
		CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
		CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "dev reset cmpl recd. id %d status %s\n",
			      (int)(id & FNIC_TAG_MASK),
			      fnic_fcpio_status_to_str(hdr_status));
		if (io_req->dr_done)
			complete(io_req->dr_done);
		spin_unlock_irqrestore(io_lock, flags);

	} else {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unexpected itmf io state %s tag %x\n",
			     fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
		spin_unlock_irqrestore(io_lock, flags);
	}

}

/*
 * fnic_fcpio_cmpl_handler
 * Routine to service the cq for wq_copy
 */
static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
				   unsigned int cq_index,
				   struct fcpio_fw_req *desc)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	switch (desc->hdr.type) {
	case FCPIO_ICMND_CMPL: /* fw completed a command */
	case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
	case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
	case FCPIO_RESET_CMPL: /* fw completed reset */
		atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
		break;
	default:
		break;
	}

	switch (desc->hdr.type) {
	case FCPIO_ACK: /* fw copied copy wq desc to its queue */
		fnic_fcpio_ack_handler(fnic, cq_index, desc);
		break;

	case FCPIO_ICMND_CMPL: /* fw completed a command */
		fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
		break;

	case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
		fnic_fcpio_itmf_cmpl_handler(fnic, desc);
		break;

	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
	case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
		fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
		break;

	case FCPIO_RESET_CMPL: /* fw completed reset */
		fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
		break;

	default:
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "firmware completion type %d\n",
			      desc->hdr.type);
		break;
	}

	return 0;
}

/*
 * fnic_wq_copy_cmpl_handler
 * Routine to process wq copy
 */
int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i, cq_index;
	unsigned int cur_work_done;

	for (i = 0; i < fnic->wq_copy_count; i++) {
		cq_index = i + fnic->raw_wq_count + fnic->rq_count;
		cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
						     fnic_fcpio_cmpl_handler,
						     copy_work_to_do);
		wq_work_done += cur_work_done;
	}
	return wq_work_done;
}
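
/*
 * A sketch of the completion queue layout the indexing above assumes:
 * the cq array is ordered [raw WQs][RQs][copy WQs], so copy WQ i is
 * serviced through cq[i + raw_wq_count + rq_count]. With, say,
 * raw_wq_count == 1 and rq_count == 1, copy WQ 0 pairs with cq[2];
 * fnic_fcpio_ack_handler() inverts the same mapping to recover the WQ
 * from a cq index.
 */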

static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
{
	int i;
	struct fnic_io_req *io_req;
	unsigned long flags = 0;
	struct scsi_cmnd *sc;
	spinlock_t *io_lock;
	unsigned long start_time = 0;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;

	for (i = 0; i < fnic->fnic_max_tag_id; i++) {
		if (i == exclude_id)
			continue;

		io_lock = fnic_io_lock_tag(fnic, i);
		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(fnic->lport->host, i);
		if (!sc) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
			!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
			/*
			 * We will be here only when FW completes reset
			 * without sending completions for outstanding ios.
			 */
			CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
			if (io_req && io_req->dr_done)
				complete(io_req->dr_done);
			else if (io_req && io_req->abts_done)
				complete(io_req->abts_done);
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		} else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		if (!io_req) {
			spin_unlock_irqrestore(io_lock, flags);
			goto cleanup_scsi_cmd;
		}

		CMD_SP(sc) = NULL;

		spin_unlock_irqrestore(io_lock, flags);

		/*
		 * If there is a scsi_cmnd associated with this io_req, then
		 * free the corresponding state
		 */
		start_time = io_req->start_time;
		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);

cleanup_scsi_cmd:
		sc->result = DID_TRANSPORT_DISRUPTED << 16;
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "%s: sc duration = %lu DID_TRANSPORT_DISRUPTED\n",
			      __func__, (jiffies - start_time));

		if (atomic64_read(&fnic->io_cmpl_skip))
			atomic64_dec(&fnic->io_cmpl_skip);
		else
			atomic64_inc(&fnic_stats->io_stats.io_completions);

		/* Complete the command to SCSI */
		if (sc->scsi_done) {
			FNIC_TRACE(fnic_cleanup_io,
				  sc->device->host->host_no, i, sc,
				  jiffies_to_msecs(jiffies - start_time),
				  0, ((u64)sc->cmnd[0] << 32 |
				  (u64)sc->cmnd[2] << 24 |
				  (u64)sc->cmnd[3] << 16 |
				  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
				  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

			sc->scsi_done(sc);
		}
	}
}

void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
				  struct fcpio_host_req *desc)
{
	u32 id;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);
	struct fnic_io_req *io_req;
	struct scsi_cmnd *sc;
	unsigned long flags;
	spinlock_t *io_lock;
	unsigned long start_time = 0;

	/* get the tag reference */
	fcpio_tag_id_dec(&desc->hdr.tag, &id);
	id &= FNIC_TAG_MASK;

	if (id >= fnic->fnic_max_tag_id)
		return;

	sc = scsi_host_find_tag(fnic->lport->host, id);
	if (!sc)
		return;

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);

	/* Get the IO context which this desc refers to */
	io_req = (struct fnic_io_req *)CMD_SP(sc);

	/* fnic interrupts are turned off by now */

	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		goto wq_copy_cleanup_scsi_cmd;
	}

	CMD_SP(sc) = NULL;

	spin_unlock_irqrestore(io_lock, flags);

	start_time = io_req->start_time;
	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);

wq_copy_cleanup_scsi_cmd:
	sc->result = DID_NO_CONNECT << 16;
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
		      " DID_NO_CONNECT\n");

	if (sc->scsi_done) {
		FNIC_TRACE(fnic_wq_copy_cleanup_handler,
			  sc->device->host->host_no, id, sc,
			  jiffies_to_msecs(jiffies - start_time),
			  0, ((u64)sc->cmnd[0] << 32 |
			  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
			  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
			  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

		sc->scsi_done(sc);
	}
}

static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
					  u32 task_req, u8 *fc_lun,
					  struct fnic_io_req *io_req)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	struct Scsi_Host *host = fnic->lport->host;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	unsigned long flags;

	spin_lock_irqsave(host->host_lock, flags);
	if (unlikely(fnic_chk_state_flags_locked(fnic,
						FNIC_FLAGS_IO_BLOCKED))) {
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	} else
		atomic_inc(&fnic->in_flight);
	spin_unlock_irqrestore(host->host_lock, flags);

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
		atomic_dec(&fnic->in_flight);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_queue_abort_io_req: failure: no descriptors\n");
		atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
		return 1;
	}
	fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
				     0, task_req, tag, fc_lun, io_req->port_id,
				     fnic->config.ra_tov, fnic->config.ed_tov);

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	atomic_dec(&fnic->in_flight);

	return 0;
}

static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{
	int tag;
	int abt_tag;
	int term_cnt = 0;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	struct scsi_cmnd *sc;
	struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
	struct scsi_lun fc_lun;
	enum fnic_ioreq_state old_ioreq_state;

	FNIC_SCSI_DBG(KERN_DEBUG,
		      fnic->lport->host,
		      "fnic_rport_exch_reset called portid 0x%06x\n",
		      port_id);

	if (fnic->in_remove)
		return;

	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
		abt_tag = tag;
		io_lock = fnic_io_lock_tag(fnic, tag);
		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		if (!sc) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		io_req = (struct fnic_io_req *)CMD_SP(sc);

		if (!io_req || io_req->port_id != port_id) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
			(!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
			sc);
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		/*
		 * Found IO that is still pending with firmware and
		 * belongs to rport that went away
		 */
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		if (io_req->abts_done) {
			shost_printk(KERN_ERR, fnic->lport->host,
			"fnic_rport_exch_reset: io_req->abts_done is set "
			"state is %s\n",
			fnic_ioreq_state_to_str(CMD_STATE(sc)));
		}

		if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
			shost_printk(KERN_ERR, fnic->lport->host,
				  "rport_exch_reset "
				  "IO not yet issued %p tag 0x%x flags "
				  "%x state %d\n",
				  sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
		}
		old_ioreq_state = CMD_STATE(sc);
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
			atomic64_inc(&reset_stats->device_reset_terminates);
			abt_tag = (tag | FNIC_TAG_DEV_RST);
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_rport_exch_reset dev rst sc 0x%p\n",
			sc);
		}

		BUG_ON(io_req->abts_done);

		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "fnic_rport_reset_exch: Issuing abts\n");

		spin_unlock_irqrestore(io_lock, flags);

		/* Now queue the abort command to firmware */
		int_to_scsilun(sc->device->lun, &fc_lun);

		if (fnic_queue_abort_io_req(fnic, abt_tag,
					    FCPIO_ITMF_ABT_TASK_TERM,
					    fc_lun.scsi_lun, io_req)) {
			/*
			 * Revert the cmd state back to old state, if
			 * it hasn't changed in between. This cmd will get
			 * aborted later by scsi_eh, or cleaned up during
			 * lun reset
			 */
			spin_lock_irqsave(io_lock, flags);
			if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
				CMD_STATE(sc) = old_ioreq_state;
			spin_unlock_irqrestore(io_lock, flags);
		} else {
			spin_lock_irqsave(io_lock, flags);
			if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
				CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
			else
				CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
			spin_unlock_irqrestore(io_lock, flags);
			atomic64_inc(&term_stats->terminates);
			term_cnt++;
		}
	}
	if (term_cnt > atomic64_read(&term_stats->max_terminates))
		atomic64_set(&term_stats->max_terminates, term_cnt);

}

void fnic_terminate_rport_io(struct fc_rport *rport)
{
	int tag;
	int abt_tag;
	int term_cnt = 0;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	struct scsi_cmnd *sc;
	struct scsi_lun fc_lun;
	struct fc_rport_libfc_priv *rdata;
	struct fc_lport *lport;
	struct fnic *fnic;
	struct fc_rport *cmd_rport;
	struct reset_stats *reset_stats;
	struct terminate_stats *term_stats;
	enum fnic_ioreq_state old_ioreq_state;

	if (!rport) {
		printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
		return;
	}
	rdata = rport->dd_data;

	if (!rdata) {
		printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
		return;
	}
	lport = rdata->local_port;

	if (!lport) {
		printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
		return;
	}
	fnic = lport_priv(lport);
	FNIC_SCSI_DBG(KERN_DEBUG,
		      fnic->lport->host, "fnic_terminate_rport_io called"
		      " wwpn 0x%llx, wwnn 0x%llx, rport 0x%p, portid 0x%06x\n",
1627		      rport->port_name, rport->node_name, rport,
1628		      rport->port_id);
1629
1630	if (fnic->in_remove)
1631		return;
1632
1633	reset_stats = &fnic->fnic_stats.reset_stats;
1634	term_stats = &fnic->fnic_stats.term_stats;
1635
1636	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
1637		abt_tag = tag;
1638		io_lock = fnic_io_lock_tag(fnic, tag);
1639		spin_lock_irqsave(io_lock, flags);
1640		sc = scsi_host_find_tag(fnic->lport->host, tag);
1641		if (!sc) {
1642			spin_unlock_irqrestore(io_lock, flags);
1643			continue;
1644		}
1645
1646		cmd_rport = starget_to_rport(scsi_target(sc->device));
1647		if (rport != cmd_rport) {
1648			spin_unlock_irqrestore(io_lock, flags);
1649			continue;
1650		}
1651
1652		io_req = (struct fnic_io_req *)CMD_SP(sc);
1653
1654		if (!io_req || rport != cmd_rport) {
1655			spin_unlock_irqrestore(io_lock, flags);
1656			continue;
1657		}
1658
1659		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
1660			(!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
1661			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1662			"fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
1663			sc);
1664			spin_unlock_irqrestore(io_lock, flags);
1665			continue;
1666		}
1667		/*
1668		 * Found IO that is still pending with firmware and
1669		 * belongs to rport that went away
1670		 */
1671		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1672			spin_unlock_irqrestore(io_lock, flags);
1673			continue;
1674		}
1675		if (io_req->abts_done) {
1676			shost_printk(KERN_ERR, fnic->lport->host,
1677			"fnic_terminate_rport_io: io_req->abts_done is set "
1678			"state is %s\n",
1679			fnic_ioreq_state_to_str(CMD_STATE(sc)));
1680		}
1681		if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
1682			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
1683				  "fnic_terminate_rport_io "
1684				  "IO not yet issued %p tag 0x%x flags "
1685				  "%x state %d\n",
1686				  sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
1687		}
1688		old_ioreq_state = CMD_STATE(sc);
1689		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1690		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1691		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
1692			atomic64_inc(&reset_stats->device_reset_terminates);
1693			abt_tag = (tag | FNIC_TAG_DEV_RST);
1694			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1695			"fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
1696		}

		BUG_ON(io_req->abts_done);

		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->lport->host,
			      "fnic_terminate_rport_io: Issuing abts\n");

		spin_unlock_irqrestore(io_lock, flags);

		/* Now queue the abort command to firmware */
		int_to_scsilun(sc->device->lun, &fc_lun);

		if (fnic_queue_abort_io_req(fnic, abt_tag,
					    FCPIO_ITMF_ABT_TASK_TERM,
					    fc_lun.scsi_lun, io_req)) {
			/*
			 * Revert the cmd state back to old state, if
			 * it hasn't changed in between. This cmd will get
			 * aborted later by scsi_eh, or cleaned up during
			 * lun reset
			 */
			spin_lock_irqsave(io_lock, flags);
			if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
				CMD_STATE(sc) = old_ioreq_state;
			spin_unlock_irqrestore(io_lock, flags);
		} else {
			spin_lock_irqsave(io_lock, flags);
			if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
				CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
			else
				CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
			spin_unlock_irqrestore(io_lock, flags);
			atomic64_inc(&term_stats->terminates);
			term_cnt++;
		}
	}
	if (term_cnt > atomic64_read(&term_stats->max_terminates))
		atomic64_set(&term_stats->max_terminates, term_cnt);
}

/*
 * This function is exported to SCSI for sending abort cmnds.
 * A SCSI IO is represented by an io_req in the driver.
 * The io_req is linked to the SCSI Cmd, and thus forms the link to the
 * ULP's IO.
 */
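/*
 * For reference, the error-handling entry points in this file are wired
 * into the SCSI midlayer through the host template in fnic_main.c,
 * roughly as in this sketch (not verbatim):
 *
 *	static struct scsi_host_template fnic_host_template = {
 *		...
 *		.eh_abort_handler = fnic_abort_cmd,
 *		.eh_device_reset_handler = fnic_device_reset,
 *		.eh_host_reset_handler = fnic_host_reset,
 *		...
 *	};
 */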
int fnic_abort_cmd(struct scsi_cmnd *sc)
{
	struct fc_lport *lp;
	struct fnic *fnic;
	struct fnic_io_req *io_req = NULL;
	struct fc_rport *rport;
	spinlock_t *io_lock;
	unsigned long flags;
	unsigned long start_time = 0;
	int ret = SUCCESS;
	u32 task_req = 0;
	struct scsi_lun fc_lun;
	struct fnic_stats *fnic_stats;
	struct abort_stats *abts_stats;
	struct terminate_stats *term_stats;
	enum fnic_ioreq_state old_ioreq_state;
	int tag;
	DECLARE_COMPLETION_ONSTACK(tm_done);

	/* Wait for rport to unblock */
	fc_block_scsi_eh(sc);

	/* Get local-port, check ready and link up */
	lp = shost_priv(sc->device->host);

	fnic = lport_priv(lp);
	fnic_stats = &fnic->fnic_stats;
	abts_stats = &fnic->fnic_stats.abts_stats;
	term_stats = &fnic->fnic_stats.term_stats;

	rport = starget_to_rport(scsi_target(sc->device));
	tag = sc->request->tag;
	FNIC_SCSI_DBG(KERN_DEBUG,
		fnic->lport->host,
		"Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n",
		rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));

	CMD_FLAGS(sc) = FNIC_NO_FLAGS;

	if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}

	/*
	 * Avoid a race between SCSI issuing the abort and the device
	 * completing the command.
	 *
	 * If the command is already completed by the fw cmpl code,
	 * we just return SUCCESS from here. This means that the abort
	 * succeeded. In the SCSI ML, since the timeout for the command
	 * has already fired, the completion won't actually complete the
	 * command and it will be treated as an aborted command.
	 *
	 * CMD_SP will not be cleared except while holding io_req_lock.
	 */
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		goto fnic_abort_cmd_end;
	}

	io_req->abts_done = &tm_done;

	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(io_lock, flags);
		goto wait_pending;
	}
	/*
	 * Command is still pending, need to abort it.
	 * If the firmware completes the command after this point,
	 * the completion will not be passed up to the mid-layer,
	 * since the abort has already started.
	 */
	old_ioreq_state = CMD_STATE(sc);
	CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
	CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;

	spin_unlock_irqrestore(io_lock, flags);

	/*
	 * Check readiness of the remote port. If the path to remote
	 * port is up, then send abts to the remote port to terminate
	 * the IO. Else, just locally terminate the IO in the firmware
	 */
	if (fc_remote_port_chkready(rport) == 0) {
		task_req = FCPIO_ITMF_ABT_TASK;
	} else {
		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		task_req = FCPIO_ITMF_ABT_TASK_TERM;
	}

	/* Now queue the abort command to firmware */
	int_to_scsilun(sc->device->lun, &fc_lun);

	if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
				    fc_lun.scsi_lun, io_req)) {
		spin_lock_irqsave(io_lock, flags);
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
			CMD_STATE(sc) = old_ioreq_state;
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if (io_req)
			io_req->abts_done = NULL;
		spin_unlock_irqrestore(io_lock, flags);
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}
	if (task_req == FCPIO_ITMF_ABT_TASK) {
		CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
		atomic64_inc(&fnic_stats->abts_stats.aborts);
	} else {
		CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
		atomic64_inc(&fnic_stats->term_stats.terminates);
	}

	/*
	 * We queued an abort IO, wait for its completion.
	 * Once the firmware completes the abort command, it will
	 * wake up this thread.
	 */
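	/*
	 * The bound below, 2 * R_A_TOV + E_D_TOV, mirrors how long FC
	 * allows the ABTS exchange to remain outstanding before it can
	 * be written off as lost; both values come from the
	 * firmware-provided config and, as elsewhere in this driver,
	 * are assumed to be in milliseconds.
	 */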
 wait_pending:
	wait_for_completion_timeout(&tm_done,
				    msecs_to_jiffies
				    (2 * fnic->config.ra_tov +
				     fnic->config.ed_tov));

	/* Check the abort status */
	spin_lock_irqsave(io_lock, flags);

	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}
	io_req->abts_done = NULL;

	/* fw did not complete abort, timed out */
	if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
		spin_unlock_irqrestore(io_lock, flags);
		if (task_req == FCPIO_ITMF_ABT_TASK)
			atomic64_inc(&abts_stats->abort_drv_timeouts);
		else
			atomic64_inc(&term_stats->terminate_drv_timeouts);
		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}

	/* IO out of order */
	if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
		spin_unlock_irqrestore(io_lock, flags);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"Issuing Host reset due to out of order IO\n");

		if (fnic_host_reset(sc) == FAILED) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				"fnic_host_reset failed.\n");
		}
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}

	CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;

	/*
	 * firmware completed the abort, check the status,
	 * free the io_req irrespective of failure or success
	 */
	if (CMD_ABTS_STATUS(sc) != FCPIO_SUCCESS)
		ret = FAILED;

	CMD_SP(sc) = NULL;

	spin_unlock_irqrestore(io_lock, flags);

	start_time = io_req->start_time;
	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);

fnic_abort_cmd_end:
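	/*
	 * The u64 built from sc->cmnd[] below packs selected CDB bytes
	 * for the trace buffer: the opcode (cmnd[0]) in bits 63:32,
	 * then cmnd[2..5] in descending byte order.
	 */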
	FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
		  sc->request->tag, sc,
		  jiffies_to_msecs(jiffies - start_time),
		  0, ((u64)sc->cmnd[0] << 32 |
		  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
		  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Returning from abort cmd type %x %s\n", task_req,
		      (ret == SUCCESS) ?
		      "SUCCESS" : "FAILED");
	return ret;
}

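/*
 * fnic_queue_dr_io_req() - post a LUN reset (FCPIO_ITMF_LUN_RESET) to
 * the firmware on copy WQ 0.  The fnic->in_flight count taken below
 * keeps the I/O path pinned open so a concurrent firmware reset cannot
 * tear the WQ down while the descriptor is being posted (a summary of
 * the code that follows, not of any firmware documentation).
 */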
static inline int fnic_queue_dr_io_req(struct fnic *fnic,
				       struct scsi_cmnd *sc,
				       struct fnic_io_req *io_req)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	struct Scsi_Host *host = fnic->lport->host;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	struct scsi_lun fc_lun;
	int ret = 0;
	unsigned long intr_flags;

	spin_lock_irqsave(host->host_lock, intr_flags);
	if (unlikely(fnic_chk_state_flags_locked(fnic,
						FNIC_FLAGS_IO_BLOCKED))) {
		spin_unlock_irqrestore(host->host_lock, intr_flags);
		return FAILED;
	} else {
		atomic_inc(&fnic->in_flight);
	}
	spin_unlock_irqrestore(host->host_lock, intr_flags);

	spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			  "queue_dr_io_req failure - no descriptors\n");
		atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
		ret = -EAGAIN;
		goto lr_io_req_end;
	}

	/* fill in the lun info */
	int_to_scsilun(sc->device->lun, &fc_lun);

	fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
				     0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
				     fc_lun.scsi_lun, io_req->port_id,
				     fnic->config.ra_tov, fnic->config.ed_tov);

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

lr_io_req_end:
	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
	atomic_dec(&fnic->in_flight);

	return ret;
}

/*
 * Clean up any pending aborts on the lun.
 * For each outstanding IO on this lun whose abort has not been completed
 * by the fw, issue a local abort and wait for it to complete. Return 0 if
 * all commands were successfully aborted, 1 otherwise.
 */
static int fnic_clean_pending_aborts(struct fnic *fnic,
				     struct scsi_cmnd *lr_sc)
{
	int tag, abt_tag;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	int ret = 0;
	struct scsi_cmnd *sc;
	struct scsi_lun fc_lun;
	struct scsi_device *lun_dev = lr_sc->device;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	enum fnic_ioreq_state old_ioreq_state;

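	/*
	 * Note that the single on-stack completion above is reused for
	 * every terminate issued by the loop below; io_req->abts_done
	 * points at it only while that particular terminate is
	 * outstanding, and is cleared again before the next iteration.
	 */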
	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
		io_lock = fnic_io_lock_tag(fnic, tag);
		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		/*
		 * ignore this lun reset cmd or cmds that do not belong to
		 * this lun
		 */
		if (!sc || sc == lr_sc || sc->device != lun_dev) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		io_req = (struct fnic_io_req *)CMD_SP(sc);

		if (!io_req) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		/*
		 * Found IO that is still pending with firmware and
		 * belongs to the LUN that we are resetting
		 */
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Found IO in %s on lun\n",
			      fnic_ioreq_state_to_str(CMD_STATE(sc)));

		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
			(!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				"%s dev rst not pending sc 0x%p\n", __func__,
				sc);
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		if (io_req->abts_done)
			shost_printk(KERN_ERR, fnic->lport->host,
			  "%s: io_req->abts_done is set state is %s\n",
			  __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
		old_ioreq_state = CMD_STATE(sc);
		/*
		 * Any pending IO issued prior to the reset is expected to
		 * be in abts pending state; if not, set
		 * FNIC_IOREQ_ABTS_PENDING here to mark the IO as abort
		 * pending. When the IO completes, it is handed back to
		 * this function for cleanup.
		 */
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;

		BUG_ON(io_req->abts_done);

		abt_tag = tag;
		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
			abt_tag |= FNIC_TAG_DEV_RST;
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				  "%s: dev rst sc 0x%p\n", __func__, sc);
		}

		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
		io_req->abts_done = &tm_done;
		spin_unlock_irqrestore(io_lock, flags);

		/* Now queue the abort command to firmware */
		int_to_scsilun(sc->device->lun, &fc_lun);

		if (fnic_queue_abort_io_req(fnic, abt_tag,
					    FCPIO_ITMF_ABT_TASK_TERM,
					    fc_lun.scsi_lun, io_req)) {
			spin_lock_irqsave(io_lock, flags);
			io_req = (struct fnic_io_req *)CMD_SP(sc);
			if (io_req)
				io_req->abts_done = NULL;
			if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
				CMD_STATE(sc) = old_ioreq_state;
			spin_unlock_irqrestore(io_lock, flags);
			ret = 1;
			goto clean_pending_aborts_end;
		} else {
			spin_lock_irqsave(io_lock, flags);
			if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
				CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
			CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
			spin_unlock_irqrestore(io_lock, flags);
		}

		wait_for_completion_timeout(&tm_done,
					    msecs_to_jiffies
					    (fnic->config.ed_tov));

		/* Recheck cmd state to check if it is now aborted */
		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if (!io_req) {
			spin_unlock_irqrestore(io_lock, flags);
			CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
			continue;
		}

		io_req->abts_done = NULL;

		/* if abort is still pending with fw, fail */
		if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
			spin_unlock_irqrestore(io_lock, flags);
			CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
			ret = 1;
			goto clean_pending_aborts_end;
		}
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
		CMD_SP(sc) = NULL;
		spin_unlock_irqrestore(io_lock, flags);

		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);
	}

	/* give the firmware a settle window to complete the terminates */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));

	/* walk again to check if IOs are still pending in fw */
	if (fnic_is_abts_pending(fnic, lr_sc))
		ret = 1;

clean_pending_aborts_end:
	return ret;
}

/**
 * fnic_scsi_host_start_tag
 * Allocates a tag from the host's block-layer tag map and binds it to @sc.
 **/
static inline int
fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
{
	struct blk_queue_tag *bqt = fnic->lport->host->bqt;
	int tag, ret = SCSI_NO_TAG;

	if (!bqt) {
		pr_err("Tags are not supported\n");
		goto end;
	}
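	/*
	 * Standard lock-free bitmap allocation: find_next_zero_bit()
	 * nominates a free tag and the atomic test_and_set_bit() claims
	 * it; if another context claimed it first, test_and_set_bit()
	 * returns non-zero and we retry. The same pattern in isolation
	 * (hypothetical map/size names, a sketch only):
	 *
	 *	do {
	 *		bit = find_next_zero_bit(map, size, 0);
	 *		if (bit >= size)
	 *			return -ENOSPC;
	 *	} while (test_and_set_bit(bit, map));
	 */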
	do {
		tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
		if (tag >= bqt->max_depth) {
			pr_err("Tag allocation failure\n");
			goto end;
		}
	} while (test_and_set_bit(tag, bqt->tag_map));

	bqt->tag_index[tag] = sc->request;
	sc->request->tag = tag;
	sc->tag = tag;
	if (!sc->request->special)
		sc->request->special = sc;

	ret = tag;

end:
	return ret;
}

/**
 * fnic_scsi_host_end_tag
 * Frees the tag allocated by fnic_scsi_host_start_tag.
 **/
static inline void
fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
{
	struct blk_queue_tag *bqt = fnic->lport->host->bqt;
	int tag = sc->request->tag;

	if (tag == SCSI_NO_TAG)
		return;

	if (WARN_ON(!bqt || !bqt->tag_index[tag]))
		return;

	bqt->tag_index[tag] = NULL;
	clear_bit(tag, bqt->tag_map);
}

/*
 * The SCSI EH thread issues a LUN reset when one or more commands on a
 * LUN fail to get aborted. It calls the driver's eh_device_reset handler
 * with a SCSI command on the LUN.
 */
int fnic_device_reset(struct scsi_cmnd *sc)
{
	struct fc_lport *lp;
	struct fnic *fnic;
	struct fnic_io_req *io_req = NULL;
	struct fc_rport *rport;
	int status;
	int ret = FAILED;
	spinlock_t *io_lock;
	unsigned long flags;
	unsigned long start_time = 0;
	struct scsi_lun fc_lun;
	struct fnic_stats *fnic_stats;
	struct reset_stats *reset_stats;
	int tag = 0;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	int tag_gen_flag = 0;   /* to track tags allocated by fnic driver */

	/* Wait for rport to unblock */
	fc_block_scsi_eh(sc);

	/* Get local-port, check ready and link up */
	lp = shost_priv(sc->device->host);

	fnic = lport_priv(lp);
	fnic_stats = &fnic->fnic_stats;
	reset_stats = &fnic->fnic_stats.reset_stats;

	atomic64_inc(&reset_stats->device_resets);

	rport = starget_to_rport(scsi_target(sc->device));
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Device reset called FCID 0x%x, LUN 0x%llx sc 0x%p\n",
		      rport->port_id, sc->device->lun, sc);

	if (lp->state != LPORT_ST_READY || !(lp->link_up))
		goto fnic_device_reset_end;

	/* Check if remote port up */
	if (fc_remote_port_chkready(rport)) {
		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		goto fnic_device_reset_end;
	}

	CMD_FLAGS(sc) = FNIC_DEVICE_RESET;

	/* Allocate tag if not present */
	tag = sc->request->tag;
	if (unlikely(tag < 0)) {
		/*
		 * XXX(hch): currently the midlayer fakes up a struct
		 * request for the explicit reset ioctls, and those
		 * don't have a tag allocated to them.  The below
		 * code pokes into midlayer structures to paper over
		 * this design issue, but that won't work for blk-mq.
		 *
		 * Either someone who can actually test the hardware
		 * will have to come up with a similar hack for the
		 * blk-mq case, or we'll have to bite the bullet and
		 * fix the way the EH ioctls work for real, but until
		 * that happens we fail these explicit requests here.
		 */
		if (shost_use_blk_mq(sc->device->host))
			goto fnic_device_reset_end;

		tag = fnic_scsi_host_start_tag(fnic, sc);
		if (unlikely(tag == SCSI_NO_TAG))
			goto fnic_device_reset_end;
		tag_gen_flag = 1;
	}
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);

	/*
	 * If there is an io_req attached to this command, use it;
	 * otherwise allocate a new one.
	 */
	if (!io_req) {
		io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
		if (!io_req) {
			spin_unlock_irqrestore(io_lock, flags);
			goto fnic_device_reset_end;
		}
		memset(io_req, 0, sizeof(*io_req));
		io_req->port_id = rport->port_id;
		CMD_SP(sc) = (char *)io_req;
	}
	io_req->dr_done = &tm_done;
	CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
	CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
	spin_unlock_irqrestore(io_lock, flags);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);

	/*
	 * issue the device reset, if enqueue failed, clean up the ioreq
	 * and break assoc with scsi cmd
	 */
	if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if (io_req)
			io_req->dr_done = NULL;
		goto fnic_device_reset_clean;
	}
	spin_lock_irqsave(io_lock, flags);
	CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
	spin_unlock_irqrestore(io_lock, flags);

	/*
	 * Wait on the local completion for LUN reset.  The io_req may be
	 * freed while we wait since we hold no lock.
	 */
	wait_for_completion_timeout(&tm_done,
				    msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));

	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				"io_req is null tag 0x%x sc 0x%p\n", tag, sc);
		goto fnic_device_reset_end;
	}
	io_req->dr_done = NULL;

	status = CMD_LR_STATUS(sc);

	/*
	 * If lun reset not completed, bail out with failed. io_req
	 * gets cleaned up during higher levels of EH
	 */
	if (status == FCPIO_INVALID_CODE) {
		atomic64_inc(&reset_stats->device_reset_timeouts);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Device reset timed out\n");
		CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
		spin_unlock_irqrestore(io_lock, flags);
		int_to_scsilun(sc->device->lun, &fc_lun);
		/*
		 * Issue abort and terminate on device reset request.
		 * If q'ing of terminate fails, retry it after a delay.
		 */
		while (1) {
			spin_lock_irqsave(io_lock, flags);
			if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
				spin_unlock_irqrestore(io_lock, flags);
				break;
			}
			spin_unlock_irqrestore(io_lock, flags);
			if (fnic_queue_abort_io_req(fnic,
				tag | FNIC_TAG_DEV_RST,
				FCPIO_ITMF_ABT_TASK_TERM,
				fc_lun.scsi_lun, io_req)) {
				wait_for_completion_timeout(&tm_done,
				msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
			} else {
				spin_lock_irqsave(io_lock, flags);
				CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
				CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
				io_req->abts_done = &tm_done;
				spin_unlock_irqrestore(io_lock, flags);
				FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				"Abort and terminate issued on Device reset "
				"tag 0x%x sc 0x%p\n", tag, sc);
				break;
			}
		}
		while (1) {
			spin_lock_irqsave(io_lock, flags);
			if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
				spin_unlock_irqrestore(io_lock, flags);
				wait_for_completion_timeout(&tm_done,
				msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
				break;
			} else {
				io_req = (struct fnic_io_req *)CMD_SP(sc);
				io_req->abts_done = NULL;
				goto fnic_device_reset_clean;
			}
		}
	} else {
		spin_unlock_irqrestore(io_lock, flags);
	}

	/* Completed, but not successful, clean up the io_req, return fail */
	if (status != FCPIO_SUCCESS) {
		spin_lock_irqsave(io_lock, flags);
		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->lport->host,
			      "Device reset completed - failed\n");
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		goto fnic_device_reset_clean;
	}

	/*
	 * Clean up any aborts on this lun that have still not
	 * completed. If any of these fail, then LUN reset fails.
	 * clean_pending_aborts cleans all cmds on this lun except
	 * the lun reset cmd. If all cmds get cleaned, the lun reset
	 * succeeds
	 */
	if (fnic_clean_pending_aborts(fnic, sc)) {
		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Device reset failed"
			      " since could not abort all IOs\n");
		goto fnic_device_reset_clean;
	}

	/* Clean lun reset command */
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (io_req)
		/* Completed, and successful */
		ret = SUCCESS;

fnic_device_reset_clean:
	if (io_req)
		CMD_SP(sc) = NULL;

	spin_unlock_irqrestore(io_lock, flags);

	if (io_req) {
		start_time = io_req->start_time;
		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);
	}

fnic_device_reset_end:
	FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
		  sc->request->tag, sc,
		  jiffies_to_msecs(jiffies - start_time),
		  0, ((u64)sc->cmnd[0] << 32 |
		  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
		  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

	/* free tag if it is allocated */
	if (unlikely(tag_gen_flag))
		fnic_scsi_host_end_tag(fnic, sc);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Returning from device reset %s\n",
		      (ret == SUCCESS) ?
		      "SUCCESS" : "FAILED");

	if (ret == FAILED)
		atomic64_inc(&reset_stats->device_reset_failures);

	return ret;
}

/* Clean up all IOs, clean up libFC local port */
int fnic_reset(struct Scsi_Host *shost)
{
	struct fc_lport *lp;
	struct fnic *fnic;
	int ret = 0;
	struct reset_stats *reset_stats;

	lp = shost_priv(shost);
	fnic = lport_priv(lp);
	reset_stats = &fnic->fnic_stats.reset_stats;

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "fnic_reset called\n");

	atomic64_inc(&reset_stats->fnic_resets);

	/*
	 * Reset local port, this will clean up libFC exchanges,
	 * reset remote port sessions, and if link is up, begin flogi
	 */
	ret = lp->tt.lport_reset(lp);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Returning from fnic reset %s\n",
		      (ret == 0) ?
		      "SUCCESS" : "FAILED");

	if (ret == 0)
		atomic64_inc(&reset_stats->fnic_reset_completions);
	else
		atomic64_inc(&reset_stats->fnic_reset_failures);

	return ret;
}

/*
 * SCSI error handling calls the driver's eh_host_reset if all prior
 * error handling levels return FAILED. If host reset completes
 * successfully, and if the link is up, then fabric login begins.
 *
 * Host reset is the highest level of error recovery. If this fails, the
 * host is offlined by SCSI.
 */
int fnic_host_reset(struct scsi_cmnd *sc)
{
	int ret;
	unsigned long wait_host_tmo;
	struct Scsi_Host *shost = sc->device->host;
	struct fc_lport *lp = shost_priv(shost);

	/*
	 * If fnic_reset is successful, wait for fabric login to complete.
	 * scsi-ml tries to send a TUR to every device if host reset is
	 * successful, so before returning to scsi, fabric should be up.
	 */
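	/*
	 * The loop below polls once a second, for up to
	 * FNIC_HOST_RESET_SETTLE_TIME seconds, for the local port to
	 * reach LPORT_ST_READY with the link up.
	 */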
	ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
	if (ret == SUCCESS) {
		wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
		ret = FAILED;
		while (time_before(jiffies, wait_host_tmo)) {
			if ((lp->state == LPORT_ST_READY) &&
			    (lp->link_up)) {
				ret = SUCCESS;
				break;
			}
			ssleep(1);
		}
	}

	return ret;
}

/*
 * This function is called from libFC when the host is removed.
 */
void fnic_scsi_abort_io(struct fc_lport *lp)
{
	int err = 0;
	unsigned long flags;
	enum fnic_state old_state;
	struct fnic *fnic = lport_priv(lp);
	DECLARE_COMPLETION_ONSTACK(remove_wait);

	/* Issue firmware reset for fnic, wait for reset to complete */
retry_fw_reset:
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
		/* fw reset is in progress, poll for its completion */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(100));
		goto retry_fw_reset;
	}

	fnic->remove_wait = &remove_wait;
	old_state = fnic->state;
	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
	fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	err = fnic_fw_reset_handler(fnic);
	if (err) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
			fnic->state = old_state;
		fnic->remove_wait = NULL;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	/* Wait for firmware reset to complete */
	wait_for_completion_timeout(&remove_wait,
				    msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->remove_wait = NULL;
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "fnic_scsi_abort_io %s\n",
		      (fnic->state == FNIC_IN_ETH_MODE) ?
		      "SUCCESS" : "FAILED");
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

/*
 * This function is called from libFC to clean up driver IO state on
 * link down.
 */
void fnic_scsi_cleanup(struct fc_lport *lp)
{
	unsigned long flags;
	enum fnic_state old_state;
	struct fnic *fnic = lport_priv(lp);

	/* issue fw reset */
retry_fw_reset:
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
		/* fw reset is in progress, poll for its completion */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(100));
		goto retry_fw_reset;
	}
	old_state = fnic->state;
	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
	fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (fnic_fw_reset_handler(fnic)) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
			fnic->state = old_state;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	}
}

void fnic_empty_scsi_cleanup(struct fc_lport *lp)
{
}

void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
{
	struct fnic *fnic = lport_priv(lp);

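	/*
	 * sid/did are the FC source and destination port IDs that the
	 * exchange manager filters on: a non-zero did names a single
	 * remote port whose exchanges (and, here, driver-side I/Os)
	 * must be torn down, while sid == did == 0 means a full reset.
	 */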
	/* Non-zero sid, nothing to do */
	if (sid)
		goto call_fc_exch_mgr_reset;

	if (did) {
		fnic_rport_exch_reset(fnic, did);
		goto call_fc_exch_mgr_reset;
	}

	/*
	 * sid = 0, did = 0
	 * link down or device being removed
	 */
	if (!fnic->in_remove)
		fnic_scsi_cleanup(lp);
	else
		fnic_scsi_abort_io(lp);

	/* call libFC exch mgr reset to reset its exchanges */
call_fc_exch_mgr_reset:
	fc_exch_mgr_reset(lp, sid, did);
}

/*
 * fnic_is_abts_pending() is a helper function that
 * walks through the tag map to check if any IOs are pending; if one is
 * found it returns 1 (true), otherwise 0 (false).
 * If @lr_sc is non-NULL, only IOs on that command's LUN are checked;
 * otherwise all IOs are checked.
 */
int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
{
	int tag;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	int ret = 0;
	struct scsi_cmnd *sc;
	struct scsi_device *lun_dev = NULL;

	if (lr_sc)
		lun_dev = lr_sc->device;

	/* walk the tag map to check if IOs are still pending in fw */
	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		/*
		 * ignore this lun reset cmd or cmds that do not belong to
		 * this lun
		 */
		if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc)))
			continue;

		io_lock = fnic_io_lock_hash(fnic, sc);
		spin_lock_irqsave(io_lock, flags);

		io_req = (struct fnic_io_req *)CMD_SP(sc);

		if (!io_req || (lr_sc && sc->device != lun_dev)) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		/*
		 * Found IO that is still pending with firmware and
		 * belongs to the LUN that we are resetting
		 */
		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
			      "Found IO in %s on lun\n",
			      fnic_ioreq_state_to_str(CMD_STATE(sc)));

		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
			ret = 1;
		spin_unlock_irqrestore(io_lock, flags);
	}

	return ret;
}