1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for         *
3 * Fibre Channel Host Bus Adapters.                                *
4 * Copyright (C) 2004-2015 Emulex.  All rights reserved.           *
5 * EMULEX and SLI are trademarks of Emulex.                        *
6 * www.emulex.com                                                  *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8 *                                                                 *
9 * This program is free software; you can redistribute it and/or   *
10 * modify it under the terms of version 2 of the GNU General       *
11 * Public License as published by the Free Software Foundation.    *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18 * more details, a copy of which can be found in the file COPYING  *
19 * included with this package.                                     *
20 *******************************************************************/
21/* See Fibre Channel protocol T11 FC-LS for details */
22#include <linux/blkdev.h>
23#include <linux/pci.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26
27#include <scsi/scsi.h>
28#include <scsi/scsi_device.h>
29#include <scsi/scsi_host.h>
30#include <scsi/scsi_transport_fc.h>
31
32
33#include "lpfc_hw4.h"
34#include "lpfc_hw.h"
35#include "lpfc_sli.h"
36#include "lpfc_sli4.h"
37#include "lpfc_nl.h"
38#include "lpfc_disc.h"
39#include "lpfc_scsi.h"
40#include "lpfc.h"
41#include "lpfc_logmsg.h"
42#include "lpfc_crtn.h"
43#include "lpfc_vport.h"
44#include "lpfc_debugfs.h"
45
46static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
47			  struct lpfc_iocbq *);
48static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
49			struct lpfc_iocbq *);
50static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
51static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
52				struct lpfc_nodelist *ndlp, uint8_t retry);
53static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
54				  struct lpfc_iocbq *iocb);
55
56static int lpfc_max_els_tries = 3;
57
58/**
59 * lpfc_els_chk_latt - Check host link attention event for a vport
60 * @vport: pointer to a host virtual N_Port data structure.
61 *
62 * This routine checks whether there is an outstanding host link
63 * attention event during the discovery process with the @vport. It is done
64 * by reading the HBA's Host Attention (HA) register. If there is any host
65 * link attention events during this @vport's discovery process, the @vport
66 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
67 * be issued if the link state is not already in host link cleared state,
68 * and a return code shall indicate whether the host link attention event
69 * had happened.
70 *
71 * Note that, if either the host link is in state LPFC_LINK_DOWN or @vport
72 * state in LPFC_VPORT_READY, the request for checking host link attention
73 * event will be ignored and a return code shall indicate no host link
74 * attention event had happened.
75 *
76 * Return codes
77 *   0 - no host link attention event happened
78 *   1 - host link attention event happened
79 **/
80int
81lpfc_els_chk_latt(struct lpfc_vport *vport)
82{
83	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
84	struct lpfc_hba  *phba = vport->phba;
85	uint32_t ha_copy;
86
87	if (vport->port_state >= LPFC_VPORT_READY ||
88	    phba->link_state == LPFC_LINK_DOWN ||
89	    phba->sli_rev > LPFC_SLI_REV3)
90		return 0;
91
92	/* Read the HBA Host Attention Register */
93	if (lpfc_readl(phba->HAregaddr, &ha_copy))
94		return 1;
95
96	if (!(ha_copy & HA_LATT))
97		return 0;
98
99	/* Pending Link Event during Discovery */
100	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
101			 "0237 Pending Link Event during "
102			 "Discovery: State x%x\n",
103			 phba->pport->port_state);
104
105	/* CLEAR_LA should re-enable link attention events and
106	 * we should then immediately take a LATT event. The
107	 * LATT processing should call lpfc_linkdown() which
108	 * will cleanup any left over in-progress discovery
109	 * events.
110	 */
111	spin_lock_irq(shost->host_lock);
112	vport->fc_flag |= FC_ABORT_DISCOVERY;
113	spin_unlock_irq(shost->host_lock);
114
115	if (phba->link_state != LPFC_CLEAR_LA)
116		lpfc_issue_clear_la(phba, vport);
117
118	return 1;
119}
120
121/**
122 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
123 * @vport: pointer to a host virtual N_Port data structure.
124 * @expectRsp: flag indicating whether response is expected.
125 * @cmdSize: size of the ELS command.
126 * @retry: number of retries to the command IOCB when it fails.
127 * @ndlp: pointer to a node-list data structure.
128 * @did: destination identifier.
129 * @elscmd: the ELS command code.
130 *
131 * This routine is used for allocating a lpfc-IOCB data structure from
132 * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters
133 * passed into the routine for discovery state machine to issue an Extended
134 * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation
135 * and preparation routine that is used by all the discovery state machine
136 * routines and the ELS command-specific fields will be later set up by
137 * the individual discovery machine routines after calling this routine
138 * allocating and preparing a generic IOCB data structure. It fills in the
139 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
140 * payload and response payload (if expected). The reference count on the
141 * ndlp is incremented by 1 and the reference to the ndlp is put into
142 * context1 of the IOCB data structure for this IOCB to hold the ndlp
143 * reference for the command's callback function to access later.
144 *
145 * Return code
146 *   Pointer to the newly allocated/prepared els iocb data structure
147 *   NULL - when els iocb data structure allocation/preparation failed
148 **/
149struct lpfc_iocbq *
150lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
151		   uint16_t cmdSize, uint8_t retry,
152		   struct lpfc_nodelist *ndlp, uint32_t did,
153		   uint32_t elscmd)
154{
155	struct lpfc_hba  *phba = vport->phba;
156	struct lpfc_iocbq *elsiocb;
157	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
158	struct ulp_bde64 *bpl;
159	IOCB_t *icmd;
160
161
162	if (!lpfc_is_link_up(phba))
163		return NULL;
164
165	/* Allocate buffer for  command iocb */
166	elsiocb = lpfc_sli_get_iocbq(phba);
167
168	if (elsiocb == NULL)
169		return NULL;
170
171	/*
172	 * If this command is for fabric controller and HBA running
173	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
174	 */
175	if ((did == Fabric_DID) &&
176		(phba->hba_flag & HBA_FIP_SUPPORT) &&
177		((elscmd == ELS_CMD_FLOGI) ||
178		 (elscmd == ELS_CMD_FDISC) ||
179		 (elscmd == ELS_CMD_LOGO)))
180		switch (elscmd) {
181		case ELS_CMD_FLOGI:
182		elsiocb->iocb_flag |=
183			((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
184					& LPFC_FIP_ELS_ID_MASK);
185		break;
186		case ELS_CMD_FDISC:
187		elsiocb->iocb_flag |=
188			((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
189					& LPFC_FIP_ELS_ID_MASK);
190		break;
191		case ELS_CMD_LOGO:
192		elsiocb->iocb_flag |=
193			((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
194					& LPFC_FIP_ELS_ID_MASK);
195		break;
196		}
197	else
198		elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
199
200	icmd = &elsiocb->iocb;
201
202	/* fill in BDEs for command */
203	/* Allocate buffer for command payload */
204	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
205	if (pcmd)
206		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
207	if (!pcmd || !pcmd->virt)
208		goto els_iocb_free_pcmb_exit;
209
210	INIT_LIST_HEAD(&pcmd->list);
211
212	/* Allocate buffer for response payload */
213	if (expectRsp) {
214		prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
215		if (prsp)
216			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
217						     &prsp->phys);
218		if (!prsp || !prsp->virt)
219			goto els_iocb_free_prsp_exit;
220		INIT_LIST_HEAD(&prsp->list);
221	} else
222		prsp = NULL;
223
224	/* Allocate buffer for Buffer ptr list */
225	pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
226	if (pbuflist)
227		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
228						 &pbuflist->phys);
229	if (!pbuflist || !pbuflist->virt)
230		goto els_iocb_free_pbuf_exit;
231
232	INIT_LIST_HEAD(&pbuflist->list);
233
234	if (expectRsp) {
235		icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
236		icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
237		icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
238		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
239
240		icmd->un.elsreq64.remoteID = did;		/* DID */
241		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
242		if (elscmd == ELS_CMD_FLOGI)
243			icmd->ulpTimeout = FF_DEF_RATOV * 2;
244		else
245			icmd->ulpTimeout = phba->fc_ratov * 2;
246	} else {
247		icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
248		icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
249		icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
250		icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
251		icmd->un.xseq64.xmit_els_remoteID = did;	/* DID */
252		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
253	}
254	icmd->ulpBdeCount = 1;
255	icmd->ulpLe = 1;
256	icmd->ulpClass = CLASS3;
257
258	/*
259	 * If we have NPIV enabled, we want to send ELS traffic by VPI.
260	 * For SLI4, since the driver controls VPIs we also want to include
261	 * all ELS pt2pt protocol traffic as well.
262	 */
263	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
264		((phba->sli_rev == LPFC_SLI_REV4) &&
265		    (vport->fc_flag & FC_PT2PT))) {
266
267		if (expectRsp) {
268			icmd->un.elsreq64.myID = vport->fc_myDID;
269
270			/* For ELS_REQUEST64_CR, use the VPI by default */
271			icmd->ulpContext = phba->vpi_ids[vport->vpi];
272		}
273
274		icmd->ulpCt_h = 0;
275		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
276		if (elscmd == ELS_CMD_ECHO)
277			icmd->ulpCt_l = 0; /* context = invalid RPI */
278		else
279			icmd->ulpCt_l = 1; /* context = VPI */
280	}
281
282	bpl = (struct ulp_bde64 *) pbuflist->virt;
283	bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
284	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
285	bpl->tus.f.bdeSize = cmdSize;
286	bpl->tus.f.bdeFlags = 0;
287	bpl->tus.w = le32_to_cpu(bpl->tus.w);
288
289	if (expectRsp) {
290		bpl++;
291		bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
292		bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
293		bpl->tus.f.bdeSize = FCELSSIZE;
294		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
295		bpl->tus.w = le32_to_cpu(bpl->tus.w);
296	}
297
298	/* prevent preparing iocb with NULL ndlp reference */
299	elsiocb->context1 = lpfc_nlp_get(ndlp);
300	if (!elsiocb->context1)
301		goto els_iocb_free_pbuf_exit;
302	elsiocb->context2 = pcmd;
303	elsiocb->context3 = pbuflist;
304	elsiocb->retry = retry;
305	elsiocb->vport = vport;
306	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
307
308	if (prsp) {
309		list_add(&prsp->list, &pcmd->list);
310	}
311	if (expectRsp) {
312		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
313		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
314				 "0116 Xmit ELS command x%x to remote "
315				 "NPORT x%x I/O tag: x%x, port state:x%x"
316				 " fc_flag:x%x\n",
317				 elscmd, did, elsiocb->iotag,
318				 vport->port_state,
319				 vport->fc_flag);
320	} else {
321		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
322		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
323				 "0117 Xmit ELS response x%x to remote "
324				 "NPORT x%x I/O tag: x%x, size: x%x "
325				 "port_state x%x fc_flag x%x\n",
326				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
327				 cmdSize, vport->port_state,
328				 vport->fc_flag);
329	}
330	return elsiocb;
331
332els_iocb_free_pbuf_exit:
333	if (expectRsp)
334		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
335	kfree(pbuflist);
336
337els_iocb_free_prsp_exit:
338	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
339	kfree(prsp);
340
341els_iocb_free_pcmb_exit:
342	kfree(pcmd);
343	lpfc_sli_release_iocbq(phba, elsiocb);
344	return NULL;
345}
346
347/**
348 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
349 * @vport: pointer to a host virtual N_Port data structure.
350 *
351 * This routine issues a fabric registration login for a @vport. An
352 * active ndlp node with Fabric_DID must already exist for this @vport.
353 * The routine invokes two mailbox commands to carry out fabric registration
354 * login through the HBA firmware: the first mailbox command requests the
355 * HBA to perform link configuration for the @vport; and the second mailbox
356 * command requests the HBA to perform the actual fabric registration login
357 * with the @vport.
358 *
359 * Return code
360 *   0 - successfully issued fabric registration login for @vport
361 *   -ENXIO -- failed to issue fabric registration login for @vport
362 **/
363int
364lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
365{
366	struct lpfc_hba  *phba = vport->phba;
367	LPFC_MBOXQ_t *mbox;
368	struct lpfc_dmabuf *mp;
369	struct lpfc_nodelist *ndlp;
370	struct serv_parm *sp;
371	int rc;
372	int err = 0;
373
374	sp = &phba->fc_fabparam;
375	ndlp = lpfc_findnode_did(vport, Fabric_DID);
376	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
377		err = 1;
378		goto fail;
379	}
380
381	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
382	if (!mbox) {
383		err = 2;
384		goto fail;
385	}
386
387	vport->port_state = LPFC_FABRIC_CFG_LINK;
388	lpfc_config_link(phba, mbox);
389	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
390	mbox->vport = vport;
391
392	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
393	if (rc == MBX_NOT_FINISHED) {
394		err = 3;
395		goto fail_free_mbox;
396	}
397
398	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
399	if (!mbox) {
400		err = 4;
401		goto fail;
402	}
403	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
404			  ndlp->nlp_rpi);
405	if (rc) {
406		err = 5;
407		goto fail_free_mbox;
408	}
409
410	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
411	mbox->vport = vport;
412	/* increment the reference count on ndlp to hold reference
413	 * for the callback routine.
414	 */
415	mbox->context2 = lpfc_nlp_get(ndlp);
416
417	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
418	if (rc == MBX_NOT_FINISHED) {
419		err = 6;
420		goto fail_issue_reg_login;
421	}
422
423	return 0;
424
425fail_issue_reg_login:
426	/* decrement the reference count on ndlp just incremented
427	 * for the failed mbox command.
428	 */
429	lpfc_nlp_put(ndlp);
430	mp = (struct lpfc_dmabuf *) mbox->context1;
431	lpfc_mbuf_free(phba, mp->virt, mp->phys);
432	kfree(mp);
433fail_free_mbox:
434	mempool_free(mbox, phba->mbox_mem_pool);
435
436fail:
437	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
438	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
439		"0249 Cannot issue Register Fabric login: Err %d\n", err);
440	return -ENXIO;
441}
442
443/**
444 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
445 * @vport: pointer to a host virtual N_Port data structure.
446 *
447 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
448 * the @vport. This mailbox command is necessary for SLI4 port only.
449 *
450 * Return code
451 *   0 - successfully issued REG_VFI for @vport
452 *   A failure code otherwise.
453 **/
454int
455lpfc_issue_reg_vfi(struct lpfc_vport *vport)
456{
457	struct lpfc_hba  *phba = vport->phba;
458	LPFC_MBOXQ_t *mboxq;
459	struct lpfc_nodelist *ndlp;
460	struct serv_parm *sp;
461	struct lpfc_dmabuf *dmabuf;
462	int rc = 0;
463
464	sp = &phba->fc_fabparam;
465	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
466	if ((phba->sli_rev == LPFC_SLI_REV4) &&
467	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
468	    !(vport->fc_flag & FC_PT2PT)) {
469		ndlp = lpfc_findnode_did(vport, Fabric_DID);
470		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
471			rc = -ENODEV;
472			goto fail;
473		}
474	}
475
476	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
477	if (!dmabuf) {
478		rc = -ENOMEM;
479		goto fail;
480	}
481	dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
482	if (!dmabuf->virt) {
483		rc = -ENOMEM;
484		goto fail_free_dmabuf;
485	}
486
487	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
488	if (!mboxq) {
489		rc = -ENOMEM;
490		goto fail_free_coherent;
491	}
492	vport->port_state = LPFC_FABRIC_CFG_LINK;
493	memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
494	lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
495
496	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
497	mboxq->vport = vport;
498	mboxq->context1 = dmabuf;
499	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
500	if (rc == MBX_NOT_FINISHED) {
501		rc = -ENXIO;
502		goto fail_free_mbox;
503	}
504	return 0;
505
506fail_free_mbox:
507	mempool_free(mboxq, phba->mbox_mem_pool);
508fail_free_coherent:
509	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
510fail_free_dmabuf:
511	kfree(dmabuf);
512fail:
513	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
514	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
515		"0289 Issue Register VFI failed: Err %d\n", rc);
516	return rc;
517}
518
519/**
520 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
521 * @vport: pointer to a host virtual N_Port data structure.
522 *
523 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
524 * the @vport. This mailbox command is necessary for SLI4 port only.
525 *
526 * Return code
527 *   0 - successfully issued REG_VFI for @vport
528 *   A failure code otherwise.
529 **/
530int
531lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
532{
533	struct lpfc_hba *phba = vport->phba;
534	struct Scsi_Host *shost;
535	LPFC_MBOXQ_t *mboxq;
536	int rc;
537
538	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
539	if (!mboxq) {
540		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
541				"2556 UNREG_VFI mbox allocation failed"
542				"HBA state x%x\n", phba->pport->port_state);
543		return -ENOMEM;
544	}
545
546	lpfc_unreg_vfi(mboxq, vport);
547	mboxq->vport = vport;
548	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;
549
550	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
551	if (rc == MBX_NOT_FINISHED) {
552		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
553				"2557 UNREG_VFI issue mbox failed rc x%x "
554				"HBA state x%x\n",
555				rc, phba->pport->port_state);
556		mempool_free(mboxq, phba->mbox_mem_pool);
557		return -EIO;
558	}
559
560	shost = lpfc_shost_from_vport(vport);
561	spin_lock_irq(shost->host_lock);
562	vport->fc_flag &= ~FC_VFI_REGISTERED;
563	spin_unlock_irq(shost->host_lock);
564	return 0;
565}
566
567/**
568 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
569 * @vport: pointer to a host virtual N_Port data structure.
570 * @sp: pointer to service parameter data structure.
571 *
572 * This routine is called from FLOGI/FDISC completion handler functions.
573 * lpfc_check_clean_addr_bit return 1 when FCID/Fabric portname/ Fabric
574 * node nodename is changed in the completion service parameter else return
575 * 0. This function also set flag in the vport data structure to delay
576 * NP_Port discovery after the FLOGI/FDISC completion if Clean address bit
577 * in FLOGI/FDISC response is cleared and FCID/Fabric portname/ Fabric
578 * node nodename is changed in the completion service parameter.
579 *
580 * Return code
581 *   0 - FCID and Fabric Nodename and Fabric portname is not changed.
582 *   1 - FCID or Fabric Nodename or Fabric portname is changed.
583 *
584 **/
585static uint8_t
586lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
587		struct serv_parm *sp)
588{
589	uint8_t fabric_param_changed = 0;
590	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
591
592	if ((vport->fc_prevDID != vport->fc_myDID) ||
593		memcmp(&vport->fabric_portname, &sp->portName,
594			sizeof(struct lpfc_name)) ||
595		memcmp(&vport->fabric_nodename, &sp->nodeName,
596			sizeof(struct lpfc_name)))
597		fabric_param_changed = 1;
598
599	/*
600	 * Word 1 Bit 31 in common service parameter is overloaded.
601	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
602	 * Word 1 Bit 31 in FLOGI response is clean address bit
603	 *
604	 * If fabric parameter is changed and clean address bit is
605	 * cleared delay nport discovery if
606	 * - vport->fc_prevDID != 0 (not initial discovery) OR
607	 * - lpfc_delay_discovery module parameter is set.
608	 */
609	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
610	    (vport->fc_prevDID || lpfc_delay_discovery)) {
611		spin_lock_irq(shost->host_lock);
612		vport->fc_flag |= FC_DISC_DELAYED;
613		spin_unlock_irq(shost->host_lock);
614	}
615
616	return fabric_param_changed;
617}
618
619
620/**
621 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
622 * @vport: pointer to a host virtual N_Port data structure.
623 * @ndlp: pointer to a node-list data structure.
624 * @sp: pointer to service parameter data structure.
625 * @irsp: pointer to the IOCB within the lpfc response IOCB.
626 *
627 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
628 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
629 * port in a fabric topology. It properly sets up the parameters to the @ndlp
630 * from the IOCB response. It also check the newly assigned N_Port ID to the
631 * @vport against the previously assigned N_Port ID. If it is different from
632 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
633 * is invoked on all the remaining nodes with the @vport to unregister the
634 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
635 * is invoked to register login to the fabric.
636 *
637 * Return code
638 *   0 - Success (currently, always return 0)
639 **/
640static int
641lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
642			   struct serv_parm *sp, IOCB_t *irsp)
643{
644	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
645	struct lpfc_hba  *phba = vport->phba;
646	struct lpfc_nodelist *np;
647	struct lpfc_nodelist *next_np;
648	uint8_t fabric_param_changed;
649
650	spin_lock_irq(shost->host_lock);
651	vport->fc_flag |= FC_FABRIC;
652	spin_unlock_irq(shost->host_lock);
653
654	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
655	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
656		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
657
658	phba->fc_edtovResol = sp->cmn.edtovResolution;
659	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
660
661	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
662		spin_lock_irq(shost->host_lock);
663		vport->fc_flag |= FC_PUBLIC_LOOP;
664		spin_unlock_irq(shost->host_lock);
665	}
666
667	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
668	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
669	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
670	ndlp->nlp_class_sup = 0;
671	if (sp->cls1.classValid)
672		ndlp->nlp_class_sup |= FC_COS_CLASS1;
673	if (sp->cls2.classValid)
674		ndlp->nlp_class_sup |= FC_COS_CLASS2;
675	if (sp->cls3.classValid)
676		ndlp->nlp_class_sup |= FC_COS_CLASS3;
677	if (sp->cls4.classValid)
678		ndlp->nlp_class_sup |= FC_COS_CLASS4;
679	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
680				sp->cmn.bbRcvSizeLsb;
681
682	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
683	memcpy(&vport->fabric_portname, &sp->portName,
684			sizeof(struct lpfc_name));
685	memcpy(&vport->fabric_nodename, &sp->nodeName,
686			sizeof(struct lpfc_name));
687	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
688
689	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
690		if (sp->cmn.response_multiple_NPort) {
691			lpfc_printf_vlog(vport, KERN_WARNING,
692					 LOG_ELS | LOG_VPORT,
693					 "1816 FLOGI NPIV supported, "
694					 "response data 0x%x\n",
695					 sp->cmn.response_multiple_NPort);
696			spin_lock_irq(&phba->hbalock);
697			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
698			spin_unlock_irq(&phba->hbalock);
699		} else {
700			/* Because we asked f/w for NPIV it still expects us
701			to call reg_vnpid atleast for the physcial host */
702			lpfc_printf_vlog(vport, KERN_WARNING,
703					 LOG_ELS | LOG_VPORT,
704					 "1817 Fabric does not support NPIV "
705					 "- configuring single port mode.\n");
706			spin_lock_irq(&phba->hbalock);
707			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
708			spin_unlock_irq(&phba->hbalock);
709		}
710	}
711
712	/*
713	 * For FC we need to do some special processing because of the SLI
714	 * Port's default settings of the Common Service Parameters.
715	 */
716	if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) {
717		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
718		if ((phba->sli_rev == LPFC_SLI_REV4) && fabric_param_changed)
719			lpfc_unregister_fcf_prep(phba);
720
721		/* This should just update the VFI CSPs*/
722		if (vport->fc_flag & FC_VFI_REGISTERED)
723			lpfc_issue_reg_vfi(vport);
724	}
725
726	if (fabric_param_changed &&
727		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
728
729		/* If our NportID changed, we need to ensure all
730		 * remaining NPORTs get unreg_login'ed.
731		 */
732		list_for_each_entry_safe(np, next_np,
733					&vport->fc_nodes, nlp_listp) {
734			if (!NLP_CHK_NODE_ACT(np))
735				continue;
736			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
737				   !(np->nlp_flag & NLP_NPR_ADISC))
738				continue;
739			spin_lock_irq(shost->host_lock);
740			np->nlp_flag &= ~NLP_NPR_ADISC;
741			spin_unlock_irq(shost->host_lock);
742			lpfc_unreg_rpi(vport, np);
743		}
744		lpfc_cleanup_pending_mbox(vport);
745
746		if (phba->sli_rev == LPFC_SLI_REV4) {
747			lpfc_sli4_unreg_all_rpis(vport);
748			lpfc_mbx_unreg_vpi(vport);
749			spin_lock_irq(shost->host_lock);
750			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
751			spin_unlock_irq(shost->host_lock);
752		}
753
754		/*
755		 * For SLI3 and SLI4, the VPI needs to be reregistered in
756		 * response to this fabric parameter change event.
757		 */
758		spin_lock_irq(shost->host_lock);
759		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
760		spin_unlock_irq(shost->host_lock);
761	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
762		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
763			/*
764			 * Driver needs to re-reg VPI in order for f/w
765			 * to update the MAC address.
766			 */
767			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
768			lpfc_register_new_vport(phba, vport, ndlp);
769			return 0;
770	}
771
772	if (phba->sli_rev < LPFC_SLI_REV4) {
773		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
774		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
775		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
776			lpfc_register_new_vport(phba, vport, ndlp);
777		else
778			lpfc_issue_fabric_reglogin(vport);
779	} else {
780		ndlp->nlp_type |= NLP_FABRIC;
781		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
782		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
783			(vport->vpi_state & LPFC_VPI_REGISTERED)) {
784			lpfc_start_fdiscs(phba);
785			lpfc_do_scr_ns_plogi(phba, vport);
786		} else if (vport->fc_flag & FC_VFI_REGISTERED)
787			lpfc_issue_init_vpi(vport);
788		else {
789			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
790					"3135 Need register VFI: (x%x/%x)\n",
791					vport->fc_prevDID, vport->fc_myDID);
792			lpfc_issue_reg_vfi(vport);
793		}
794	}
795	return 0;
796}
797
798/**
799 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
800 * @vport: pointer to a host virtual N_Port data structure.
801 * @ndlp: pointer to a node-list data structure.
802 * @sp: pointer to service parameter data structure.
803 *
804 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
805 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
806 * in a point-to-point topology. First, the @vport's N_Port Name is compared
807 * with the received N_Port Name: if the @vport's N_Port Name is greater than
808 * the received N_Port Name lexicographically, this node shall assign local
809 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
810 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
811 * this node shall just wait for the remote node to issue PLOGI and assign
812 * N_Port IDs.
813 *
814 * Return code
815 *   0 - Success
816 *   -ENXIO - Fail
817 **/
818static int
819lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
820			  struct serv_parm *sp)
821{
822	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
823	struct lpfc_hba  *phba = vport->phba;
824	LPFC_MBOXQ_t *mbox;
825	int rc;
826
827	spin_lock_irq(shost->host_lock);
828	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
829	spin_unlock_irq(shost->host_lock);
830
831	phba->fc_edtov = FF_DEF_EDTOV;
832	phba->fc_ratov = FF_DEF_RATOV;
833	rc = memcmp(&vport->fc_portname, &sp->portName,
834		    sizeof(vport->fc_portname));
835	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
836
837	if (rc >= 0) {
838		/* This side will initiate the PLOGI */
839		spin_lock_irq(shost->host_lock);
840		vport->fc_flag |= FC_PT2PT_PLOGI;
841		spin_unlock_irq(shost->host_lock);
842
843		/*
844		 * N_Port ID cannot be 0, set our to LocalID the other
845		 * side will be RemoteID.
846		 */
847
848		/* not equal */
849		if (rc)
850			vport->fc_myDID = PT2PT_LocalID;
851
852		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
853		if (!mbox)
854			goto fail;
855
856		lpfc_config_link(phba, mbox);
857
858		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
859		mbox->vport = vport;
860		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
861		if (rc == MBX_NOT_FINISHED) {
862			mempool_free(mbox, phba->mbox_mem_pool);
863			goto fail;
864		}
865
866		/*
867		 * For SLI4, the VFI/VPI are registered AFTER the
868		 * Nport with the higher WWPN sends the PLOGI with
869		 * an assigned NPortId.
870		 */
871
872		/* not equal */
873		if ((phba->sli_rev == LPFC_SLI_REV4) && rc)
874			lpfc_issue_reg_vfi(vport);
875
876		/* Decrement ndlp reference count indicating that ndlp can be
877		 * safely released when other references to it are done.
878		 */
879		lpfc_nlp_put(ndlp);
880
881		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
882		if (!ndlp) {
883			/*
884			 * Cannot find existing Fabric ndlp, so allocate a
885			 * new one
886			 */
887			ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
888			if (!ndlp)
889				goto fail;
890			lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
891		} else if (!NLP_CHK_NODE_ACT(ndlp)) {
892			ndlp = lpfc_enable_node(vport, ndlp,
893						NLP_STE_UNUSED_NODE);
894			if(!ndlp)
895				goto fail;
896		}
897
898		memcpy(&ndlp->nlp_portname, &sp->portName,
899		       sizeof(struct lpfc_name));
900		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
901		       sizeof(struct lpfc_name));
902		/* Set state will put ndlp onto node list if not already done */
903		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
904		spin_lock_irq(shost->host_lock);
905		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
906		spin_unlock_irq(shost->host_lock);
907	} else
908		/* This side will wait for the PLOGI, decrement ndlp reference
909		 * count indicating that ndlp can be released when other
910		 * references to it are done.
911		 */
912		lpfc_nlp_put(ndlp);
913
914	/* If we are pt2pt with another NPort, force NPIV off! */
915	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
916
917	spin_lock_irq(shost->host_lock);
918	vport->fc_flag |= FC_PT2PT;
919	spin_unlock_irq(shost->host_lock);
920	/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
921	if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
922		lpfc_unregister_fcf_prep(phba);
923
924		/* The FC_VFI_REGISTERED flag will get clear in the cmpl
925		 * handler for unreg_vfi, but if we don't force the
926		 * FC_VFI_REGISTERED flag then the reg_vfi mailbox could be
927		 * built with the update bit set instead of just the vp bit to
928		 * change the Nport ID.  We need to have the vp set and the
929		 * Upd cleared on topology changes.
930		 */
931		spin_lock_irq(shost->host_lock);
932		vport->fc_flag &= ~FC_VFI_REGISTERED;
933		spin_unlock_irq(shost->host_lock);
934		phba->fc_topology_changed = 0;
935		lpfc_issue_reg_vfi(vport);
936	}
937
938	/* Start discovery - this should just do CLEAR_LA */
939	lpfc_disc_start(vport);
940	return 0;
941fail:
942	return -ENXIO;
943}
944
945/**
946 * lpfc_cmpl_els_flogi - Completion callback function for flogi
947 * @phba: pointer to lpfc hba data structure.
948 * @cmdiocb: pointer to lpfc command iocb data structure.
949 * @rspiocb: pointer to lpfc response iocb data structure.
950 *
951 * This routine is the top-level completion callback function for issuing
952 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
953 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
954 * retry has been made (either immediately or delayed with lpfc_els_retry()
955 * returning 1), the command IOCB will be released and function returned.
956 * If the retry attempt has been given up (possibly reach the maximum
957 * number of retries), one additional decrement of ndlp reference shall be
958 * invoked before going out after releasing the command IOCB. This will
959 * actually release the remote node (Note, lpfc_els_free_iocb() will also
960 * invoke one decrement of ndlp reference count). If no error reported in
961 * the IOCB status, the command Port ID field is used to determine whether
962 * this is a point-to-point topology or a fabric topology: if the Port ID
963 * field is assigned, it is a fabric topology; otherwise, it is a
964 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
965 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
966 * specific topology completion conditions.
967 **/
968static void
969lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
970		    struct lpfc_iocbq *rspiocb)
971{
972	struct lpfc_vport *vport = cmdiocb->vport;
973	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
974	IOCB_t *irsp = &rspiocb->iocb;
975	struct lpfc_nodelist *ndlp = cmdiocb->context1;
976	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
977	struct serv_parm *sp;
978	uint16_t fcf_index;
979	int rc;
980
981	/* Check to see if link went down during discovery */
982	if (lpfc_els_chk_latt(vport)) {
983		/* One additional decrement on node reference count to
984		 * trigger the release of the node
985		 */
986		lpfc_nlp_put(ndlp);
987		goto out;
988	}
989
990	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
991		"FLOGI cmpl:      status:x%x/x%x state:x%x",
992		irsp->ulpStatus, irsp->un.ulpWord[4],
993		vport->port_state);
994
995	if (irsp->ulpStatus) {
996		/*
997		 * In case of FIP mode, perform roundrobin FCF failover
998		 * due to new FCF discovery
999		 */
1000		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
1001		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
1002			if (phba->link_state < LPFC_LINK_UP)
1003				goto stop_rr_fcf_flogi;
1004			if ((phba->fcoe_cvl_eventtag_attn ==
1005			     phba->fcoe_cvl_eventtag) &&
1006			    (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1007			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
1008			    IOERR_SLI_ABORTED))
1009				goto stop_rr_fcf_flogi;
1010			else
1011				phba->fcoe_cvl_eventtag_attn =
1012					phba->fcoe_cvl_eventtag;
1013			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
1014					"2611 FLOGI failed on FCF (x%x), "
1015					"status:x%x/x%x, tmo:x%x, perform "
1016					"roundrobin FCF failover\n",
1017					phba->fcf.current_rec.fcf_indx,
1018					irsp->ulpStatus, irsp->un.ulpWord[4],
1019					irsp->ulpTimeout);
1020			lpfc_sli4_set_fcf_flogi_fail(phba,
1021					phba->fcf.current_rec.fcf_indx);
1022			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
1023			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
1024			if (rc)
1025				goto out;
1026		}
1027
1028stop_rr_fcf_flogi:
1029		/* FLOGI failure */
1030		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1031				"2858 FLOGI failure Status:x%x/x%x TMO:x%x\n",
1032				irsp->ulpStatus, irsp->un.ulpWord[4],
1033				irsp->ulpTimeout);
1034
1035		/* Check for retry */
1036		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
1037			goto out;
1038
1039		/* FLOGI failure */
1040		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1041				 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
1042				 irsp->ulpStatus, irsp->un.ulpWord[4],
1043				 irsp->ulpTimeout);
1044
1045		/* FLOGI failed, so there is no fabric */
1046		spin_lock_irq(shost->host_lock);
1047		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
1048		spin_unlock_irq(shost->host_lock);
1049
1050		/* If private loop, then allow max outstanding els to be
1051		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
1052		 * alpa map would take too long otherwise.
1053		 */
1054		if (phba->alpa_map[0] == 0)
1055			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
1056		if ((phba->sli_rev == LPFC_SLI_REV4) &&
1057		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
1058		     (vport->fc_prevDID != vport->fc_myDID) ||
1059			phba->fc_topology_changed)) {
1060			if (vport->fc_flag & FC_VFI_REGISTERED) {
1061				if (phba->fc_topology_changed) {
1062					lpfc_unregister_fcf_prep(phba);
1063					spin_lock_irq(shost->host_lock);
1064					vport->fc_flag &= ~FC_VFI_REGISTERED;
1065					spin_unlock_irq(shost->host_lock);
1066					phba->fc_topology_changed = 0;
1067				} else {
1068					lpfc_sli4_unreg_all_rpis(vport);
1069				}
1070			}
1071			lpfc_issue_reg_vfi(vport);
1072			lpfc_nlp_put(ndlp);
1073			goto out;
1074		}
1075		goto flogifail;
1076	}
1077	spin_lock_irq(shost->host_lock);
1078	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
1079	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
1080	spin_unlock_irq(shost->host_lock);
1081
1082	/*
1083	 * The FLogI succeeded.  Sync the data for the CPU before
1084	 * accessing it.
1085	 */
1086	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
1087	if (!prsp)
1088		goto out;
1089	sp = prsp->virt + sizeof(uint32_t);
1090
1091	/* FLOGI completes successfully */
1092	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1093			 "0101 FLOGI completes successfully, I/O tag:x%x, "
1094			 "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag,
1095			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
1096			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
1097			 vport->port_state, vport->fc_flag);
1098
1099	if (vport->port_state == LPFC_FLOGI) {
1100		/*
1101		 * If Common Service Parameters indicate Nport
1102		 * we are point to point, if Fport we are Fabric.
1103		 */
1104		if (sp->cmn.fPort)
1105			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
1106		else if (!(phba->hba_flag & HBA_FCOE_MODE))
1107			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
1108		else {
1109			lpfc_printf_vlog(vport, KERN_ERR,
1110				LOG_FIP | LOG_ELS,
1111				"2831 FLOGI response with cleared Fabric "
1112				"bit fcf_index 0x%x "
1113				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
1114				"Fabric Name "
1115				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
1116				phba->fcf.current_rec.fcf_indx,
1117				phba->fcf.current_rec.switch_name[0],
1118				phba->fcf.current_rec.switch_name[1],
1119				phba->fcf.current_rec.switch_name[2],
1120				phba->fcf.current_rec.switch_name[3],
1121				phba->fcf.current_rec.switch_name[4],
1122				phba->fcf.current_rec.switch_name[5],
1123				phba->fcf.current_rec.switch_name[6],
1124				phba->fcf.current_rec.switch_name[7],
1125				phba->fcf.current_rec.fabric_name[0],
1126				phba->fcf.current_rec.fabric_name[1],
1127				phba->fcf.current_rec.fabric_name[2],
1128				phba->fcf.current_rec.fabric_name[3],
1129				phba->fcf.current_rec.fabric_name[4],
1130				phba->fcf.current_rec.fabric_name[5],
1131				phba->fcf.current_rec.fabric_name[6],
1132				phba->fcf.current_rec.fabric_name[7]);
1133			lpfc_nlp_put(ndlp);
1134			spin_lock_irq(&phba->hbalock);
1135			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1136			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
1137			spin_unlock_irq(&phba->hbalock);
1138			goto out;
1139		}
1140		if (!rc) {
1141			/* Mark the FCF discovery process done */
1142			if (phba->hba_flag & HBA_FIP_SUPPORT)
1143				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
1144						LOG_ELS,
1145						"2769 FLOGI to FCF (x%x) "
1146						"completed successfully\n",
1147						phba->fcf.current_rec.fcf_indx);
1148			spin_lock_irq(&phba->hbalock);
1149			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1150			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
1151			spin_unlock_irq(&phba->hbalock);
1152			goto out;
1153		}
1154	}
1155
1156flogifail:
1157	lpfc_nlp_put(ndlp);
1158
1159	if (!lpfc_error_lost_link(irsp)) {
1160		/* FLOGI failed, so just use loop map to make discovery list */
1161		lpfc_disc_list_loopmap(vport);
1162
1163		/* Start discovery */
1164		lpfc_disc_start(vport);
1165	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
1166			(((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
1167			 IOERR_SLI_ABORTED) &&
1168			((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
1169			 IOERR_SLI_DOWN))) &&
1170			(phba->link_state != LPFC_CLEAR_LA)) {
1171		/* If FLOGI failed enable link interrupt. */
1172		lpfc_issue_clear_la(phba, vport);
1173	}
1174out:
1175	lpfc_els_free_iocb(phba, cmdiocb);
1176}
1177
1178/**
1179 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
1180 * @vport: pointer to a host virtual N_Port data structure.
1181 * @ndlp: pointer to a node-list data structure.
1182 * @retry: number of retries to the command IOCB.
1183 *
1184 * This routine issues a Fabric Login (FLOGI) Request ELS command
1185 * for a @vport. The initiator service parameters are put into the payload
1186 * of the FLOGI Request IOCB and the top-level callback function pointer
1187 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
1188 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
1189 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
1190 *
1191 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1192 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1193 * will be stored into the context1 field of the IOCB for the completion
1194 * callback function to the FLOGI ELS command.
1195 *
1196 * Return code
1197 *   0 - successfully issued flogi iocb for @vport
1198 *   1 - failed to issue flogi iocb for @vport
1199 **/
1200static int
1201lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1202		     uint8_t retry)
1203{
1204	struct lpfc_hba  *phba = vport->phba;
1205	struct serv_parm *sp;
1206	IOCB_t *icmd;
1207	struct lpfc_iocbq *elsiocb;
1208	struct lpfc_sli_ring *pring;
1209	uint8_t *pcmd;
1210	uint16_t cmdsize;
1211	uint32_t tmo;
1212	int rc;
1213
1214	pring = &phba->sli.ring[LPFC_ELS_RING];
1215
1216	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
1217	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1218				     ndlp->nlp_DID, ELS_CMD_FLOGI);
1219
1220	if (!elsiocb)
1221		return 1;
1222
1223	icmd = &elsiocb->iocb;
1224	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1225
1226	/* For FLOGI request, remainder of payload is service parameters */
1227	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
1228	pcmd += sizeof(uint32_t);
1229	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
1230	sp = (struct serv_parm *) pcmd;
1231
1232	/* Setup CSPs accordingly for Fabric */
1233	sp->cmn.e_d_tov = 0;
1234	sp->cmn.w2.r_a_tov = 0;
1235	sp->cmn.virtual_fabric_support = 0;
1236	sp->cls1.classValid = 0;
1237	if (sp->cmn.fcphLow < FC_PH3)
1238		sp->cmn.fcphLow = FC_PH3;
1239	if (sp->cmn.fcphHigh < FC_PH3)
1240		sp->cmn.fcphHigh = FC_PH3;
1241
1242	if  (phba->sli_rev == LPFC_SLI_REV4) {
1243		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
1244		    LPFC_SLI_INTF_IF_TYPE_0) {
1245			elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
1246			elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
1247			/* FLOGI needs to be 3 for WQE FCFI */
1248			/* Set the fcfi to the fcfi we registered with */
1249			elsiocb->iocb.ulpContext = phba->fcf.fcfi;
1250		}
1251		/* Can't do SLI4 class2 without support sequence coalescing */
1252		sp->cls2.classValid = 0;
1253		sp->cls2.seqDelivery = 0;
1254	} else {
1255		/* Historical, setting sequential-delivery bit for SLI3 */
1256		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
1257		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
1258		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
1259			sp->cmn.request_multiple_Nport = 1;
1260			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
1261			icmd->ulpCt_h = 1;
1262			icmd->ulpCt_l = 0;
1263		} else
1264			sp->cmn.request_multiple_Nport = 0;
1265	}
1266
1267	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
1268		icmd->un.elsreq64.myID = 0;
1269		icmd->un.elsreq64.fl = 1;
1270	}
1271
1272	tmo = phba->fc_ratov;
1273	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
1274	lpfc_set_disctmo(vport);
1275	phba->fc_ratov = tmo;
1276
1277	phba->fc_stat.elsXmitFLOGI++;
1278	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
1279
1280	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1281		"Issue FLOGI:     opt:x%x",
1282		phba->sli3_options, 0, 0);
1283
1284	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
1285	if (rc == IOCB_ERROR) {
1286		lpfc_els_free_iocb(phba, elsiocb);
1287		return 1;
1288	}
1289	return 0;
1290}
1291
1292/**
1293 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
1294 * @phba: pointer to lpfc hba data structure.
1295 *
1296 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
1297 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
1298 * list and issues an abort IOCB commond on each outstanding IOCB that
1299 * contains a active Fabric_DID ndlp. Note that this function is to issue
1300 * the abort IOCB command on all the outstanding IOCBs, thus when this
1301 * function returns, it does not guarantee all the IOCBs are actually aborted.
1302 *
1303 * Return code
1304 *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
1305 **/
1306int
1307lpfc_els_abort_flogi(struct lpfc_hba *phba)
1308{
1309	struct lpfc_sli_ring *pring;
1310	struct lpfc_iocbq *iocb, *next_iocb;
1311	struct lpfc_nodelist *ndlp;
1312	IOCB_t *icmd;
1313
1314	/* Abort outstanding I/O on NPort <nlp_DID> */
1315	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1316			"0201 Abort outstanding I/O on NPort x%x\n",
1317			Fabric_DID);
1318
1319	pring = &phba->sli.ring[LPFC_ELS_RING];
1320
1321	/*
1322	 * Check the txcmplq for an iocb that matches the nport the driver is
1323	 * searching for.
1324	 */
1325	spin_lock_irq(&phba->hbalock);
1326	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
1327		icmd = &iocb->iocb;
1328		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
1329			ndlp = (struct lpfc_nodelist *)(iocb->context1);
1330			if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1331			    (ndlp->nlp_DID == Fabric_DID))
1332				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
1333		}
1334	}
1335	spin_unlock_irq(&phba->hbalock);
1336
1337	return 0;
1338}
1339
1340/**
1341 * lpfc_initial_flogi - Issue an initial fabric login for a vport
1342 * @vport: pointer to a host virtual N_Port data structure.
1343 *
1344 * This routine issues an initial Fabric Login (FLOGI) for the @vport
1345 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
1346 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
1347 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
1348 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
1349 * is then invoked with the @vport and the ndlp to perform the FLOGI for the
1350 * @vport.
1351 *
1352 * Return code
1353 *   0 - failed to issue initial flogi for @vport
1354 *   1 - successfully issued initial flogi for @vport
1355 **/
1356int
1357lpfc_initial_flogi(struct lpfc_vport *vport)
1358{
1359	struct lpfc_hba *phba = vport->phba;
1360	struct lpfc_nodelist *ndlp;
1361
1362	vport->port_state = LPFC_FLOGI;
1363	lpfc_set_disctmo(vport);
1364
1365	/* First look for the Fabric ndlp */
1366	ndlp = lpfc_findnode_did(vport, Fabric_DID);
1367	if (!ndlp) {
1368		/* Cannot find existing Fabric ndlp, so allocate a new one */
1369		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1370		if (!ndlp)
1371			return 0;
1372		lpfc_nlp_init(vport, ndlp, Fabric_DID);
1373		/* Set the node type */
1374		ndlp->nlp_type |= NLP_FABRIC;
1375		/* Put ndlp onto node list */
1376		lpfc_enqueue_node(vport, ndlp);
1377	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
1378		/* re-setup ndlp without removing from node list */
1379		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1380		if (!ndlp)
1381			return 0;
1382	}
1383
1384	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
1385		/* This decrement of reference count to node shall kick off
1386		 * the release of the node.
1387		 */
1388		lpfc_nlp_put(ndlp);
1389		return 0;
1390	}
1391	return 1;
1392}
1393
1394/**
1395 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
1396 * @vport: pointer to a host virtual N_Port data structure.
1397 *
1398 * This routine issues an initial Fabric Discover (FDISC) for the @vport
1399 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
1400 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
1401 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
1402 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
1403 * is then invoked with the @vport and the ndlp to perform the FDISC for the
1404 * @vport.
1405 *
1406 * Return code
1407 *   0 - failed to issue initial fdisc for @vport
1408 *   1 - successfully issued initial fdisc for @vport
1409 **/
1410int
1411lpfc_initial_fdisc(struct lpfc_vport *vport)
1412{
1413	struct lpfc_hba *phba = vport->phba;
1414	struct lpfc_nodelist *ndlp;
1415
1416	/* First look for the Fabric ndlp */
1417	ndlp = lpfc_findnode_did(vport, Fabric_DID);
1418	if (!ndlp) {
1419		/* Cannot find existing Fabric ndlp, so allocate a new one */
1420		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1421		if (!ndlp)
1422			return 0;
1423		lpfc_nlp_init(vport, ndlp, Fabric_DID);
1424		/* Put ndlp onto node list */
1425		lpfc_enqueue_node(vport, ndlp);
1426	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
1427		/* re-setup ndlp without removing from node list */
1428		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1429		if (!ndlp)
1430			return 0;
1431	}
1432
1433	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
1434		/* decrement node reference count to trigger the release of
1435		 * the node.
1436		 */
1437		lpfc_nlp_put(ndlp);
1438		return 0;
1439	}
1440	return 1;
1441}
1442
1443/**
1444 * lpfc_more_plogi - Check and issue remaining plogis for a vport
1445 * @vport: pointer to a host virtual N_Port data structure.
1446 *
1447 * This routine checks whether there are more remaining Port Logins
1448 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
1449 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
1450 * to issue ELS PLOGIs up to the configured discover threads with the
1451 * @vport (@vport->cfg_discovery_threads). The function also decrement
1452 * the @vport's num_disc_node by 1 if it is not already 0.
1453 **/
1454void
1455lpfc_more_plogi(struct lpfc_vport *vport)
1456{
1457	int sentplogi;
1458
1459	if (vport->num_disc_nodes)
1460		vport->num_disc_nodes--;
1461
1462	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
1463	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1464			 "0232 Continue discovery with %d PLOGIs to go "
1465			 "Data: x%x x%x x%x\n",
1466			 vport->num_disc_nodes, vport->fc_plogi_cnt,
1467			 vport->fc_flag, vport->port_state);
1468	/* Check to see if there are more PLOGIs to be sent */
1469	if (vport->fc_flag & FC_NLP_MORE)
1470		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
1471		sentplogi = lpfc_els_disc_plogi(vport);
1472
1473	return;
1474}
1475
1476/**
1477 * lpfc_plogi_confirm_nport - Confirm pologi wwpn matches stored ndlp
1478 * @phba: pointer to lpfc hba data structure.
1479 * @prsp: pointer to response IOCB payload.
1480 * @ndlp: pointer to a node-list data structure.
1481 *
1482 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
1483 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_POrt.
1484 * The following cases are considered N_Port confirmed:
1485 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
1486 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
1487 * it does not have WWPN assigned either. If the WWPN is confirmed, the
1488 * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
1489 * 1) if there is a node on vport list other than the @ndlp with the same
1490 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
1491 * on that node to release the RPI associated with the node; 2) if there is
1492 * no node found on vport list with the same WWPN of the N_Port PLOGI logged
1493 * into, a new node shall be allocated (or activated). In either case, the
1494 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
1495 * be released and the new_ndlp shall be put on to the vport node list and
1496 * its pointer returned as the confirmed node.
1497 *
1498 * Note that before the @ndlp got "released", the keepDID from not-matching
1499 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
1500 * of the @ndlp. This is because the release of @ndlp is actually to put it
1501 * into an inactive state on the vport node list and the vport node list
1502 * management algorithm does not allow two node with a same DID.
1503 *
1504 * Return code
1505 *   pointer to the PLOGI N_Port @ndlp
1506 **/
1507static struct lpfc_nodelist *
1508lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1509			 struct lpfc_nodelist *ndlp)
1510{
1511	struct lpfc_vport    *vport = ndlp->vport;
1512	struct lpfc_nodelist *new_ndlp;
1513	struct lpfc_rport_data *rdata;
1514	struct fc_rport *rport;
1515	struct serv_parm *sp;
1516	uint8_t  name[sizeof(struct lpfc_name)];
1517	uint32_t rc, keepDID = 0;
1518	int  put_node;
1519	int  put_rport;
1520	unsigned long *active_rrqs_xri_bitmap = NULL;
1521
1522	/* Fabric nodes can have the same WWPN so we don't bother searching
1523	 * by WWPN.  Just return the ndlp that was given to us.
1524	 */
1525	if (ndlp->nlp_type & NLP_FABRIC)
1526		return ndlp;
1527
1528	sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
1529	memset(name, 0, sizeof(struct lpfc_name));
1530
1531	/* Now we find out if the NPort we are logging into, matches the WWPN
1532	 * we have for that ndlp. If not, we have some work to do.
1533	 */
1534	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
1535
1536	if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
1537		return ndlp;
1538	if (phba->sli_rev == LPFC_SLI_REV4) {
1539		active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
1540						       GFP_KERNEL);
1541		if (active_rrqs_xri_bitmap)
1542			memset(active_rrqs_xri_bitmap, 0,
1543			       phba->cfg_rrq_xri_bitmap_sz);
1544	}
1545
1546	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1547		 "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n",
1548		 ndlp, ndlp->nlp_DID, new_ndlp);
1549
1550	if (!new_ndlp) {
1551		rc = memcmp(&ndlp->nlp_portname, name,
1552			    sizeof(struct lpfc_name));
1553		if (!rc) {
1554			if (active_rrqs_xri_bitmap)
1555				mempool_free(active_rrqs_xri_bitmap,
1556					     phba->active_rrq_pool);
1557			return ndlp;
1558		}
1559		new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
1560		if (!new_ndlp) {
1561			if (active_rrqs_xri_bitmap)
1562				mempool_free(active_rrqs_xri_bitmap,
1563					     phba->active_rrq_pool);
1564			return ndlp;
1565		}
1566		lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
1567	} else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
1568		rc = memcmp(&ndlp->nlp_portname, name,
1569			    sizeof(struct lpfc_name));
1570		if (!rc) {
1571			if (active_rrqs_xri_bitmap)
1572				mempool_free(active_rrqs_xri_bitmap,
1573					     phba->active_rrq_pool);
1574			return ndlp;
1575		}
1576		new_ndlp = lpfc_enable_node(vport, new_ndlp,
1577						NLP_STE_UNUSED_NODE);
1578		if (!new_ndlp) {
1579			if (active_rrqs_xri_bitmap)
1580				mempool_free(active_rrqs_xri_bitmap,
1581					     phba->active_rrq_pool);
1582			return ndlp;
1583		}
1584		keepDID = new_ndlp->nlp_DID;
1585		if ((phba->sli_rev == LPFC_SLI_REV4) && active_rrqs_xri_bitmap)
1586			memcpy(active_rrqs_xri_bitmap,
1587			       new_ndlp->active_rrqs_xri_bitmap,
1588			       phba->cfg_rrq_xri_bitmap_sz);
1589	} else {
1590		keepDID = new_ndlp->nlp_DID;
1591		if (phba->sli_rev == LPFC_SLI_REV4 &&
1592		    active_rrqs_xri_bitmap)
1593			memcpy(active_rrqs_xri_bitmap,
1594			       new_ndlp->active_rrqs_xri_bitmap,
1595			       phba->cfg_rrq_xri_bitmap_sz);
1596	}
1597
1598	lpfc_unreg_rpi(vport, new_ndlp);
1599	new_ndlp->nlp_DID = ndlp->nlp_DID;
1600	new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
1601	if (phba->sli_rev == LPFC_SLI_REV4)
1602		memcpy(new_ndlp->active_rrqs_xri_bitmap,
1603		       ndlp->active_rrqs_xri_bitmap,
1604		       phba->cfg_rrq_xri_bitmap_sz);
1605
1606	if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
1607		new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1608	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1609
1610	/* Set state will put new_ndlp on to node list if not already done */
1611	lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
1612
1613	/* Move this back to NPR state */
1614	if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
1615		/* The new_ndlp is replacing ndlp totally, so we need
1616		 * to put ndlp on UNUSED list and try to free it.
1617		 */
1618		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1619			 "3179 PLOGI confirm NEW: %x %x\n",
1620			 new_ndlp->nlp_DID, keepDID);
1621
1622		/* Fix up the rport accordingly */
1623		rport =  ndlp->rport;
1624		if (rport) {
1625			rdata = rport->dd_data;
1626			if (rdata->pnode == ndlp) {
1627				lpfc_nlp_put(ndlp);
1628				ndlp->rport = NULL;
1629				rdata->pnode = lpfc_nlp_get(new_ndlp);
1630				new_ndlp->rport = rport;
1631			}
1632			new_ndlp->nlp_type = ndlp->nlp_type;
1633		}
1634		/* We shall actually free the ndlp with both nlp_DID and
1635		 * nlp_portname fields equals 0 to avoid any ndlp on the
1636		 * nodelist never to be used.
1637		 */
1638		if (ndlp->nlp_DID == 0) {
1639			spin_lock_irq(&phba->ndlp_lock);
1640			NLP_SET_FREE_REQ(ndlp);
1641			spin_unlock_irq(&phba->ndlp_lock);
1642		}
1643
1644		/* Two ndlps cannot have the same did on the nodelist */
1645		ndlp->nlp_DID = keepDID;
1646		if (phba->sli_rev == LPFC_SLI_REV4 &&
1647		    active_rrqs_xri_bitmap)
1648			memcpy(ndlp->active_rrqs_xri_bitmap,
1649			       active_rrqs_xri_bitmap,
1650			       phba->cfg_rrq_xri_bitmap_sz);
1651		lpfc_drop_node(vport, ndlp);
1652	}
1653	else {
1654		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1655			 "3180 PLOGI confirm SWAP: %x %x\n",
1656			 new_ndlp->nlp_DID, keepDID);
1657
1658		lpfc_unreg_rpi(vport, ndlp);
1659
1660		/* Two ndlps cannot have the same did */
1661		ndlp->nlp_DID = keepDID;
1662		if (phba->sli_rev == LPFC_SLI_REV4 &&
1663		    active_rrqs_xri_bitmap)
1664			memcpy(ndlp->active_rrqs_xri_bitmap,
1665			       active_rrqs_xri_bitmap,
1666			       phba->cfg_rrq_xri_bitmap_sz);
1667
1668		/* Since we are swapping the ndlp passed in with the new one
1669		 * and the did has already been swapped, copy over state.
1670		 * The new WWNs are already in new_ndlp since thats what
1671		 * we looked it up by in the begining of this routine.
1672		 */
1673		new_ndlp->nlp_state = ndlp->nlp_state;
1674
1675		/* Since we are switching over to the new_ndlp, the old
1676		 * ndlp should be put in the NPR state, unless we have
1677		 * already started re-discovery on it.
1678		 */
1679		if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
1680		    (ndlp->nlp_state == NLP_STE_MAPPED_NODE))
1681			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1682
1683		/* Fix up the rport accordingly */
1684		rport = ndlp->rport;
1685		if (rport) {
1686			rdata = rport->dd_data;
1687			put_node = rdata->pnode != NULL;
1688			put_rport = ndlp->rport != NULL;
1689			rdata->pnode = NULL;
1690			ndlp->rport = NULL;
1691			if (put_node)
1692				lpfc_nlp_put(ndlp);
1693			if (put_rport)
1694				put_device(&rport->dev);
1695		}
1696	}
1697	if (phba->sli_rev == LPFC_SLI_REV4 &&
1698	    active_rrqs_xri_bitmap)
1699		mempool_free(active_rrqs_xri_bitmap,
1700			     phba->active_rrq_pool);
1701	return new_ndlp;
1702}
1703
1704/**
1705 * lpfc_end_rscn - Check and handle more rscn for a vport
1706 * @vport: pointer to a host virtual N_Port data structure.
1707 *
1708 * This routine checks whether more Registration State Change
1709 * Notifications (RSCNs) came in while the discovery state machine was in
1710 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
1711 * invoked to handle the additional RSCNs for the @vport. Otherwise, the
1712 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
1713 * handling the RSCNs.
1714 **/
1715void
1716lpfc_end_rscn(struct lpfc_vport *vport)
1717{
1718	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1719
1720	if (vport->fc_flag & FC_RSCN_MODE) {
1721		/*
1722		 * Check to see if more RSCNs came in while we were
1723		 * processing this one.
1724		 */
1725		if (vport->fc_rscn_id_cnt ||
1726		    (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
1727			lpfc_els_handle_rscn(vport);
1728		else {
1729			spin_lock_irq(shost->host_lock);
1730			vport->fc_flag &= ~FC_RSCN_MODE;
1731			spin_unlock_irq(shost->host_lock);
1732		}
1733	}
1734}
1735
1736/**
1737 * lpfc_cmpl_els_rrq - Completion handled for els RRQs.
1738 * @phba: pointer to lpfc hba data structure.
1739 * @cmdiocb: pointer to lpfc command iocb data structure.
1740 * @rspiocb: pointer to lpfc response iocb data structure.
1741 *
1742 * This routine will call the clear rrq function to free the rrq and
1743 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
1744 * exist then the clear_rrq is still called because the rrq needs to
1745 * be freed.
1746 **/
1747
1748static void
1749lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1750		    struct lpfc_iocbq *rspiocb)
1751{
1752	struct lpfc_vport *vport = cmdiocb->vport;
1753	IOCB_t *irsp;
1754	struct lpfc_nodelist *ndlp;
1755	struct lpfc_node_rrq *rrq;
1756
1757	/* we pass cmdiocb to state machine which needs rspiocb as well */
1758	rrq = cmdiocb->context_un.rrq;
1759	cmdiocb->context_un.rsp_iocb = rspiocb;
1760
1761	irsp = &rspiocb->iocb;
1762	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1763		"RRQ cmpl:      status:x%x/x%x did:x%x",
1764		irsp->ulpStatus, irsp->un.ulpWord[4],
1765		irsp->un.elsreq64.remoteID);
1766
1767	ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
1768	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) {
1769		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1770				 "2882 RRQ completes to NPort x%x "
1771				 "with no ndlp. Data: x%x x%x x%x\n",
1772				 irsp->un.elsreq64.remoteID,
1773				 irsp->ulpStatus, irsp->un.ulpWord[4],
1774				 irsp->ulpIoTag);
1775		goto out;
1776	}
1777
1778	/* rrq completes to NPort <nlp_DID> */
1779	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1780			 "2880 RRQ completes to NPort x%x "
1781			 "Data: x%x x%x x%x x%x x%x\n",
1782			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1783			 irsp->ulpTimeout, rrq->xritag, rrq->rxid);
1784
1785	if (irsp->ulpStatus) {
1786		/* Check for retry */
1787		/* RRQ failed Don't print the vport to vport rjts */
1788		if (irsp->ulpStatus != IOSTAT_LS_RJT ||
1789			(((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
1790			((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
1791			(phba)->pport->cfg_log_verbose & LOG_ELS)
1792			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1793				 "2881 RRQ failure DID:%06X Status:x%x/x%x\n",
1794				 ndlp->nlp_DID, irsp->ulpStatus,
1795				 irsp->un.ulpWord[4]);
1796	}
1797out:
1798	if (rrq)
1799		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1800	lpfc_els_free_iocb(phba, cmdiocb);
1801	return;
1802}
1803/**
1804 * lpfc_cmpl_els_plogi - Completion callback function for plogi
1805 * @phba: pointer to lpfc hba data structure.
1806 * @cmdiocb: pointer to lpfc command iocb data structure.
1807 * @rspiocb: pointer to lpfc response iocb data structure.
1808 *
1809 * This routine is the completion callback function for issuing the Port
1810 * Login (PLOGI) command. For PLOGI completion, there must be an active
1811 * ndlp on the vport node list that matches the remote node ID from the
1812 * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply
1813 * ignored and command IOCB released. The PLOGI response IOCB status is
1814 * checked for error conditons. If there is error status reported, PLOGI
1815 * retry shall be attempted by invoking the lpfc_els_retry() routine.
1816 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
1817 * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine
1818 * (DSM) is set for this PLOGI completion. Finally, it checks whether
1819 * there are additional N_Port nodes with the vport that need to perform
1820 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue addition
1821 * PLOGIs.
1822 **/
1823static void
1824lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1825		    struct lpfc_iocbq *rspiocb)
1826{
1827	struct lpfc_vport *vport = cmdiocb->vport;
1828	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1829	IOCB_t *irsp;
1830	struct lpfc_nodelist *ndlp;
1831	struct lpfc_dmabuf *prsp;
1832	int disc, rc;
1833
1834	/* we pass cmdiocb to state machine which needs rspiocb as well */
1835	cmdiocb->context_un.rsp_iocb = rspiocb;
1836
1837	irsp = &rspiocb->iocb;
1838	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1839		"PLOGI cmpl:      status:x%x/x%x did:x%x",
1840		irsp->ulpStatus, irsp->un.ulpWord[4],
1841		irsp->un.elsreq64.remoteID);
1842
1843	ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
1844	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1845		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1846				 "0136 PLOGI completes to NPort x%x "
1847				 "with no ndlp. Data: x%x x%x x%x\n",
1848				 irsp->un.elsreq64.remoteID,
1849				 irsp->ulpStatus, irsp->un.ulpWord[4],
1850				 irsp->ulpIoTag);
1851		goto out;
1852	}
1853
1854	/* Since ndlp can be freed in the disc state machine, note if this node
1855	 * is being used during discovery.
1856	 */
1857	spin_lock_irq(shost->host_lock);
1858	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
1859	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1860	spin_unlock_irq(shost->host_lock);
1861	rc   = 0;
1862
1863	/* PLOGI completes to NPort <nlp_DID> */
1864	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1865			 "0102 PLOGI completes to NPort x%x "
1866			 "Data: x%x x%x x%x x%x x%x\n",
1867			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1868			 irsp->ulpTimeout, disc, vport->num_disc_nodes);
1869	/* Check to see if link went down during discovery */
1870	if (lpfc_els_chk_latt(vport)) {
1871		spin_lock_irq(shost->host_lock);
1872		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1873		spin_unlock_irq(shost->host_lock);
1874		goto out;
1875	}
1876
1877	if (irsp->ulpStatus) {
1878		/* Check for retry */
1879		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1880			/* ELS command is being retried */
1881			if (disc) {
1882				spin_lock_irq(shost->host_lock);
1883				ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1884				spin_unlock_irq(shost->host_lock);
1885			}
1886			goto out;
1887		}
1888		/* PLOGI failed Don't print the vport to vport rjts */
1889		if (irsp->ulpStatus != IOSTAT_LS_RJT ||
1890			(((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
1891			((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
1892			(phba)->pport->cfg_log_verbose & LOG_ELS)
1893			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1894				 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
1895				 ndlp->nlp_DID, irsp->ulpStatus,
1896				 irsp->un.ulpWord[4]);
1897		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1898		if (lpfc_error_lost_link(irsp))
1899			rc = NLP_STE_FREED_NODE;
1900		else
1901			rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1902						     NLP_EVT_CMPL_PLOGI);
1903	} else {
1904		/* Good status, call state machine */
1905		prsp = list_entry(((struct lpfc_dmabuf *)
1906				   cmdiocb->context2)->list.next,
1907				  struct lpfc_dmabuf, list);
1908		ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
1909		rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1910					     NLP_EVT_CMPL_PLOGI);
1911	}
1912
1913	if (disc && vport->num_disc_nodes) {
1914		/* Check to see if there are more PLOGIs to be sent */
1915		lpfc_more_plogi(vport);
1916
1917		if (vport->num_disc_nodes == 0) {
1918			spin_lock_irq(shost->host_lock);
1919			vport->fc_flag &= ~FC_NDISC_ACTIVE;
1920			spin_unlock_irq(shost->host_lock);
1921
1922			lpfc_can_disctmo(vport);
1923			lpfc_end_rscn(vport);
1924		}
1925	}
1926
1927out:
1928	lpfc_els_free_iocb(phba, cmdiocb);
1929	return;
1930}
1931
1932/**
1933 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport
1934 * @vport: pointer to a host virtual N_Port data structure.
1935 * @did: destination port identifier.
1936 * @retry: number of retries to the command IOCB.
1937 *
1938 * This routine issues a Port Login (PLOGI) command to a remote N_Port
1939 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
1940 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
1941 * This routine constructs the proper feilds of the PLOGI IOCB and invokes
1942 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
1943 *
1944 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1945 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1946 * will be stored into the context1 field of the IOCB for the completion
1947 * callback function to the PLOGI ELS command.
1948 *
1949 * Return code
1950 *   0 - Successfully issued a plogi for @vport
1951 *   1 - failed to issue a plogi for @vport
1952 **/
1953int
1954lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1955{
1956	struct lpfc_hba  *phba = vport->phba;
1957	struct serv_parm *sp;
1958	IOCB_t *icmd;
1959	struct lpfc_nodelist *ndlp;
1960	struct lpfc_iocbq *elsiocb;
1961	struct lpfc_sli *psli;
1962	uint8_t *pcmd;
1963	uint16_t cmdsize;
1964	int ret;
1965
1966	psli = &phba->sli;
1967
1968	ndlp = lpfc_findnode_did(vport, did);
1969	if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
1970		ndlp = NULL;
1971
1972	/* If ndlp is not NULL, we will bump the reference count on it */
1973	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
1974	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
1975				     ELS_CMD_PLOGI);
1976	if (!elsiocb)
1977		return 1;
1978
1979	icmd = &elsiocb->iocb;
1980	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1981
1982	/* For PLOGI request, remainder of payload is service parameters */
1983	*((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
1984	pcmd += sizeof(uint32_t);
1985	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
1986	sp = (struct serv_parm *) pcmd;
1987
1988	/*
1989	 * If we are a N-port connected to a Fabric, fix-up paramm's so logins
1990	 * to device on remote loops work.
1991	 */
1992	if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
1993		sp->cmn.altBbCredit = 1;
1994
1995	if (sp->cmn.fcphLow < FC_PH_4_3)
1996		sp->cmn.fcphLow = FC_PH_4_3;
1997
1998	if (sp->cmn.fcphHigh < FC_PH3)
1999		sp->cmn.fcphHigh = FC_PH3;
2000
2001	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2002		"Issue PLOGI:     did:x%x",
2003		did, 0, 0);
2004
2005	phba->fc_stat.elsXmitPLOGI++;
2006	elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
2007	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2008
2009	if (ret == IOCB_ERROR) {
2010		lpfc_els_free_iocb(phba, elsiocb);
2011		return 1;
2012	}
2013	return 0;
2014}
2015
2016/**
2017 * lpfc_cmpl_els_prli - Completion callback function for prli
2018 * @phba: pointer to lpfc hba data structure.
2019 * @cmdiocb: pointer to lpfc command iocb data structure.
2020 * @rspiocb: pointer to lpfc response iocb data structure.
2021 *
2022 * This routine is the completion callback function for a Process Login
2023 * (PRLI) ELS command. The PRLI response IOCB status is checked for error
2024 * status. If there is error status reported, PRLI retry shall be attempted
2025 * by invoking the lpfc_els_retry() routine. Otherwise, the state
2026 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this
2027 * ndlp to mark the PRLI completion.
2028 **/
2029static void
2030lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2031		   struct lpfc_iocbq *rspiocb)
2032{
2033	struct lpfc_vport *vport = cmdiocb->vport;
2034	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
2035	IOCB_t *irsp;
2036	struct lpfc_sli *psli;
2037	struct lpfc_nodelist *ndlp;
2038
2039	psli = &phba->sli;
2040	/* we pass cmdiocb to state machine which needs rspiocb as well */
2041	cmdiocb->context_un.rsp_iocb = rspiocb;
2042
2043	irsp = &(rspiocb->iocb);
2044	ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2045	spin_lock_irq(shost->host_lock);
2046	ndlp->nlp_flag &= ~NLP_PRLI_SND;
2047	spin_unlock_irq(shost->host_lock);
2048
2049	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2050		"PRLI cmpl:       status:x%x/x%x did:x%x",
2051		irsp->ulpStatus, irsp->un.ulpWord[4],
2052		ndlp->nlp_DID);
2053	/* PRLI completes to NPort <nlp_DID> */
2054	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2055			 "0103 PRLI completes to NPort x%x "
2056			 "Data: x%x x%x x%x x%x\n",
2057			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2058			 irsp->ulpTimeout, vport->num_disc_nodes);
2059
2060	vport->fc_prli_sent--;
2061	/* Check to see if link went down during discovery */
2062	if (lpfc_els_chk_latt(vport))
2063		goto out;
2064
2065	if (irsp->ulpStatus) {
2066		/* Check for retry */
2067		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2068			/* ELS command is being retried */
2069			goto out;
2070		}
2071		/* PRLI failed */
2072		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2073				 "2754 PRLI failure DID:%06X Status:x%x/x%x\n",
2074				 ndlp->nlp_DID, irsp->ulpStatus,
2075				 irsp->un.ulpWord[4]);
2076		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2077		if (lpfc_error_lost_link(irsp))
2078			goto out;
2079		else
2080			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2081						NLP_EVT_CMPL_PRLI);
2082	} else
2083		/* Good status, call state machine */
2084		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2085					NLP_EVT_CMPL_PRLI);
2086out:
2087	lpfc_els_free_iocb(phba, cmdiocb);
2088	return;
2089}
2090
2091/**
2092 * lpfc_issue_els_prli - Issue a prli iocb command for a vport
2093 * @vport: pointer to a host virtual N_Port data structure.
2094 * @ndlp: pointer to a node-list data structure.
2095 * @retry: number of retries to the command IOCB.
2096 *
2097 * This routine issues a Process Login (PRLI) ELS command for the
2098 * @vport. The PRLI service parameters are set up in the payload of the
2099 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine
2100 * is put to the IOCB completion callback func field before invoking the
2101 * routine lpfc_sli_issue_iocb() to send out PRLI command.
2102 *
2103 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2104 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2105 * will be stored into the context1 field of the IOCB for the completion
2106 * callback function to the PRLI ELS command.
2107 *
2108 * Return code
2109 *   0 - successfully issued prli iocb command for @vport
2110 *   1 - failed to issue prli iocb command for @vport
2111 **/
2112int
2113lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2114		    uint8_t retry)
2115{
2116	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2117	struct lpfc_hba *phba = vport->phba;
2118	PRLI *npr;
2119	IOCB_t *icmd;
2120	struct lpfc_iocbq *elsiocb;
2121	uint8_t *pcmd;
2122	uint16_t cmdsize;
2123
2124	cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
2125	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2126				     ndlp->nlp_DID, ELS_CMD_PRLI);
2127	if (!elsiocb)
2128		return 1;
2129
2130	icmd = &elsiocb->iocb;
2131	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2132
2133	/* For PRLI request, remainder of payload is service parameters */
2134	memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t)));
2135	*((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
2136	pcmd += sizeof(uint32_t);
2137
2138	/* For PRLI, remainder of payload is PRLI parameter page */
2139	npr = (PRLI *) pcmd;
2140	/*
2141	 * If our firmware version is 3.20 or later,
2142	 * set the following bits for FC-TAPE support.
2143	 */
2144	if (phba->vpd.rev.feaLevelHigh >= 0x02) {
2145		npr->ConfmComplAllowed = 1;
2146		npr->Retry = 1;
2147		npr->TaskRetryIdReq = 1;
2148	}
2149	npr->estabImagePair = 1;
2150	npr->readXferRdyDis = 1;
2151	 if (vport->cfg_first_burst_size)
2152		npr->writeXferRdyDis = 1;
2153
2154	/* For FCP support */
2155	npr->prliType = PRLI_FCP_TYPE;
2156	npr->initiatorFunc = 1;
2157
2158	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2159		"Issue PRLI:      did:x%x",
2160		ndlp->nlp_DID, 0, 0);
2161
2162	phba->fc_stat.elsXmitPRLI++;
2163	elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
2164	spin_lock_irq(shost->host_lock);
2165	ndlp->nlp_flag |= NLP_PRLI_SND;
2166	spin_unlock_irq(shost->host_lock);
2167	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2168	    IOCB_ERROR) {
2169		spin_lock_irq(shost->host_lock);
2170		ndlp->nlp_flag &= ~NLP_PRLI_SND;
2171		spin_unlock_irq(shost->host_lock);
2172		lpfc_els_free_iocb(phba, elsiocb);
2173		return 1;
2174	}
2175	vport->fc_prli_sent++;
2176	return 0;
2177}
2178
2179/**
2180 * lpfc_rscn_disc - Perform rscn discovery for a vport
2181 * @vport: pointer to a host virtual N_Port data structure.
2182 *
2183 * This routine performs Registration State Change Notification (RSCN)
2184 * discovery for a @vport. If the @vport's node port recovery count is not
2185 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all
2186 * the nodes that need recovery. If none of the PLOGI were needed through
2187 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
2188 * invoked to check and handle possible more RSCN came in during the period
2189 * of processing the current ones.
2190 **/
2191static void
2192lpfc_rscn_disc(struct lpfc_vport *vport)
2193{
2194	lpfc_can_disctmo(vport);
2195
2196	/* RSCN discovery */
2197	/* go thru NPR nodes and issue ELS PLOGIs */
2198	if (vport->fc_npr_cnt)
2199		if (lpfc_els_disc_plogi(vport))
2200			return;
2201
2202	lpfc_end_rscn(vport);
2203}
2204
2205/**
2206 * lpfc_adisc_done - Complete the adisc phase of discovery
2207 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
2208 *
2209 * This function is called when the final ADISC is completed during discovery.
2210 * This function handles clearing link attention or issuing reg_vpi depending
2211 * on whether npiv is enabled. This function also kicks off the PLOGI phase of
2212 * discovery.
2213 * This function is called with no locks held.
2214 **/
2215static void
2216lpfc_adisc_done(struct lpfc_vport *vport)
2217{
2218	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
2219	struct lpfc_hba   *phba = vport->phba;
2220
2221	/*
2222	 * For NPIV, cmpl_reg_vpi will set port_state to READY,
2223	 * and continue discovery.
2224	 */
2225	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2226	    !(vport->fc_flag & FC_RSCN_MODE) &&
2227	    (phba->sli_rev < LPFC_SLI_REV4)) {
2228		/* The ADISCs are complete.  Doesn't matter if they
2229		 * succeeded or failed because the ADISC completion
2230		 * routine guarantees to call the state machine and
2231		 * the RPI is either unregistered (failed ADISC response)
2232		 * or the RPI is still valid and the node is marked
2233		 * mapped for a target.  The exchanges should be in the
2234		 * correct state. This code is specific to SLI3.
2235		 */
2236		lpfc_issue_clear_la(phba, vport);
2237		lpfc_issue_reg_vpi(phba, vport);
2238		return;
2239	}
2240	/*
2241	* For SLI2, we need to set port_state to READY
2242	* and continue discovery.
2243	*/
2244	if (vport->port_state < LPFC_VPORT_READY) {
2245		/* If we get here, there is nothing to ADISC */
2246		lpfc_issue_clear_la(phba, vport);
2247		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
2248			vport->num_disc_nodes = 0;
2249			/* go thru NPR list, issue ELS PLOGIs */
2250			if (vport->fc_npr_cnt)
2251				lpfc_els_disc_plogi(vport);
2252			if (!vport->num_disc_nodes) {
2253				spin_lock_irq(shost->host_lock);
2254				vport->fc_flag &= ~FC_NDISC_ACTIVE;
2255				spin_unlock_irq(shost->host_lock);
2256				lpfc_can_disctmo(vport);
2257				lpfc_end_rscn(vport);
2258			}
2259		}
2260		vport->port_state = LPFC_VPORT_READY;
2261	} else
2262		lpfc_rscn_disc(vport);
2263}
2264
2265/**
2266 * lpfc_more_adisc - Issue more adisc as needed
2267 * @vport: pointer to a host virtual N_Port data structure.
2268 *
2269 * This routine determines whether there are more ndlps on a @vport
2270 * node list need to have Address Discover (ADISC) issued. If so, it will
2271 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's
2272 * remaining nodes which need to have ADISC sent.
2273 **/
2274void
2275lpfc_more_adisc(struct lpfc_vport *vport)
2276{
2277	if (vport->num_disc_nodes)
2278		vport->num_disc_nodes--;
2279	/* Continue discovery with <num_disc_nodes> ADISCs to go */
2280	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2281			 "0210 Continue discovery with %d ADISCs to go "
2282			 "Data: x%x x%x x%x\n",
2283			 vport->num_disc_nodes, vport->fc_adisc_cnt,
2284			 vport->fc_flag, vport->port_state);
2285	/* Check to see if there are more ADISCs to be sent */
2286	if (vport->fc_flag & FC_NLP_MORE) {
2287		lpfc_set_disctmo(vport);
2288		/* go thru NPR nodes and issue any remaining ELS ADISCs */
2289		lpfc_els_disc_adisc(vport);
2290	}
2291	if (!vport->num_disc_nodes)
2292		lpfc_adisc_done(vport);
2293	return;
2294}
2295
2296/**
2297 * lpfc_cmpl_els_adisc - Completion callback function for adisc
2298 * @phba: pointer to lpfc hba data structure.
2299 * @cmdiocb: pointer to lpfc command iocb data structure.
2300 * @rspiocb: pointer to lpfc response iocb data structure.
2301 *
2302 * This routine is the completion function for issuing the Address Discover
2303 * (ADISC) command. It first checks to see whether link went down during
2304 * the discovery process. If so, the node will be marked as node port
2305 * recovery for issuing discover IOCB by the link attention handler and
2306 * exit. Otherwise, the response status is checked. If error was reported
2307 * in the response status, the ADISC command shall be retried by invoking
2308 * the lpfc_els_retry() routine. Otherwise, if no error was reported in
2309 * the response status, the state machine is invoked to set transition
2310 * with respect to NLP_EVT_CMPL_ADISC event.
2311 **/
2312static void
2313lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2314		    struct lpfc_iocbq *rspiocb)
2315{
2316	struct lpfc_vport *vport = cmdiocb->vport;
2317	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
2318	IOCB_t *irsp;
2319	struct lpfc_nodelist *ndlp;
2320	int  disc;
2321
2322	/* we pass cmdiocb to state machine which needs rspiocb as well */
2323	cmdiocb->context_un.rsp_iocb = rspiocb;
2324
2325	irsp = &(rspiocb->iocb);
2326	ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2327
2328	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2329		"ADISC cmpl:      status:x%x/x%x did:x%x",
2330		irsp->ulpStatus, irsp->un.ulpWord[4],
2331		ndlp->nlp_DID);
2332
2333	/* Since ndlp can be freed in the disc state machine, note if this node
2334	 * is being used during discovery.
2335	 */
2336	spin_lock_irq(shost->host_lock);
2337	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
2338	ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
2339	spin_unlock_irq(shost->host_lock);
2340	/* ADISC completes to NPort <nlp_DID> */
2341	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2342			 "0104 ADISC completes to NPort x%x "
2343			 "Data: x%x x%x x%x x%x x%x\n",
2344			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2345			 irsp->ulpTimeout, disc, vport->num_disc_nodes);
2346	/* Check to see if link went down during discovery */
2347	if (lpfc_els_chk_latt(vport)) {
2348		spin_lock_irq(shost->host_lock);
2349		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2350		spin_unlock_irq(shost->host_lock);
2351		goto out;
2352	}
2353
2354	if (irsp->ulpStatus) {
2355		/* Check for retry */
2356		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2357			/* ELS command is being retried */
2358			if (disc) {
2359				spin_lock_irq(shost->host_lock);
2360				ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2361				spin_unlock_irq(shost->host_lock);
2362				lpfc_set_disctmo(vport);
2363			}
2364			goto out;
2365		}
2366		/* ADISC failed */
2367		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2368				 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
2369				 ndlp->nlp_DID, irsp->ulpStatus,
2370				 irsp->un.ulpWord[4]);
2371		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2372		if (!lpfc_error_lost_link(irsp))
2373			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2374						NLP_EVT_CMPL_ADISC);
2375	} else
2376		/* Good status, call state machine */
2377		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2378					NLP_EVT_CMPL_ADISC);
2379
2380	/* Check to see if there are more ADISCs to be sent */
2381	if (disc && vport->num_disc_nodes)
2382		lpfc_more_adisc(vport);
2383out:
2384	lpfc_els_free_iocb(phba, cmdiocb);
2385	return;
2386}
2387
2388/**
2389 * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport
2390 * @vport: pointer to a virtual N_Port data structure.
2391 * @ndlp: pointer to a node-list data structure.
2392 * @retry: number of retries to the command IOCB.
2393 *
2394 * This routine issues an Address Discover (ADISC) for an @ndlp on a
2395 * @vport. It prepares the payload of the ADISC ELS command, updates the
2396 * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
2397 * to issue the ADISC ELS command.
2398 *
2399 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2400 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2401 * will be stored into the context1 field of the IOCB for the completion
2402 * callback function to the ADISC ELS command.
2403 *
2404 * Return code
2405 *   0 - successfully issued adisc
2406 *   1 - failed to issue adisc
2407 **/
2408int
2409lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2410		     uint8_t retry)
2411{
2412	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2413	struct lpfc_hba  *phba = vport->phba;
2414	ADISC *ap;
2415	IOCB_t *icmd;
2416	struct lpfc_iocbq *elsiocb;
2417	uint8_t *pcmd;
2418	uint16_t cmdsize;
2419
2420	cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
2421	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2422				     ndlp->nlp_DID, ELS_CMD_ADISC);
2423	if (!elsiocb)
2424		return 1;
2425
2426	icmd = &elsiocb->iocb;
2427	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2428
2429	/* For ADISC request, remainder of payload is service parameters */
2430	*((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
2431	pcmd += sizeof(uint32_t);
2432
2433	/* Fill in ADISC payload */
2434	ap = (ADISC *) pcmd;
2435	ap->hardAL_PA = phba->fc_pref_ALPA;
2436	memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2437	memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2438	ap->DID = be32_to_cpu(vport->fc_myDID);
2439
2440	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2441		"Issue ADISC:     did:x%x",
2442		ndlp->nlp_DID, 0, 0);
2443
2444	phba->fc_stat.elsXmitADISC++;
2445	elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
2446	spin_lock_irq(shost->host_lock);
2447	ndlp->nlp_flag |= NLP_ADISC_SND;
2448	spin_unlock_irq(shost->host_lock);
2449	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2450	    IOCB_ERROR) {
2451		spin_lock_irq(shost->host_lock);
2452		ndlp->nlp_flag &= ~NLP_ADISC_SND;
2453		spin_unlock_irq(shost->host_lock);
2454		lpfc_els_free_iocb(phba, elsiocb);
2455		return 1;
2456	}
2457	return 0;
2458}
2459
2460/**
2461 * lpfc_cmpl_els_logo - Completion callback function for logo
2462 * @phba: pointer to lpfc hba data structure.
2463 * @cmdiocb: pointer to lpfc command iocb data structure.
2464 * @rspiocb: pointer to lpfc response iocb data structure.
2465 *
2466 * This routine is the completion function for issuing the ELS Logout (LOGO)
2467 * command. If no error status was reported from the LOGO response, the
2468 * state machine of the associated ndlp shall be invoked for transition with
2469 * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported,
2470 * the lpfc_els_retry() routine will be invoked to retry the LOGO command.
2471 **/
2472static void
2473lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2474		   struct lpfc_iocbq *rspiocb)
2475{
2476	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2477	struct lpfc_vport *vport = ndlp->vport;
2478	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
2479	IOCB_t *irsp;
2480	struct lpfc_sli *psli;
2481	struct lpfcMboxq *mbox;
2482	unsigned long flags;
2483	uint32_t skip_recovery = 0;
2484
2485	psli = &phba->sli;
2486	/* we pass cmdiocb to state machine which needs rspiocb as well */
2487	cmdiocb->context_un.rsp_iocb = rspiocb;
2488
2489	irsp = &(rspiocb->iocb);
2490	spin_lock_irq(shost->host_lock);
2491	ndlp->nlp_flag &= ~NLP_LOGO_SND;
2492	spin_unlock_irq(shost->host_lock);
2493
2494	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2495		"LOGO cmpl:       status:x%x/x%x did:x%x",
2496		irsp->ulpStatus, irsp->un.ulpWord[4],
2497		ndlp->nlp_DID);
2498
2499	/* LOGO completes to NPort <nlp_DID> */
2500	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2501			 "0105 LOGO completes to NPort x%x "
2502			 "Data: x%x x%x x%x x%x\n",
2503			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2504			 irsp->ulpTimeout, vport->num_disc_nodes);
2505
2506	if (lpfc_els_chk_latt(vport)) {
2507		skip_recovery = 1;
2508		goto out;
2509	}
2510
2511	/* Check to see if link went down during discovery */
2512	if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
2513	        /* NLP_EVT_DEVICE_RM should unregister the RPI
2514		 * which should abort all outstanding IOs.
2515		 */
2516		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2517					NLP_EVT_DEVICE_RM);
2518		skip_recovery = 1;
2519		goto out;
2520	}
2521
2522	if (irsp->ulpStatus) {
2523		/* Check for retry */
2524		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2525			/* ELS command is being retried */
2526			skip_recovery = 1;
2527			goto out;
2528		}
2529		/* LOGO failed */
2530		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2531				 "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
2532				 ndlp->nlp_DID, irsp->ulpStatus,
2533				 irsp->un.ulpWord[4]);
2534		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2535		if (lpfc_error_lost_link(irsp)) {
2536			skip_recovery = 1;
2537			goto out;
2538		}
2539	}
2540
2541	/* Call state machine. This will unregister the rpi if needed. */
2542	lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
2543
2544out:
2545	lpfc_els_free_iocb(phba, cmdiocb);
2546	/* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
2547	if ((vport->fc_flag & FC_PT2PT) &&
2548		!(vport->fc_flag & FC_PT2PT_PLOGI)) {
2549		phba->pport->fc_myDID = 0;
2550		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2551		if (mbox) {
2552			lpfc_config_link(phba, mbox);
2553			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2554			mbox->vport = vport;
2555			if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
2556				MBX_NOT_FINISHED) {
2557				mempool_free(mbox, phba->mbox_mem_pool);
2558				skip_recovery = 1;
2559			}
2560		}
2561	}
2562
2563	/*
2564	 * If the node is a target, the handling attempts to recover the port.
2565	 * For any other port type, the rpi is unregistered as an implicit
2566	 * LOGO.
2567	 */
2568	if ((ndlp->nlp_type & NLP_FCP_TARGET) && (skip_recovery == 0)) {
2569		lpfc_cancel_retry_delay_tmo(vport, ndlp);
2570		spin_lock_irqsave(shost->host_lock, flags);
2571		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2572		spin_unlock_irqrestore(shost->host_lock, flags);
2573
2574		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2575				 "3187 LOGO completes to NPort x%x: Start "
2576				 "Recovery Data: x%x x%x x%x x%x\n",
2577				 ndlp->nlp_DID, irsp->ulpStatus,
2578				 irsp->un.ulpWord[4], irsp->ulpTimeout,
2579				 vport->num_disc_nodes);
2580		lpfc_disc_start(vport);
2581	}
2582	return;
2583}
2584
2585/**
2586 * lpfc_issue_els_logo - Issue a logo to an node on a vport
2587 * @vport: pointer to a virtual N_Port data structure.
2588 * @ndlp: pointer to a node-list data structure.
2589 * @retry: number of retries to the command IOCB.
2590 *
2591 * This routine constructs and issues an ELS Logout (LOGO) iocb command
2592 * to a remote node, referred by an @ndlp on a @vport. It constructs the
2593 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
2594 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
2595 *
2596 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2597 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2598 * will be stored into the context1 field of the IOCB for the completion
2599 * callback function to the LOGO ELS command.
2600 *
2601 * Return code
2602 *   0 - successfully issued logo
2603 *   1 - failed to issue logo
2604 **/
2605int
2606lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2607		    uint8_t retry)
2608{
2609	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2610	struct lpfc_hba  *phba = vport->phba;
2611	IOCB_t *icmd;
2612	struct lpfc_iocbq *elsiocb;
2613	uint8_t *pcmd;
2614	uint16_t cmdsize;
2615	int rc;
2616
2617	spin_lock_irq(shost->host_lock);
2618	if (ndlp->nlp_flag & NLP_LOGO_SND) {
2619		spin_unlock_irq(shost->host_lock);
2620		return 0;
2621	}
2622	spin_unlock_irq(shost->host_lock);
2623
2624	cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
2625	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2626				     ndlp->nlp_DID, ELS_CMD_LOGO);
2627	if (!elsiocb)
2628		return 1;
2629
2630	icmd = &elsiocb->iocb;
2631	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2632	*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
2633	pcmd += sizeof(uint32_t);
2634
2635	/* Fill in LOGO payload */
2636	*((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
2637	pcmd += sizeof(uint32_t);
2638	memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
2639
2640	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2641		"Issue LOGO:      did:x%x",
2642		ndlp->nlp_DID, 0, 0);
2643
2644	/*
2645	 * If we are issuing a LOGO, we may try to recover the remote NPort
2646	 * by issuing a PLOGI later. Even though we issue ELS cmds by the
2647	 * VPI, if we have a valid RPI, and that RPI gets unreg'ed while
2648	 * that ELS command is in-flight, the HBA returns a IOERR_INVALID_RPI
2649	 * for that ELS cmd. To avoid this situation, lets get rid of the
2650	 * RPI right now, before any ELS cmds are sent.
2651	 */
2652	spin_lock_irq(shost->host_lock);
2653	ndlp->nlp_flag |= NLP_ISSUE_LOGO;
2654	spin_unlock_irq(shost->host_lock);
2655	if (lpfc_unreg_rpi(vport, ndlp)) {
2656		lpfc_els_free_iocb(phba, elsiocb);
2657		return 0;
2658	}
2659
2660	phba->fc_stat.elsXmitLOGO++;
2661	elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
2662	spin_lock_irq(shost->host_lock);
2663	ndlp->nlp_flag |= NLP_LOGO_SND;
2664	ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
2665	spin_unlock_irq(shost->host_lock);
2666	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2667
2668	if (rc == IOCB_ERROR) {
2669		spin_lock_irq(shost->host_lock);
2670		ndlp->nlp_flag &= ~NLP_LOGO_SND;
2671		spin_unlock_irq(shost->host_lock);
2672		lpfc_els_free_iocb(phba, elsiocb);
2673		return 1;
2674	}
2675	return 0;
2676}
2677
2678/**
2679 * lpfc_cmpl_els_cmd - Completion callback function for generic els command
2680 * @phba: pointer to lpfc hba data structure.
2681 * @cmdiocb: pointer to lpfc command iocb data structure.
2682 * @rspiocb: pointer to lpfc response iocb data structure.
2683 *
2684 * This routine is a generic completion callback function for ELS commands.
2685 * Specifically, it is the callback function which does not need to perform
2686 * any command specific operations. It is currently used by the ELS command
2687 * issuing routines for the ELS State Change  Request (SCR),
2688 * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution
2689 * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than
2690 * certain debug loggings, this callback function simply invokes the
2691 * lpfc_els_chk_latt() routine to check whether link went down during the
2692 * discovery process.
2693 **/
2694static void
2695lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2696		  struct lpfc_iocbq *rspiocb)
2697{
2698	struct lpfc_vport *vport = cmdiocb->vport;
2699	IOCB_t *irsp;
2700
2701	irsp = &rspiocb->iocb;
2702
2703	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2704		"ELS cmd cmpl:    status:x%x/x%x did:x%x",
2705		irsp->ulpStatus, irsp->un.ulpWord[4],
2706		irsp->un.elsreq64.remoteID);
2707	/* ELS cmd tag <ulpIoTag> completes */
2708	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2709			 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
2710			 irsp->ulpIoTag, irsp->ulpStatus,
2711			 irsp->un.ulpWord[4], irsp->ulpTimeout);
2712	/* Check to see if link went down during discovery */
2713	lpfc_els_chk_latt(vport);
2714	lpfc_els_free_iocb(phba, cmdiocb);
2715	return;
2716}
2717
2718/**
2719 * lpfc_issue_els_scr - Issue a scr to an node on a vport
2720 * @vport: pointer to a host virtual N_Port data structure.
2721 * @nportid: N_Port identifier to the remote node.
2722 * @retry: number of retries to the command IOCB.
2723 *
2724 * This routine issues a State Change Request (SCR) to a fabric node
2725 * on a @vport. The remote node @nportid is passed into the function. It
2726 * first search the @vport node list to find the matching ndlp. If no such
2727 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
2728 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
2729 * routine is invoked to send the SCR IOCB.
2730 *
2731 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2732 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2733 * will be stored into the context1 field of the IOCB for the completion
2734 * callback function to the SCR ELS command.
2735 *
2736 * Return code
2737 *   0 - Successfully issued scr command
2738 *   1 - Failed to issue scr command
2739 **/
2740int
2741lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2742{
2743	struct lpfc_hba  *phba = vport->phba;
2744	IOCB_t *icmd;
2745	struct lpfc_iocbq *elsiocb;
2746	struct lpfc_sli *psli;
2747	uint8_t *pcmd;
2748	uint16_t cmdsize;
2749	struct lpfc_nodelist *ndlp;
2750
2751	psli = &phba->sli;
2752	cmdsize = (sizeof(uint32_t) + sizeof(SCR));
2753
2754	ndlp = lpfc_findnode_did(vport, nportid);
2755	if (!ndlp) {
2756		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
2757		if (!ndlp)
2758			return 1;
2759		lpfc_nlp_init(vport, ndlp, nportid);
2760		lpfc_enqueue_node(vport, ndlp);
2761	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
2762		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
2763		if (!ndlp)
2764			return 1;
2765	}
2766
2767	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2768				     ndlp->nlp_DID, ELS_CMD_SCR);
2769
2770	if (!elsiocb) {
2771		/* This will trigger the release of the node just
2772		 * allocated
2773		 */
2774		lpfc_nlp_put(ndlp);
2775		return 1;
2776	}
2777
2778	icmd = &elsiocb->iocb;
2779	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2780
2781	*((uint32_t *) (pcmd)) = ELS_CMD_SCR;
2782	pcmd += sizeof(uint32_t);
2783
2784	/* For SCR, remainder of payload is SCR parameter page */
2785	memset(pcmd, 0, sizeof(SCR));
2786	((SCR *) pcmd)->Function = SCR_FUNC_FULL;
2787
2788	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2789		"Issue SCR:       did:x%x",
2790		ndlp->nlp_DID, 0, 0);
2791
2792	phba->fc_stat.elsXmitSCR++;
2793	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2794	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2795	    IOCB_ERROR) {
2796		/* The additional lpfc_nlp_put will cause the following
2797		 * lpfc_els_free_iocb routine to trigger the rlease of
2798		 * the node.
2799		 */
2800		lpfc_nlp_put(ndlp);
2801		lpfc_els_free_iocb(phba, elsiocb);
2802		return 1;
2803	}
2804	/* This will cause the callback-function lpfc_cmpl_els_cmd to
2805	 * trigger the release of node.
2806	 */
2807
2808	lpfc_nlp_put(ndlp);
2809	return 0;
2810}
2811
2812/**
2813 * lpfc_issue_els_farpr - Issue a farp to an node on a vport
2814 * @vport: pointer to a host virtual N_Port data structure.
2815 * @nportid: N_Port identifier to the remote node.
2816 * @retry: number of retries to the command IOCB.
2817 *
2818 * This routine issues a Fibre Channel Address Resolution Response
2819 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
2820 * is passed into the function. It first search the @vport node list to find
2821 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
2822 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
2823 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
2824 *
2825 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2826 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2827 * will be stored into the context1 field of the IOCB for the completion
2828 * callback function to the PARPR ELS command.
2829 *
2830 * Return code
2831 *   0 - Successfully issued farpr command
2832 *   1 - Failed to issue farpr command
2833 **/
2834static int
2835lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2836{
2837	struct lpfc_hba  *phba = vport->phba;
2838	IOCB_t *icmd;
2839	struct lpfc_iocbq *elsiocb;
2840	struct lpfc_sli *psli;
2841	FARP *fp;
2842	uint8_t *pcmd;
2843	uint32_t *lp;
2844	uint16_t cmdsize;
2845	struct lpfc_nodelist *ondlp;
2846	struct lpfc_nodelist *ndlp;
2847
2848	psli = &phba->sli;
2849	cmdsize = (sizeof(uint32_t) + sizeof(FARP));
2850
2851	ndlp = lpfc_findnode_did(vport, nportid);
2852	if (!ndlp) {
2853		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
2854		if (!ndlp)
2855			return 1;
2856		lpfc_nlp_init(vport, ndlp, nportid);
2857		lpfc_enqueue_node(vport, ndlp);
2858	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
2859		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
2860		if (!ndlp)
2861			return 1;
2862	}
2863
2864	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2865				     ndlp->nlp_DID, ELS_CMD_RNID);
2866	if (!elsiocb) {
2867		/* This will trigger the release of the node just
2868		 * allocated
2869		 */
2870		lpfc_nlp_put(ndlp);
2871		return 1;
2872	}
2873
2874	icmd = &elsiocb->iocb;
2875	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2876
2877	*((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
2878	pcmd += sizeof(uint32_t);
2879
2880	/* Fill in FARPR payload */
2881	fp = (FARP *) (pcmd);
2882	memset(fp, 0, sizeof(FARP));
2883	lp = (uint32_t *) pcmd;
2884	*lp++ = be32_to_cpu(nportid);
2885	*lp++ = be32_to_cpu(vport->fc_myDID);
2886	fp->Rflags = 0;
2887	fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
2888
2889	memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
2890	memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2891	ondlp = lpfc_findnode_did(vport, nportid);
2892	if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
2893		memcpy(&fp->OportName, &ondlp->nlp_portname,
2894		       sizeof(struct lpfc_name));
2895		memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
2896		       sizeof(struct lpfc_name));
2897	}
2898
2899	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2900		"Issue FARPR:     did:x%x",
2901		ndlp->nlp_DID, 0, 0);
2902
2903	phba->fc_stat.elsXmitFARPR++;
2904	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2905	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2906	    IOCB_ERROR) {
2907		/* The additional lpfc_nlp_put will cause the following
2908		 * lpfc_els_free_iocb routine to trigger the release of
2909		 * the node.
2910		 */
2911		lpfc_nlp_put(ndlp);
2912		lpfc_els_free_iocb(phba, elsiocb);
2913		return 1;
2914	}
2915	/* This will cause the callback-function lpfc_cmpl_els_cmd to
2916	 * trigger the release of the node.
2917	 */
2918	lpfc_nlp_put(ndlp);
2919	return 0;
2920}
2921
2922/**
2923 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
2924 * @vport: pointer to a host virtual N_Port data structure.
2925 * @nlp: pointer to a node-list data structure.
2926 *
2927 * This routine cancels the timer with a delayed IOCB-command retry for
2928 * a @vport's @ndlp. It stops the timer for the delayed function retrial and
2929 * removes the ELS retry event if it presents. In addition, if the
2930 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
2931 * commands are sent for the @vport's nodes that require issuing discovery
2932 * ADISC.
2933 **/
2934void
2935lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
2936{
2937	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2938	struct lpfc_work_evt *evtp;
2939
2940	if (!(nlp->nlp_flag & NLP_DELAY_TMO))
2941		return;
2942	spin_lock_irq(shost->host_lock);
2943	nlp->nlp_flag &= ~NLP_DELAY_TMO;
2944	spin_unlock_irq(shost->host_lock);
2945	del_timer_sync(&nlp->nlp_delayfunc);
2946	nlp->nlp_last_elscmd = 0;
2947	if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
2948		list_del_init(&nlp->els_retry_evt.evt_listp);
2949		/* Decrement nlp reference count held for the delayed retry */
2950		evtp = &nlp->els_retry_evt;
2951		lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
2952	}
2953	if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
2954		spin_lock_irq(shost->host_lock);
2955		nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2956		spin_unlock_irq(shost->host_lock);
2957		if (vport->num_disc_nodes) {
2958			if (vport->port_state < LPFC_VPORT_READY) {
2959				/* Check if there are more ADISCs to be sent */
2960				lpfc_more_adisc(vport);
2961			} else {
2962				/* Check if there are more PLOGIs to be sent */
2963				lpfc_more_plogi(vport);
2964				if (vport->num_disc_nodes == 0) {
2965					spin_lock_irq(shost->host_lock);
2966					vport->fc_flag &= ~FC_NDISC_ACTIVE;
2967					spin_unlock_irq(shost->host_lock);
2968					lpfc_can_disctmo(vport);
2969					lpfc_end_rscn(vport);
2970				}
2971			}
2972		}
2973	}
2974	return;
2975}
2976
2977/**
2978 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer
2979 * @ptr: holder for the pointer to the timer function associated data (ndlp).
2980 *
2981 * This routine is invoked by the ndlp delayed-function timer to check
2982 * whether there is any pending ELS retry event(s) with the node. If not, it
2983 * simply returns. Otherwise, if there is at least one ELS delayed event, it
2984 * adds the delayed events to the HBA work list and invokes the
2985 * lpfc_worker_wake_up() routine to wake up worker thread to process the
2986 * event. Note that lpfc_nlp_get() is called before posting the event to
2987 * the work list to hold reference count of ndlp so that it guarantees the
2988 * reference to ndlp will still be available when the worker thread gets
2989 * to the event associated with the ndlp.
2990 **/
2991void
2992lpfc_els_retry_delay(unsigned long ptr)
2993{
2994	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
2995	struct lpfc_vport *vport = ndlp->vport;
2996	struct lpfc_hba   *phba = vport->phba;
2997	unsigned long flags;
2998	struct lpfc_work_evt  *evtp = &ndlp->els_retry_evt;
2999
3000	spin_lock_irqsave(&phba->hbalock, flags);
3001	if (!list_empty(&evtp->evt_listp)) {
3002		spin_unlock_irqrestore(&phba->hbalock, flags);
3003		return;
3004	}
3005
3006	/* We need to hold the node by incrementing the reference
3007	 * count until the queued work is done
3008	 */
3009	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
3010	if (evtp->evt_arg1) {
3011		evtp->evt = LPFC_EVT_ELS_RETRY;
3012		list_add_tail(&evtp->evt_listp, &phba->work_list);
3013		lpfc_worker_wake_up(phba);
3014	}
3015	spin_unlock_irqrestore(&phba->hbalock, flags);
3016	return;
3017}
3018
3019/**
3020 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function
3021 * @ndlp: pointer to a node-list data structure.
3022 *
3023 * This routine is the worker-thread handler for processing the @ndlp delayed
3024 * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
3025 * the last ELS command from the associated ndlp and invokes the proper ELS
3026 * function according to the delayed ELS command to retry the command.
3027 **/
3028void
3029lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
3030{
3031	struct lpfc_vport *vport = ndlp->vport;
3032	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
3033	uint32_t cmd, retry;
3034
3035	spin_lock_irq(shost->host_lock);
3036	cmd = ndlp->nlp_last_elscmd;
3037	ndlp->nlp_last_elscmd = 0;
3038
3039	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
3040		spin_unlock_irq(shost->host_lock);
3041		return;
3042	}
3043
3044	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
3045	spin_unlock_irq(shost->host_lock);
3046	/*
3047	 * If a discovery event readded nlp_delayfunc after timer
3048	 * firing and before processing the timer, cancel the
3049	 * nlp_delayfunc.
3050	 */
3051	del_timer_sync(&ndlp->nlp_delayfunc);
3052	retry = ndlp->nlp_retry;
3053	ndlp->nlp_retry = 0;
3054
3055	switch (cmd) {
3056	case ELS_CMD_FLOGI:
3057		lpfc_issue_els_flogi(vport, ndlp, retry);
3058		break;
3059	case ELS_CMD_PLOGI:
3060		if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
3061			ndlp->nlp_prev_state = ndlp->nlp_state;
3062			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
3063		}
3064		break;
3065	case ELS_CMD_ADISC:
3066		if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
3067			ndlp->nlp_prev_state = ndlp->nlp_state;
3068			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
3069		}
3070		break;
3071	case ELS_CMD_PRLI:
3072		if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
3073			ndlp->nlp_prev_state = ndlp->nlp_state;
3074			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
3075		}
3076		break;
3077	case ELS_CMD_LOGO:
3078		if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
3079			ndlp->nlp_prev_state = ndlp->nlp_state;
3080			lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
3081		}
3082		break;
3083	case ELS_CMD_FDISC:
3084		if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
3085			lpfc_issue_els_fdisc(vport, ndlp, retry);
3086		break;
3087	}
3088	return;
3089}
3090
3091/**
3092 * lpfc_els_retry - Make retry decision on an els command iocb
3093 * @phba: pointer to lpfc hba data structure.
3094 * @cmdiocb: pointer to lpfc command iocb data structure.
3095 * @rspiocb: pointer to lpfc response iocb data structure.
3096 *
3097 * This routine makes a retry decision on an ELS command IOCB, which has
3098 * failed. The following ELS IOCBs use this function for retrying the command
3099 * when previously issued command responsed with error status: FLOGI, PLOGI,
3100 * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
3101 * returned error status, it makes the decision whether a retry shall be
3102 * issued for the command, and whether a retry shall be made immediately or
3103 * delayed. In the former case, the corresponding ELS command issuing-function
3104 * is called to retry the command. In the later case, the ELS command shall
3105 * be posted to the ndlp delayed event and delayed function timer set to the
3106 * ndlp for the delayed command issusing.
3107 *
3108 * Return code
3109 *   0 - No retry of els command is made
3110 *   1 - Immediate or delayed retry of els command is made
3111 **/
3112static int
3113lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3114	       struct lpfc_iocbq *rspiocb)
3115{
3116	struct lpfc_vport *vport = cmdiocb->vport;
3117	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
3118	IOCB_t *irsp = &rspiocb->iocb;
3119	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3120	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3121	uint32_t *elscmd;
3122	struct ls_rjt stat;
3123	int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
3124	int logerr = 0;
3125	uint32_t cmd = 0;
3126	uint32_t did;
3127
3128
3129	/* Note: context2 may be 0 for internal driver abort
3130	 * of delays ELS command.
3131	 */
3132
3133	if (pcmd && pcmd->virt) {
3134		elscmd = (uint32_t *) (pcmd->virt);
3135		cmd = *elscmd++;
3136	}
3137
3138	if (ndlp && NLP_CHK_NODE_ACT(ndlp))
3139		did = ndlp->nlp_DID;
3140	else {
3141		/* We should only hit this case for retrying PLOGI */
3142		did = irsp->un.elsreq64.remoteID;
3143		ndlp = lpfc_findnode_did(vport, did);
3144		if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
3145		    && (cmd != ELS_CMD_PLOGI))
3146			return 1;
3147	}
3148
3149	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3150		"Retry ELS:       wd7:x%x wd4:x%x did:x%x",
3151		*(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
3152
3153	switch (irsp->ulpStatus) {
3154	case IOSTAT_FCP_RSP_ERROR:
3155		break;
3156	case IOSTAT_REMOTE_STOP:
3157		if (phba->sli_rev == LPFC_SLI_REV4) {
3158			/* This IO was aborted by the target, we don't
3159			 * know the rxid and because we did not send the
3160			 * ABTS we cannot generate and RRQ.
3161			 */
3162			lpfc_set_rrq_active(phba, ndlp,
3163					 cmdiocb->sli4_lxritag, 0, 0);
3164		}
3165		break;
3166	case IOSTAT_LOCAL_REJECT:
3167		switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
3168		case IOERR_LOOP_OPEN_FAILURE:
3169			if (cmd == ELS_CMD_FLOGI) {
3170				if (PCI_DEVICE_ID_HORNET ==
3171					phba->pcidev->device) {
3172					phba->fc_topology = LPFC_TOPOLOGY_LOOP;
3173					phba->pport->fc_myDID = 0;
3174					phba->alpa_map[0] = 0;
3175					phba->alpa_map[1] = 0;
3176				}
3177			}
3178			if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
3179				delay = 1000;
3180			retry = 1;
3181			break;
3182
3183		case IOERR_ILLEGAL_COMMAND:
3184			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3185					 "0124 Retry illegal cmd x%x "
3186					 "retry:x%x delay:x%x\n",
3187					 cmd, cmdiocb->retry, delay);
3188			retry = 1;
3189			/* All command's retry policy */
3190			maxretry = 8;
3191			if (cmdiocb->retry > 2)
3192				delay = 1000;
3193			break;
3194
3195		case IOERR_NO_RESOURCES:
3196			logerr = 1; /* HBA out of resources */
3197			retry = 1;
3198			if (cmdiocb->retry > 100)
3199				delay = 100;
3200			maxretry = 250;
3201			break;
3202
3203		case IOERR_ILLEGAL_FRAME:
3204			delay = 100;
3205			retry = 1;
3206			break;
3207
3208		case IOERR_SEQUENCE_TIMEOUT:
3209		case IOERR_INVALID_RPI:
3210			if (cmd == ELS_CMD_PLOGI &&
3211			    did == NameServer_DID) {
3212				/* Continue forever if plogi to */
3213				/* the nameserver fails */
3214				maxretry = 0;
3215				delay = 100;
3216			}
3217			retry = 1;
3218			break;
3219		}
3220		break;
3221
3222	case IOSTAT_NPORT_RJT:
3223	case IOSTAT_FABRIC_RJT:
3224		if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
3225			retry = 1;
3226			break;
3227		}
3228		break;
3229
3230	case IOSTAT_NPORT_BSY:
3231	case IOSTAT_FABRIC_BSY:
3232		logerr = 1; /* Fabric / Remote NPort out of resources */
3233		retry = 1;
3234		break;
3235
3236	case IOSTAT_LS_RJT:
3237		stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
3238		/* Added for Vendor specifc support
3239		 * Just keep retrying for these Rsn / Exp codes
3240		 */
3241		switch (stat.un.b.lsRjtRsnCode) {
3242		case LSRJT_UNABLE_TPC:
3243			if (stat.un.b.lsRjtRsnCodeExp ==
3244			    LSEXP_CMD_IN_PROGRESS) {
3245				if (cmd == ELS_CMD_PLOGI) {
3246					delay = 1000;
3247					maxretry = 48;
3248				}
3249				retry = 1;
3250				break;
3251			}
3252			if (stat.un.b.lsRjtRsnCodeExp ==
3253			    LSEXP_CANT_GIVE_DATA) {
3254				if (cmd == ELS_CMD_PLOGI) {
3255					delay = 1000;
3256					maxretry = 48;
3257				}
3258				retry = 1;
3259				break;
3260			}
3261			if ((cmd == ELS_CMD_PLOGI) ||
3262			    (cmd == ELS_CMD_PRLI)) {
3263				delay = 1000;
3264				maxretry = lpfc_max_els_tries + 1;
3265				retry = 1;
3266				break;
3267			}
3268			if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3269			  (cmd == ELS_CMD_FDISC) &&
3270			  (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
3271				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3272						 "0125 FDISC Failed (x%x). "
3273						 "Fabric out of resources\n",
3274						 stat.un.lsRjtError);
3275				lpfc_vport_set_state(vport,
3276						     FC_VPORT_NO_FABRIC_RSCS);
3277			}
3278			break;
3279
3280		case LSRJT_LOGICAL_BSY:
3281			if ((cmd == ELS_CMD_PLOGI) ||
3282			    (cmd == ELS_CMD_PRLI)) {
3283				delay = 1000;
3284				maxretry = 48;
3285			} else if (cmd == ELS_CMD_FDISC) {
3286				/* FDISC retry policy */
3287				maxretry = 48;
3288				if (cmdiocb->retry >= 32)
3289					delay = 1000;
3290			}
3291			retry = 1;
3292			break;
3293
3294		case LSRJT_LOGICAL_ERR:
3295			/* There are some cases where switches return this
3296			 * error when they are not ready and should be returning
3297			 * Logical Busy. We should delay every time.
3298			 */
3299			if (cmd == ELS_CMD_FDISC &&
3300			    stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
3301				maxretry = 3;
3302				delay = 1000;
3303				retry = 1;
3304				break;
3305			}
3306		case LSRJT_PROTOCOL_ERR:
3307			if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3308			  (cmd == ELS_CMD_FDISC) &&
3309			  ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
3310			  (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
3311			  ) {
3312				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3313						 "0122 FDISC Failed (x%x). "
3314						 "Fabric Detected Bad WWN\n",
3315						 stat.un.lsRjtError);
3316				lpfc_vport_set_state(vport,
3317						     FC_VPORT_FABRIC_REJ_WWN);
3318			}
3319			break;
3320		}
3321		break;
3322
3323	case IOSTAT_INTERMED_RSP:
3324	case IOSTAT_BA_RJT:
3325		break;
3326
3327	default:
3328		break;
3329	}
3330
3331	if (did == FDMI_DID)
3332		retry = 1;
3333
3334	if ((cmd == ELS_CMD_FLOGI) &&
3335	    (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
3336	    !lpfc_error_lost_link(irsp)) {
3337		/* FLOGI retry policy */
3338		retry = 1;
3339		/* retry FLOGI forever */
3340		if (phba->link_flag != LS_LOOPBACK_MODE)
3341			maxretry = 0;
3342		else
3343			maxretry = 2;
3344
3345		if (cmdiocb->retry >= 100)
3346			delay = 5000;
3347		else if (cmdiocb->retry >= 32)
3348			delay = 1000;
3349	} else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) {
3350		/* retry FDISCs every second up to devloss */
3351		retry = 1;
3352		maxretry = vport->cfg_devloss_tmo;
3353		delay = 1000;
3354	}
3355
3356	cmdiocb->retry++;
3357	if (maxretry && (cmdiocb->retry >= maxretry)) {
3358		phba->fc_stat.elsRetryExceeded++;
3359		retry = 0;
3360	}
3361
3362	if ((vport->load_flag & FC_UNLOADING) != 0)
3363		retry = 0;
3364
3365	if (retry) {
3366		if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) {
3367			/* Stop retrying PLOGI and FDISC if in FCF discovery */
3368			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3369				lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3370						 "2849 Stop retry ELS command "
3371						 "x%x to remote NPORT x%x, "
3372						 "Data: x%x x%x\n", cmd, did,
3373						 cmdiocb->retry, delay);
3374				return 0;
3375			}
3376		}
3377
3378		/* Retry ELS command <elsCmd> to remote NPORT <did> */
3379		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3380				 "0107 Retry ELS command x%x to remote "
3381				 "NPORT x%x Data: x%x x%x\n",
3382				 cmd, did, cmdiocb->retry, delay);
3383
3384		if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
3385			((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
3386			((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
3387			IOERR_NO_RESOURCES))) {
3388			/* Don't reset timer for no resources */
3389
3390			/* If discovery / RSCN timer is running, reset it */
3391			if (timer_pending(&vport->fc_disctmo) ||
3392			    (vport->fc_flag & FC_RSCN_MODE))
3393				lpfc_set_disctmo(vport);
3394		}
3395
3396		phba->fc_stat.elsXmitRetry++;
3397		if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
3398			phba->fc_stat.elsDelayRetry++;
3399			ndlp->nlp_retry = cmdiocb->retry;
3400
3401			/* delay is specified in milliseconds */
3402			mod_timer(&ndlp->nlp_delayfunc,
3403				jiffies + msecs_to_jiffies(delay));
3404			spin_lock_irq(shost->host_lock);
3405			ndlp->nlp_flag |= NLP_DELAY_TMO;
3406			spin_unlock_irq(shost->host_lock);
3407
3408			ndlp->nlp_prev_state = ndlp->nlp_state;
3409			if (cmd == ELS_CMD_PRLI)
3410				lpfc_nlp_set_state(vport, ndlp,
3411					NLP_STE_PRLI_ISSUE);
3412			else
3413				lpfc_nlp_set_state(vport, ndlp,
3414					NLP_STE_NPR_NODE);
3415			ndlp->nlp_last_elscmd = cmd;
3416
3417			return 1;
3418		}
3419		switch (cmd) {
3420		case ELS_CMD_FLOGI:
3421			lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
3422			return 1;
3423		case ELS_CMD_FDISC:
3424			lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
3425			return 1;
3426		case ELS_CMD_PLOGI:
3427			if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
3428				ndlp->nlp_prev_state = ndlp->nlp_state;
3429				lpfc_nlp_set_state(vport, ndlp,
3430						   NLP_STE_PLOGI_ISSUE);
3431			}
3432			lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
3433			return 1;
3434		case ELS_CMD_ADISC:
3435			ndlp->nlp_prev_state = ndlp->nlp_state;
3436			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
3437			lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
3438			return 1;
3439		case ELS_CMD_PRLI:
3440			ndlp->nlp_prev_state = ndlp->nlp_state;
3441			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
3442			lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
3443			return 1;
3444		case ELS_CMD_LOGO:
3445			ndlp->nlp_prev_state = ndlp->nlp_state;
3446			lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
3447			lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
3448			return 1;
3449		}
3450	}
3451	/* No retry ELS command <elsCmd> to remote NPORT <did> */
3452	if (logerr) {
3453		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3454			 "0137 No retry ELS command x%x to remote "
3455			 "NPORT x%x: Out of Resources: Error:x%x/%x\n",
3456			 cmd, did, irsp->ulpStatus,
3457			 irsp->un.ulpWord[4]);
3458	}
3459	else {
3460		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3461			 "0108 No retry ELS command x%x to remote "
3462			 "NPORT x%x Retried:%d Error:x%x/%x\n",
3463			 cmd, did, cmdiocb->retry, irsp->ulpStatus,
3464			 irsp->un.ulpWord[4]);
3465	}
3466	return 0;
3467}
3468
3469/**
3470 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb
3471 * @phba: pointer to lpfc hba data structure.
3472 * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
3473 *
3474 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
3475 * associated with a command IOCB back to the lpfc DMA buffer pool. It first
3476 * checks to see whether there is a lpfc DMA buffer associated with the
3477 * response of the command IOCB. If so, it will be released before releasing
3478 * the lpfc DMA buffer associated with the IOCB itself.
3479 *
3480 * Return code
3481 *   0 - Successfully released lpfc DMA buffer (currently, always return 0)
3482 **/
3483static int
3484lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
3485{
3486	struct lpfc_dmabuf *buf_ptr;
3487
3488	/* Free the response before processing the command. */
3489	if (!list_empty(&buf_ptr1->list)) {
3490		list_remove_head(&buf_ptr1->list, buf_ptr,
3491				 struct lpfc_dmabuf,
3492				 list);
3493		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
3494		kfree(buf_ptr);
3495	}
3496	lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
3497	kfree(buf_ptr1);
3498	return 0;
3499}
3500
3501/**
3502 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl
3503 * @phba: pointer to lpfc hba data structure.
3504 * @buf_ptr: pointer to the lpfc dma buffer data structure.
3505 *
3506 * This routine releases the lpfc Direct Memory Access (DMA) buffer
3507 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
3508 * pool.
3509 *
3510 * Return code
3511 *   0 - Successfully released lpfc DMA buffer (currently, always return 0)
3512 **/
3513static int
3514lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
3515{
3516	lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
3517	kfree(buf_ptr);
3518	return 0;
3519}
3520
3521/**
3522 * lpfc_els_free_iocb - Free a command iocb and its associated resources
3523 * @phba: pointer to lpfc hba data structure.
3524 * @elsiocb: pointer to lpfc els command iocb data structure.
3525 *
3526 * This routine frees a command IOCB and its associated resources. The
3527 * command IOCB data structure contains the reference to various associated
3528 * resources, these fields must be set to NULL if the associated reference
3529 * not present:
3530 *   context1 - reference to ndlp
3531 *   context2 - reference to cmd
3532 *   context2->next - reference to rsp
3533 *   context3 - reference to bpl
3534 *
3535 * It first properly decrements the reference count held on ndlp for the
3536 * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
3537 * set, it invokes the lpfc_els_free_data() routine to release the Direct
3538 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
3539 * adds the DMA buffer the @phba data structure for the delayed release.
3540 * If reference to the Buffer Pointer List (BPL) is present, the
3541 * lpfc_els_free_bpl() routine is invoked to release the DMA memory
3542 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
3543 * invoked to release the IOCB data structure back to @phba IOCBQ list.
3544 *
3545 * Return code
3546 *   0 - Success (currently, always return 0)
3547 **/
3548int
3549lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
3550{
3551	struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
3552	struct lpfc_nodelist *ndlp;
3553
3554	ndlp = (struct lpfc_nodelist *)elsiocb->context1;
3555	if (ndlp) {
3556		if (ndlp->nlp_flag & NLP_DEFER_RM) {
3557			lpfc_nlp_put(ndlp);
3558
3559			/* If the ndlp is not being used by another discovery
3560			 * thread, free it.
3561			 */
3562			if (!lpfc_nlp_not_used(ndlp)) {
3563				/* If ndlp is being used by another discovery
3564				 * thread, just clear NLP_DEFER_RM
3565				 */
3566				ndlp->nlp_flag &= ~NLP_DEFER_RM;
3567			}
3568		}
3569		else
3570			lpfc_nlp_put(ndlp);
3571		elsiocb->context1 = NULL;
3572	}
3573	/* context2  = cmd,  context2->next = rsp, context3 = bpl */
3574	if (elsiocb->context2) {
3575		if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
3576			/* Firmware could still be in progress of DMAing
3577			 * payload, so don't free data buffer till after
3578			 * a hbeat.
3579			 */
3580			elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
3581			buf_ptr = elsiocb->context2;
3582			elsiocb->context2 = NULL;
3583			if (buf_ptr) {
3584				buf_ptr1 = NULL;
3585				spin_lock_irq(&phba->hbalock);
3586				if (!list_empty(&buf_ptr->list)) {
3587					list_remove_head(&buf_ptr->list,
3588						buf_ptr1, struct lpfc_dmabuf,
3589						list);
3590					INIT_LIST_HEAD(&buf_ptr1->list);
3591					list_add_tail(&buf_ptr1->list,
3592						&phba->elsbuf);
3593					phba->elsbuf_cnt++;
3594				}
3595				INIT_LIST_HEAD(&buf_ptr->list);
3596				list_add_tail(&buf_ptr->list, &phba->elsbuf);
3597				phba->elsbuf_cnt++;
3598				spin_unlock_irq(&phba->hbalock);
3599			}
3600		} else {
3601			buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
3602			lpfc_els_free_data(phba, buf_ptr1);
3603		}
3604	}
3605
3606	if (elsiocb->context3) {
3607		buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
3608		lpfc_els_free_bpl(phba, buf_ptr);
3609	}
3610	lpfc_sli_release_iocbq(phba, elsiocb);
3611	return 0;
3612}
3613
3614/**
3615 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
3616 * @phba: pointer to lpfc hba data structure.
3617 * @cmdiocb: pointer to lpfc command iocb data structure.
3618 * @rspiocb: pointer to lpfc response iocb data structure.
3619 *
3620 * This routine is the completion callback function to the Logout (LOGO)
3621 * Accept (ACC) Response ELS command. This routine is invoked to indicate
3622 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
3623 * release the ndlp if it has the last reference remaining (reference count
3624 * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1
3625 * field to NULL to inform the following lpfc_els_free_iocb() routine no
3626 * ndlp reference count needs to be decremented. Otherwise, the ndlp
3627 * reference use-count shall be decremented by the lpfc_els_free_iocb()
3628 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
3629 * IOCB data structure.
3630 **/
3631static void
3632lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3633		       struct lpfc_iocbq *rspiocb)
3634{
3635	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3636	struct lpfc_vport *vport = cmdiocb->vport;
3637	IOCB_t *irsp;
3638
3639	irsp = &rspiocb->iocb;
3640	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3641		"ACC LOGO cmpl:   status:x%x/x%x did:x%x",
3642		irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
3643	/* ACC to LOGO completes to NPort <nlp_DID> */
3644	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3645			 "0109 ACC to LOGO completes to NPort x%x "
3646			 "Data: x%x x%x x%x\n",
3647			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3648			 ndlp->nlp_rpi);
3649
3650	if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
3651		/* NPort Recovery mode or node is just allocated */
3652		if (!lpfc_nlp_not_used(ndlp)) {
3653			/* If the ndlp is being used by another discovery
3654			 * thread, just unregister the RPI.
3655			 */
3656			lpfc_unreg_rpi(vport, ndlp);
3657		} else {
3658			/* Indicate the node has already released, should
3659			 * not reference to it from within lpfc_els_free_iocb.
3660			 */
3661			cmdiocb->context1 = NULL;
3662		}
3663	}
3664
3665	/*
3666	 * The driver received a LOGO from the rport and has ACK'd it.
3667	 * At this point, the driver is done so release the IOCB
3668	 */
3669	lpfc_els_free_iocb(phba, cmdiocb);
3670
3671	/*
3672	 * Remove the ndlp reference if it's a fabric node that has
3673	 * sent us an unsolicted LOGO.
3674	 */
3675	if (ndlp->nlp_type & NLP_FABRIC)
3676		lpfc_nlp_put(ndlp);
3677
3678	return;
3679}
3680
3681/**
3682 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd
3683 * @phba: pointer to lpfc hba data structure.
3684 * @pmb: pointer to the driver internal queue element for mailbox command.
3685 *
3686 * This routine is the completion callback function for unregister default
3687 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
3688 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
3689 * decrements the ndlp reference count held for this completion callback
3690 * function. After that, it invokes the lpfc_nlp_not_used() to check
3691 * whether there is only one reference left on the ndlp. If so, it will
3692 * perform one more decrement and trigger the release of the ndlp.
3693 **/
3694void
3695lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3696{
3697	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3698	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3699
3700	pmb->context1 = NULL;
3701	pmb->context2 = NULL;
3702
3703	lpfc_mbuf_free(phba, mp->virt, mp->phys);
3704	kfree(mp);
3705	mempool_free(pmb, phba->mbox_mem_pool);
3706	if (ndlp) {
3707		lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
3708				 "0006 rpi%x DID:%x flg:%x %d map:%x %p\n",
3709				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
3710				 atomic_read(&ndlp->kref.refcount),
3711				 ndlp->nlp_usg_map, ndlp);
3712		if (NLP_CHK_NODE_ACT(ndlp)) {
3713			lpfc_nlp_put(ndlp);
3714			/* This is the end of the default RPI cleanup logic for
3715			 * this ndlp. If no other discovery threads are using
3716			 * this ndlp, free all resources associated with it.
3717			 */
3718			lpfc_nlp_not_used(ndlp);
3719		} else {
3720			lpfc_drop_node(ndlp->vport, ndlp);
3721		}
3722	}
3723
3724	return;
3725}
3726
3727/**
3728 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
3729 * @phba: pointer to lpfc hba data structure.
3730 * @cmdiocb: pointer to lpfc command iocb data structure.
3731 * @rspiocb: pointer to lpfc response iocb data structure.
3732 *
3733 * This routine is the completion callback function for ELS Response IOCB
3734 * command. In normal case, this callback function just properly sets the
3735 * nlp_flag bitmap in the ndlp data structure, if the mbox command reference
3736 * field in the command IOCB is not NULL, the referred mailbox command will
3737 * be send out, and then invokes the lpfc_els_free_iocb() routine to release
3738 * the IOCB. Under error conditions, such as when a LS_RJT is returned or a
3739 * link down event occurred during the discovery, the lpfc_nlp_not_used()
3740 * routine shall be invoked trying to release the ndlp if no other threads
3741 * are currently referring it.
3742 **/
3743static void
3744lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3745		  struct lpfc_iocbq *rspiocb)
3746{
3747	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3748	struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
3749	struct Scsi_Host  *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
3750	IOCB_t  *irsp;
3751	uint8_t *pcmd;
3752	LPFC_MBOXQ_t *mbox = NULL;
3753	struct lpfc_dmabuf *mp = NULL;
3754	uint32_t ls_rjt = 0;
3755
3756	irsp = &rspiocb->iocb;
3757
3758	if (cmdiocb->context_un.mbox)
3759		mbox = cmdiocb->context_un.mbox;
3760
3761	/* First determine if this is a LS_RJT cmpl. Note, this callback
3762	 * function can have cmdiocb->contest1 (ndlp) field set to NULL.
3763	 */
3764	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
3765	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3766	    (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
3767		/* A LS_RJT associated with Default RPI cleanup has its own
3768		 * separate code path.
3769		 */
3770		if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
3771			ls_rjt = 1;
3772	}
3773
3774	/* Check to see if link went down during discovery */
3775	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
3776		if (mbox) {
3777			mp = (struct lpfc_dmabuf *) mbox->context1;
3778			if (mp) {
3779				lpfc_mbuf_free(phba, mp->virt, mp->phys);
3780				kfree(mp);
3781			}
3782			mempool_free(mbox, phba->mbox_mem_pool);
3783		}
3784		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3785		    (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
3786			if (lpfc_nlp_not_used(ndlp)) {
3787				ndlp = NULL;
3788				/* Indicate the node has already released,
3789				 * should not reference to it from within
3790				 * the routine lpfc_els_free_iocb.
3791				 */
3792				cmdiocb->context1 = NULL;
3793			}
3794		goto out;
3795	}
3796
3797	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3798		"ELS rsp cmpl:    status:x%x/x%x did:x%x",
3799		irsp->ulpStatus, irsp->un.ulpWord[4],
3800		cmdiocb->iocb.un.elsreq64.remoteID);
3801	/* ELS response tag <ulpIoTag> completes */
3802	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3803			 "0110 ELS response tag x%x completes "
3804			 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
3805			 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
3806			 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
3807			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3808			 ndlp->nlp_rpi);
3809	if (mbox) {
3810		if ((rspiocb->iocb.ulpStatus == 0)
3811		    && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
3812			lpfc_unreg_rpi(vport, ndlp);
3813			/* Increment reference count to ndlp to hold the
3814			 * reference to ndlp for the callback function.
3815			 */
3816			mbox->context2 = lpfc_nlp_get(ndlp);
3817			mbox->vport = vport;
3818			if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
3819				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
3820				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
3821			}
3822			else {
3823				mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
3824				ndlp->nlp_prev_state = ndlp->nlp_state;
3825				lpfc_nlp_set_state(vport, ndlp,
3826					   NLP_STE_REG_LOGIN_ISSUE);
3827			}
3828			if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
3829			    != MBX_NOT_FINISHED)
3830				goto out;
3831			else
3832				/* Decrement the ndlp reference count we
3833				 * set for this failed mailbox command.
3834				 */
3835				lpfc_nlp_put(ndlp);
3836
3837			/* ELS rsp: Cannot issue reg_login for <NPortid> */
3838			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3839				"0138 ELS rsp: Cannot issue reg_login for x%x "
3840				"Data: x%x x%x x%x\n",
3841				ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3842				ndlp->nlp_rpi);
3843
3844			if (lpfc_nlp_not_used(ndlp)) {
3845				ndlp = NULL;
3846				/* Indicate node has already been released,
3847				 * should not reference to it from within
3848				 * the routine lpfc_els_free_iocb.
3849				 */
3850				cmdiocb->context1 = NULL;
3851			}
3852		} else {
3853			/* Do not drop node for lpfc_els_abort'ed ELS cmds */
3854			if (!lpfc_error_lost_link(irsp) &&
3855			    ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
3856				if (lpfc_nlp_not_used(ndlp)) {
3857					ndlp = NULL;
3858					/* Indicate node has already been
3859					 * released, should not reference
3860					 * to it from within the routine
3861					 * lpfc_els_free_iocb.
3862					 */
3863					cmdiocb->context1 = NULL;
3864				}
3865			}
3866		}
3867		mp = (struct lpfc_dmabuf *) mbox->context1;
3868		if (mp) {
3869			lpfc_mbuf_free(phba, mp->virt, mp->phys);
3870			kfree(mp);
3871		}
3872		mempool_free(mbox, phba->mbox_mem_pool);
3873	}
3874out:
3875	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
3876		spin_lock_irq(shost->host_lock);
3877		ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
3878		spin_unlock_irq(shost->host_lock);
3879
3880		/* If the node is not being used by another discovery thread,
3881		 * and we are sending a reject, we are done with it.
3882		 * Release driver reference count here and free associated
3883		 * resources.
3884		 */
3885		if (ls_rjt)
3886			if (lpfc_nlp_not_used(ndlp))
3887				/* Indicate node has already been released,
3888				 * should not reference to it from within
3889				 * the routine lpfc_els_free_iocb.
3890				 */
3891				cmdiocb->context1 = NULL;
3892	}
3893
3894	lpfc_els_free_iocb(phba, cmdiocb);
3895	return;
3896}
3897
3898/**
3899 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command
3900 * @vport: pointer to a host virtual N_Port data structure.
3901 * @flag: the els command code to be accepted.
3902 * @oldiocb: pointer to the original lpfc command iocb data structure.
3903 * @ndlp: pointer to a node-list data structure.
3904 * @mbox: pointer to the driver internal queue element for mailbox command.
3905 *
3906 * This routine prepares and issues an Accept (ACC) response IOCB
3907 * command. It uses the @flag to properly set up the IOCB field for the
3908 * specific ACC response command to be issued and invokes the
3909 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a
3910 * @mbox pointer is passed in, it will be put into the context_un.mbox
3911 * field of the IOCB for the completion callback function to issue the
3912 * mailbox command to the HBA later when callback is invoked.
3913 *
3914 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3915 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3916 * will be stored into the context1 field of the IOCB for the completion
3917 * callback function to the corresponding response ELS IOCB command.
3918 *
3919 * Return code
3920 *   0 - Successfully issued acc response
3921 *   1 - Failed to issue acc response
3922 **/
3923int
3924lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3925		 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
3926		 LPFC_MBOXQ_t *mbox)
3927{
3928	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3929	struct lpfc_hba  *phba = vport->phba;
3930	IOCB_t *icmd;
3931	IOCB_t *oldcmd;
3932	struct lpfc_iocbq *elsiocb;
3933	struct lpfc_sli *psli;
3934	uint8_t *pcmd;
3935	uint16_t cmdsize;
3936	int rc;
3937	ELS_PKT *els_pkt_ptr;
3938
3939	psli = &phba->sli;
3940	oldcmd = &oldiocb->iocb;
3941
3942	switch (flag) {
3943	case ELS_CMD_ACC:
3944		cmdsize = sizeof(uint32_t);
3945		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3946					     ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
3947		if (!elsiocb) {
3948			spin_lock_irq(shost->host_lock);
3949			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
3950			spin_unlock_irq(shost->host_lock);
3951			return 1;
3952		}
3953
3954		icmd = &elsiocb->iocb;
3955		icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
3956		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3957		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3958		*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3959		pcmd += sizeof(uint32_t);
3960
3961		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3962			"Issue ACC:       did:x%x flg:x%x",
3963			ndlp->nlp_DID, ndlp->nlp_flag, 0);
3964		break;
3965	case ELS_CMD_PLOGI:
3966		cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
3967		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3968					     ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
3969		if (!elsiocb)
3970			return 1;
3971
3972		icmd = &elsiocb->iocb;
3973		icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
3974		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3975		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3976
3977		if (mbox)
3978			elsiocb->context_un.mbox = mbox;
3979
3980		*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3981		pcmd += sizeof(uint32_t);
3982		memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
3983
3984		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3985			"Issue ACC PLOGI: did:x%x flg:x%x",
3986			ndlp->nlp_DID, ndlp->nlp_flag, 0);
3987		break;
3988	case ELS_CMD_PRLO:
3989		cmdsize = sizeof(uint32_t) + sizeof(PRLO);
3990		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3991					     ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
3992		if (!elsiocb)
3993			return 1;
3994
3995		icmd = &elsiocb->iocb;
3996		icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
3997		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3998		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3999
4000		memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
4001		       sizeof(uint32_t) + sizeof(PRLO));
4002		*((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
4003		els_pkt_ptr = (ELS_PKT *) pcmd;
4004		els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
4005
4006		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4007			"Issue ACC PRLO:  did:x%x flg:x%x",
4008			ndlp->nlp_DID, ndlp->nlp_flag, 0);
4009		break;
4010	default:
4011		return 1;
4012	}
4013	/* Xmit ELS ACC response tag <ulpIoTag> */
4014	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4015			 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
4016			 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x "
4017			 "fc_flag x%x\n",
4018			 elsiocb->iotag, elsiocb->iocb.ulpContext,
4019			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4020			 ndlp->nlp_rpi, vport->fc_flag);
4021	if (ndlp->nlp_flag & NLP_LOGO_ACC) {
4022		spin_lock_irq(shost->host_lock);
4023		ndlp->nlp_flag &= ~NLP_LOGO_ACC;
4024		spin_unlock_irq(shost->host_lock);
4025		elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
4026	} else {
4027		elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4028	}
4029
4030	phba->fc_stat.elsXmitACC++;
4031	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4032	if (rc == IOCB_ERROR) {
4033		lpfc_els_free_iocb(phba, elsiocb);
4034		return 1;
4035	}
4036	return 0;
4037}
4038
4039/**
4040 * lpfc_els_rsp_reject - Propare and issue a rjt response iocb command
4041 * @vport: pointer to a virtual N_Port data structure.
4042 * @rejectError:
4043 * @oldiocb: pointer to the original lpfc command iocb data structure.
4044 * @ndlp: pointer to a node-list data structure.
4045 * @mbox: pointer to the driver internal queue element for mailbox command.
4046 *
4047 * This routine prepares and issue an Reject (RJT) response IOCB
4048 * command. If a @mbox pointer is passed in, it will be put into the
4049 * context_un.mbox field of the IOCB for the completion callback function
4050 * to issue to the HBA later.
4051 *
4052 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4053 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4054 * will be stored into the context1 field of the IOCB for the completion
4055 * callback function to the reject response ELS IOCB command.
4056 *
4057 * Return code
4058 *   0 - Successfully issued reject response
4059 *   1 - Failed to issue reject response
4060 **/
4061int
4062lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
4063		    struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
4064		    LPFC_MBOXQ_t *mbox)
4065{
4066	struct lpfc_hba  *phba = vport->phba;
4067	IOCB_t *icmd;
4068	IOCB_t *oldcmd;
4069	struct lpfc_iocbq *elsiocb;
4070	struct lpfc_sli *psli;
4071	uint8_t *pcmd;
4072	uint16_t cmdsize;
4073	int rc;
4074
4075	psli = &phba->sli;
4076	cmdsize = 2 * sizeof(uint32_t);
4077	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4078				     ndlp->nlp_DID, ELS_CMD_LS_RJT);
4079	if (!elsiocb)
4080		return 1;
4081
4082	icmd = &elsiocb->iocb;
4083	oldcmd = &oldiocb->iocb;
4084	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
4085	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4086	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4087
4088	*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
4089	pcmd += sizeof(uint32_t);
4090	*((uint32_t *) (pcmd)) = rejectError;
4091
4092	if (mbox)
4093		elsiocb->context_un.mbox = mbox;
4094
4095	/* Xmit ELS RJT <err> response tag <ulpIoTag> */
4096	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4097			 "0129 Xmit ELS RJT x%x response tag x%x "
4098			 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
4099			 "rpi x%x\n",
4100			 rejectError, elsiocb->iotag,
4101			 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
4102			 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
4103	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4104		"Issue LS_RJT:    did:x%x flg:x%x err:x%x",
4105		ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
4106
4107	phba->fc_stat.elsXmitLSRJT++;
4108	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4109	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4110
4111	if (rc == IOCB_ERROR) {
4112		lpfc_els_free_iocb(phba, elsiocb);
4113		return 1;
4114	}
4115	return 0;
4116}
4117
4118/**
4119 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
4120 * @vport: pointer to a virtual N_Port data structure.
4121 * @oldiocb: pointer to the original lpfc command iocb data structure.
4122 * @ndlp: pointer to a node-list data structure.
4123 *
4124 * This routine prepares and issues an Accept (ACC) response to Address
4125 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
4126 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
4127 *
4128 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4129 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4130 * will be stored into the context1 field of the IOCB for the completion
4131 * callback function to the ADISC Accept response ELS IOCB command.
4132 *
4133 * Return code
4134 *   0 - Successfully issued acc adisc response
4135 *   1 - Failed to issue adisc acc response
4136 **/
4137int
4138lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
4139		       struct lpfc_nodelist *ndlp)
4140{
4141	struct lpfc_hba  *phba = vport->phba;
4142	ADISC *ap;
4143	IOCB_t *icmd, *oldcmd;
4144	struct lpfc_iocbq *elsiocb;
4145	uint8_t *pcmd;
4146	uint16_t cmdsize;
4147	int rc;
4148
4149	cmdsize = sizeof(uint32_t) + sizeof(ADISC);
4150	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4151				     ndlp->nlp_DID, ELS_CMD_ACC);
4152	if (!elsiocb)
4153		return 1;
4154
4155	icmd = &elsiocb->iocb;
4156	oldcmd = &oldiocb->iocb;
4157	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
4158	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4159
4160	/* Xmit ADISC ACC response tag <ulpIoTag> */
4161	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4162			 "0130 Xmit ADISC ACC response iotag x%x xri: "
4163			 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
4164			 elsiocb->iotag, elsiocb->iocb.ulpContext,
4165			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4166			 ndlp->nlp_rpi);
4167	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4168
4169	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4170	pcmd += sizeof(uint32_t);
4171
4172	ap = (ADISC *) (pcmd);
4173	ap->hardAL_PA = phba->fc_pref_ALPA;
4174	memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
4175	memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
4176	ap->DID = be32_to_cpu(vport->fc_myDID);
4177
4178	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4179		"Issue ACC ADISC: did:x%x flg:x%x",
4180		ndlp->nlp_DID, ndlp->nlp_flag, 0);
4181
4182	phba->fc_stat.elsXmitACC++;
4183	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4184	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4185	if (rc == IOCB_ERROR) {
4186		lpfc_els_free_iocb(phba, elsiocb);
4187		return 1;
4188	}
4189	return 0;
4190}
4191
4192/**
4193 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd
4194 * @vport: pointer to a virtual N_Port data structure.
4195 * @oldiocb: pointer to the original lpfc command iocb data structure.
4196 * @ndlp: pointer to a node-list data structure.
4197 *
4198 * This routine prepares and issues an Accept (ACC) response to Process
4199 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
4200 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
4201 *
4202 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4203 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4204 * will be stored into the context1 field of the IOCB for the completion
4205 * callback function to the PRLI Accept response ELS IOCB command.
4206 *
4207 * Return code
4208 *   0 - Successfully issued acc prli response
4209 *   1 - Failed to issue acc prli response
4210 **/
4211int
4212lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
4213		      struct lpfc_nodelist *ndlp)
4214{
4215	struct lpfc_hba  *phba = vport->phba;
4216	PRLI *npr;
4217	lpfc_vpd_t *vpd;
4218	IOCB_t *icmd;
4219	IOCB_t *oldcmd;
4220	struct lpfc_iocbq *elsiocb;
4221	struct lpfc_sli *psli;
4222	uint8_t *pcmd;
4223	uint16_t cmdsize;
4224	int rc;
4225
4226	psli = &phba->sli;
4227
4228	cmdsize = sizeof(uint32_t) + sizeof(PRLI);
4229	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4230		ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
4231	if (!elsiocb)
4232		return 1;
4233
4234	icmd = &elsiocb->iocb;
4235	oldcmd = &oldiocb->iocb;
4236	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
4237	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4238
4239	/* Xmit PRLI ACC response tag <ulpIoTag> */
4240	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4241			 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
4242			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
4243			 elsiocb->iotag, elsiocb->iocb.ulpContext,
4244			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4245			 ndlp->nlp_rpi);
4246	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4247
4248	*((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
4249	pcmd += sizeof(uint32_t);
4250
4251	/* For PRLI, remainder of payload is PRLI parameter page */
4252	memset(pcmd, 0, sizeof(PRLI));
4253
4254	npr = (PRLI *) pcmd;
4255	vpd = &phba->vpd;
4256	/*
4257	 * If the remote port is a target and our firmware version is 3.20 or
4258	 * later, set the following bits for FC-TAPE support.
4259	 */
4260	if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
4261	    (vpd->rev.feaLevelHigh >= 0x02)) {
4262		npr->ConfmComplAllowed = 1;
4263		npr->Retry = 1;
4264		npr->TaskRetryIdReq = 1;
4265	}
4266
4267	npr->acceptRspCode = PRLI_REQ_EXECUTED;
4268	npr->estabImagePair = 1;
4269	npr->readXferRdyDis = 1;
4270	npr->ConfmComplAllowed = 1;
4271
4272	npr->prliType = PRLI_FCP_TYPE;
4273	npr->initiatorFunc = 1;
4274
4275	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4276		"Issue ACC PRLI:  did:x%x flg:x%x",
4277		ndlp->nlp_DID, ndlp->nlp_flag, 0);
4278
4279	phba->fc_stat.elsXmitACC++;
4280	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4281
4282	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4283	if (rc == IOCB_ERROR) {
4284		lpfc_els_free_iocb(phba, elsiocb);
4285		return 1;
4286	}
4287	return 0;
4288}
4289
4290/**
4291 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command
4292 * @vport: pointer to a virtual N_Port data structure.
4293 * @format: rnid command format.
4294 * @oldiocb: pointer to the original lpfc command iocb data structure.
4295 * @ndlp: pointer to a node-list data structure.
4296 *
4297 * This routine issues a Request Node Identification Data (RNID) Accept
4298 * (ACC) response. It constructs the RNID ACC response command according to
4299 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
4300 * issue the response. Note that this command does not need to hold the ndlp
4301 * reference count for the callback. So, the ndlp reference count taken by
4302 * the lpfc_prep_els_iocb() routine is put back and the context1 field of
4303 * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that
4304 * there is no ndlp reference available.
4305 *
4306 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4307 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4308 * will be stored into the context1 field of the IOCB for the completion
4309 * callback function. However, for the RNID Accept Response ELS command,
4310 * this is undone later by this routine after the IOCB is allocated.
4311 *
4312 * Return code
4313 *   0 - Successfully issued acc rnid response
4314 *   1 - Failed to issue acc rnid response
4315 **/
4316static int
4317lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
4318		      struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
4319{
4320	struct lpfc_hba  *phba = vport->phba;
4321	RNID *rn;
4322	IOCB_t *icmd, *oldcmd;
4323	struct lpfc_iocbq *elsiocb;
4324	struct lpfc_sli *psli;
4325	uint8_t *pcmd;
4326	uint16_t cmdsize;
4327	int rc;
4328
4329	psli = &phba->sli;
4330	cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
4331					+ (2 * sizeof(struct lpfc_name));
4332	if (format)
4333		cmdsize += sizeof(RNID_TOP_DISC);
4334
4335	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4336				     ndlp->nlp_DID, ELS_CMD_ACC);
4337	if (!elsiocb)
4338		return 1;
4339
4340	icmd = &elsiocb->iocb;
4341	oldcmd = &oldiocb->iocb;
4342	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
4343	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4344
4345	/* Xmit RNID ACC response tag <ulpIoTag> */
4346	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4347			 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
4348			 elsiocb->iotag, elsiocb->iocb.ulpContext);
4349	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4350	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4351	pcmd += sizeof(uint32_t);
4352
4353	memset(pcmd, 0, sizeof(RNID));
4354	rn = (RNID *) (pcmd);
4355	rn->Format = format;
4356	rn->CommonLen = (2 * sizeof(struct lpfc_name));
4357	memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
4358	memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
4359	switch (format) {
4360	case 0:
4361		rn->SpecificLen = 0;
4362		break;
4363	case RNID_TOPOLOGY_DISC:
4364		rn->SpecificLen = sizeof(RNID_TOP_DISC);
4365		memcpy(&rn->un.topologyDisc.portName,
4366		       &vport->fc_portname, sizeof(struct lpfc_name));
4367		rn->un.topologyDisc.unitType = RNID_HBA;
4368		rn->un.topologyDisc.physPort = 0;
4369		rn->un.topologyDisc.attachedNodes = 0;
4370		break;
4371	default:
4372		rn->CommonLen = 0;
4373		rn->SpecificLen = 0;
4374		break;
4375	}
4376
4377	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4378		"Issue ACC RNID:  did:x%x flg:x%x",
4379		ndlp->nlp_DID, ndlp->nlp_flag, 0);
4380
4381	phba->fc_stat.elsXmitACC++;
4382	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4383
4384	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4385	if (rc == IOCB_ERROR) {
4386		lpfc_els_free_iocb(phba, elsiocb);
4387		return 1;
4388	}
4389	return 0;
4390}
4391
4392/**
4393 * lpfc_els_clear_rrq - Clear the rq that this rrq describes.
4394 * @vport: pointer to a virtual N_Port data structure.
4395 * @iocb: pointer to the lpfc command iocb data structure.
4396 * @ndlp: pointer to a node-list data structure.
4397 *
4398 * Return
4399 **/
4400static void
4401lpfc_els_clear_rrq(struct lpfc_vport *vport,
4402      struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
4403{
4404	struct lpfc_hba  *phba = vport->phba;
4405	uint8_t *pcmd;
4406	struct RRQ *rrq;
4407	uint16_t rxid;
4408	uint16_t xri;
4409	struct lpfc_node_rrq *prrq;
4410
4411
4412	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
4413	pcmd += sizeof(uint32_t);
4414	rrq = (struct RRQ *)pcmd;
4415	rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
4416	rxid = bf_get(rrq_rxid, rrq);
4417
4418	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4419			"2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
4420			" x%x x%x\n",
4421			be32_to_cpu(bf_get(rrq_did, rrq)),
4422			bf_get(rrq_oxid, rrq),
4423			rxid,
4424			iocb->iotag, iocb->iocb.ulpContext);
4425
4426	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4427		"Clear RRQ:  did:x%x flg:x%x exchg:x%.08x",
4428		ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
4429	if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
4430		xri = bf_get(rrq_oxid, rrq);
4431	else
4432		xri = rxid;
4433	prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
4434	if (prrq)
4435		lpfc_clr_rrq_active(phba, xri, prrq);
4436	return;
4437}
4438
4439/**
4440 * lpfc_els_rsp_echo_acc - Issue echo acc response
4441 * @vport: pointer to a virtual N_Port data structure.
4442 * @data: pointer to echo data to return in the accept.
4443 * @oldiocb: pointer to the original lpfc command iocb data structure.
4444 * @ndlp: pointer to a node-list data structure.
4445 *
4446 * Return code
4447 *   0 - Successfully issued acc echo response
4448 *   1 - Failed to issue acc echo response
4449 **/
4450static int
4451lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
4452		      struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
4453{
4454	struct lpfc_hba  *phba = vport->phba;
4455	struct lpfc_iocbq *elsiocb;
4456	struct lpfc_sli *psli;
4457	uint8_t *pcmd;
4458	uint16_t cmdsize;
4459	int rc;
4460
4461	psli = &phba->sli;
4462	cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
4463
4464	/* The accumulated length can exceed the BPL_SIZE.  For
4465	 * now, use this as the limit
4466	 */
4467	if (cmdsize > LPFC_BPL_SIZE)
4468		cmdsize = LPFC_BPL_SIZE;
4469	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4470				     ndlp->nlp_DID, ELS_CMD_ACC);
4471	if (!elsiocb)
4472		return 1;
4473
4474	elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext;  /* Xri / rx_id */
4475	elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
4476
4477	/* Xmit ECHO ACC response tag <ulpIoTag> */
4478	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4479			 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
4480			 elsiocb->iotag, elsiocb->iocb.ulpContext);
4481	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4482	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4483	pcmd += sizeof(uint32_t);
4484	memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
4485
4486	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4487		"Issue ACC ECHO:  did:x%x flg:x%x",
4488		ndlp->nlp_DID, ndlp->nlp_flag, 0);
4489
4490	phba->fc_stat.elsXmitACC++;
4491	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4492
4493	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4494	if (rc == IOCB_ERROR) {
4495		lpfc_els_free_iocb(phba, elsiocb);
4496		return 1;
4497	}
4498	return 0;
4499}
4500
4501/**
4502 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
4503 * @vport: pointer to a host virtual N_Port data structure.
4504 *
4505 * This routine issues Address Discover (ADISC) ELS commands to those
4506 * N_Ports which are in node port recovery state and ADISC has not been issued
4507 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the
4508 * lpfc_issue_els_adisc() routine, the per @vport number of discover count
4509 * (num_disc_nodes) shall be incremented. If the num_disc_nodes reaches a
4510 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will
4511 * be marked with FC_NLP_MORE bit and the process of issuing remaining ADISC
4512 * IOCBs quit for later pick up. On the other hand, after walking through
4513 * all the ndlps with the @vport and there is none ADISC IOCB issued, the
4514 * @vport fc_flag shall be cleared with FC_NLP_MORE bit indicating there is
4515 * no more ADISC need to be sent.
4516 *
4517 * Return code
4518 *    The number of N_Ports with adisc issued.
4519 **/
4520int
4521lpfc_els_disc_adisc(struct lpfc_vport *vport)
4522{
4523	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4524	struct lpfc_nodelist *ndlp, *next_ndlp;
4525	int sentadisc = 0;
4526
4527	/* go thru NPR nodes and issue any remaining ELS ADISCs */
4528	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
4529		if (!NLP_CHK_NODE_ACT(ndlp))
4530			continue;
4531		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
4532		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
4533		    (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
4534			spin_lock_irq(shost->host_lock);
4535			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
4536			spin_unlock_irq(shost->host_lock);
4537			ndlp->nlp_prev_state = ndlp->nlp_state;
4538			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
4539			lpfc_issue_els_adisc(vport, ndlp, 0);
4540			sentadisc++;
4541			vport->num_disc_nodes++;
4542			if (vport->num_disc_nodes >=
4543			    vport->cfg_discovery_threads) {
4544				spin_lock_irq(shost->host_lock);
4545				vport->fc_flag |= FC_NLP_MORE;
4546				spin_unlock_irq(shost->host_lock);
4547				break;
4548			}
4549		}
4550	}
4551	if (sentadisc == 0) {
4552		spin_lock_irq(shost->host_lock);
4553		vport->fc_flag &= ~FC_NLP_MORE;
4554		spin_unlock_irq(shost->host_lock);
4555	}
4556	return sentadisc;
4557}
4558
4559/**
4560 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
4561 * @vport: pointer to a host virtual N_Port data structure.
4562 *
4563 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
4564 * which are in node port recovery state, with a @vport. Each time an ELS
4565 * ADISC PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
4566 * the per @vport number of discover count (num_disc_nodes) shall be
4567 * incremented. If the num_disc_nodes reaches a pre-configured threshold
4568 * (cfg_discovery_threads), the @vport fc_flag will be marked with FC_NLP_MORE
4569 * bit set and quit the process of issuing remaining ADISC PLOGIN IOCBs for
4570 * later pick up. On the other hand, after walking through all the ndlps with
4571 * the @vport and there is none ADISC PLOGI IOCB issued, the @vport fc_flag
4572 * shall be cleared with the FC_NLP_MORE bit indicating there is no more ADISC
4573 * PLOGI need to be sent.
4574 *
4575 * Return code
4576 *   The number of N_Ports with plogi issued.
4577 **/
4578int
4579lpfc_els_disc_plogi(struct lpfc_vport *vport)
4580{
4581	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4582	struct lpfc_nodelist *ndlp, *next_ndlp;
4583	int sentplogi = 0;
4584
4585	/* go thru NPR nodes and issue any remaining ELS PLOGIs */
4586	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
4587		if (!NLP_CHK_NODE_ACT(ndlp))
4588			continue;
4589		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
4590		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
4591		    (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
4592		    (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
4593			ndlp->nlp_prev_state = ndlp->nlp_state;
4594			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
4595			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
4596			sentplogi++;
4597			vport->num_disc_nodes++;
4598			if (vport->num_disc_nodes >=
4599			    vport->cfg_discovery_threads) {
4600				spin_lock_irq(shost->host_lock);
4601				vport->fc_flag |= FC_NLP_MORE;
4602				spin_unlock_irq(shost->host_lock);
4603				break;
4604			}
4605		}
4606	}
4607	if (sentplogi) {
4608		lpfc_set_disctmo(vport);
4609	}
4610	else {
4611		spin_lock_irq(shost->host_lock);
4612		vport->fc_flag &= ~FC_NLP_MORE;
4613		spin_unlock_irq(shost->host_lock);
4614	}
4615	return sentplogi;
4616}
4617
4618/**
4619 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport
4620 * @vport: pointer to a host virtual N_Port data structure.
4621 *
4622 * This routine cleans up any Registration State Change Notification
4623 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the
4624 * @vport together with the host_lock is used to prevent multiple thread
4625 * trying to access the RSCN array on a same @vport at the same time.
4626 **/
4627void
4628lpfc_els_flush_rscn(struct lpfc_vport *vport)
4629{
4630	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4631	struct lpfc_hba  *phba = vport->phba;
4632	int i;
4633
4634	spin_lock_irq(shost->host_lock);
4635	if (vport->fc_rscn_flush) {
4636		/* Another thread is walking fc_rscn_id_list on this vport */
4637		spin_unlock_irq(shost->host_lock);
4638		return;
4639	}
4640	/* Indicate we are walking lpfc_els_flush_rscn on this vport */
4641	vport->fc_rscn_flush = 1;
4642	spin_unlock_irq(shost->host_lock);
4643
4644	for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
4645		lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
4646		vport->fc_rscn_id_list[i] = NULL;
4647	}
4648	spin_lock_irq(shost->host_lock);
4649	vport->fc_rscn_id_cnt = 0;
4650	vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
4651	spin_unlock_irq(shost->host_lock);
4652	lpfc_can_disctmo(vport);
4653	/* Indicate we are done walking this fc_rscn_id_list */
4654	vport->fc_rscn_flush = 0;
4655}
4656
4657/**
4658 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
4659 * @vport: pointer to a host virtual N_Port data structure.
4660 * @did: remote destination port identifier.
4661 *
4662 * This routine checks whether there is any pending Registration State
4663 * Configuration Notification (RSCN) to a @did on @vport.
4664 *
4665 * Return code
4666 *   None zero - The @did matched with a pending rscn
4667 *   0 - not able to match @did with a pending rscn
4668 **/
4669int
4670lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
4671{
4672	D_ID ns_did;
4673	D_ID rscn_did;
4674	uint32_t *lp;
4675	uint32_t payload_len, i;
4676	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4677
4678	ns_did.un.word = did;
4679
4680	/* Never match fabric nodes for RSCNs */
4681	if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
4682		return 0;
4683
4684	/* If we are doing a FULL RSCN rediscovery, match everything */
4685	if (vport->fc_flag & FC_RSCN_DISCOVERY)
4686		return did;
4687
4688	spin_lock_irq(shost->host_lock);
4689	if (vport->fc_rscn_flush) {
4690		/* Another thread is walking fc_rscn_id_list on this vport */
4691		spin_unlock_irq(shost->host_lock);
4692		return 0;
4693	}
4694	/* Indicate we are walking fc_rscn_id_list on this vport */
4695	vport->fc_rscn_flush = 1;
4696	spin_unlock_irq(shost->host_lock);
4697	for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
4698		lp = vport->fc_rscn_id_list[i]->virt;
4699		payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
4700		payload_len -= sizeof(uint32_t);	/* take off word 0 */
4701		while (payload_len) {
4702			rscn_did.un.word = be32_to_cpu(*lp++);
4703			payload_len -= sizeof(uint32_t);
4704			switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
4705			case RSCN_ADDRESS_FORMAT_PORT:
4706				if ((ns_did.un.b.domain == rscn_did.un.b.domain)
4707				    && (ns_did.un.b.area == rscn_did.un.b.area)
4708				    && (ns_did.un.b.id == rscn_did.un.b.id))
4709					goto return_did_out;
4710				break;
4711			case RSCN_ADDRESS_FORMAT_AREA:
4712				if ((ns_did.un.b.domain == rscn_did.un.b.domain)
4713				    && (ns_did.un.b.area == rscn_did.un.b.area))
4714					goto return_did_out;
4715				break;
4716			case RSCN_ADDRESS_FORMAT_DOMAIN:
4717				if (ns_did.un.b.domain == rscn_did.un.b.domain)
4718					goto return_did_out;
4719				break;
4720			case RSCN_ADDRESS_FORMAT_FABRIC:
4721				goto return_did_out;
4722			}
4723		}
4724	}
4725	/* Indicate we are done with walking fc_rscn_id_list on this vport */
4726	vport->fc_rscn_flush = 0;
4727	return 0;
4728return_did_out:
4729	/* Indicate we are done with walking fc_rscn_id_list on this vport */
4730	vport->fc_rscn_flush = 0;
4731	return did;
4732}
4733
4734/**
4735 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn
4736 * @vport: pointer to a host virtual N_Port data structure.
4737 *
4738 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the
4739 * state machine for a @vport's nodes that are with pending RSCN (Registration
4740 * State Change Notification).
4741 *
4742 * Return code
4743 *   0 - Successful (currently alway return 0)
4744 **/
4745static int
4746lpfc_rscn_recovery_check(struct lpfc_vport *vport)
4747{
4748	struct lpfc_nodelist *ndlp = NULL;
4749
4750	/* Move all affected nodes by pending RSCNs to NPR state. */
4751	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
4752		if (!NLP_CHK_NODE_ACT(ndlp) ||
4753		    (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
4754		    !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
4755			continue;
4756		lpfc_disc_state_machine(vport, ndlp, NULL,
4757					NLP_EVT_DEVICE_RECOVERY);
4758		lpfc_cancel_retry_delay_tmo(vport, ndlp);
4759	}
4760	return 0;
4761}
4762
4763/**
4764 * lpfc_send_rscn_event - Send an RSCN event to management application
4765 * @vport: pointer to a host virtual N_Port data structure.
4766 * @cmdiocb: pointer to lpfc command iocb data structure.
4767 *
4768 * lpfc_send_rscn_event sends an RSCN netlink event to management
4769 * applications.
4770 */
4771static void
4772lpfc_send_rscn_event(struct lpfc_vport *vport,
4773		struct lpfc_iocbq *cmdiocb)
4774{
4775	struct lpfc_dmabuf *pcmd;
4776	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4777	uint32_t *payload_ptr;
4778	uint32_t payload_len;
4779	struct lpfc_rscn_event_header *rscn_event_data;
4780
4781	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4782	payload_ptr = (uint32_t *) pcmd->virt;
4783	payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
4784
4785	rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
4786		payload_len, GFP_KERNEL);
4787	if (!rscn_event_data) {
4788		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4789			"0147 Failed to allocate memory for RSCN event\n");
4790		return;
4791	}
4792	rscn_event_data->event_type = FC_REG_RSCN_EVENT;
4793	rscn_event_data->payload_length = payload_len;
4794	memcpy(rscn_event_data->rscn_payload, payload_ptr,
4795		payload_len);
4796
4797	fc_host_post_vendor_event(shost,
4798		fc_get_event_number(),
4799		sizeof(struct lpfc_els_event_header) + payload_len,
4800		(char *)rscn_event_data,
4801		LPFC_NL_VENDOR_ID);
4802
4803	kfree(rscn_event_data);
4804}
4805
4806/**
4807 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
4808 * @vport: pointer to a host virtual N_Port data structure.
4809 * @cmdiocb: pointer to lpfc command iocb data structure.
4810 * @ndlp: pointer to a node-list data structure.
4811 *
4812 * This routine processes an unsolicited RSCN (Registration State Change
4813 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
4814 * to invoke fc_host_post_event() routine to the FC transport layer. If the
4815 * discover state machine is about to begin discovery, it just accepts the
4816 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only
4817 * contains N_Port IDs for other vports on this HBA, it just accepts the
4818 * RSCN and ignore processing it. If the state machine is in the recovery
4819 * state, the fc_rscn_id_list of this @vport is walked and the
4820 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for
4821 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn()
4822 * routine is invoked to handle the RSCN event.
4823 *
4824 * Return code
4825 *   0 - Just sent the acc response
4826 *   1 - Sent the acc response and waited for name server completion
4827 **/
4828static int
4829lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4830		  struct lpfc_nodelist *ndlp)
4831{
4832	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4833	struct lpfc_hba  *phba = vport->phba;
4834	struct lpfc_dmabuf *pcmd;
4835	uint32_t *lp, *datap;
4836	IOCB_t *icmd;
4837	uint32_t payload_len, length, nportid, *cmd;
4838	int rscn_cnt;
4839	int rscn_id = 0, hba_id = 0;
4840	int i;
4841
4842	icmd = &cmdiocb->iocb;
4843	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4844	lp = (uint32_t *) pcmd->virt;
4845
4846	payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
4847	payload_len -= sizeof(uint32_t);	/* take off word 0 */
4848	/* RSCN received */
4849	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4850			 "0214 RSCN received Data: x%x x%x x%x x%x\n",
4851			 vport->fc_flag, payload_len, *lp,
4852			 vport->fc_rscn_id_cnt);
4853
4854	/* Send an RSCN event to the management application */
4855	lpfc_send_rscn_event(vport, cmdiocb);
4856
4857	for (i = 0; i < payload_len/sizeof(uint32_t); i++)
4858		fc_host_post_event(shost, fc_get_event_number(),
4859			FCH_EVT_RSCN, lp[i]);
4860
4861	/* If we are about to begin discovery, just ACC the RSCN.
4862	 * Discovery processing will satisfy it.
4863	 */
4864	if (vport->port_state <= LPFC_NS_QRY) {
4865		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4866			"RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
4867			ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4868
4869		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4870		return 0;
4871	}
4872
4873	/* If this RSCN just contains NPortIDs for other vports on this HBA,
4874	 * just ACC and ignore it.
4875	 */
4876	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4877		!(vport->cfg_peer_port_login)) {
4878		i = payload_len;
4879		datap = lp;
4880		while (i > 0) {
4881			nportid = *datap++;
4882			nportid = ((be32_to_cpu(nportid)) & Mask_DID);
4883			i -= sizeof(uint32_t);
4884			rscn_id++;
4885			if (lpfc_find_vport_by_did(phba, nportid))
4886				hba_id++;
4887		}
4888		if (rscn_id == hba_id) {
4889			/* ALL NPortIDs in RSCN are on HBA */
4890			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4891					 "0219 Ignore RSCN "
4892					 "Data: x%x x%x x%x x%x\n",
4893					 vport->fc_flag, payload_len,
4894					 *lp, vport->fc_rscn_id_cnt);
4895			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4896				"RCV RSCN vport:  did:x%x/ste:x%x flg:x%x",
4897				ndlp->nlp_DID, vport->port_state,
4898				ndlp->nlp_flag);
4899
4900			lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
4901				ndlp, NULL);
4902			return 0;
4903		}
4904	}
4905
4906	spin_lock_irq(shost->host_lock);
4907	if (vport->fc_rscn_flush) {
4908		/* Another thread is walking fc_rscn_id_list on this vport */
4909		vport->fc_flag |= FC_RSCN_DISCOVERY;
4910		spin_unlock_irq(shost->host_lock);
4911		/* Send back ACC */
4912		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4913		return 0;
4914	}
4915	/* Indicate we are walking fc_rscn_id_list on this vport */
4916	vport->fc_rscn_flush = 1;
4917	spin_unlock_irq(shost->host_lock);
4918	/* Get the array count after successfully have the token */
4919	rscn_cnt = vport->fc_rscn_id_cnt;
4920	/* If we are already processing an RSCN, save the received
4921	 * RSCN payload buffer, cmdiocb->context2 to process later.
4922	 */
4923	if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
4924		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4925			"RCV RSCN defer:  did:x%x/ste:x%x flg:x%x",
4926			ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4927
4928		spin_lock_irq(shost->host_lock);
4929		vport->fc_flag |= FC_RSCN_DEFERRED;
4930		if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
4931		    !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
4932			vport->fc_flag |= FC_RSCN_MODE;
4933			spin_unlock_irq(shost->host_lock);
4934			if (rscn_cnt) {
4935				cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
4936				length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
4937			}
4938			if ((rscn_cnt) &&
4939			    (payload_len + length <= LPFC_BPL_SIZE)) {
4940				*cmd &= ELS_CMD_MASK;
4941				*cmd |= cpu_to_be32(payload_len + length);
4942				memcpy(((uint8_t *)cmd) + length, lp,
4943				       payload_len);
4944			} else {
4945				vport->fc_rscn_id_list[rscn_cnt] = pcmd;
4946				vport->fc_rscn_id_cnt++;
4947				/* If we zero, cmdiocb->context2, the calling
4948				 * routine will not try to free it.
4949				 */
4950				cmdiocb->context2 = NULL;
4951			}
4952			/* Deferred RSCN */
4953			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4954					 "0235 Deferred RSCN "
4955					 "Data: x%x x%x x%x\n",
4956					 vport->fc_rscn_id_cnt, vport->fc_flag,
4957					 vport->port_state);
4958		} else {
4959			vport->fc_flag |= FC_RSCN_DISCOVERY;
4960			spin_unlock_irq(shost->host_lock);
4961			/* ReDiscovery RSCN */
4962			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4963					 "0234 ReDiscovery RSCN "
4964					 "Data: x%x x%x x%x\n",
4965					 vport->fc_rscn_id_cnt, vport->fc_flag,
4966					 vport->port_state);
4967		}
4968		/* Indicate we are done walking fc_rscn_id_list on this vport */
4969		vport->fc_rscn_flush = 0;
4970		/* Send back ACC */
4971		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4972		/* send RECOVERY event for ALL nodes that match RSCN payload */
4973		lpfc_rscn_recovery_check(vport);
4974		spin_lock_irq(shost->host_lock);
4975		vport->fc_flag &= ~FC_RSCN_DEFERRED;
4976		spin_unlock_irq(shost->host_lock);
4977		return 0;
4978	}
4979	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4980		"RCV RSCN:        did:x%x/ste:x%x flg:x%x",
4981		ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4982
4983	spin_lock_irq(shost->host_lock);
4984	vport->fc_flag |= FC_RSCN_MODE;
4985	spin_unlock_irq(shost->host_lock);
4986	vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
4987	/* Indicate we are done walking fc_rscn_id_list on this vport */
4988	vport->fc_rscn_flush = 0;
4989	/*
4990	 * If we zero, cmdiocb->context2, the calling routine will
4991	 * not try to free it.
4992	 */
4993	cmdiocb->context2 = NULL;
4994	lpfc_set_disctmo(vport);
4995	/* Send back ACC */
4996	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4997	/* send RECOVERY event for ALL nodes that match RSCN payload */
4998	lpfc_rscn_recovery_check(vport);
4999	return lpfc_els_handle_rscn(vport);
5000}
5001
5002/**
5003 * lpfc_els_handle_rscn - Handle rscn for a vport
5004 * @vport: pointer to a host virtual N_Port data structure.
5005 *
5006 * This routine handles the Registration State Configuration Notification
5007 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall
5008 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise,
5009 * if the ndlp to NameServer exists, a Common Transport (CT) command to the
5010 * NameServer shall be issued. If CT command to the NameServer fails to be
5011 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any
5012 * RSCN activities with the @vport.
5013 *
5014 * Return code
5015 *   0 - Cleaned up rscn on the @vport
5016 *   1 - Wait for plogi to name server before proceed
5017 **/
5018int
5019lpfc_els_handle_rscn(struct lpfc_vport *vport)
5020{
5021	struct lpfc_nodelist *ndlp;
5022	struct lpfc_hba *phba = vport->phba;
5023
5024	/* Ignore RSCN if the port is being torn down. */
5025	if (vport->load_flag & FC_UNLOADING) {
5026		lpfc_els_flush_rscn(vport);
5027		return 0;
5028	}
5029
5030	/* Start timer for RSCN processing */
5031	lpfc_set_disctmo(vport);
5032
5033	/* RSCN processed */
5034	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5035			 "0215 RSCN processed Data: x%x x%x x%x x%x\n",
5036			 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
5037			 vport->port_state);
5038
5039	/* To process RSCN, first compare RSCN data with NameServer */
5040	vport->fc_ns_retry = 0;
5041	vport->num_disc_nodes = 0;
5042
5043	ndlp = lpfc_findnode_did(vport, NameServer_DID);
5044	if (ndlp && NLP_CHK_NODE_ACT(ndlp)
5045	    && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
5046		/* Good ndlp, issue CT Request to NameServer */
5047		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
5048			/* Wait for NameServer query cmpl before we can
5049			   continue */
5050			return 1;
5051	} else {
5052		/* If login to NameServer does not exist, issue one */
5053		/* Good status, issue PLOGI to NameServer */
5054		ndlp = lpfc_findnode_did(vport, NameServer_DID);
5055		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
5056			/* Wait for NameServer login cmpl before we can
5057			   continue */
5058			return 1;
5059
5060		if (ndlp) {
5061			ndlp = lpfc_enable_node(vport, ndlp,
5062						NLP_STE_PLOGI_ISSUE);
5063			if (!ndlp) {
5064				lpfc_els_flush_rscn(vport);
5065				return 0;
5066			}
5067			ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
5068		} else {
5069			ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
5070			if (!ndlp) {
5071				lpfc_els_flush_rscn(vport);
5072				return 0;
5073			}
5074			lpfc_nlp_init(vport, ndlp, NameServer_DID);
5075			ndlp->nlp_prev_state = ndlp->nlp_state;
5076			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
5077		}
5078		ndlp->nlp_type |= NLP_FABRIC;
5079		lpfc_issue_els_plogi(vport, NameServer_DID, 0);
5080		/* Wait for NameServer login cmpl before we can
5081		 * continue
5082		 */
5083		return 1;
5084	}
5085
5086	lpfc_els_flush_rscn(vport);
5087	return 0;
5088}
5089
5090/**
5091 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
5092 * @vport: pointer to a host virtual N_Port data structure.
5093 * @cmdiocb: pointer to lpfc command iocb data structure.
5094 * @ndlp: pointer to a node-list data structure.
5095 *
5096 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS
5097 * unsolicited event. An unsolicited FLOGI can be received in a point-to-
5098 * point topology. As an unsolicited FLOGI should not be received in a loop
5099 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
5100 * lpfc_check_sparm() routine is invoked to check the parameters in the
5101 * unsolicited FLOGI. If parameters validation failed, the routine
5102 * lpfc_els_rsp_reject() shall be called with reject reason code set to
5103 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
5104 * FLOGI shall be compared with the Port WWN of the @vport to determine who
5105 * will initiate PLOGI. The higher lexicographical value party shall has
5106 * higher priority (as the winning port) and will initiate PLOGI and
5107 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
5108 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
5109 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
5110 *
5111 * Return code
5112 *   0 - Successfully processed the unsolicited flogi
5113 *   1 - Failed to process the unsolicited flogi
5114 **/
5115static int
5116lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5117		   struct lpfc_nodelist *ndlp)
5118{
5119	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5120	struct lpfc_hba  *phba = vport->phba;
5121	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5122	uint32_t *lp = (uint32_t *) pcmd->virt;
5123	IOCB_t *icmd = &cmdiocb->iocb;
5124	struct serv_parm *sp;
5125	LPFC_MBOXQ_t *mbox;
5126	struct ls_rjt stat;
5127	uint32_t cmd, did;
5128	int rc;
5129	uint32_t fc_flag = 0;
5130	uint32_t port_state = 0;
5131
5132	cmd = *lp++;
5133	sp = (struct serv_parm *) lp;
5134
5135	/* FLOGI received */
5136
5137	lpfc_set_disctmo(vport);
5138
5139	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
5140		/* We should never receive a FLOGI in loop mode, ignore it */
5141		did = icmd->un.elsreq64.remoteID;
5142
5143		/* An FLOGI ELS command <elsCmd> was received from DID <did> in
5144		   Loop Mode */
5145		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5146				 "0113 An FLOGI ELS command x%x was "
5147				 "received from DID x%x in Loop Mode\n",
5148				 cmd, did);
5149		return 1;
5150	}
5151
5152	if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
5153		/* For a FLOGI we accept, then if our portname is greater
5154		 * then the remote portname we initiate Nport login.
5155		 */
5156
5157		rc = memcmp(&vport->fc_portname, &sp->portName,
5158			    sizeof(struct lpfc_name));
5159
5160		if (!rc) {
5161			if (phba->sli_rev < LPFC_SLI_REV4) {
5162				mbox = mempool_alloc(phba->mbox_mem_pool,
5163						     GFP_KERNEL);
5164				if (!mbox)
5165					return 1;
5166				lpfc_linkdown(phba);
5167				lpfc_init_link(phba, mbox,
5168					       phba->cfg_topology,
5169					       phba->cfg_link_speed);
5170				mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
5171				mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5172				mbox->vport = vport;
5173				rc = lpfc_sli_issue_mbox(phba, mbox,
5174							 MBX_NOWAIT);
5175				lpfc_set_loopback_flag(phba);
5176				if (rc == MBX_NOT_FINISHED)
5177					mempool_free(mbox, phba->mbox_mem_pool);
5178				return 1;
5179			} else {
5180				/* abort the flogi coming back to ourselves
5181				 * due to external loopback on the port.
5182				 */
5183				lpfc_els_abort_flogi(phba);
5184				return 0;
5185			}
5186		} else if (rc > 0) {	/* greater than */
5187			spin_lock_irq(shost->host_lock);
5188			vport->fc_flag |= FC_PT2PT_PLOGI;
5189			spin_unlock_irq(shost->host_lock);
5190
5191			/* If we have the high WWPN we can assign our own
5192			 * myDID; otherwise, we have to WAIT for a PLOGI
5193			 * from the remote NPort to find out what it
5194			 * will be.
5195			 */
5196			vport->fc_myDID = PT2PT_LocalID;
5197		} else
5198			vport->fc_myDID = PT2PT_RemoteID;
5199
5200		/*
5201		 * The vport state should go to LPFC_FLOGI only
5202		 * AFTER we issue a FLOGI, not receive one.
5203		 */
5204		spin_lock_irq(shost->host_lock);
5205		fc_flag = vport->fc_flag;
5206		port_state = vport->port_state;
5207		vport->fc_flag |= FC_PT2PT;
5208		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
5209		spin_unlock_irq(shost->host_lock);
5210		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5211				 "3311 Rcv Flogi PS x%x new PS x%x "
5212				 "fc_flag x%x new fc_flag x%x\n",
5213				 port_state, vport->port_state,
5214				 fc_flag, vport->fc_flag);
5215
5216		/*
5217		 * We temporarily set fc_myDID to make it look like we are
5218		 * a Fabric. This is done just so we end up with the right
5219		 * did / sid on the FLOGI ACC rsp.
5220		 */
5221		did = vport->fc_myDID;
5222		vport->fc_myDID = Fabric_DID;
5223
5224	} else {
5225		/* Reject this request because invalid parameters */
5226		stat.un.b.lsRjtRsvd0 = 0;
5227		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5228		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
5229		stat.un.b.vendorUnique = 0;
5230
5231		/*
5232		 * We temporarily set fc_myDID to make it look like we are
5233		 * a Fabric. This is done just so we end up with the right
5234		 * did / sid on the FLOGI LS_RJT rsp.
5235		 */
5236		did = vport->fc_myDID;
5237		vport->fc_myDID = Fabric_DID;
5238
5239		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
5240			NULL);
5241
5242		/* Now lets put fc_myDID back to what its supposed to be */
5243		vport->fc_myDID = did;
5244
5245		return 1;
5246	}
5247
5248	/* Send back ACC */
5249	lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
5250
5251	/* Now lets put fc_myDID back to what its supposed to be */
5252	vport->fc_myDID = did;
5253
5254	if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
5255
5256		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5257		if (!mbox)
5258			goto fail;
5259
5260		lpfc_config_link(phba, mbox);
5261
5262		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5263		mbox->vport = vport;
5264		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5265		if (rc == MBX_NOT_FINISHED) {
5266			mempool_free(mbox, phba->mbox_mem_pool);
5267			goto fail;
5268		}
5269	}
5270
5271	return 0;
5272fail:
5273	return 1;
5274}
5275
5276/**
5277 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb
5278 * @vport: pointer to a host virtual N_Port data structure.
5279 * @cmdiocb: pointer to lpfc command iocb data structure.
5280 * @ndlp: pointer to a node-list data structure.
5281 *
5282 * This routine processes Request Node Identification Data (RNID) IOCB
5283 * received as an ELS unsolicited event. Only when the RNID specified format
5284 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data)
5285 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to
5286 * Accept (ACC) the RNID ELS command. All the other RNID formats are
5287 * rejected by invoking the lpfc_els_rsp_reject() routine.
5288 *
5289 * Return code
5290 *   0 - Successfully processed rnid iocb (currently always return 0)
5291 **/
5292static int
5293lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5294		  struct lpfc_nodelist *ndlp)
5295{
5296	struct lpfc_dmabuf *pcmd;
5297	uint32_t *lp;
5298	IOCB_t *icmd;
5299	RNID *rn;
5300	struct ls_rjt stat;
5301	uint32_t cmd;
5302
5303	icmd = &cmdiocb->iocb;
5304	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5305	lp = (uint32_t *) pcmd->virt;
5306
5307	cmd = *lp++;
5308	rn = (RNID *) lp;
5309
5310	/* RNID received */
5311
5312	switch (rn->Format) {
5313	case 0:
5314	case RNID_TOPOLOGY_DISC:
5315		/* Send back ACC */
5316		lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
5317		break;
5318	default:
5319		/* Reject this request because format not supported */
5320		stat.un.b.lsRjtRsvd0 = 0;
5321		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5322		stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5323		stat.un.b.vendorUnique = 0;
5324		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
5325			NULL);
5326	}
5327	return 0;
5328}
5329
5330/**
5331 * lpfc_els_rcv_echo - Process an unsolicited echo iocb
5332 * @vport: pointer to a host virtual N_Port data structure.
5333 * @cmdiocb: pointer to lpfc command iocb data structure.
5334 * @ndlp: pointer to a node-list data structure.
5335 *
5336 * Return code
5337 *   0 - Successfully processed echo iocb (currently always return 0)
5338 **/
5339static int
5340lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5341		  struct lpfc_nodelist *ndlp)
5342{
5343	uint8_t *pcmd;
5344
5345	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
5346
5347	/* skip over first word of echo command to find echo data */
5348	pcmd += sizeof(uint32_t);
5349
5350	lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
5351	return 0;
5352}
5353
5354/**
5355 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
5356 * @vport: pointer to a host virtual N_Port data structure.
5357 * @cmdiocb: pointer to lpfc command iocb data structure.
5358 * @ndlp: pointer to a node-list data structure.
5359 *
5360 * This routine processes a Link Incident Report Registration(LIRR) IOCB
5361 * received as an ELS unsolicited event. Currently, this function just invokes
5362 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
5363 *
5364 * Return code
5365 *   0 - Successfully processed lirr iocb (currently always return 0)
5366 **/
5367static int
5368lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5369		  struct lpfc_nodelist *ndlp)
5370{
5371	struct ls_rjt stat;
5372
5373	/* For now, unconditionally reject this command */
5374	stat.un.b.lsRjtRsvd0 = 0;
5375	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5376	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5377	stat.un.b.vendorUnique = 0;
5378	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5379	return 0;
5380}
5381
5382/**
5383 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
5384 * @vport: pointer to a host virtual N_Port data structure.
5385 * @cmdiocb: pointer to lpfc command iocb data structure.
5386 * @ndlp: pointer to a node-list data structure.
5387 *
5388 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
5389 * received as an ELS unsolicited event. A request to RRQ shall only
5390 * be accepted if the Originator Nx_Port N_Port_ID or the Responder
5391 * Nx_Port N_Port_ID of the target Exchange is the same as the
5392 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
5393 * not accepted, an LS_RJT with reason code "Unable to perform
5394 * command request" and reason code explanation "Invalid Originator
5395 * S_ID" shall be returned. For now, we just unconditionally accept
5396 * RRQ from the target.
5397 **/
5398static void
5399lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5400		 struct lpfc_nodelist *ndlp)
5401{
5402	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
5403	if (vport->phba->sli_rev == LPFC_SLI_REV4)
5404		lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
5405}
5406
5407/**
5408 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
5409 * @phba: pointer to lpfc hba data structure.
5410 * @pmb: pointer to the driver internal queue element for mailbox command.
5411 *
5412 * This routine is the completion callback function for the MBX_READ_LNK_STAT
5413 * mailbox command. This callback function is to actually send the Accept
5414 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
5415 * collects the link statistics from the completion of the MBX_READ_LNK_STAT
5416 * mailbox command, constructs the RPS response with the link statistics
5417 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
5418 * response to the RPS.
5419 *
5420 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
5421 * will be incremented by 1 for holding the ndlp and the reference to ndlp
5422 * will be stored into the context1 field of the IOCB for the completion
5423 * callback function to the RPS Accept Response ELS IOCB command.
5424 *
5425 **/
5426static void
5427lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5428{
5429	MAILBOX_t *mb;
5430	IOCB_t *icmd;
5431	struct RLS_RSP *rls_rsp;
5432	uint8_t *pcmd;
5433	struct lpfc_iocbq *elsiocb;
5434	struct lpfc_nodelist *ndlp;
5435	uint16_t oxid;
5436	uint16_t rxid;
5437	uint32_t cmdsize;
5438
5439	mb = &pmb->u.mb;
5440
5441	ndlp = (struct lpfc_nodelist *) pmb->context2;
5442	rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
5443	oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
5444	pmb->context1 = NULL;
5445	pmb->context2 = NULL;
5446
5447	if (mb->mbxStatus) {
5448		mempool_free(pmb, phba->mbox_mem_pool);
5449		return;
5450	}
5451
5452	cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
5453	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5454				     lpfc_max_els_tries, ndlp,
5455				     ndlp->nlp_DID, ELS_CMD_ACC);
5456
5457	/* Decrement the ndlp reference count from previous mbox command */
5458	lpfc_nlp_put(ndlp);
5459
5460	if (!elsiocb) {
5461		mempool_free(pmb, phba->mbox_mem_pool);
5462		return;
5463	}
5464
5465	icmd = &elsiocb->iocb;
5466	icmd->ulpContext = rxid;
5467	icmd->unsli3.rcvsli3.ox_id = oxid;
5468
5469	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5470	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5471	pcmd += sizeof(uint32_t); /* Skip past command */
5472	rls_rsp = (struct RLS_RSP *)pcmd;
5473
5474	rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
5475	rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
5476	rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
5477	rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
5478	rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
5479	rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
5480	mempool_free(pmb, phba->mbox_mem_pool);
5481	/* Xmit ELS RLS ACC response tag <ulpIoTag> */
5482	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
5483			 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
5484			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
5485			 elsiocb->iotag, elsiocb->iocb.ulpContext,
5486			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5487			 ndlp->nlp_rpi);
5488	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5489	phba->fc_stat.elsXmitACC++;
5490	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
5491		lpfc_els_free_iocb(phba, elsiocb);
5492}
5493
5494/**
5495 * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
5496 * @phba: pointer to lpfc hba data structure.
5497 * @pmb: pointer to the driver internal queue element for mailbox command.
5498 *
5499 * This routine is the completion callback function for the MBX_READ_LNK_STAT
5500 * mailbox command. This callback function is to actually send the Accept
5501 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
5502 * collects the link statistics from the completion of the MBX_READ_LNK_STAT
5503 * mailbox command, constructs the RPS response with the link statistics
5504 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
5505 * response to the RPS.
5506 *
5507 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
5508 * will be incremented by 1 for holding the ndlp and the reference to ndlp
5509 * will be stored into the context1 field of the IOCB for the completion
5510 * callback function to the RPS Accept Response ELS IOCB command.
5511 *
5512 **/
5513static void
5514lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5515{
5516	MAILBOX_t *mb;
5517	IOCB_t *icmd;
5518	RPS_RSP *rps_rsp;
5519	uint8_t *pcmd;
5520	struct lpfc_iocbq *elsiocb;
5521	struct lpfc_nodelist *ndlp;
5522	uint16_t status;
5523	uint16_t oxid;
5524	uint16_t rxid;
5525	uint32_t cmdsize;
5526
5527	mb = &pmb->u.mb;
5528
5529	ndlp = (struct lpfc_nodelist *) pmb->context2;
5530	rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
5531	oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
5532	pmb->context1 = NULL;
5533	pmb->context2 = NULL;
5534
5535	if (mb->mbxStatus) {
5536		mempool_free(pmb, phba->mbox_mem_pool);
5537		return;
5538	}
5539
5540	cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
5541	mempool_free(pmb, phba->mbox_mem_pool);
5542	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5543				     lpfc_max_els_tries, ndlp,
5544				     ndlp->nlp_DID, ELS_CMD_ACC);
5545
5546	/* Decrement the ndlp reference count from previous mbox command */
5547	lpfc_nlp_put(ndlp);
5548
5549	if (!elsiocb)
5550		return;
5551
5552	icmd = &elsiocb->iocb;
5553	icmd->ulpContext = rxid;
5554	icmd->unsli3.rcvsli3.ox_id = oxid;
5555
5556	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5557	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5558	pcmd += sizeof(uint32_t); /* Skip past command */
5559	rps_rsp = (RPS_RSP *)pcmd;
5560
5561	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
5562		status = 0x10;
5563	else
5564		status = 0x8;
5565	if (phba->pport->fc_flag & FC_FABRIC)
5566		status |= 0x4;
5567
5568	rps_rsp->rsvd1 = 0;
5569	rps_rsp->portStatus = cpu_to_be16(status);
5570	rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
5571	rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
5572	rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
5573	rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
5574	rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
5575	rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
5576	/* Xmit ELS RPS ACC response tag <ulpIoTag> */
5577	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
5578			 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
5579			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
5580			 elsiocb->iotag, elsiocb->iocb.ulpContext,
5581			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5582			 ndlp->nlp_rpi);
5583	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5584	phba->fc_stat.elsXmitACC++;
5585	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
5586		lpfc_els_free_iocb(phba, elsiocb);
5587	return;
5588}
5589
5590/**
5591 * lpfc_els_rcv_rls - Process an unsolicited rls iocb
5592 * @vport: pointer to a host virtual N_Port data structure.
5593 * @cmdiocb: pointer to lpfc command iocb data structure.
5594 * @ndlp: pointer to a node-list data structure.
5595 *
5596 * This routine processes Read Port Status (RPL) IOCB received as an
5597 * ELS unsolicited event. It first checks the remote port state. If the
5598 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
5599 * state, it invokes the lpfc_els_rsl_reject() routine to send the reject
5600 * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command
5601 * for reading the HBA link statistics. It is for the callback function,
5602 * lpfc_els_rsp_rls_acc(), set to the MBX_READ_LNK_STAT mailbox command
5603 * to actually sending out RPL Accept (ACC) response.
5604 *
5605 * Return codes
5606 *   0 - Successfully processed rls iocb (currently always return 0)
5607 **/
5608static int
5609lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5610		 struct lpfc_nodelist *ndlp)
5611{
5612	struct lpfc_hba *phba = vport->phba;
5613	LPFC_MBOXQ_t *mbox;
5614	struct lpfc_dmabuf *pcmd;
5615	struct ls_rjt stat;
5616
5617	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
5618	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
5619		/* reject the unsolicited RPS request and done with it */
5620		goto reject_out;
5621
5622	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5623
5624	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
5625	if (mbox) {
5626		lpfc_read_lnk_stat(phba, mbox);
5627		mbox->context1 = (void *)((unsigned long)
5628			((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
5629			cmdiocb->iocb.ulpContext)); /* rx_id */
5630		mbox->context2 = lpfc_nlp_get(ndlp);
5631		mbox->vport = vport;
5632		mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
5633		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
5634			!= MBX_NOT_FINISHED)
5635			/* Mbox completion will send ELS Response */
5636			return 0;
5637		/* Decrement reference count used for the failed mbox
5638		 * command.
5639		 */
5640		lpfc_nlp_put(ndlp);
5641		mempool_free(mbox, phba->mbox_mem_pool);
5642	}
5643reject_out:
5644	/* issue rejection response */
5645	stat.un.b.lsRjtRsvd0 = 0;
5646	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5647	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5648	stat.un.b.vendorUnique = 0;
5649	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5650	return 0;
5651}
5652
5653/**
5654 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
5655 * @vport: pointer to a host virtual N_Port data structure.
5656 * @cmdiocb: pointer to lpfc command iocb data structure.
5657 * @ndlp: pointer to a node-list data structure.
5658 *
5659 * This routine processes Read Timout Value (RTV) IOCB received as an
5660 * ELS unsolicited event. It first checks the remote port state. If the
5661 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
5662 * state, it invokes the lpfc_els_rsl_reject() routine to send the reject
5663 * response. Otherwise, it sends the Accept(ACC) response to a Read Timeout
5664 * Value (RTV) unsolicited IOCB event.
5665 *
5666 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
5667 * will be incremented by 1 for holding the ndlp and the reference to ndlp
5668 * will be stored into the context1 field of the IOCB for the completion
5669 * callback function to the RPS Accept Response ELS IOCB command.
5670 *
5671 * Return codes
5672 *   0 - Successfully processed rtv iocb (currently always return 0)
5673 **/
5674static int
5675lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5676		 struct lpfc_nodelist *ndlp)
5677{
5678	struct lpfc_hba *phba = vport->phba;
5679	struct ls_rjt stat;
5680	struct RTV_RSP *rtv_rsp;
5681	uint8_t *pcmd;
5682	struct lpfc_iocbq *elsiocb;
5683	uint32_t cmdsize;
5684
5685
5686	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
5687	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
5688		/* reject the unsolicited RPS request and done with it */
5689		goto reject_out;
5690
5691	cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
5692	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5693				     lpfc_max_els_tries, ndlp,
5694				     ndlp->nlp_DID, ELS_CMD_ACC);
5695
5696	if (!elsiocb)
5697		return 1;
5698
5699	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5700		*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5701	pcmd += sizeof(uint32_t); /* Skip past command */
5702
5703	/* use the command's xri in the response */
5704	elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext;  /* Xri / rx_id */
5705	elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
5706
5707	rtv_rsp = (struct RTV_RSP *)pcmd;
5708
5709	/* populate RTV payload */
5710	rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
5711	rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
5712	bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
5713	bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
5714	rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
5715
5716	/* Xmit ELS RLS ACC response tag <ulpIoTag> */
5717	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
5718			 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
5719			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
5720			 "Data: x%x x%x x%x\n",
5721			 elsiocb->iotag, elsiocb->iocb.ulpContext,
5722			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5723			 ndlp->nlp_rpi,
5724			rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
5725	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5726	phba->fc_stat.elsXmitACC++;
5727	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
5728		lpfc_els_free_iocb(phba, elsiocb);
5729	return 0;
5730
5731reject_out:
5732	/* issue rejection response */
5733	stat.un.b.lsRjtRsvd0 = 0;
5734	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5735	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5736	stat.un.b.vendorUnique = 0;
5737	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5738	return 0;
5739}
5740
5741/* lpfc_els_rcv_rps - Process an unsolicited rps iocb
5742 * @vport: pointer to a host virtual N_Port data structure.
5743 * @cmdiocb: pointer to lpfc command iocb data structure.
5744 * @ndlp: pointer to a node-list data structure.
5745 *
5746 * This routine processes Read Port Status (RPS) IOCB received as an
5747 * ELS unsolicited event. It first checks the remote port state. If the
5748 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
5749 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
5750 * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command
5751 * for reading the HBA link statistics. It is for the callback function,
5752 * lpfc_els_rsp_rps_acc(), set to the MBX_READ_LNK_STAT mailbox command
5753 * to actually sending out RPS Accept (ACC) response.
5754 *
5755 * Return codes
5756 *   0 - Successfully processed rps iocb (currently always return 0)
5757 **/
5758static int
5759lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5760		 struct lpfc_nodelist *ndlp)
5761{
5762	struct lpfc_hba *phba = vport->phba;
5763	uint32_t *lp;
5764	uint8_t flag;
5765	LPFC_MBOXQ_t *mbox;
5766	struct lpfc_dmabuf *pcmd;
5767	RPS *rps;
5768	struct ls_rjt stat;
5769
5770	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
5771	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
5772		/* reject the unsolicited RPS request and done with it */
5773		goto reject_out;
5774
5775	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5776	lp = (uint32_t *) pcmd->virt;
5777	flag = (be32_to_cpu(*lp++) & 0xf);
5778	rps = (RPS *) lp;
5779
5780	if ((flag == 0) ||
5781	    ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
5782	    ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
5783				    sizeof(struct lpfc_name)) == 0))) {
5784
5785		printk("Fix me....\n");
5786		dump_stack();
5787		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
5788		if (mbox) {
5789			lpfc_read_lnk_stat(phba, mbox);
5790			mbox->context1 = (void *)((unsigned long)
5791				((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
5792				cmdiocb->iocb.ulpContext)); /* rx_id */
5793			mbox->context2 = lpfc_nlp_get(ndlp);
5794			mbox->vport = vport;
5795			mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
5796			if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
5797				!= MBX_NOT_FINISHED)
5798				/* Mbox completion will send ELS Response */
5799				return 0;
5800			/* Decrement reference count used for the failed mbox
5801			 * command.
5802			 */
5803			lpfc_nlp_put(ndlp);
5804			mempool_free(mbox, phba->mbox_mem_pool);
5805		}
5806	}
5807
5808reject_out:
5809	/* issue rejection response */
5810	stat.un.b.lsRjtRsvd0 = 0;
5811	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5812	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5813	stat.un.b.vendorUnique = 0;
5814	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5815	return 0;
5816}
5817
5818/* lpfc_issue_els_rrq - Process an unsolicited rps iocb
5819 * @vport: pointer to a host virtual N_Port data structure.
5820 * @ndlp: pointer to a node-list data structure.
5821 * @did: DID of the target.
5822 * @rrq: Pointer to the rrq struct.
5823 *
5824 * Build a ELS RRQ command and send it to the target. If the issue_iocb is
5825 * Successful the the completion handler will clear the RRQ.
5826 *
5827 * Return codes
5828 *   0 - Successfully sent rrq els iocb.
5829 *   1 - Failed to send rrq els iocb.
5830 **/
5831static int
5832lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
5833			uint32_t did, struct lpfc_node_rrq *rrq)
5834{
5835	struct lpfc_hba  *phba = vport->phba;
5836	struct RRQ *els_rrq;
5837	IOCB_t *icmd;
5838	struct lpfc_iocbq *elsiocb;
5839	uint8_t *pcmd;
5840	uint16_t cmdsize;
5841	int ret;
5842
5843
5844	if (ndlp != rrq->ndlp)
5845		ndlp = rrq->ndlp;
5846	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
5847		return 1;
5848
5849	/* If ndlp is not NULL, we will bump the reference count on it */
5850	cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ));
5851	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did,
5852				     ELS_CMD_RRQ);
5853	if (!elsiocb)
5854		return 1;
5855
5856	icmd = &elsiocb->iocb;
5857	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5858
5859	/* For RRQ request, remainder of payload is Exchange IDs */
5860	*((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
5861	pcmd += sizeof(uint32_t);
5862	els_rrq = (struct RRQ *) pcmd;
5863
5864	bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]);
5865	bf_set(rrq_rxid, els_rrq, rrq->rxid);
5866	bf_set(rrq_did, els_rrq, vport->fc_myDID);
5867	els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
5868	els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);
5869
5870
5871	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
5872		"Issue RRQ:     did:x%x",
5873		did, rrq->xritag, rrq->rxid);
5874	elsiocb->context_un.rrq = rrq;
5875	elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
5876	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5877
5878	if (ret == IOCB_ERROR) {
5879		lpfc_els_free_iocb(phba, elsiocb);
5880		return 1;
5881	}
5882	return 0;
5883}
5884
5885/**
5886 * lpfc_send_rrq - Sends ELS RRQ if needed.
5887 * @phba: pointer to lpfc hba data structure.
5888 * @rrq: pointer to the active rrq.
5889 *
5890 * This routine will call the lpfc_issue_els_rrq if the rrq is
5891 * still active for the xri. If this function returns a failure then
5892 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq.
5893 *
5894 * Returns 0 Success.
5895 *         1 Failure.
5896 **/
5897int
5898lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
5899{
5900	struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
5901							rrq->nlp_DID);
5902	if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
5903		return lpfc_issue_els_rrq(rrq->vport, ndlp,
5904					 rrq->nlp_DID, rrq);
5905	else
5906		return 1;
5907}
5908
5909/**
5910 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
5911 * @vport: pointer to a host virtual N_Port data structure.
5912 * @cmdsize: size of the ELS command.
5913 * @oldiocb: pointer to the original lpfc command iocb data structure.
5914 * @ndlp: pointer to a node-list data structure.
5915 *
5916 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command.
5917 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
5918 *
5919 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
5920 * will be incremented by 1 for holding the ndlp and the reference to ndlp
5921 * will be stored into the context1 field of the IOCB for the completion
5922 * callback function to the RPL Accept Response ELS command.
5923 *
5924 * Return code
5925 *   0 - Successfully issued ACC RPL ELS command
5926 *   1 - Failed to issue ACC RPL ELS command
5927 **/
5928static int
5929lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
5930		     struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
5931{
5932	struct lpfc_hba *phba = vport->phba;
5933	IOCB_t *icmd, *oldcmd;
5934	RPL_RSP rpl_rsp;
5935	struct lpfc_iocbq *elsiocb;
5936	uint8_t *pcmd;
5937
5938	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
5939				     ndlp->nlp_DID, ELS_CMD_ACC);
5940
5941	if (!elsiocb)
5942		return 1;
5943
5944	icmd = &elsiocb->iocb;
5945	oldcmd = &oldiocb->iocb;
5946	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
5947	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
5948
5949	pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5950	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5951	pcmd += sizeof(uint16_t);
5952	*((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
5953	pcmd += sizeof(uint16_t);
5954
5955	/* Setup the RPL ACC payload */
5956	rpl_rsp.listLen = be32_to_cpu(1);
5957	rpl_rsp.index = 0;
5958	rpl_rsp.port_num_blk.portNum = 0;
5959	rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
5960	memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
5961	    sizeof(struct lpfc_name));
5962	memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
5963	/* Xmit ELS RPL ACC response tag <ulpIoTag> */
5964	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5965			 "0120 Xmit ELS RPL ACC response tag x%x "
5966			 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
5967			 "rpi x%x\n",
5968			 elsiocb->iotag, elsiocb->iocb.ulpContext,
5969			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5970			 ndlp->nlp_rpi);
5971	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5972	phba->fc_stat.elsXmitACC++;
5973	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
5974	    IOCB_ERROR) {
5975		lpfc_els_free_iocb(phba, elsiocb);
5976		return 1;
5977	}
5978	return 0;
5979}
5980
5981/**
5982 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb
5983 * @vport: pointer to a host virtual N_Port data structure.
5984 * @cmdiocb: pointer to lpfc command iocb data structure.
5985 * @ndlp: pointer to a node-list data structure.
5986 *
5987 * This routine processes Read Port List (RPL) IOCB received as an ELS
5988 * unsolicited event. It first checks the remote port state. If the remote
5989 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it
5990 * invokes the lpfc_els_rsp_reject() routine to send reject response.
5991 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine
5992 * to accept the RPL.
5993 *
5994 * Return code
5995 *   0 - Successfully processed rpl iocb (currently always return 0)
5996 **/
5997static int
5998lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5999		 struct lpfc_nodelist *ndlp)
6000{
6001	struct lpfc_dmabuf *pcmd;
6002	uint32_t *lp;
6003	uint32_t maxsize;
6004	uint16_t cmdsize;
6005	RPL *rpl;
6006	struct ls_rjt stat;
6007
6008	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
6009	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
6010		/* issue rejection response */
6011		stat.un.b.lsRjtRsvd0 = 0;
6012		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
6013		stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
6014		stat.un.b.vendorUnique = 0;
6015		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
6016			NULL);
6017		/* rejected the unsolicited RPL request and done with it */
6018		return 0;
6019	}
6020
6021	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6022	lp = (uint32_t *) pcmd->virt;
6023	rpl = (RPL *) (lp + 1);
6024	maxsize = be32_to_cpu(rpl->maxsize);
6025
6026	/* We support only one port */
6027	if ((rpl->index == 0) &&
6028	    ((maxsize == 0) ||
6029	     ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
6030		cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
6031	} else {
6032		cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
6033	}
6034	lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
6035
6036	return 0;
6037}
6038
6039/**
6040 * lpfc_els_rcv_farp - Process an unsolicited farp request els command
6041 * @vport: pointer to a virtual N_Port data structure.
6042 * @cmdiocb: pointer to lpfc command iocb data structure.
6043 * @ndlp: pointer to a node-list data structure.
6044 *
6045 * This routine processes Fibre Channel Address Resolution Protocol
6046 * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
6047 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
6048 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the
6049 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the
6050 * remote PortName is compared against the FC PortName stored in the @vport
6051 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is
6052 * compared against the FC NodeName stored in the @vport data structure.
6053 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the
6054 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is
6055 * invoked to send out FARP Response to the remote node. Before sending the
6056 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP
6057 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi()
6058 * routine is invoked to log into the remote port first.
6059 *
6060 * Return code
6061 *   0 - Either the FARP Match Mode not supported or successfully processed
6062 **/
6063static int
6064lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6065		  struct lpfc_nodelist *ndlp)
6066{
6067	struct lpfc_dmabuf *pcmd;
6068	uint32_t *lp;
6069	IOCB_t *icmd;
6070	FARP *fp;
6071	uint32_t cmd, cnt, did;
6072
6073	icmd = &cmdiocb->iocb;
6074	did = icmd->un.elsreq64.remoteID;
6075	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6076	lp = (uint32_t *) pcmd->virt;
6077
6078	cmd = *lp++;
6079	fp = (FARP *) lp;
6080	/* FARP-REQ received from DID <did> */
6081	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6082			 "0601 FARP-REQ received from DID x%x\n", did);
6083	/* We will only support match on WWPN or WWNN */
6084	if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
6085		return 0;
6086	}
6087
6088	cnt = 0;
6089	/* If this FARP command is searching for my portname */
6090	if (fp->Mflags & FARP_MATCH_PORT) {
6091		if (memcmp(&fp->RportName, &vport->fc_portname,
6092			   sizeof(struct lpfc_name)) == 0)
6093			cnt = 1;
6094	}
6095
6096	/* If this FARP command is searching for my nodename */
6097	if (fp->Mflags & FARP_MATCH_NODE) {
6098		if (memcmp(&fp->RnodeName, &vport->fc_nodename,
6099			   sizeof(struct lpfc_name)) == 0)
6100			cnt = 1;
6101	}
6102
6103	if (cnt) {
6104		if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
6105		   (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
6106			/* Log back into the node before sending the FARP. */
6107			if (fp->Rflags & FARP_REQUEST_PLOGI) {
6108				ndlp->nlp_prev_state = ndlp->nlp_state;
6109				lpfc_nlp_set_state(vport, ndlp,
6110						   NLP_STE_PLOGI_ISSUE);
6111				lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
6112			}
6113
6114			/* Send a FARP response to that node */
6115			if (fp->Rflags & FARP_REQUEST_FARPR)
6116				lpfc_issue_els_farpr(vport, did, 0);
6117		}
6118	}
6119	return 0;
6120}
6121
6122/**
6123 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb
6124 * @vport: pointer to a host virtual N_Port data structure.
6125 * @cmdiocb: pointer to lpfc command iocb data structure.
6126 * @ndlp: pointer to a node-list data structure.
6127 *
6128 * This routine processes Fibre Channel Address Resolution Protocol
6129 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
6130 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept
6131 * the FARP response request.
6132 *
6133 * Return code
6134 *   0 - Successfully processed FARPR IOCB (currently always return 0)
6135 **/
6136static int
6137lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6138		   struct lpfc_nodelist  *ndlp)
6139{
6140	struct lpfc_dmabuf *pcmd;
6141	uint32_t *lp;
6142	IOCB_t *icmd;
6143	uint32_t cmd, did;
6144
6145	icmd = &cmdiocb->iocb;
6146	did = icmd->un.elsreq64.remoteID;
6147	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6148	lp = (uint32_t *) pcmd->virt;
6149
6150	cmd = *lp++;
6151	/* FARP-RSP received from DID <did> */
6152	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6153			 "0600 FARP-RSP received from DID x%x\n", did);
6154	/* ACCEPT the Farp resp request */
6155	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
6156
6157	return 0;
6158}
6159
6160/**
6161 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command
6162 * @vport: pointer to a host virtual N_Port data structure.
6163 * @cmdiocb: pointer to lpfc command iocb data structure.
6164 * @fan_ndlp: pointer to a node-list data structure.
6165 *
6166 * This routine processes a Fabric Address Notification (FAN) IOCB
6167 * command received as an ELS unsolicited event. The FAN ELS command will
6168 * only be processed on a physical port (i.e., the @vport represents the
6169 * physical port). The fabric NodeName and PortName from the FAN IOCB are
6170 * compared against those in the phba data structure. If any of those is
6171 * different, the lpfc_initial_flogi() routine is invoked to initialize
6172 * Fabric Login (FLOGI) to the fabric to start the discover over. Otherwise,
6173 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
6174 * is invoked to register login to the fabric.
6175 *
6176 * Return code
6177 *   0 - Successfully processed fan iocb (currently always return 0).
6178 **/
6179static int
6180lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6181		 struct lpfc_nodelist *fan_ndlp)
6182{
6183	struct lpfc_hba *phba = vport->phba;
6184	uint32_t *lp;
6185	FAN *fp;
6186
6187	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
6188	lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
6189	fp = (FAN *) ++lp;
6190	/* FAN received; Fan does not have a reply sequence */
6191	if ((vport == phba->pport) &&
6192	    (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
6193		if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
6194			    sizeof(struct lpfc_name))) ||
6195		    (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
6196			    sizeof(struct lpfc_name)))) {
6197			/* This port has switched fabrics. FLOGI is required */
6198			lpfc_issue_init_vfi(vport);
6199		} else {
6200			/* FAN verified - skip FLOGI */
6201			vport->fc_myDID = vport->fc_prevDID;
6202			if (phba->sli_rev < LPFC_SLI_REV4)
6203				lpfc_issue_fabric_reglogin(vport);
6204			else {
6205				lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6206					"3138 Need register VFI: (x%x/%x)\n",
6207					vport->fc_prevDID, vport->fc_myDID);
6208				lpfc_issue_reg_vfi(vport);
6209			}
6210		}
6211	}
6212	return 0;
6213}
6214
6215/**
6216 * lpfc_els_timeout - Handler funciton to the els timer
6217 * @ptr: holder for the timer function associated data.
6218 *
6219 * This routine is invoked by the ELS timer after timeout. It posts the ELS
6220 * timer timeout event by setting the WORKER_ELS_TMO bit to the work port
6221 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
6222 * up the worker thread. It is for the worker thread to invoke the routine
6223 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
6224 **/
6225void
6226lpfc_els_timeout(unsigned long ptr)
6227{
6228	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
6229	struct lpfc_hba   *phba = vport->phba;
6230	uint32_t tmo_posted;
6231	unsigned long iflag;
6232
6233	spin_lock_irqsave(&vport->work_port_lock, iflag);
6234	tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
6235	if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
6236		vport->work_port_events |= WORKER_ELS_TMO;
6237	spin_unlock_irqrestore(&vport->work_port_lock, iflag);
6238
6239	if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
6240		lpfc_worker_wake_up(phba);
6241	return;
6242}
6243
6244
6245/**
6246 * lpfc_els_timeout_handler - Process an els timeout event
6247 * @vport: pointer to a virtual N_Port data structure.
6248 *
6249 * This routine is the actual handler function that processes an ELS timeout
6250 * event. It walks the ELS ring to get and abort all the IOCBs (except the
6251 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by
6252 * invoking the lpfc_sli_issue_abort_iotag() routine.
6253 **/
6254void
6255lpfc_els_timeout_handler(struct lpfc_vport *vport)
6256{
6257	struct lpfc_hba  *phba = vport->phba;
6258	struct lpfc_sli_ring *pring;
6259	struct lpfc_iocbq *tmp_iocb, *piocb;
6260	IOCB_t *cmd = NULL;
6261	struct lpfc_dmabuf *pcmd;
6262	uint32_t els_command = 0;
6263	uint32_t timeout;
6264	uint32_t remote_ID = 0xffffffff;
6265	LIST_HEAD(abort_list);
6266
6267
6268	timeout = (uint32_t)(phba->fc_ratov << 1);
6269
6270	pring = &phba->sli.ring[LPFC_ELS_RING];
6271	if ((phba->pport->load_flag & FC_UNLOADING))
6272		return;
6273	spin_lock_irq(&phba->hbalock);
6274	if (phba->sli_rev == LPFC_SLI_REV4)
6275		spin_lock(&pring->ring_lock);
6276
6277	if ((phba->pport->load_flag & FC_UNLOADING)) {
6278		if (phba->sli_rev == LPFC_SLI_REV4)
6279			spin_unlock(&pring->ring_lock);
6280		spin_unlock_irq(&phba->hbalock);
6281		return;
6282	}
6283
6284	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
6285		cmd = &piocb->iocb;
6286
6287		if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
6288		    piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
6289		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
6290			continue;
6291
6292		if (piocb->vport != vport)
6293			continue;
6294
6295		pcmd = (struct lpfc_dmabuf *) piocb->context2;
6296		if (pcmd)
6297			els_command = *(uint32_t *) (pcmd->virt);
6298
6299		if (els_command == ELS_CMD_FARP ||
6300		    els_command == ELS_CMD_FARPR ||
6301		    els_command == ELS_CMD_FDISC)
6302			continue;
6303
6304		if (piocb->drvrTimeout > 0) {
6305			if (piocb->drvrTimeout >= timeout)
6306				piocb->drvrTimeout -= timeout;
6307			else
6308				piocb->drvrTimeout = 0;
6309			continue;
6310		}
6311
6312		remote_ID = 0xffffffff;
6313		if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
6314			remote_ID = cmd->un.elsreq64.remoteID;
6315		else {
6316			struct lpfc_nodelist *ndlp;
6317			ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
6318			if (ndlp && NLP_CHK_NODE_ACT(ndlp))
6319				remote_ID = ndlp->nlp_DID;
6320		}
6321		list_add_tail(&piocb->dlist, &abort_list);
6322	}
6323	if (phba->sli_rev == LPFC_SLI_REV4)
6324		spin_unlock(&pring->ring_lock);
6325	spin_unlock_irq(&phba->hbalock);
6326
6327	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
6328		cmd = &piocb->iocb;
6329		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6330			 "0127 ELS timeout Data: x%x x%x x%x "
6331			 "x%x\n", els_command,
6332			 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
6333		spin_lock_irq(&phba->hbalock);
6334		list_del_init(&piocb->dlist);
6335		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
6336		spin_unlock_irq(&phba->hbalock);
6337	}
6338
6339	if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
6340		if (!(phba->pport->load_flag & FC_UNLOADING))
6341			mod_timer(&vport->els_tmofunc,
6342				  jiffies + msecs_to_jiffies(1000 * timeout));
6343}
6344
6345/**
6346 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport
6347 * @vport: pointer to a host virtual N_Port data structure.
6348 *
6349 * This routine is used to clean up all the outstanding ELS commands on a
6350 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport()
6351 * routine. After that, it walks the ELS transmit queue to remove all the
6352 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For
6353 * the IOCBs with a non-NULL completion callback function, the callback
6354 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
6355 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
6356 * callback function, the IOCB will simply be released. Finally, it walks
6357 * the ELS transmit completion queue to issue an abort IOCB to any transmit
6358 * completion queue IOCB that is associated with the @vport and is not
6359 * an IOCB from libdfc (i.e., the management plane IOCBs that are not
6360 * part of the discovery state machine) out to HBA by invoking the
6361 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the
6362 * abort IOCB to any transmit completion queueed IOCB, it does not guarantee
6363 * the IOCBs are aborted when this function returns.
6364 **/
6365void
6366lpfc_els_flush_cmd(struct lpfc_vport *vport)
6367{
6368	LIST_HEAD(abort_list);
6369	struct lpfc_hba  *phba = vport->phba;
6370	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6371	struct lpfc_iocbq *tmp_iocb, *piocb;
6372	IOCB_t *cmd = NULL;
6373
6374	lpfc_fabric_abort_vport(vport);
6375	/*
6376	 * For SLI3, only the hbalock is required.  But SLI4 needs to coordinate
6377	 * with the ring insert operation.  Because lpfc_sli_issue_abort_iotag
6378	 * ultimately grabs the ring_lock, the driver must splice the list into
6379	 * a working list and release the locks before calling the abort.
6380	 */
6381	spin_lock_irq(&phba->hbalock);
6382	if (phba->sli_rev == LPFC_SLI_REV4)
6383		spin_lock(&pring->ring_lock);
6384
6385	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
6386		if (piocb->iocb_flag & LPFC_IO_LIBDFC)
6387			continue;
6388
6389		if (piocb->vport != vport)
6390			continue;
6391		list_add_tail(&piocb->dlist, &abort_list);
6392	}
6393	if (phba->sli_rev == LPFC_SLI_REV4)
6394		spin_unlock(&pring->ring_lock);
6395	spin_unlock_irq(&phba->hbalock);
6396	/* Abort each iocb on the aborted list and remove the dlist links. */
6397	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
6398		spin_lock_irq(&phba->hbalock);
6399		list_del_init(&piocb->dlist);
6400		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
6401		spin_unlock_irq(&phba->hbalock);
6402	}
6403	if (!list_empty(&abort_list))
6404		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6405				 "3387 abort list for txq not empty\n");
6406	INIT_LIST_HEAD(&abort_list);
6407
6408	spin_lock_irq(&phba->hbalock);
6409	if (phba->sli_rev == LPFC_SLI_REV4)
6410		spin_lock(&pring->ring_lock);
6411
6412	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
6413		cmd = &piocb->iocb;
6414
6415		if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
6416			continue;
6417		}
6418
6419		/* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
6420		if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
6421		    cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
6422		    cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
6423		    cmd->ulpCommand == CMD_ABORT_XRI_CN)
6424			continue;
6425
6426		if (piocb->vport != vport)
6427			continue;
6428
6429		list_del_init(&piocb->list);
6430		list_add_tail(&piocb->list, &abort_list);
6431	}
6432	if (phba->sli_rev == LPFC_SLI_REV4)
6433		spin_unlock(&pring->ring_lock);
6434	spin_unlock_irq(&phba->hbalock);
6435
6436	/* Cancell all the IOCBs from the completions list */
6437	lpfc_sli_cancel_iocbs(phba, &abort_list,
6438			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
6439
6440	return;
6441}
6442
6443/**
6444 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA
6445 * @phba: pointer to lpfc hba data structure.
6446 *
6447 * This routine is used to clean up all the outstanding ELS commands on a
6448 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba()
6449 * routine. After that, it walks the ELS transmit queue to remove all the
6450 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For
6451 * the IOCBs with the completion callback function associated, the callback
6452 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
6453 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion
6454 * callback function associated, the IOCB will simply be released. Finally,
6455 * it walks the ELS transmit completion queue to issue an abort IOCB to any
6456 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the
6457 * management plane IOCBs that are not part of the discovery state machine)
6458 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine.
6459 **/
6460void
6461lpfc_els_flush_all_cmd(struct lpfc_hba  *phba)
6462{
6463	struct lpfc_vport *vport;
6464	list_for_each_entry(vport, &phba->port_list, listentry)
6465		lpfc_els_flush_cmd(vport);
6466
6467	return;
6468}
6469
6470/**
6471 * lpfc_send_els_failure_event - Posts an ELS command failure event
6472 * @phba: Pointer to hba context object.
6473 * @cmdiocbp: Pointer to command iocb which reported error.
6474 * @rspiocbp: Pointer to response iocb which reported error.
6475 *
6476 * This function sends an event when there is an ELS command
6477 * failure.
6478 **/
6479void
6480lpfc_send_els_failure_event(struct lpfc_hba *phba,
6481			struct lpfc_iocbq *cmdiocbp,
6482			struct lpfc_iocbq *rspiocbp)
6483{
6484	struct lpfc_vport *vport = cmdiocbp->vport;
6485	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6486	struct lpfc_lsrjt_event lsrjt_event;
6487	struct lpfc_fabric_event_header fabric_event;
6488	struct ls_rjt stat;
6489	struct lpfc_nodelist *ndlp;
6490	uint32_t *pcmd;
6491
6492	ndlp = cmdiocbp->context1;
6493	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
6494		return;
6495
6496	if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
6497		lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
6498		lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
6499		memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
6500			sizeof(struct lpfc_name));
6501		memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
6502			sizeof(struct lpfc_name));
6503		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
6504			cmdiocbp->context2)->virt);
6505		lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
6506		stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
6507		lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
6508		lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
6509		fc_host_post_vendor_event(shost,
6510			fc_get_event_number(),
6511			sizeof(lsrjt_event),
6512			(char *)&lsrjt_event,
6513			LPFC_NL_VENDOR_ID);
6514		return;
6515	}
6516	if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
6517		(rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) {
6518		fabric_event.event_type = FC_REG_FABRIC_EVENT;
6519		if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY)
6520			fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
6521		else
6522			fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
6523		memcpy(fabric_event.wwpn, &ndlp->nlp_portname,
6524			sizeof(struct lpfc_name));
6525		memcpy(fabric_event.wwnn, &ndlp->nlp_nodename,
6526			sizeof(struct lpfc_name));
6527		fc_host_post_vendor_event(shost,
6528			fc_get_event_number(),
6529			sizeof(fabric_event),
6530			(char *)&fabric_event,
6531			LPFC_NL_VENDOR_ID);
6532		return;
6533	}
6534
6535}
6536
6537/**
6538 * lpfc_send_els_event - Posts unsolicited els event
6539 * @vport: Pointer to vport object.
6540 * @ndlp: Pointer FC node object.
6541 * @cmd: ELS command code.
6542 *
6543 * This function posts an event when there is an incoming
6544 * unsolicited ELS command.
6545 **/
6546static void
6547lpfc_send_els_event(struct lpfc_vport *vport,
6548		    struct lpfc_nodelist *ndlp,
6549		    uint32_t *payload)
6550{
6551	struct lpfc_els_event_header *els_data = NULL;
6552	struct lpfc_logo_event *logo_data = NULL;
6553	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6554
6555	if (*payload == ELS_CMD_LOGO) {
6556		logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
6557		if (!logo_data) {
6558			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6559				"0148 Failed to allocate memory "
6560				"for LOGO event\n");
6561			return;
6562		}
6563		els_data = &logo_data->header;
6564	} else {
6565		els_data = kmalloc(sizeof(struct lpfc_els_event_header),
6566			GFP_KERNEL);
6567		if (!els_data) {
6568			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6569				"0149 Failed to allocate memory "
6570				"for ELS event\n");
6571			return;
6572		}
6573	}
6574	els_data->event_type = FC_REG_ELS_EVENT;
6575	switch (*payload) {
6576	case ELS_CMD_PLOGI:
6577		els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
6578		break;
6579	case ELS_CMD_PRLO:
6580		els_data->subcategory = LPFC_EVENT_PRLO_RCV;
6581		break;
6582	case ELS_CMD_ADISC:
6583		els_data->subcategory = LPFC_EVENT_ADISC_RCV;
6584		break;
6585	case ELS_CMD_LOGO:
6586		els_data->subcategory = LPFC_EVENT_LOGO_RCV;
6587		/* Copy the WWPN in the LOGO payload */
6588		memcpy(logo_data->logo_wwpn, &payload[2],
6589			sizeof(struct lpfc_name));
6590		break;
6591	default:
6592		kfree(els_data);
6593		return;
6594	}
6595	memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
6596	memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
6597	if (*payload == ELS_CMD_LOGO) {
6598		fc_host_post_vendor_event(shost,
6599			fc_get_event_number(),
6600			sizeof(struct lpfc_logo_event),
6601			(char *)logo_data,
6602			LPFC_NL_VENDOR_ID);
6603		kfree(logo_data);
6604	} else {
6605		fc_host_post_vendor_event(shost,
6606			fc_get_event_number(),
6607			sizeof(struct lpfc_els_event_header),
6608			(char *)els_data,
6609			LPFC_NL_VENDOR_ID);
6610		kfree(els_data);
6611	}
6612
6613	return;
6614}
6615
6616
6617/**
6618 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer
6619 * @phba: pointer to lpfc hba data structure.
6620 * @pring: pointer to a SLI ring.
6621 * @vport: pointer to a host virtual N_Port data structure.
6622 * @elsiocb: pointer to lpfc els command iocb data structure.
6623 *
6624 * This routine is used for processing the IOCB associated with a unsolicited
6625 * event. It first determines whether there is an existing ndlp that matches
6626 * the DID from the unsolicited IOCB. If not, it will create a new one with
6627 * the DID from the unsolicited IOCB. The ELS command from the unsolicited
6628 * IOCB is then used to invoke the proper routine and to set up proper state
6629 * of the discovery state machine.
6630 **/
6631static void
6632lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6633		      struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
6634{
6635	struct Scsi_Host  *shost;
6636	struct lpfc_nodelist *ndlp;
6637	struct ls_rjt stat;
6638	uint32_t *payload;
6639	uint32_t cmd, did, newnode;
6640	uint8_t rjt_exp, rjt_err = 0;
6641	IOCB_t *icmd = &elsiocb->iocb;
6642
6643	if (!vport || !(elsiocb->context2))
6644		goto dropit;
6645
6646	newnode = 0;
6647	payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
6648	cmd = *payload;
6649	if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
6650		lpfc_post_buffer(phba, pring, 1);
6651
6652	did = icmd->un.rcvels.remoteID;
6653	if (icmd->ulpStatus) {
6654		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6655			"RCV Unsol ELS:  status:x%x/x%x did:x%x",
6656			icmd->ulpStatus, icmd->un.ulpWord[4], did);
6657		goto dropit;
6658	}
6659
6660	/* Check to see if link went down during discovery */
6661	if (lpfc_els_chk_latt(vport))
6662		goto dropit;
6663
6664	/* Ignore traffic received during vport shutdown. */
6665	if (vport->load_flag & FC_UNLOADING)
6666		goto dropit;
6667
6668	/* If NPort discovery is delayed drop incoming ELS */
6669	if ((vport->fc_flag & FC_DISC_DELAYED) &&
6670			(cmd != ELS_CMD_PLOGI))
6671		goto dropit;
6672
6673	ndlp = lpfc_findnode_did(vport, did);
6674	if (!ndlp) {
6675		/* Cannot find existing Fabric ndlp, so allocate a new one */
6676		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
6677		if (!ndlp)
6678			goto dropit;
6679
6680		lpfc_nlp_init(vport, ndlp, did);
6681		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
6682		newnode = 1;
6683		if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
6684			ndlp->nlp_type |= NLP_FABRIC;
6685	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
6686		ndlp = lpfc_enable_node(vport, ndlp,
6687					NLP_STE_UNUSED_NODE);
6688		if (!ndlp)
6689			goto dropit;
6690		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
6691		newnode = 1;
6692		if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
6693			ndlp->nlp_type |= NLP_FABRIC;
6694	} else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
6695		/* This is similar to the new node path */
6696		ndlp = lpfc_nlp_get(ndlp);
6697		if (!ndlp)
6698			goto dropit;
6699		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
6700		newnode = 1;
6701	}
6702
6703	phba->fc_stat.elsRcvFrame++;
6704
6705	/*
6706	 * Do not process any unsolicited ELS commands
6707	 * if the ndlp is in DEV_LOSS
6708	 */
6709	if (ndlp->nlp_add_flag & NLP_IN_DEV_LOSS)
6710		goto dropit;
6711
6712	elsiocb->context1 = lpfc_nlp_get(ndlp);
6713	elsiocb->vport = vport;
6714
6715	if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
6716		cmd &= ELS_CMD_MASK;
6717	}
6718	/* ELS command <elsCmd> received from NPORT <did> */
6719	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6720			 "0112 ELS command x%x received from NPORT x%x "
6721			 "Data: x%x x%x x%x x%x\n",
6722			cmd, did, vport->port_state, vport->fc_flag,
6723			vport->fc_myDID, vport->fc_prevDID);
6724	switch (cmd) {
6725	case ELS_CMD_PLOGI:
6726		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6727			"RCV PLOGI:       did:x%x/ste:x%x flg:x%x",
6728			did, vport->port_state, ndlp->nlp_flag);
6729
6730		phba->fc_stat.elsRcvPLOGI++;
6731		ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
6732		if (phba->sli_rev == LPFC_SLI_REV4 &&
6733		    (phba->pport->fc_flag & FC_PT2PT)) {
6734			vport->fc_prevDID = vport->fc_myDID;
6735			/* Our DID needs to be updated before registering
6736			 * the vfi. This is done in lpfc_rcv_plogi but
6737			 * that is called after the reg_vfi.
6738			 */
6739			vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo;
6740			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6741					 "3312 Remote port assigned DID x%x "
6742					 "%x\n", vport->fc_myDID,
6743					 vport->fc_prevDID);
6744		}
6745
6746		lpfc_send_els_event(vport, ndlp, payload);
6747
6748		/* If Nport discovery is delayed, reject PLOGIs */
6749		if (vport->fc_flag & FC_DISC_DELAYED) {
6750			rjt_err = LSRJT_UNABLE_TPC;
6751			rjt_exp = LSEXP_NOTHING_MORE;
6752			break;
6753		}
6754		shost = lpfc_shost_from_vport(vport);
6755		if (vport->port_state < LPFC_DISC_AUTH) {
6756			if (!(phba->pport->fc_flag & FC_PT2PT) ||
6757				(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
6758				rjt_err = LSRJT_UNABLE_TPC;
6759				rjt_exp = LSEXP_NOTHING_MORE;
6760				break;
6761			}
6762			/* We get here, and drop thru, if we are PT2PT with
6763			 * another NPort and the other side has initiated
6764			 * the PLOGI before responding to our FLOGI.
6765			 */
6766			if (phba->sli_rev == LPFC_SLI_REV4 &&
6767			    (phba->fc_topology_changed ||
6768			     vport->fc_myDID != vport->fc_prevDID)) {
6769				lpfc_unregister_fcf_prep(phba);
6770				spin_lock_irq(shost->host_lock);
6771				vport->fc_flag &= ~FC_VFI_REGISTERED;
6772				spin_unlock_irq(shost->host_lock);
6773				phba->fc_topology_changed = 0;
6774				lpfc_issue_reg_vfi(vport);
6775			}
6776		}
6777
6778		spin_lock_irq(shost->host_lock);
6779		ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
6780		spin_unlock_irq(shost->host_lock);
6781
6782		lpfc_disc_state_machine(vport, ndlp, elsiocb,
6783					NLP_EVT_RCV_PLOGI);
6784
6785		break;
6786	case ELS_CMD_FLOGI:
6787		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6788			"RCV FLOGI:       did:x%x/ste:x%x flg:x%x",
6789			did, vport->port_state, ndlp->nlp_flag);
6790
6791		phba->fc_stat.elsRcvFLOGI++;
6792		lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
6793		if (newnode)
6794			lpfc_nlp_put(ndlp);
6795		break;
6796	case ELS_CMD_LOGO:
6797		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6798			"RCV LOGO:        did:x%x/ste:x%x flg:x%x",
6799			did, vport->port_state, ndlp->nlp_flag);
6800
6801		phba->fc_stat.elsRcvLOGO++;
6802		lpfc_send_els_event(vport, ndlp, payload);
6803		if (vport->port_state < LPFC_DISC_AUTH) {
6804			rjt_err = LSRJT_UNABLE_TPC;
6805			rjt_exp = LSEXP_NOTHING_MORE;
6806			break;
6807		}
6808		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
6809		break;
6810	case ELS_CMD_PRLO:
6811		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6812			"RCV PRLO:        did:x%x/ste:x%x flg:x%x",
6813			did, vport->port_state, ndlp->nlp_flag);
6814
6815		phba->fc_stat.elsRcvPRLO++;
6816		lpfc_send_els_event(vport, ndlp, payload);
6817		if (vport->port_state < LPFC_DISC_AUTH) {
6818			rjt_err = LSRJT_UNABLE_TPC;
6819			rjt_exp = LSEXP_NOTHING_MORE;
6820			break;
6821		}
6822		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
6823		break;
6824	case ELS_CMD_RSCN:
6825		phba->fc_stat.elsRcvRSCN++;
6826		lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
6827		if (newnode)
6828			lpfc_nlp_put(ndlp);
6829		break;
6830	case ELS_CMD_ADISC:
6831		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6832			"RCV ADISC:       did:x%x/ste:x%x flg:x%x",
6833			did, vport->port_state, ndlp->nlp_flag);
6834
6835		lpfc_send_els_event(vport, ndlp, payload);
6836		phba->fc_stat.elsRcvADISC++;
6837		if (vport->port_state < LPFC_DISC_AUTH) {
6838			rjt_err = LSRJT_UNABLE_TPC;
6839			rjt_exp = LSEXP_NOTHING_MORE;
6840			break;
6841		}
6842		lpfc_disc_state_machine(vport, ndlp, elsiocb,
6843					NLP_EVT_RCV_ADISC);
6844		break;
6845	case ELS_CMD_PDISC:
6846		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6847			"RCV PDISC:       did:x%x/ste:x%x flg:x%x",
6848			did, vport->port_state, ndlp->nlp_flag);
6849
6850		phba->fc_stat.elsRcvPDISC++;
6851		if (vport->port_state < LPFC_DISC_AUTH) {
6852			rjt_err = LSRJT_UNABLE_TPC;
6853			rjt_exp = LSEXP_NOTHING_MORE;
6854			break;
6855		}
6856		lpfc_disc_state_machine(vport, ndlp, elsiocb,
6857					NLP_EVT_RCV_PDISC);
6858		break;
6859	case ELS_CMD_FARPR:
6860		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6861			"RCV FARPR:       did:x%x/ste:x%x flg:x%x",
6862			did, vport->port_state, ndlp->nlp_flag);
6863
6864		phba->fc_stat.elsRcvFARPR++;
6865		lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
6866		break;
6867	case ELS_CMD_FARP:
6868		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6869			"RCV FARP:        did:x%x/ste:x%x flg:x%x",
6870			did, vport->port_state, ndlp->nlp_flag);
6871
6872		phba->fc_stat.elsRcvFARP++;
6873		lpfc_els_rcv_farp(vport, elsiocb, ndlp);
6874		break;
6875	case ELS_CMD_FAN:
6876		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6877			"RCV FAN:         did:x%x/ste:x%x flg:x%x",
6878			did, vport->port_state, ndlp->nlp_flag);
6879
6880		phba->fc_stat.elsRcvFAN++;
6881		lpfc_els_rcv_fan(vport, elsiocb, ndlp);
6882		break;
6883	case ELS_CMD_PRLI:
6884		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6885			"RCV PRLI:        did:x%x/ste:x%x flg:x%x",
6886			did, vport->port_state, ndlp->nlp_flag);
6887
6888		phba->fc_stat.elsRcvPRLI++;
6889		if (vport->port_state < LPFC_DISC_AUTH) {
6890			rjt_err = LSRJT_UNABLE_TPC;
6891			rjt_exp = LSEXP_NOTHING_MORE;
6892			break;
6893		}
6894		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
6895		break;
6896	case ELS_CMD_LIRR:
6897		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6898			"RCV LIRR:        did:x%x/ste:x%x flg:x%x",
6899			did, vport->port_state, ndlp->nlp_flag);
6900
6901		phba->fc_stat.elsRcvLIRR++;
6902		lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
6903		if (newnode)
6904			lpfc_nlp_put(ndlp);
6905		break;
6906	case ELS_CMD_RLS:
6907		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6908			"RCV RLS:         did:x%x/ste:x%x flg:x%x",
6909			did, vport->port_state, ndlp->nlp_flag);
6910
6911		phba->fc_stat.elsRcvRLS++;
6912		lpfc_els_rcv_rls(vport, elsiocb, ndlp);
6913		if (newnode)
6914			lpfc_nlp_put(ndlp);
6915		break;
6916	case ELS_CMD_RPS:
6917		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6918			"RCV RPS:         did:x%x/ste:x%x flg:x%x",
6919			did, vport->port_state, ndlp->nlp_flag);
6920
6921		phba->fc_stat.elsRcvRPS++;
6922		lpfc_els_rcv_rps(vport, elsiocb, ndlp);
6923		if (newnode)
6924			lpfc_nlp_put(ndlp);
6925		break;
6926	case ELS_CMD_RPL:
6927		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6928			"RCV RPL:         did:x%x/ste:x%x flg:x%x",
6929			did, vport->port_state, ndlp->nlp_flag);
6930
6931		phba->fc_stat.elsRcvRPL++;
6932		lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
6933		if (newnode)
6934			lpfc_nlp_put(ndlp);
6935		break;
6936	case ELS_CMD_RNID:
6937		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6938			"RCV RNID:        did:x%x/ste:x%x flg:x%x",
6939			did, vport->port_state, ndlp->nlp_flag);
6940
6941		phba->fc_stat.elsRcvRNID++;
6942		lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
6943		if (newnode)
6944			lpfc_nlp_put(ndlp);
6945		break;
6946	case ELS_CMD_RTV:
6947		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6948			"RCV RTV:        did:x%x/ste:x%x flg:x%x",
6949			did, vport->port_state, ndlp->nlp_flag);
6950		phba->fc_stat.elsRcvRTV++;
6951		lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
6952		if (newnode)
6953			lpfc_nlp_put(ndlp);
6954		break;
6955	case ELS_CMD_RRQ:
6956		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6957			"RCV RRQ:         did:x%x/ste:x%x flg:x%x",
6958			did, vport->port_state, ndlp->nlp_flag);
6959
6960		phba->fc_stat.elsRcvRRQ++;
6961		lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
6962		if (newnode)
6963			lpfc_nlp_put(ndlp);
6964		break;
6965	case ELS_CMD_ECHO:
6966		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6967			"RCV ECHO:        did:x%x/ste:x%x flg:x%x",
6968			did, vport->port_state, ndlp->nlp_flag);
6969
6970		phba->fc_stat.elsRcvECHO++;
6971		lpfc_els_rcv_echo(vport, elsiocb, ndlp);
6972		if (newnode)
6973			lpfc_nlp_put(ndlp);
6974		break;
6975	case ELS_CMD_REC:
6976			/* receive this due to exchange closed */
6977			rjt_err = LSRJT_UNABLE_TPC;
6978			rjt_exp = LSEXP_INVALID_OX_RX;
6979		break;
6980	default:
6981		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6982			"RCV ELS cmd:     cmd:x%x did:x%x/ste:x%x",
6983			cmd, did, vport->port_state);
6984
6985		/* Unsupported ELS command, reject */
6986		rjt_err = LSRJT_CMD_UNSUPPORTED;
6987		rjt_exp = LSEXP_NOTHING_MORE;
6988
6989		/* Unknown ELS command <elsCmd> received from NPORT <did> */
6990		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6991				 "0115 Unknown ELS command x%x "
6992				 "received from NPORT x%x\n", cmd, did);
6993		if (newnode)
6994			lpfc_nlp_put(ndlp);
6995		break;
6996	}
6997
6998	/* check if need to LS_RJT received ELS cmd */
6999	if (rjt_err) {
7000		memset(&stat, 0, sizeof(stat));
7001		stat.un.b.lsRjtRsnCode = rjt_err;
7002		stat.un.b.lsRjtRsnCodeExp = rjt_exp;
7003		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
7004			NULL);
7005	}
7006
7007	lpfc_nlp_put(elsiocb->context1);
7008	elsiocb->context1 = NULL;
7009	return;
7010
7011dropit:
7012	if (vport && !(vport->load_flag & FC_UNLOADING))
7013		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7014			"0111 Dropping received ELS cmd "
7015			"Data: x%x x%x x%x\n",
7016			icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
7017	phba->fc_stat.elsRcvDrop++;
7018}
7019
7020/**
7021 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
7022 * @phba: pointer to lpfc hba data structure.
7023 * @pring: pointer to a SLI ring.
7024 * @elsiocb: pointer to lpfc els iocb data structure.
7025 *
7026 * This routine is used to process an unsolicited event received from a SLI
7027 * (Service Level Interface) ring. The actual processing of the data buffer
7028 * associated with the unsolicited event is done by invoking the routine
7029 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the
7030 * SLI ring on which the unsolicited event was received.
7031 **/
7032void
7033lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7034		     struct lpfc_iocbq *elsiocb)
7035{
7036	struct lpfc_vport *vport = phba->pport;
7037	IOCB_t *icmd = &elsiocb->iocb;
7038	dma_addr_t paddr;
7039	struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
7040	struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
7041
7042	elsiocb->context1 = NULL;
7043	elsiocb->context2 = NULL;
7044	elsiocb->context3 = NULL;
7045
7046	if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
7047		lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
7048	} else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
7049		   (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
7050		   IOERR_RCV_BUFFER_WAITING) {
7051		phba->fc_stat.NoRcvBuf++;
7052		/* Not enough posted buffers; Try posting more buffers */
7053		if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
7054			lpfc_post_buffer(phba, pring, 0);
7055		return;
7056	}
7057
7058	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
7059	    (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
7060	     icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
7061		if (icmd->unsli3.rcvsli3.vpi == 0xffff)
7062			vport = phba->pport;
7063		else
7064			vport = lpfc_find_vport_by_vpid(phba,
7065						icmd->unsli3.rcvsli3.vpi);
7066	}
7067
7068	/* If there are no BDEs associated
7069	 * with this IOCB, there is nothing to do.
7070	 */
7071	if (icmd->ulpBdeCount == 0)
7072		return;
7073
7074	/* type of ELS cmd is first 32bit word
7075	 * in packet
7076	 */
7077	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
7078		elsiocb->context2 = bdeBuf1;
7079	} else {
7080		paddr = getPaddr(icmd->un.cont64[0].addrHigh,
7081				 icmd->un.cont64[0].addrLow);
7082		elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
7083							     paddr);
7084	}
7085
7086	lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
7087	/*
7088	 * The different unsolicited event handlers would tell us
7089	 * if they are done with "mp" by setting context2 to NULL.
7090	 */
7091	if (elsiocb->context2) {
7092		lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
7093		elsiocb->context2 = NULL;
7094	}
7095
7096	/* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */
7097	if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
7098	    icmd->ulpBdeCount == 2) {
7099		elsiocb->context2 = bdeBuf2;
7100		lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
7101		/* free mp if we are done with it */
7102		if (elsiocb->context2) {
7103			lpfc_in_buf_free(phba, elsiocb->context2);
7104			elsiocb->context2 = NULL;
7105		}
7106	}
7107}
7108
7109/**
7110 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
7111 * @phba: pointer to lpfc hba data structure.
7112 * @vport: pointer to a virtual N_Port data structure.
7113 *
7114 * This routine issues a Port Login (PLOGI) to the Name Server with
7115 * State Change Request (SCR) for a @vport. This routine will create an
7116 * ndlp for the Name Server associated to the @vport if such node does
7117 * not already exist. The PLOGI to Name Server is issued by invoking the
7118 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface
7119 * (FDMI) is configured to the @vport, a FDMI node will be created and
7120 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine.
7121 **/
7122void
7123lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
7124{
7125	struct lpfc_nodelist *ndlp, *ndlp_fdmi;
7126	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7127
7128	/*
7129	 * If lpfc_delay_discovery parameter is set and the clean address
7130	 * bit is cleared and fc fabric parameters chenged, delay FC NPort
7131	 * discovery.
7132	 */
7133	spin_lock_irq(shost->host_lock);
7134	if (vport->fc_flag & FC_DISC_DELAYED) {
7135		spin_unlock_irq(shost->host_lock);
7136		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
7137				"3334 Delay fc port discovery for %d seconds\n",
7138				phba->fc_ratov);
7139		mod_timer(&vport->delayed_disc_tmo,
7140			jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
7141		return;
7142	}
7143	spin_unlock_irq(shost->host_lock);
7144
7145	ndlp = lpfc_findnode_did(vport, NameServer_DID);
7146	if (!ndlp) {
7147		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
7148		if (!ndlp) {
7149			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
7150				lpfc_disc_start(vport);
7151				return;
7152			}
7153			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
7154			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7155					 "0251 NameServer login: no memory\n");
7156			return;
7157		}
7158		lpfc_nlp_init(vport, ndlp, NameServer_DID);
7159	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
7160		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
7161		if (!ndlp) {
7162			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
7163				lpfc_disc_start(vport);
7164				return;
7165			}
7166			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
7167			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7168					"0348 NameServer login: node freed\n");
7169			return;
7170		}
7171	}
7172	ndlp->nlp_type |= NLP_FABRIC;
7173
7174	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
7175
7176	if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
7177		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
7178		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7179				 "0252 Cannot issue NameServer login\n");
7180		return;
7181	}
7182
7183	if (vport->cfg_fdmi_on & LPFC_FDMI_SUPPORT) {
7184		/* If this is the first time, allocate an ndlp and initialize
7185		 * it. Otherwise, make sure the node is enabled and then do the
7186		 * login.
7187		 */
7188		ndlp_fdmi = lpfc_findnode_did(vport, FDMI_DID);
7189		if (!ndlp_fdmi) {
7190			ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
7191						  GFP_KERNEL);
7192			if (ndlp_fdmi) {
7193				lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
7194				ndlp_fdmi->nlp_type |= NLP_FABRIC;
7195			} else
7196				return;
7197		}
7198		if (!NLP_CHK_NODE_ACT(ndlp_fdmi))
7199			ndlp_fdmi = lpfc_enable_node(vport,
7200						     ndlp_fdmi,
7201						     NLP_STE_NPR_NODE);
7202
7203		if (ndlp_fdmi) {
7204			lpfc_nlp_set_state(vport, ndlp_fdmi,
7205					   NLP_STE_PLOGI_ISSUE);
7206			lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, 0);
7207		}
7208	}
7209}
7210
7211/**
7212 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
7213 * @phba: pointer to lpfc hba data structure.
7214 * @pmb: pointer to the driver internal queue element for mailbox command.
7215 *
7216 * This routine is the completion callback function to register new vport
7217 * mailbox command. If the new vport mailbox command completes successfully,
7218 * the fabric registration login shall be performed on physical port (the
7219 * new vport created is actually a physical port, with VPI 0) or the port
7220 * login to Name Server for State Change Request (SCR) will be performed
7221 * on virtual port (real virtual port, with VPI greater than 0).
7222 **/
7223static void
7224lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7225{
7226	struct lpfc_vport *vport = pmb->vport;
7227	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
7228	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
7229	MAILBOX_t *mb = &pmb->u.mb;
7230	int rc;
7231
7232	spin_lock_irq(shost->host_lock);
7233	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
7234	spin_unlock_irq(shost->host_lock);
7235
7236	if (mb->mbxStatus) {
7237		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
7238				"0915 Register VPI failed : Status: x%x"
7239				" upd bit: x%x \n", mb->mbxStatus,
7240				 mb->un.varRegVpi.upd);
7241		if (phba->sli_rev == LPFC_SLI_REV4 &&
7242			mb->un.varRegVpi.upd)
7243			goto mbox_err_exit ;
7244
7245		switch (mb->mbxStatus) {
7246		case 0x11:	/* unsupported feature */
7247		case 0x9603:	/* max_vpi exceeded */
7248		case 0x9602:	/* Link event since CLEAR_LA */
7249			/* giving up on vport registration */
7250			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
7251			spin_lock_irq(shost->host_lock);
7252			vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
7253			spin_unlock_irq(shost->host_lock);
7254			lpfc_can_disctmo(vport);
7255			break;
7256		/* If reg_vpi fail with invalid VPI status, re-init VPI */
7257		case 0x20:
7258			spin_lock_irq(shost->host_lock);
7259			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
7260			spin_unlock_irq(shost->host_lock);
7261			lpfc_init_vpi(phba, pmb, vport->vpi);
7262			pmb->vport = vport;
7263			pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
7264			rc = lpfc_sli_issue_mbox(phba, pmb,
7265				MBX_NOWAIT);
7266			if (rc == MBX_NOT_FINISHED) {
7267				lpfc_printf_vlog(vport,
7268					KERN_ERR, LOG_MBOX,
7269					"2732 Failed to issue INIT_VPI"
7270					" mailbox command\n");
7271			} else {
7272				lpfc_nlp_put(ndlp);
7273				return;
7274			}
7275
7276		default:
7277			/* Try to recover from this error */
7278			if (phba->sli_rev == LPFC_SLI_REV4)
7279				lpfc_sli4_unreg_all_rpis(vport);
7280			lpfc_mbx_unreg_vpi(vport);
7281			spin_lock_irq(shost->host_lock);
7282			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
7283			spin_unlock_irq(shost->host_lock);
7284			if (vport->port_type == LPFC_PHYSICAL_PORT
7285				&& !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
7286				lpfc_issue_init_vfi(vport);
7287			else
7288				lpfc_initial_fdisc(vport);
7289			break;
7290		}
7291	} else {
7292		spin_lock_irq(shost->host_lock);
7293		vport->vpi_state |= LPFC_VPI_REGISTERED;
7294		spin_unlock_irq(shost->host_lock);
7295		if (vport == phba->pport) {
7296			if (phba->sli_rev < LPFC_SLI_REV4)
7297				lpfc_issue_fabric_reglogin(vport);
7298			else {
7299				/*
7300				 * If the physical port is instantiated using
7301				 * FDISC, do not start vport discovery.
7302				 */
7303				if (vport->port_state != LPFC_FDISC)
7304					lpfc_start_fdiscs(phba);
7305				lpfc_do_scr_ns_plogi(phba, vport);
7306			}
7307		} else
7308			lpfc_do_scr_ns_plogi(phba, vport);
7309	}
7310mbox_err_exit:
7311	/* Now, we decrement the ndlp reference count held for this
7312	 * callback function
7313	 */
7314	lpfc_nlp_put(ndlp);
7315
7316	mempool_free(pmb, phba->mbox_mem_pool);
7317	return;
7318}
7319
7320/**
7321 * lpfc_register_new_vport - Register a new vport with a HBA
7322 * @phba: pointer to lpfc hba data structure.
7323 * @vport: pointer to a host virtual N_Port data structure.
7324 * @ndlp: pointer to a node-list data structure.
7325 *
7326 * This routine registers the @vport as a new virtual port with a HBA.
7327 * It is done through a registering vpi mailbox command.
7328 **/
7329void
7330lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
7331			struct lpfc_nodelist *ndlp)
7332{
7333	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7334	LPFC_MBOXQ_t *mbox;
7335
7336	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7337	if (mbox) {
7338		lpfc_reg_vpi(vport, mbox);
7339		mbox->vport = vport;
7340		mbox->context2 = lpfc_nlp_get(ndlp);
7341		mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
7342		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
7343		    == MBX_NOT_FINISHED) {
7344			/* mailbox command not success, decrement ndlp
7345			 * reference count for this command
7346			 */
7347			lpfc_nlp_put(ndlp);
7348			mempool_free(mbox, phba->mbox_mem_pool);
7349
7350			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
7351				"0253 Register VPI: Can't send mbox\n");
7352			goto mbox_err_exit;
7353		}
7354	} else {
7355		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
7356				 "0254 Register VPI: no memory\n");
7357		goto mbox_err_exit;
7358	}
7359	return;
7360
7361mbox_err_exit:
7362	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
7363	spin_lock_irq(shost->host_lock);
7364	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
7365	spin_unlock_irq(shost->host_lock);
7366	return;
7367}
7368
7369/**
7370 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
7371 * @phba: pointer to lpfc hba data structure.
7372 *
7373 * This routine cancels the retry delay timers to all the vports.
7374 **/
7375void
7376lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
7377{
7378	struct lpfc_vport **vports;
7379	struct lpfc_nodelist *ndlp;
7380	uint32_t link_state;
7381	int i;
7382
7383	/* Treat this failure as linkdown for all vports */
7384	link_state = phba->link_state;
7385	lpfc_linkdown(phba);
7386	phba->link_state = link_state;
7387
7388	vports = lpfc_create_vport_work_array(phba);
7389
7390	if (vports) {
7391		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
7392			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
7393			if (ndlp)
7394				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
7395			lpfc_els_flush_cmd(vports[i]);
7396		}
7397		lpfc_destroy_vport_work_array(phba, vports);
7398	}
7399}
7400
7401/**
7402 * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
7403 * @phba: pointer to lpfc hba data structure.
7404 *
7405 * This routine abort all pending discovery commands and
7406 * start a timer to retry FLOGI for the physical port
7407 * discovery.
7408 **/
7409void
7410lpfc_retry_pport_discovery(struct lpfc_hba *phba)
7411{
7412	struct lpfc_nodelist *ndlp;
7413	struct Scsi_Host  *shost;
7414
7415	/* Cancel the all vports retry delay retry timers */
7416	lpfc_cancel_all_vport_retry_delay_timer(phba);
7417
7418	/* If fabric require FLOGI, then re-instantiate physical login */
7419	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
7420	if (!ndlp)
7421		return;
7422
7423	shost = lpfc_shost_from_vport(phba->pport);
7424	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
7425	spin_lock_irq(shost->host_lock);
7426	ndlp->nlp_flag |= NLP_DELAY_TMO;
7427	spin_unlock_irq(shost->host_lock);
7428	ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
7429	phba->pport->port_state = LPFC_FLOGI;
7430	return;
7431}
7432
7433/**
7434 * lpfc_fabric_login_reqd - Check if FLOGI required.
7435 * @phba: pointer to lpfc hba data structure.
7436 * @cmdiocb: pointer to FDISC command iocb.
7437 * @rspiocb: pointer to FDISC response iocb.
7438 *
7439 * This routine checks if a FLOGI is reguired for FDISC
7440 * to succeed.
7441 **/
7442static int
7443lpfc_fabric_login_reqd(struct lpfc_hba *phba,
7444		struct lpfc_iocbq *cmdiocb,
7445		struct lpfc_iocbq *rspiocb)
7446{
7447
7448	if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
7449		(rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
7450		return 0;
7451	else
7452		return 1;
7453}
7454
7455/**
7456 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
7457 * @phba: pointer to lpfc hba data structure.
7458 * @cmdiocb: pointer to lpfc command iocb data structure.
7459 * @rspiocb: pointer to lpfc response iocb data structure.
7460 *
7461 * This routine is the completion callback function to a Fabric Discover
7462 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
7463 * single threaded, each FDISC completion callback function will reset
7464 * the discovery timer for all vports such that the timers will not get
7465 * unnecessary timeout. The function checks the FDISC IOCB status. If error
7466 * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise,the
7467 * vport will set to FC_VPORT_ACTIVE state. It then checks whether the DID
7468 * assigned to the vport has been changed with the completion of the FDISC
7469 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
7470 * are unregistered from the HBA, and then the lpfc_register_new_vport()
7471 * routine is invoked to register new vport with the HBA. Otherwise, the
7472 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
7473 * Server for State Change Request (SCR).
7474 **/
7475static void
7476lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7477		    struct lpfc_iocbq *rspiocb)
7478{
7479	struct lpfc_vport *vport = cmdiocb->vport;
7480	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
7481	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
7482	struct lpfc_nodelist *np;
7483	struct lpfc_nodelist *next_np;
7484	IOCB_t *irsp = &rspiocb->iocb;
7485	struct lpfc_iocbq *piocb;
7486	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
7487	struct serv_parm *sp;
7488	uint8_t fabric_param_changed;
7489
7490	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7491			 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
7492			 irsp->ulpStatus, irsp->un.ulpWord[4],
7493			 vport->fc_prevDID);
7494	/* Since all FDISCs are being single threaded, we
7495	 * must reset the discovery timer for ALL vports
7496	 * waiting to send FDISC when one completes.
7497	 */
7498	list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
7499		lpfc_set_disctmo(piocb->vport);
7500	}
7501
7502	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7503		"FDISC cmpl:      status:x%x/x%x prevdid:x%x",
7504		irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
7505
7506	if (irsp->ulpStatus) {
7507
7508		if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
7509			lpfc_retry_pport_discovery(phba);
7510			goto out;
7511		}
7512
7513		/* Check for retry */
7514		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
7515			goto out;
7516		/* FDISC failed */
7517		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7518				 "0126 FDISC failed. (x%x/x%x)\n",
7519				 irsp->ulpStatus, irsp->un.ulpWord[4]);
7520		goto fdisc_failed;
7521	}
7522	spin_lock_irq(shost->host_lock);
7523	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
7524	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
7525	vport->fc_flag |= FC_FABRIC;
7526	if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
7527		vport->fc_flag |=  FC_PUBLIC_LOOP;
7528	spin_unlock_irq(shost->host_lock);
7529
7530	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
7531	lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
7532	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
7533	if (!prsp)
7534		goto out;
7535	sp = prsp->virt + sizeof(uint32_t);
7536	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
7537	memcpy(&vport->fabric_portname, &sp->portName,
7538		sizeof(struct lpfc_name));
7539	memcpy(&vport->fabric_nodename, &sp->nodeName,
7540		sizeof(struct lpfc_name));
7541	if (fabric_param_changed &&
7542		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
7543		/* If our NportID changed, we need to ensure all
7544		 * remaining NPORTs get unreg_login'ed so we can
7545		 * issue unreg_vpi.
7546		 */
7547		list_for_each_entry_safe(np, next_np,
7548			&vport->fc_nodes, nlp_listp) {
7549			if (!NLP_CHK_NODE_ACT(ndlp) ||
7550			    (np->nlp_state != NLP_STE_NPR_NODE) ||
7551			    !(np->nlp_flag & NLP_NPR_ADISC))
7552				continue;
7553			spin_lock_irq(shost->host_lock);
7554			np->nlp_flag &= ~NLP_NPR_ADISC;
7555			spin_unlock_irq(shost->host_lock);
7556			lpfc_unreg_rpi(vport, np);
7557		}
7558		lpfc_cleanup_pending_mbox(vport);
7559
7560		if (phba->sli_rev == LPFC_SLI_REV4)
7561			lpfc_sli4_unreg_all_rpis(vport);
7562
7563		lpfc_mbx_unreg_vpi(vport);
7564		spin_lock_irq(shost->host_lock);
7565		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
7566		if (phba->sli_rev == LPFC_SLI_REV4)
7567			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
7568		else
7569			vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
7570		spin_unlock_irq(shost->host_lock);
7571	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
7572		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
7573		/*
7574		 * Driver needs to re-reg VPI in order for f/w
7575		 * to update the MAC address.
7576		 */
7577		lpfc_register_new_vport(phba, vport, ndlp);
7578		goto out;
7579	}
7580
7581	if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
7582		lpfc_issue_init_vpi(vport);
7583	else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
7584		lpfc_register_new_vport(phba, vport, ndlp);
7585	else
7586		lpfc_do_scr_ns_plogi(phba, vport);
7587	goto out;
7588fdisc_failed:
7589	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
7590	/* Cancel discovery timer */
7591	lpfc_can_disctmo(vport);
7592	lpfc_nlp_put(ndlp);
7593out:
7594	lpfc_els_free_iocb(phba, cmdiocb);
7595}
7596
7597/**
7598 * lpfc_issue_els_fdisc - Issue a fdisc iocb command
7599 * @vport: pointer to a virtual N_Port data structure.
7600 * @ndlp: pointer to a node-list data structure.
7601 * @retry: number of retries to the command IOCB.
7602 *
7603 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
7604 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
7605 * routine to issue the IOCB, which makes sure only one outstanding fabric
7606 * IOCB will be sent off HBA at any given time.
7607 *
7608 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
7609 * will be incremented by 1 for holding the ndlp and the reference to ndlp
7610 * will be stored into the context1 field of the IOCB for the completion
7611 * callback function to the FDISC ELS command.
7612 *
7613 * Return code
7614 *   0 - Successfully issued fdisc iocb command
7615 *   1 - Failed to issue fdisc iocb command
7616 **/
7617static int
7618lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
7619		     uint8_t retry)
7620{
7621	struct lpfc_hba *phba = vport->phba;
7622	IOCB_t *icmd;
7623	struct lpfc_iocbq *elsiocb;
7624	struct serv_parm *sp;
7625	uint8_t *pcmd;
7626	uint16_t cmdsize;
7627	int did = ndlp->nlp_DID;
7628	int rc;
7629
7630	vport->port_state = LPFC_FDISC;
7631	vport->fc_myDID = 0;
7632	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
7633	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
7634				     ELS_CMD_FDISC);
7635	if (!elsiocb) {
7636		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
7637		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7638				 "0255 Issue FDISC: no IOCB\n");
7639		return 1;
7640	}
7641
7642	icmd = &elsiocb->iocb;
7643	icmd->un.elsreq64.myID = 0;
7644	icmd->un.elsreq64.fl = 1;
7645
7646	/*
7647	 * SLI3 ports require a different context type value than SLI4.
7648	 * Catch SLI3 ports here and override the prep.
7649	 */
7650	if (phba->sli_rev == LPFC_SLI_REV3) {
7651		icmd->ulpCt_h = 1;
7652		icmd->ulpCt_l = 0;
7653	}
7654
7655	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7656	*((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
7657	pcmd += sizeof(uint32_t); /* CSP Word 1 */
7658	memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
7659	sp = (struct serv_parm *) pcmd;
7660	/* Setup CSPs accordingly for Fabric */
7661	sp->cmn.e_d_tov = 0;
7662	sp->cmn.w2.r_a_tov = 0;
7663	sp->cmn.virtual_fabric_support = 0;
7664	sp->cls1.classValid = 0;
7665	sp->cls2.seqDelivery = 1;
7666	sp->cls3.seqDelivery = 1;
7667
7668	pcmd += sizeof(uint32_t); /* CSP Word 2 */
7669	pcmd += sizeof(uint32_t); /* CSP Word 3 */
7670	pcmd += sizeof(uint32_t); /* CSP Word 4 */
7671	pcmd += sizeof(uint32_t); /* Port Name */
7672	memcpy(pcmd, &vport->fc_portname, 8);
7673	pcmd += sizeof(uint32_t); /* Node Name */
7674	pcmd += sizeof(uint32_t); /* Node Name */
7675	memcpy(pcmd, &vport->fc_nodename, 8);
7676
7677	lpfc_set_disctmo(vport);
7678
7679	phba->fc_stat.elsXmitFDISC++;
7680	elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
7681
7682	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7683		"Issue FDISC:     did:x%x",
7684		did, 0, 0);
7685
7686	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
7687	if (rc == IOCB_ERROR) {
7688		lpfc_els_free_iocb(phba, elsiocb);
7689		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
7690		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7691				 "0256 Issue FDISC: Cannot send IOCB\n");
7692		return 1;
7693	}
7694	lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
7695	return 0;
7696}
7697
7698/**
7699 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo
7700 * @phba: pointer to lpfc hba data structure.
7701 * @cmdiocb: pointer to lpfc command iocb data structure.
7702 * @rspiocb: pointer to lpfc response iocb data structure.
7703 *
7704 * This routine is the completion callback function to the issuing of a LOGO
7705 * ELS command off a vport. It frees the command IOCB and then decrement the
7706 * reference count held on ndlp for this completion function, indicating that
7707 * the reference to the ndlp is no long needed. Note that the
7708 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
7709 * callback function and an additional explicit ndlp reference decrementation
7710 * will trigger the actual release of the ndlp.
7711 **/
7712static void
7713lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7714			struct lpfc_iocbq *rspiocb)
7715{
7716	struct lpfc_vport *vport = cmdiocb->vport;
7717	IOCB_t *irsp;
7718	struct lpfc_nodelist *ndlp;
7719	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7720
7721	ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
7722	irsp = &rspiocb->iocb;
7723	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7724		"LOGO npiv cmpl:  status:x%x/x%x did:x%x",
7725		irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
7726
7727	lpfc_els_free_iocb(phba, cmdiocb);
7728	vport->unreg_vpi_cmpl = VPORT_ERROR;
7729
7730	/* Trigger the release of the ndlp after logo */
7731	lpfc_nlp_put(ndlp);
7732
7733	/* NPIV LOGO completes to NPort <nlp_DID> */
7734	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7735			 "2928 NPIV LOGO completes to NPort x%x "
7736			 "Data: x%x x%x x%x x%x\n",
7737			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
7738			 irsp->ulpTimeout, vport->num_disc_nodes);
7739
7740	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
7741		spin_lock_irq(shost->host_lock);
7742		vport->fc_flag &= ~FC_FABRIC;
7743		spin_unlock_irq(shost->host_lock);
7744	}
7745}
7746
7747/**
7748 * lpfc_issue_els_npiv_logo - Issue a logo off a vport
7749 * @vport: pointer to a virtual N_Port data structure.
7750 * @ndlp: pointer to a node-list data structure.
7751 *
7752 * This routine issues a LOGO ELS command to an @ndlp off a @vport.
7753 *
7754 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
7755 * will be incremented by 1 for holding the ndlp and the reference to ndlp
7756 * will be stored into the context1 field of the IOCB for the completion
7757 * callback function to the LOGO ELS command.
7758 *
7759 * Return codes
7760 *   0 - Successfully issued logo off the @vport
7761 *   1 - Failed to issue logo off the @vport
7762 **/
7763int
7764lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
7765{
7766	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7767	struct lpfc_hba  *phba = vport->phba;
7768	IOCB_t *icmd;
7769	struct lpfc_iocbq *elsiocb;
7770	uint8_t *pcmd;
7771	uint16_t cmdsize;
7772
7773	cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
7774	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
7775				     ELS_CMD_LOGO);
7776	if (!elsiocb)
7777		return 1;
7778
7779	icmd = &elsiocb->iocb;
7780	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7781	*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
7782	pcmd += sizeof(uint32_t);
7783
7784	/* Fill in LOGO payload */
7785	*((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
7786	pcmd += sizeof(uint32_t);
7787	memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
7788
7789	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7790		"Issue LOGO npiv  did:x%x flg:x%x",
7791		ndlp->nlp_DID, ndlp->nlp_flag, 0);
7792
7793	elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
7794	spin_lock_irq(shost->host_lock);
7795	ndlp->nlp_flag |= NLP_LOGO_SND;
7796	spin_unlock_irq(shost->host_lock);
7797	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
7798	    IOCB_ERROR) {
7799		spin_lock_irq(shost->host_lock);
7800		ndlp->nlp_flag &= ~NLP_LOGO_SND;
7801		spin_unlock_irq(shost->host_lock);
7802		lpfc_els_free_iocb(phba, elsiocb);
7803		return 1;
7804	}
7805	return 0;
7806}
7807
7808/**
7809 * lpfc_fabric_block_timeout - Handler function to the fabric block timer
7810 * @ptr: holder for the timer function associated data.
7811 *
7812 * This routine is invoked by the fabric iocb block timer after
7813 * timeout. It posts the fabric iocb block timeout event by setting the
7814 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes
7815 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for
7816 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the
7817 * posted event WORKER_FABRIC_BLOCK_TMO.
7818 **/
7819void
7820lpfc_fabric_block_timeout(unsigned long ptr)
7821{
7822	struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
7823	unsigned long iflags;
7824	uint32_t tmo_posted;
7825
7826	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
7827	tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
7828	if (!tmo_posted)
7829		phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
7830	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
7831
7832	if (!tmo_posted)
7833		lpfc_worker_wake_up(phba);
7834	return;
7835}
7836
7837/**
7838 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list
7839 * @phba: pointer to lpfc hba data structure.
7840 *
7841 * This routine issues one fabric iocb from the driver internal list to
7842 * the HBA. It first checks whether it's ready to issue one fabric iocb to
7843 * the HBA (whether there is no outstanding fabric iocb). If so, it shall
7844 * remove one pending fabric iocb from the driver internal list and invokes
7845 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA.
7846 **/
7847static void
7848lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
7849{
7850	struct lpfc_iocbq *iocb;
7851	unsigned long iflags;
7852	int ret;
7853	IOCB_t *cmd;
7854
7855repeat:
7856	iocb = NULL;
7857	spin_lock_irqsave(&phba->hbalock, iflags);
7858	/* Post any pending iocb to the SLI layer */
7859	if (atomic_read(&phba->fabric_iocb_count) == 0) {
7860		list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
7861				 list);
7862		if (iocb)
7863			/* Increment fabric iocb count to hold the position */
7864			atomic_inc(&phba->fabric_iocb_count);
7865	}
7866	spin_unlock_irqrestore(&phba->hbalock, iflags);
7867	if (iocb) {
7868		iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
7869		iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
7870		iocb->iocb_flag |= LPFC_IO_FABRIC;
7871
7872		lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
7873			"Fabric sched1:   ste:x%x",
7874			iocb->vport->port_state, 0, 0);
7875
7876		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
7877
7878		if (ret == IOCB_ERROR) {
7879			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
7880			iocb->fabric_iocb_cmpl = NULL;
7881			iocb->iocb_flag &= ~LPFC_IO_FABRIC;
7882			cmd = &iocb->iocb;
7883			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
7884			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
7885			iocb->iocb_cmpl(phba, iocb, iocb);
7886
7887			atomic_dec(&phba->fabric_iocb_count);
7888			goto repeat;
7889		}
7890	}
7891
7892	return;
7893}
7894
7895/**
7896 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command
7897 * @phba: pointer to lpfc hba data structure.
7898 *
7899 * This routine unblocks the  issuing fabric iocb command. The function
7900 * will clear the fabric iocb block bit and then invoke the routine
7901 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb
7902 * from the driver internal fabric iocb list.
7903 **/
7904void
7905lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
7906{
7907	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
7908
7909	lpfc_resume_fabric_iocbs(phba);
7910	return;
7911}
7912
7913/**
7914 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
7915 * @phba: pointer to lpfc hba data structure.
7916 *
7917 * This routine blocks the issuing fabric iocb for a specified amount of
7918 * time (currently 100 ms). This is done by set the fabric iocb block bit
7919 * and set up a timeout timer for 100ms. When the block bit is set, no more
7920 * fabric iocb will be issued out of the HBA.
7921 **/
7922static void
7923lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
7924{
7925	int blocked;
7926
7927	blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
7928	/* Start a timer to unblock fabric iocbs after 100ms */
7929	if (!blocked)
7930		mod_timer(&phba->fabric_block_timer,
7931			  jiffies + msecs_to_jiffies(100));
7932
7933	return;
7934}
7935
7936/**
7937 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
7938 * @phba: pointer to lpfc hba data structure.
7939 * @cmdiocb: pointer to lpfc command iocb data structure.
7940 * @rspiocb: pointer to lpfc response iocb data structure.
7941 *
7942 * This routine is the callback function that is put to the fabric iocb's
7943 * callback function pointer (iocb->iocb_cmpl). The original iocb's callback
7944 * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
7945 * function first restores and invokes the original iocb's callback function
7946 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
7947 * fabric bound iocb from the driver internal fabric iocb list onto the wire.
7948 **/
7949static void
7950lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7951	struct lpfc_iocbq *rspiocb)
7952{
7953	struct ls_rjt stat;
7954
7955	if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
7956		BUG();
7957
7958	switch (rspiocb->iocb.ulpStatus) {
7959		case IOSTAT_NPORT_RJT:
7960		case IOSTAT_FABRIC_RJT:
7961			if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
7962				lpfc_block_fabric_iocbs(phba);
7963			}
7964			break;
7965
7966		case IOSTAT_NPORT_BSY:
7967		case IOSTAT_FABRIC_BSY:
7968			lpfc_block_fabric_iocbs(phba);
7969			break;
7970
7971		case IOSTAT_LS_RJT:
7972			stat.un.lsRjtError =
7973				be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
7974			if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
7975				(stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
7976				lpfc_block_fabric_iocbs(phba);
7977			break;
7978	}
7979
7980	if (atomic_read(&phba->fabric_iocb_count) == 0)
7981		BUG();
7982
7983	cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
7984	cmdiocb->fabric_iocb_cmpl = NULL;
7985	cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
7986	cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
7987
7988	atomic_dec(&phba->fabric_iocb_count);
7989	if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
7990		/* Post any pending iocbs to HBA */
7991		lpfc_resume_fabric_iocbs(phba);
7992	}
7993}
7994
7995/**
7996 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
7997 * @phba: pointer to lpfc hba data structure.
7998 * @iocb: pointer to lpfc command iocb data structure.
7999 *
8000 * This routine is used as the top-level API for issuing a fabric iocb command
8001 * such as FLOGI and FDISC. To accommodate certain switch fabric, this driver
8002 * function makes sure that only one fabric bound iocb will be outstanding at
8003 * any given time. As such, this function will first check to see whether there
8004 * is already an outstanding fabric iocb on the wire. If so, it will put the
8005 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
8006 * issued later. Otherwise, it will issue the iocb on the wire and update the
8007 * fabric iocb count it indicate that there is one fabric iocb on the wire.
8008 *
8009 * Note, this implementation has a potential sending out fabric IOCBs out of
8010 * order. The problem is caused by the construction of the "ready" boolen does
8011 * not include the condition that the internal fabric IOCB list is empty. As
8012 * such, it is possible a fabric IOCB issued by this routine might be "jump"
8013 * ahead of the fabric IOCBs in the internal list.
8014 *
8015 * Return code
8016 *   IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
8017 *   IOCB_ERROR - failed to issue fabric iocb
8018 **/
8019static int
8020lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
8021{
8022	unsigned long iflags;
8023	int ready;
8024	int ret;
8025
8026	if (atomic_read(&phba->fabric_iocb_count) > 1)
8027		BUG();
8028
8029	spin_lock_irqsave(&phba->hbalock, iflags);
8030	ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
8031		!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
8032
8033	if (ready)
8034		/* Increment fabric iocb count to hold the position */
8035		atomic_inc(&phba->fabric_iocb_count);
8036	spin_unlock_irqrestore(&phba->hbalock, iflags);
8037	if (ready) {
8038		iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
8039		iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
8040		iocb->iocb_flag |= LPFC_IO_FABRIC;
8041
8042		lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
8043			"Fabric sched2:   ste:x%x",
8044			iocb->vport->port_state, 0, 0);
8045
8046		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
8047
8048		if (ret == IOCB_ERROR) {
8049			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
8050			iocb->fabric_iocb_cmpl = NULL;
8051			iocb->iocb_flag &= ~LPFC_IO_FABRIC;
8052			atomic_dec(&phba->fabric_iocb_count);
8053		}
8054	} else {
8055		spin_lock_irqsave(&phba->hbalock, iflags);
8056		list_add_tail(&iocb->list, &phba->fabric_iocb_list);
8057		spin_unlock_irqrestore(&phba->hbalock, iflags);
8058		ret = IOCB_SUCCESS;
8059	}
8060	return ret;
8061}
8062
8063/**
8064 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
8065 * @vport: pointer to a virtual N_Port data structure.
8066 *
8067 * This routine aborts all the IOCBs associated with a @vport from the
8068 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
8069 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
8070 * list, removes each IOCB associated with the @vport off the list, set the
8071 * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function
8072 * associated with the IOCB.
8073 **/
8074static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
8075{
8076	LIST_HEAD(completions);
8077	struct lpfc_hba  *phba = vport->phba;
8078	struct lpfc_iocbq *tmp_iocb, *piocb;
8079
8080	spin_lock_irq(&phba->hbalock);
8081	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
8082				 list) {
8083
8084		if (piocb->vport != vport)
8085			continue;
8086
8087		list_move_tail(&piocb->list, &completions);
8088	}
8089	spin_unlock_irq(&phba->hbalock);
8090
8091	/* Cancel all the IOCBs from the completions list */
8092	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
8093			      IOERR_SLI_ABORTED);
8094}
8095
8096/**
8097 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list
8098 * @ndlp: pointer to a node-list data structure.
8099 *
8100 * This routine aborts all the IOCBs associated with an @ndlp from the
8101 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
8102 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
8103 * list, removes each IOCB associated with the @ndlp off the list, set the
8104 * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function
8105 * associated with the IOCB.
8106 **/
8107void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
8108{
8109	LIST_HEAD(completions);
8110	struct lpfc_hba  *phba = ndlp->phba;
8111	struct lpfc_iocbq *tmp_iocb, *piocb;
8112	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
8113
8114	spin_lock_irq(&phba->hbalock);
8115	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
8116				 list) {
8117		if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
8118
8119			list_move_tail(&piocb->list, &completions);
8120		}
8121	}
8122	spin_unlock_irq(&phba->hbalock);
8123
8124	/* Cancel all the IOCBs from the completions list */
8125	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
8126			      IOERR_SLI_ABORTED);
8127}
8128
8129/**
8130 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
8131 * @phba: pointer to lpfc hba data structure.
8132 *
8133 * This routine aborts all the IOCBs currently on the driver internal
8134 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
8135 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
8136 * list, removes IOCBs off the list, set the status feild to
8137 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
8138 * the IOCB.
8139 **/
8140void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
8141{
8142	LIST_HEAD(completions);
8143
8144	spin_lock_irq(&phba->hbalock);
8145	list_splice_init(&phba->fabric_iocb_list, &completions);
8146	spin_unlock_irq(&phba->hbalock);
8147
8148	/* Cancel all the IOCBs from the completions list */
8149	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
8150			      IOERR_SLI_ABORTED);
8151}
8152
8153/**
8154 * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport
8155 * @vport: pointer to lpfc vport data structure.
8156 *
8157 * This routine is invoked by the vport cleanup for deletions and the cleanup
8158 * for an ndlp on removal.
8159 **/
8160void
8161lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
8162{
8163	struct lpfc_hba *phba = vport->phba;
8164	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8165	unsigned long iflag = 0;
8166
8167	spin_lock_irqsave(&phba->hbalock, iflag);
8168	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
8169	list_for_each_entry_safe(sglq_entry, sglq_next,
8170			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
8171		if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
8172			sglq_entry->ndlp = NULL;
8173	}
8174	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
8175	spin_unlock_irqrestore(&phba->hbalock, iflag);
8176	return;
8177}
8178
8179/**
8180 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
8181 * @phba: pointer to lpfc hba data structure.
8182 * @axri: pointer to the els xri abort wcqe structure.
8183 *
8184 * This routine is invoked by the worker thread to process a SLI4 slow-path
8185 * ELS aborted xri.
8186 **/
8187void
8188lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
8189			  struct sli4_wcqe_xri_aborted *axri)
8190{
8191	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
8192	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
8193	uint16_t lxri = 0;
8194
8195	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8196	unsigned long iflag = 0;
8197	struct lpfc_nodelist *ndlp;
8198	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
8199
8200	spin_lock_irqsave(&phba->hbalock, iflag);
8201	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
8202	list_for_each_entry_safe(sglq_entry, sglq_next,
8203			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
8204		if (sglq_entry->sli4_xritag == xri) {
8205			list_del(&sglq_entry->list);
8206			ndlp = sglq_entry->ndlp;
8207			sglq_entry->ndlp = NULL;
8208			spin_lock(&pring->ring_lock);
8209			list_add_tail(&sglq_entry->list,
8210				&phba->sli4_hba.lpfc_sgl_list);
8211			sglq_entry->state = SGL_FREED;
8212			spin_unlock(&pring->ring_lock);
8213			spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
8214			spin_unlock_irqrestore(&phba->hbalock, iflag);
8215			lpfc_set_rrq_active(phba, ndlp,
8216				sglq_entry->sli4_lxritag,
8217				rxid, 1);
8218
8219			/* Check if TXQ queue needs to be serviced */
8220			if (!(list_empty(&pring->txq)))
8221				lpfc_worker_wake_up(phba);
8222			return;
8223		}
8224	}
8225	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
8226	lxri = lpfc_sli4_xri_inrange(phba, xri);
8227	if (lxri == NO_XRI) {
8228		spin_unlock_irqrestore(&phba->hbalock, iflag);
8229		return;
8230	}
8231	spin_lock(&pring->ring_lock);
8232	sglq_entry = __lpfc_get_active_sglq(phba, lxri);
8233	if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
8234		spin_unlock(&pring->ring_lock);
8235		spin_unlock_irqrestore(&phba->hbalock, iflag);
8236		return;
8237	}
8238	sglq_entry->state = SGL_XRI_ABORTED;
8239	spin_unlock(&pring->ring_lock);
8240	spin_unlock_irqrestore(&phba->hbalock, iflag);
8241	return;
8242}
8243
8244/* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
8245 * @vport: pointer to virtual port object.
8246 * @ndlp: nodelist pointer for the impacted node.
8247 *
8248 * The driver calls this routine in response to an SLI4 XRI ABORT CQE
8249 * or an SLI3 ASYNC_STATUS_CN event from the port.  For either event,
8250 * the driver is required to send a LOGO to the remote node before it
8251 * attempts to recover its login to the remote node.
8252 */
8253void
8254lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
8255			   struct lpfc_nodelist *ndlp)
8256{
8257	struct Scsi_Host *shost;
8258	struct lpfc_hba *phba;
8259	unsigned long flags = 0;
8260
8261	shost = lpfc_shost_from_vport(vport);
8262	phba = vport->phba;
8263	if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
8264		lpfc_printf_log(phba, KERN_INFO,
8265				LOG_SLI, "3093 No rport recovery needed. "
8266				"rport in state 0x%x\n", ndlp->nlp_state);
8267		return;
8268	}
8269	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8270			"3094 Start rport recovery on shost id 0x%x "
8271			"fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
8272			"flags 0x%x\n",
8273			shost->host_no, ndlp->nlp_DID,
8274			vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
8275			ndlp->nlp_flag);
8276	/*
8277	 * The rport is not responding.  Remove the FCP-2 flag to prevent
8278	 * an ADISC in the follow-up recovery code.
8279	 */
8280	spin_lock_irqsave(shost->host_lock, flags);
8281	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
8282	spin_unlock_irqrestore(shost->host_lock, flags);
8283	lpfc_issue_els_logo(vport, ndlp, 0);
8284	lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
8285}
8286
8287