1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for         *
3 * Fibre Channel Host Bus Adapters.                                *
4 * Copyright (C) 2004-2015 Emulex.  All rights reserved.           *
5 * EMULEX and SLI are trademarks of Emulex.                        *
6 * www.emulex.com                                                  *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8 *                                                                 *
9 * This program is free software; you can redistribute it and/or   *
10 * modify it under the terms of version 2 of the GNU General       *
11 * Public License as published by the Free Software Foundation.    *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18 * more details, a copy of which can be found in the file COPYING  *
19 * included with this package.                                     *
20 *******************************************************************/
21
22#include <linux/blkdev.h>
23#include <linux/pci.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26
27#include <scsi/scsi.h>
28#include <scsi/scsi_device.h>
29#include <scsi/scsi_host.h>
30#include <scsi/scsi_transport_fc.h>
31
32#include "lpfc_hw4.h"
33#include "lpfc_hw.h"
34#include "lpfc_sli.h"
35#include "lpfc_sli4.h"
36#include "lpfc_nl.h"
37#include "lpfc_disc.h"
38#include "lpfc_scsi.h"
39#include "lpfc.h"
40#include "lpfc_logmsg.h"
41#include "lpfc_crtn.h"
42#include "lpfc_vport.h"
43#include "lpfc_debugfs.h"
44
45
46/* Called to verify a rcv'ed ADISC was intended for us. */
47static int
48lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
49		 struct lpfc_name *nn, struct lpfc_name *pn)
50{
	/* First, we MUST have an RPI registered */
52	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
53		return 0;
54
	/* Check that the WWNN / WWPN in the ADISC payload match our
	 * internal node table entry for that node.
	 */
58	if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
59		return 0;
60
61	if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
62		return 0;
63
64	/* we match, return success */
65	return 1;
66}
67
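/* Validate the service parameters in a received login payload against our
 * own (vport->fc_sparam): check the class validity bits, clamp the receive
 * data field sizes to ours where needed, and cache the remote WWNN/WWPN in
 * the node.  Returns 1 if the parameters are acceptable, 0 otherwise.
 */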
68int
69lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
70		 struct serv_parm *sp, uint32_t class, int flogi)
71{
72	volatile struct serv_parm *hsp = &vport->fc_sparam;
73	uint16_t hsp_value, ssp_value = 0;
74
75	/*
76	 * The receive data field size and buffer-to-buffer receive data field
77	 * size entries are 16 bits but are represented as two 8-bit fields in
78	 * the driver data structure to account for rsvd bits and other control
	 * bits.  Reconstruct and compare the fields as 16-bit values before
80	 * correcting the byte values.
81	 */
82	if (sp->cls1.classValid) {
83		if (!flogi) {
84			hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) |
85				     hsp->cls1.rcvDataSizeLsb);
86			ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) |
87				     sp->cls1.rcvDataSizeLsb);
88			if (!ssp_value)
89				goto bad_service_param;
90			if (ssp_value > hsp_value) {
91				sp->cls1.rcvDataSizeLsb =
92					hsp->cls1.rcvDataSizeLsb;
93				sp->cls1.rcvDataSizeMsb =
94					hsp->cls1.rcvDataSizeMsb;
95			}
96		}
97	} else if (class == CLASS1)
98		goto bad_service_param;
99	if (sp->cls2.classValid) {
100		if (!flogi) {
101			hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) |
102				     hsp->cls2.rcvDataSizeLsb);
103			ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) |
104				     sp->cls2.rcvDataSizeLsb);
105			if (!ssp_value)
106				goto bad_service_param;
107			if (ssp_value > hsp_value) {
108				sp->cls2.rcvDataSizeLsb =
109					hsp->cls2.rcvDataSizeLsb;
110				sp->cls2.rcvDataSizeMsb =
111					hsp->cls2.rcvDataSizeMsb;
112			}
113		}
114	} else if (class == CLASS2)
115		goto bad_service_param;
116	if (sp->cls3.classValid) {
117		if (!flogi) {
118			hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) |
119				     hsp->cls3.rcvDataSizeLsb);
120			ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) |
121				     sp->cls3.rcvDataSizeLsb);
122			if (!ssp_value)
123				goto bad_service_param;
124			if (ssp_value > hsp_value) {
125				sp->cls3.rcvDataSizeLsb =
126					hsp->cls3.rcvDataSizeLsb;
127				sp->cls3.rcvDataSizeMsb =
128					hsp->cls3.rcvDataSizeMsb;
129			}
130		}
131	} else if (class == CLASS3)
132		goto bad_service_param;
133
134	/*
135	 * Preserve the upper four bits of the MSB from the PLOGI response.
136	 * These bits contain the Buffer-to-Buffer State Change Number
137	 * from the target and need to be passed to the FW.
138	 */
139	hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
140	ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
141	if (ssp_value > hsp_value) {
142		sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
143		sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
144				       (hsp->cmn.bbRcvSizeMsb & 0x0F);
145	}
146
147	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
148	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
149	return 1;
150bad_service_param:
151	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
152			 "0207 Device %x "
153			 "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
154			 "invalid service parameters.  Ignoring device.\n",
155			 ndlp->nlp_DID,
156			 sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
157			 sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
158			 sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
159			 sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
160	return 0;
161}
162
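/* Return a pointer to the payload of the ELS rsp associated with a
 * completed ELS command IOCB, or NULL if lpfc_els_abort already released
 * the command buffer (in which case a local-reject status is forced on
 * the rsp so callers see an error).
 */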
163static void *
164lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
165			struct lpfc_iocbq *rspiocb)
166{
167	struct lpfc_dmabuf *pcmd, *prsp;
168	uint32_t *lp;
169	void     *ptr = NULL;
170	IOCB_t   *irsp;
171
172	irsp = &rspiocb->iocb;
173	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
174
175	/* For lpfc_els_abort, context2 could be zero'ed to delay
176	 * freeing associated memory till after ABTS completes.
177	 */
178	if (pcmd) {
179		prsp =  list_get_first(&pcmd->list, struct lpfc_dmabuf,
180				       list);
181		if (prsp) {
182			lp = (uint32_t *) prsp->virt;
183			ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
184		}
185	} else {
186		/* Force ulpStatus error since we are returning NULL ptr */
187		if (!(irsp->ulpStatus)) {
188			irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
189			irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
190		}
191		ptr = NULL;
192	}
193	return ptr;
194}
195
196
197
198/*
199 * Free resources / clean up outstanding I/Os
 * associated with an LPFC_NODELIST entry. This
201 * routine effectively results in a "software abort".
202 */
203int
204lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
205{
206	LIST_HEAD(abort_list);
207	struct lpfc_sli  *psli = &phba->sli;
208	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
209	struct lpfc_iocbq *iocb, *next_iocb;
210
211	/* Abort outstanding I/O on NPort <nlp_DID> */
212	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
213			 "2819 Abort outstanding I/O on NPort x%x "
214			 "Data: x%x x%x x%x\n",
215			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
216			 ndlp->nlp_rpi);
217	/* Clean up all fabric IOs first.*/
218	lpfc_fabric_abort_nport(ndlp);
219
220	/*
221	 * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list
222	 * of all ELS IOs that need an ABTS.  The IOs need to stay on the
223	 * txcmplq so that the abort operation completes them successfully.
224	 */
225	spin_lock_irq(&phba->hbalock);
226	if (phba->sli_rev == LPFC_SLI_REV4)
227		spin_lock(&pring->ring_lock);
228	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		/* Add to abort_list on NDLP match. */
230		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
231			list_add_tail(&iocb->dlist, &abort_list);
232	}
233	if (phba->sli_rev == LPFC_SLI_REV4)
234		spin_unlock(&pring->ring_lock);
235	spin_unlock_irq(&phba->hbalock);
236
237	/* Abort the targeted IOs and remove them from the abort list. */
238	list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
		spin_lock_irq(&phba->hbalock);
		list_del_init(&iocb->dlist);
		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		spin_unlock_irq(&phba->hbalock);
243	}
244
245	INIT_LIST_HEAD(&abort_list);
246
247	/* Now process the txq */
248	spin_lock_irq(&phba->hbalock);
249	if (phba->sli_rev == LPFC_SLI_REV4)
250		spin_lock(&pring->ring_lock);
251
252	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
253		/* Check to see if iocb matches the nport we are looking for */
254		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
255			list_del_init(&iocb->list);
256			list_add_tail(&iocb->list, &abort_list);
257		}
258	}
259
260	if (phba->sli_rev == LPFC_SLI_REV4)
261		spin_unlock(&pring->ring_lock);
262	spin_unlock_irq(&phba->hbalock);
263
264	/* Cancel all the IOCBs from the completions list */
265	lpfc_sli_cancel_iocbs(phba, &abort_list,
266			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
267
268	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
269	return 0;
270}
271
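/* Process a received PLOGI: validate the service parameters, handle pt2pt
 * NPortId assignment, queue the REG_LOGIN mailbox command and send the ACC
 * (or LS_RJT) response.  Returns 1 if the node is retained, 0 if the PLOGI
 * was rejected.
 */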
272static int
273lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
274	       struct lpfc_iocbq *cmdiocb)
275{
276	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
277	struct lpfc_hba    *phba = vport->phba;
278	struct lpfc_dmabuf *pcmd;
279	uint64_t nlp_portwwn = 0;
280	uint32_t *lp;
281	IOCB_t *icmd;
282	struct serv_parm *sp;
283	LPFC_MBOXQ_t *mbox;
284	struct ls_rjt stat;
285	int rc;
286
287	memset(&stat, 0, sizeof (struct ls_rjt));
288	if (vport->port_state <= LPFC_FDISC) {
289		/* Before responding to PLOGI, check for pt2pt mode.
290		 * If we are pt2pt, with an outstanding FLOGI, abort
291		 * the FLOGI and resend it first.
292		 */
293		if (vport->fc_flag & FC_PT2PT) {
			lpfc_els_abort_flogi(phba);
			if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
296				/* If the other side is supposed to initiate
297				 * the PLOGI anyway, just ACC it now and
298				 * move on with discovery.
299				 */
300				phba->fc_edtov = FF_DEF_EDTOV;
301				phba->fc_ratov = FF_DEF_RATOV;
302				/* Start discovery - this should just do
303				   CLEAR_LA */
304				lpfc_disc_start(vport);
305			} else
306				lpfc_initial_flogi(vport);
307		} else {
308			stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
309			stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
310			lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
311					    ndlp, NULL);
312			return 0;
313		}
314	}
315	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
316	lp = (uint32_t *) pcmd->virt;
317	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
318	if (wwn_to_u64(sp->portName.u.wwn) == 0) {
319		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0140 PLOGI Reject: invalid pname\n");
321		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
322		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
323		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
324			NULL);
325		return 0;
326	}
327	if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
328		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0141 PLOGI Reject: invalid nname\n");
330		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
331		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
332		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
333			NULL);
334		return 0;
335	}
336
337	nlp_portwwn = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	if (lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0) {
		/* Reject this request because of invalid parameters */
340		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
341		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
342		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
343			NULL);
344		return 0;
345	}
346	icmd = &cmdiocb->iocb;
347
348	/* PLOGI chkparm OK */
349	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
350			 "0114 PLOGI chkparm OK Data: x%x x%x x%x "
351			 "x%x x%x x%x\n",
352			 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
353			 ndlp->nlp_rpi, vport->port_state,
354			 vport->fc_flag);
355
356	if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
357		ndlp->nlp_fcp_info |= CLASS2;
358	else
359		ndlp->nlp_fcp_info |= CLASS3;
360
361	ndlp->nlp_class_sup = 0;
362	if (sp->cls1.classValid)
363		ndlp->nlp_class_sup |= FC_COS_CLASS1;
364	if (sp->cls2.classValid)
365		ndlp->nlp_class_sup |= FC_COS_CLASS2;
366	if (sp->cls3.classValid)
367		ndlp->nlp_class_sup |= FC_COS_CLASS3;
368	if (sp->cls4.classValid)
369		ndlp->nlp_class_sup |= FC_COS_CLASS4;
370	ndlp->nlp_maxframe =
371		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
372
373	/* if already logged in, do implicit logout */
374	switch (ndlp->nlp_state) {
375	case  NLP_STE_NPR_NODE:
376		if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
377			break;
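		/* Fall through when an ADISC is expected for this node */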
378	case  NLP_STE_REG_LOGIN_ISSUE:
379	case  NLP_STE_PRLI_ISSUE:
380	case  NLP_STE_UNMAPPED_NODE:
381	case  NLP_STE_MAPPED_NODE:
382		/* lpfc_plogi_confirm_nport skips fabric did, handle it here */
383		if (!(ndlp->nlp_type & NLP_FABRIC)) {
384			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
385					 ndlp, NULL);
386			return 1;
387		}
388		if (nlp_portwwn != 0 &&
389		    nlp_portwwn != wwn_to_u64(sp->portName.u.wwn))
390			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
391					 "0143 PLOGI recv'd from DID: x%x "
392					 "WWPN changed: old %llx new %llx\n",
393					 ndlp->nlp_DID,
394					 (unsigned long long)nlp_portwwn,
395					 (unsigned long long)
396					 wwn_to_u64(sp->portName.u.wwn));
397
398		ndlp->nlp_prev_state = ndlp->nlp_state;
399		/* rport needs to be unregistered first */
400		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
401		break;
402	}
403
404	/* Check for Nport to NPort pt2pt protocol */
405	if ((vport->fc_flag & FC_PT2PT) &&
406	    !(vport->fc_flag & FC_PT2PT_PLOGI)) {
407
408		/* rcv'ed PLOGI decides what our NPortId will be */
409		vport->fc_myDID = icmd->un.rcvels.parmRo;
410		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
411		if (mbox == NULL)
412			goto out;
413		lpfc_config_link(phba, mbox);
414		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
415		mbox->vport = vport;
416		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
417		if (rc == MBX_NOT_FINISHED) {
418			mempool_free(mbox, phba->mbox_mem_pool);
419			goto out;
420		}
421		/*
422		 * For SLI4, the VFI/VPI are registered AFTER the
423		 * Nport with the higher WWPN sends us a PLOGI with
424		 * our assigned NPortId.
425		 */
426		if (phba->sli_rev == LPFC_SLI_REV4)
427			lpfc_issue_reg_vfi(vport);
428
429		lpfc_can_disctmo(vport);
430	}
431	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
432	if (!mbox)
433		goto out;
434
435	/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
436	if (phba->sli_rev == LPFC_SLI_REV4)
437		lpfc_unreg_rpi(vport, ndlp);
438
439	rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
440			    (uint8_t *) sp, mbox, ndlp->nlp_rpi);
441	if (rc) {
442		mempool_free(mbox, phba->mbox_mem_pool);
443		goto out;
444	}
445
	/* The ACC PLOGI rsp command needs to execute first, so queue
	 * this mbox command to be processed later.
448	 */
449	mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
450	/*
451	 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
452	 * command issued in lpfc_cmpl_els_acc().
453	 */
454	mbox->vport = vport;
455	spin_lock_irq(shost->host_lock);
456	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
457	spin_unlock_irq(shost->host_lock);
458
459	/*
	 * If there is an outstanding PLOGI issued, abort it before
	 * sending the ACC rsp for the received PLOGI. If the pending
	 * PLOGI is not canceled here, it will be rejected by the
	 * remote port and retried. On a configuration with a single
	 * discovery thread, this causes a huge delay in discovery and
	 * also leaves multiple state machines running in parallel for
	 * this node.
467	 */
468	if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
469		/* software abort outstanding PLOGI */
470		lpfc_els_abort(phba, ndlp);
471	}
472
	if (vport->port_type == LPFC_NPIV_PORT &&
	    vport->cfg_restrict_login) {
475
		/* In order to preserve RPIs, we want to clean up
477		 * the default RPI the firmware created to rcv
478		 * this ELS request. The only way to do this is
479		 * to register, then unregister the RPI.
480		 */
481		spin_lock_irq(shost->host_lock);
482		ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
483		spin_unlock_irq(shost->host_lock);
484		stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
485		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
486		rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
487			ndlp, mbox);
488		if (rc)
489			mempool_free(mbox, phba->mbox_mem_pool);
490		return 1;
491	}
492	rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
493	if (rc)
494		mempool_free(mbox, phba->mbox_mem_pool);
495	return 1;
496out:
497	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
498	stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
499	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
500	return 0;
501}
502
503/**
504 * lpfc_mbx_cmpl_resume_rpi - Resume RPI completion routine
505 * @phba: pointer to lpfc hba data structure.
506 * @mboxq: pointer to mailbox object
507 *
508 * This routine is invoked to issue a completion to a rcv'ed
509 * ADISC or PDISC after the paused RPI has been resumed.
510 **/
511static void
512lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
513{
514	struct lpfc_vport *vport;
515	struct lpfc_iocbq *elsiocb;
516	struct lpfc_nodelist *ndlp;
517	uint32_t cmd;
518
519	elsiocb = (struct lpfc_iocbq *)mboxq->context1;
520	ndlp = (struct lpfc_nodelist *) mboxq->context2;
521	vport = mboxq->vport;
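	/* The received ELS cmd was stashed in drvrTimeout by lpfc_rcv_padisc */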
522	cmd = elsiocb->drvrTimeout;
523
524	if (cmd == ELS_CMD_ADISC) {
525		lpfc_els_rsp_adisc_acc(vport, elsiocb, ndlp);
526	} else {
527		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb,
528			ndlp, NULL);
529	}
530	kfree(elsiocb);
531	mempool_free(mboxq, phba->mbox_mem_pool);
532}
533
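/* Process a received ADISC or PDISC.  If the node checks out, ACC the
 * request (resuming the RPI first on SLI4 ports); otherwise send an LS_RJT
 * and schedule a PLOGI retry after a one second delay.
 */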
534static int
535lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
536		struct lpfc_iocbq *cmdiocb)
537{
538	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
539	struct lpfc_iocbq  *elsiocb;
540	struct lpfc_dmabuf *pcmd;
541	struct serv_parm   *sp;
542	struct lpfc_name   *pnn, *ppn;
543	struct ls_rjt stat;
544	ADISC *ap;
545	IOCB_t *icmd;
546	uint32_t *lp;
547	uint32_t cmd;
548
549	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
550	lp = (uint32_t *) pcmd->virt;
551
552	cmd = *lp++;
553	if (cmd == ELS_CMD_ADISC) {
554		ap = (ADISC *) lp;
555		pnn = (struct lpfc_name *) & ap->nodeName;
556		ppn = (struct lpfc_name *) & ap->portName;
557	} else {
558		sp = (struct serv_parm *) lp;
559		pnn = (struct lpfc_name *) & sp->nodeName;
560		ppn = (struct lpfc_name *) & sp->portName;
561	}
562
563	icmd = &cmdiocb->iocb;
564	if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
565
566		/*
567		 * As soon as  we send ACC, the remote NPort can
568		 * start sending us data. Thus, for SLI4 we must
569		 * resume the RPI before the ACC goes out.
570		 */
571		if (vport->phba->sli_rev == LPFC_SLI_REV4) {
572			elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
573				GFP_KERNEL);
574			if (elsiocb) {
575
576				/* Save info from cmd IOCB used in rsp */
577				memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
578					sizeof(struct lpfc_iocbq));
579
580				/* Save the ELS cmd */
581				elsiocb->drvrTimeout = cmd;
582
583				lpfc_sli4_resume_rpi(ndlp,
584					lpfc_mbx_cmpl_resume_rpi, elsiocb);
585				goto out;
586			}
587		}
588
589		if (cmd == ELS_CMD_ADISC) {
590			lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
591		} else {
592			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
593				ndlp, NULL);
594		}
595out:
596		/* If we are authenticated, move to the proper state */
597		if (ndlp->nlp_type & NLP_FCP_TARGET)
598			lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
599		else
600			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
601
602		return 1;
603	}
604	/* Reject this request because invalid parameters */
605	stat.un.b.lsRjtRsvd0 = 0;
606	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
607	stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
608	stat.un.b.vendorUnique = 0;
609	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
610
611	/* 1 sec timeout */
612	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
613
614	spin_lock_irq(shost->host_lock);
615	ndlp->nlp_flag |= NLP_DELAY_TMO;
616	spin_unlock_irq(shost->host_lock);
617	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
618	ndlp->nlp_prev_state = ndlp->nlp_state;
619	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
620	return 0;
621}
622
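/* Process a received LOGO or PRLO.  ACC the request, then either recover
 * the fabric login (FDISC/FLOGI) or schedule a delayed PLOGI retry for a
 * non-fabric node, and park the node in NPR state.
 */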
623static int
624lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
625	      struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
626{
627	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
628	struct lpfc_hba    *phba = vport->phba;
629	struct lpfc_vport **vports;
	int i, active_vlink_present = 0;
631
	/* Put ndlp in NPR state with a 1 sec timeout for PLOGI, and ACC the
	 * LOGO.  Only call LOGO ACC for the first LOGO; this avoids sending
	 * unnecessary PLOGIs during LOGO storms from a device.
	 */
636	spin_lock_irq(shost->host_lock);
637	ndlp->nlp_flag |= NLP_LOGO_ACC;
638	spin_unlock_irq(shost->host_lock);
639	if (els_cmd == ELS_CMD_PRLO)
640		lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
641	else
642		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
643	if (ndlp->nlp_DID == Fabric_DID) {
644		if (vport->port_state <= LPFC_FDISC)
645			goto out;
646		lpfc_linkdown_port(vport);
647		spin_lock_irq(shost->host_lock);
648		vport->fc_flag |= FC_VPORT_LOGO_RCVD;
649		spin_unlock_irq(shost->host_lock);
650		vports = lpfc_create_vport_work_array(phba);
651		if (vports) {
652			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
653					i++) {
654				if ((!(vports[i]->fc_flag &
655					FC_VPORT_LOGO_RCVD)) &&
656					(vports[i]->port_state > LPFC_FDISC)) {
657					active_vlink_present = 1;
658					break;
659				}
660			}
661			lpfc_destroy_vport_work_array(phba, vports);
662		}
663
664		if (active_vlink_present) {
665			/*
666			 * If there are other active VLinks present,
667			 * re-instantiate the Vlink using FDISC.
668			 */
669			mod_timer(&ndlp->nlp_delayfunc,
670				  jiffies + msecs_to_jiffies(1000));
671			spin_lock_irq(shost->host_lock);
672			ndlp->nlp_flag |= NLP_DELAY_TMO;
673			spin_unlock_irq(shost->host_lock);
674			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
675			vport->port_state = LPFC_FDISC;
676		} else {
677			spin_lock_irq(shost->host_lock);
678			phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
679			spin_unlock_irq(shost->host_lock);
680			lpfc_retry_pport_discovery(phba);
681		}
682	} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
683		((ndlp->nlp_type & NLP_FCP_TARGET) ||
684		!(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
685		(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
686		/* Only try to re-login if this is NOT a Fabric Node */
687		mod_timer(&ndlp->nlp_delayfunc,
688			  jiffies + msecs_to_jiffies(1000 * 1));
689		spin_lock_irq(shost->host_lock);
690		ndlp->nlp_flag |= NLP_DELAY_TMO;
691		spin_unlock_irq(shost->host_lock);
692
693		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
694	}
695out:
696	ndlp->nlp_prev_state = ndlp->nlp_state;
697	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
698
699	spin_lock_irq(shost->host_lock);
700	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
701	spin_unlock_irq(shost->host_lock);
702	/* The driver has to wait until the ACC completes before it continues
703	 * processing the LOGO.  The action will resume in
704	 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
705	 * unreg_login, the driver waits so the ACC does not get aborted.
706	 */
707	return 0;
708}
709
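/* Parse a received PRLI to update the node's FCP initiator/target role
 * bits and, if an rport is already registered, propagate the role change
 * to the FC transport.
 */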
710static void
711lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
712	      struct lpfc_iocbq *cmdiocb)
713{
714	struct lpfc_dmabuf *pcmd;
715	uint32_t *lp;
716	PRLI *npr;
717	struct fc_rport *rport = ndlp->rport;
718	u32 roles;
719
720	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
721	lp = (uint32_t *) pcmd->virt;
722	npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
723
724	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
725	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
726	ndlp->nlp_flag &= ~NLP_FIRSTBURST;
727	if (npr->prliType == PRLI_FCP_TYPE) {
728		if (npr->initiatorFunc)
729			ndlp->nlp_type |= NLP_FCP_INITIATOR;
730		if (npr->targetFunc) {
731			ndlp->nlp_type |= NLP_FCP_TARGET;
732			if (npr->writeXferRdyDis)
733				ndlp->nlp_flag |= NLP_FIRSTBURST;
734		}
735		if (npr->Retry)
736			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
737	}
738	if (rport) {
739		/* We need to update the rport role values */
740		roles = FC_RPORT_ROLE_UNKNOWN;
741		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
742			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
743		if (ndlp->nlp_type & NLP_FCP_TARGET)
744			roles |= FC_RPORT_ROLE_FCP_TARGET;
745
746		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
747			"rport rolechg:   role:x%x did:x%x flg:x%x",
748			roles, ndlp->nlp_DID, ndlp->nlp_flag);
749
750		fc_remote_port_rolechg(rport, roles);
751	}
752}
753
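/* Decide whether ADISC (rather than PLOGI) should be used to revalidate
 * this node.  Returns 1 and sets NLP_NPR_ADISC when ADISC is appropriate;
 * otherwise clears the flag and returns 0, unregistering the RPI if one
 * is registered.
 */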
754static uint32_t
755lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
756{
757	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
758
759	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
760		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
761		return 0;
762	}
763
764	if (!(vport->fc_flag & FC_PT2PT)) {
765		/* Check config parameter use-adisc or FCP-2 */
766		if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
767		    ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
768		     (ndlp->nlp_type & NLP_FCP_TARGET))) {
769			spin_lock_irq(shost->host_lock);
770			ndlp->nlp_flag |= NLP_NPR_ADISC;
771			spin_unlock_irq(shost->host_lock);
772			return 1;
773		}
774	}
775	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
776	lpfc_unreg_rpi(vport, ndlp);
777	return 0;
778}
779
780/**
781 * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
 * @phba: Pointer to lpfc_hba structure.
 * @vport: Pointer to lpfc_vport structure.
 * @rpi: rpi to be released.
785 *
 * This function will send an unreg_login mailbox command to the firmware
 * to release an rpi.
788 **/
789void
790lpfc_release_rpi(struct lpfc_hba *phba,
791		struct lpfc_vport *vport,
792		uint16_t rpi)
793{
794	LPFC_MBOXQ_t *pmb;
795	int rc;
796
797	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
798			GFP_KERNEL);
799	if (!pmb)
800		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			"2796 mailbox memory allocation failed\n");
802	else {
803		lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
804		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
805		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
806		if (rc == MBX_NOT_FINISHED)
807			mempool_free(pmb, phba->mbox_mem_pool);
808	}
809}
810
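/* Catch-all handler for state/event combinations that should never occur.
 * Releases the RPI if a REG_LOGIN happened to complete, logs the illegal
 * transition and leaves the node state unchanged.
 */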
811static uint32_t
812lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
813		  void *arg, uint32_t evt)
814{
815	struct lpfc_hba *phba;
816	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
817	MAILBOX_t *mb;
818	uint16_t rpi;
819
820	phba = vport->phba;
821	/* Release the RPI if reglogin completing */
822	if (!(phba->pport->load_flag & FC_UNLOADING) &&
823		(evt == NLP_EVT_CMPL_REG_LOGIN) &&
824		(!pmb->u.mb.mbxStatus)) {
		mb = &pmb->u.mb;
		rpi = mb->un.varWords[0];
827		lpfc_release_rpi(phba, vport, rpi);
828	}
829	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
830			 "0271 Illegal State Transition: node x%x "
831			 "event x%x, state x%x Data: x%x x%x\n",
832			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
833			 ndlp->nlp_flag);
834	return ndlp->nlp_state;
835}
836
837static uint32_t
838lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
839		  void *arg, uint32_t evt)
840{
	/* This transition is only legal if we previously
	 * rcv'ed a PLOGI. Since we don't want 2 discovery threads
	 * working on the same NPortID, do nothing for this thread
	 * in order to stop it.
	 */
846	if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
847		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
848			 "0272 Illegal State Transition: node x%x "
849			 "event x%x, state x%x Data: x%x x%x\n",
850			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
851			 ndlp->nlp_flag);
852	}
853	return ndlp->nlp_state;
854}
855
856/* Start of Discovery State Machine routines */
857
858static uint32_t
859lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
860			   void *arg, uint32_t evt)
861{
862	struct lpfc_iocbq *cmdiocb;
863
864	cmdiocb = (struct lpfc_iocbq *) arg;
865
866	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
867		return ndlp->nlp_state;
868	}
869	return NLP_STE_FREED_NODE;
870}
871
872static uint32_t
873lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
874			 void *arg, uint32_t evt)
875{
876	lpfc_issue_els_logo(vport, ndlp, 0);
877	return ndlp->nlp_state;
878}
879
880static uint32_t
881lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
882			  void *arg, uint32_t evt)
883{
884	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
885	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
886
887	spin_lock_irq(shost->host_lock);
888	ndlp->nlp_flag |= NLP_LOGO_ACC;
889	spin_unlock_irq(shost->host_lock);
890	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
891
892	return ndlp->nlp_state;
893}
894
895static uint32_t
896lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
897			   void *arg, uint32_t evt)
898{
899	return NLP_STE_FREED_NODE;
900}
901
902static uint32_t
903lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
904			   void *arg, uint32_t evt)
905{
906	return NLP_STE_FREED_NODE;
907}
908
909static uint32_t
910lpfc_device_recov_unused_node(struct lpfc_vport *vport,
911			struct lpfc_nodelist *ndlp,
912			   void *arg, uint32_t evt)
913{
914	return ndlp->nlp_state;
915}
916
917static uint32_t
918lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
919			   void *arg, uint32_t evt)
920{
921	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
922	struct lpfc_hba   *phba = vport->phba;
923	struct lpfc_iocbq *cmdiocb = arg;
924	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
925	uint32_t *lp = (uint32_t *) pcmd->virt;
926	struct serv_parm *sp = (struct serv_parm *) (lp + 1);
927	struct ls_rjt stat;
928	int port_cmp;
929
930	memset(&stat, 0, sizeof (struct ls_rjt));
931
932	/* For a PLOGI, we only accept if our portname is less
933	 * than the remote portname.
934	 */
935	phba->fc_stat.elsLogiCol++;
936	port_cmp = memcmp(&vport->fc_portname, &sp->portName,
937			  sizeof(struct lpfc_name));
938
939	if (port_cmp >= 0) {
940		/* Reject this request because the remote node will accept
941		   ours */
942		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
943		stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
944		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
945			NULL);
946	} else {
947		if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
948		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
949		    (vport->num_disc_nodes)) {
950			spin_lock_irq(shost->host_lock);
951			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
952			spin_unlock_irq(shost->host_lock);
953			/* Check if there are more PLOGIs to be sent */
954			lpfc_more_plogi(vport);
955			if (vport->num_disc_nodes == 0) {
956				spin_lock_irq(shost->host_lock);
957				vport->fc_flag &= ~FC_NDISC_ACTIVE;
958				spin_unlock_irq(shost->host_lock);
959				lpfc_can_disctmo(vport);
960				lpfc_end_rscn(vport);
961			}
962		}
963	} /* If our portname was less */
964
965	return ndlp->nlp_state;
966}
967
968static uint32_t
969lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
970			  void *arg, uint32_t evt)
971{
972	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
973	struct ls_rjt     stat;
974
975	memset(&stat, 0, sizeof (struct ls_rjt));
976	stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
977	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
978	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
979	return ndlp->nlp_state;
980}
981
982static uint32_t
983lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
984			  void *arg, uint32_t evt)
985{
986	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
987
	/* software abort outstanding PLOGI */
989	lpfc_els_abort(vport->phba, ndlp);
990
991	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
992	return ndlp->nlp_state;
993}
994
995static uint32_t
996lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
997			 void *arg, uint32_t evt)
998{
999	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1000	struct lpfc_hba   *phba = vport->phba;
1001	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1002
1003	/* software abort outstanding PLOGI */
1004	lpfc_els_abort(phba, ndlp);
1005
1006	if (evt == NLP_EVT_RCV_LOGO) {
1007		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
1008	} else {
1009		lpfc_issue_els_logo(vport, ndlp, 0);
1010	}
1011
	/* Put ndlp in NPR state; set the PLOGI timer for 1 sec */
1013	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
1014	spin_lock_irq(shost->host_lock);
1015	ndlp->nlp_flag |= NLP_DELAY_TMO;
1016	spin_unlock_irq(shost->host_lock);
1017	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1018	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
1019	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1020
1021	return ndlp->nlp_state;
1022}
1023
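/* Completion handler for a PLOGI we issued.  On success, validate the
 * returned service parameters and queue the REG_LOGIN mailbox command;
 * on any failure, park the node in NPR state and flag it for deferred
 * removal.
 */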
1024static uint32_t
1025lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
1026			    struct lpfc_nodelist *ndlp,
1027			    void *arg,
1028			    uint32_t evt)
1029{
1030	struct lpfc_hba    *phba = vport->phba;
1031	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1032	struct lpfc_iocbq  *cmdiocb, *rspiocb;
1033	struct lpfc_dmabuf *pcmd, *prsp, *mp;
1034	uint32_t *lp;
1035	IOCB_t *irsp;
1036	struct serv_parm *sp;
1037	LPFC_MBOXQ_t *mbox;
1038
1039	cmdiocb = (struct lpfc_iocbq *) arg;
1040	rspiocb = cmdiocb->context_un.rsp_iocb;
1041
1042	if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
1043		/* Recovery from PLOGI collision logic */
1044		return ndlp->nlp_state;
1045	}
1046
1047	irsp = &rspiocb->iocb;
1048
1049	if (irsp->ulpStatus)
1050		goto out;
1051
1052	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1053
1054	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
1055	if (!prsp)
1056		goto out;
1057
1058	lp = (uint32_t *) prsp->virt;
1059	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
1060
1061	/* Some switches have FDMI servers returning 0 for WWN */
1062	if ((ndlp->nlp_DID != FDMI_DID) &&
1063		(wwn_to_u64(sp->portName.u.wwn) == 0 ||
1064		wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
1065		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1066				 "0142 PLOGI RSP: Invalid WWN.\n");
1067		goto out;
1068	}
1069	if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
1070		goto out;
1071	/* PLOGI chkparm OK */
1072	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1073			 "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
1074			 ndlp->nlp_DID, ndlp->nlp_state,
1075			 ndlp->nlp_flag, ndlp->nlp_rpi);
1076	if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid))
1077		ndlp->nlp_fcp_info |= CLASS2;
1078	else
1079		ndlp->nlp_fcp_info |= CLASS3;
1080
1081	ndlp->nlp_class_sup = 0;
1082	if (sp->cls1.classValid)
1083		ndlp->nlp_class_sup |= FC_COS_CLASS1;
1084	if (sp->cls2.classValid)
1085		ndlp->nlp_class_sup |= FC_COS_CLASS2;
1086	if (sp->cls3.classValid)
1087		ndlp->nlp_class_sup |= FC_COS_CLASS3;
1088	if (sp->cls4.classValid)
1089		ndlp->nlp_class_sup |= FC_COS_CLASS4;
1090	ndlp->nlp_maxframe =
1091		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
1092
1093	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1094	if (!mbox) {
1095		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1096			"0133 PLOGI: no memory for reg_login "
1097			"Data: x%x x%x x%x x%x\n",
1098			ndlp->nlp_DID, ndlp->nlp_state,
1099			ndlp->nlp_flag, ndlp->nlp_rpi);
1100		goto out;
1101	}
1102
1103	lpfc_unreg_rpi(vport, ndlp);
1104
1105	if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
1106			 (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
1107		switch (ndlp->nlp_DID) {
1108		case NameServer_DID:
1109			mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
1110			break;
1111		case FDMI_DID:
1112			mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
1113			break;
1114		default:
1115			ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
1116			mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
1117		}
1118		mbox->context2 = lpfc_nlp_get(ndlp);
1119		mbox->vport = vport;
1120		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
1121		    != MBX_NOT_FINISHED) {
1122			lpfc_nlp_set_state(vport, ndlp,
1123					   NLP_STE_REG_LOGIN_ISSUE);
1124			return ndlp->nlp_state;
1125		}
1126		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
1127			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
		/* drop the node reference taken for the failed mbox
		 * command
1130		 */
1131		lpfc_nlp_put(ndlp);
1132		mp = (struct lpfc_dmabuf *) mbox->context1;
1133		lpfc_mbuf_free(phba, mp->virt, mp->phys);
1134		kfree(mp);
1135		mempool_free(mbox, phba->mbox_mem_pool);
1136
1137		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1138				 "0134 PLOGI: cannot issue reg_login "
1139				 "Data: x%x x%x x%x x%x\n",
1140				 ndlp->nlp_DID, ndlp->nlp_state,
1141				 ndlp->nlp_flag, ndlp->nlp_rpi);
1142	} else {
1143		mempool_free(mbox, phba->mbox_mem_pool);
1144
1145		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1146				 "0135 PLOGI: cannot format reg_login "
1147				 "Data: x%x x%x x%x x%x\n",
1148				 ndlp->nlp_DID, ndlp->nlp_state,
1149				 ndlp->nlp_flag, ndlp->nlp_rpi);
1150	}
1151
1152
1153out:
1154	if (ndlp->nlp_DID == NameServer_DID) {
1155		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1156		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1157				 "0261 Cannot Register NameServer login\n");
1158	}
1159
1160	/*
	 * In case the node reference counter does not go to zero, ensure that
	 * the stale state for the node is not processed.
	 */
1164
1165	ndlp->nlp_prev_state = ndlp->nlp_state;
1166	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1167	spin_lock_irq(shost->host_lock);
1168	ndlp->nlp_flag |= NLP_DEFER_RM;
1169	spin_unlock_irq(shost->host_lock);
1170	return NLP_STE_FREED_NODE;
1171}
1172
1173static uint32_t
1174lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1175			   void *arg, uint32_t evt)
1176{
1177	return ndlp->nlp_state;
1178}
1179
1180static uint32_t
1181lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
1182	struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
1183{
1184	struct lpfc_hba *phba;
1185	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1186	MAILBOX_t *mb = &pmb->u.mb;
1187	uint16_t rpi;
1188
1189	phba = vport->phba;
1190	/* Release the RPI */
1191	if (!(phba->pport->load_flag & FC_UNLOADING) &&
1192		!mb->mbxStatus) {
1193		rpi = pmb->u.mb.un.varWords[0];
1194		lpfc_release_rpi(phba, vport, rpi);
1195	}
1196	return ndlp->nlp_state;
1197}
1198
1199static uint32_t
1200lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1201			   void *arg, uint32_t evt)
1202{
1203	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1204
1205	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1206		spin_lock_irq(shost->host_lock);
1207		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1208		spin_unlock_irq(shost->host_lock);
1209		return ndlp->nlp_state;
1210	} else {
1211		/* software abort outstanding PLOGI */
1212		lpfc_els_abort(vport->phba, ndlp);
1213
1214		lpfc_drop_node(vport, ndlp);
1215		return NLP_STE_FREED_NODE;
1216	}
1217}
1218
1219static uint32_t
1220lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
1221			      struct lpfc_nodelist *ndlp,
1222			      void *arg,
1223			      uint32_t evt)
1224{
1225	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1226	struct lpfc_hba  *phba = vport->phba;
1227
1228	/* Don't do anything that will mess up processing of the
1229	 * previous RSCN.
1230	 */
1231	if (vport->fc_flag & FC_RSCN_DEFERRED)
1232		return ndlp->nlp_state;
1233
1234	/* software abort outstanding PLOGI */
1235	lpfc_els_abort(phba, ndlp);
1236
1237	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
1238	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1239	spin_lock_irq(shost->host_lock);
1240	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1241	spin_unlock_irq(shost->host_lock);
1242
1243	return ndlp->nlp_state;
1244}
1245
1246static uint32_t
1247lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1248			   void *arg, uint32_t evt)
1249{
1250	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
1251	struct lpfc_hba   *phba = vport->phba;
1252	struct lpfc_iocbq *cmdiocb;
1253
1254	/* software abort outstanding ADISC */
1255	lpfc_els_abort(phba, ndlp);
1256
1257	cmdiocb = (struct lpfc_iocbq *) arg;
1258
1259	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
1260		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1261			spin_lock_irq(shost->host_lock);
1262			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1263			spin_unlock_irq(shost->host_lock);
1264			if (vport->num_disc_nodes)
1265				lpfc_more_adisc(vport);
1266		}
1267		return ndlp->nlp_state;
1268	}
1269	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1270	lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1271	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1272
1273	return ndlp->nlp_state;
1274}
1275
1276static uint32_t
1277lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1278			  void *arg, uint32_t evt)
1279{
1280	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1281
1282	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1283	return ndlp->nlp_state;
1284}
1285
1286static uint32_t
1287lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1288			  void *arg, uint32_t evt)
1289{
1290	struct lpfc_hba *phba = vport->phba;
1291	struct lpfc_iocbq *cmdiocb;
1292
1293	cmdiocb = (struct lpfc_iocbq *) arg;
1294
1295	/* software abort outstanding ADISC */
1296	lpfc_els_abort(phba, ndlp);
1297
1298	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1299	return ndlp->nlp_state;
1300}
1301
1302static uint32_t
1303lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
1304			    struct lpfc_nodelist *ndlp,
1305			    void *arg, uint32_t evt)
1306{
1307	struct lpfc_iocbq *cmdiocb;
1308
1309	cmdiocb = (struct lpfc_iocbq *) arg;
1310
1311	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1312	return ndlp->nlp_state;
1313}
1314
1315static uint32_t
1316lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1317			  void *arg, uint32_t evt)
1318{
1319	struct lpfc_iocbq *cmdiocb;
1320
1321	cmdiocb = (struct lpfc_iocbq *) arg;
1322
1323	/* Treat like rcv logo */
1324	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
1325	return ndlp->nlp_state;
1326}
1327
1328static uint32_t
1329lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1330			    struct lpfc_nodelist *ndlp,
1331			    void *arg, uint32_t evt)
1332{
1333	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1334	struct lpfc_hba   *phba = vport->phba;
1335	struct lpfc_iocbq *cmdiocb, *rspiocb;
1336	IOCB_t *irsp;
1337	ADISC *ap;
1338	int rc;
1339
1340	cmdiocb = (struct lpfc_iocbq *) arg;
1341	rspiocb = cmdiocb->context_un.rsp_iocb;
1342
1343	ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1344	irsp = &rspiocb->iocb;
1345
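	/* If the cmd buffer was already freed, lpfc_check_elscmpl_iocb
	 * returned NULL and forced a nonzero ulpStatus, so ap is not
	 * dereferenced below in that case.
	 */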
1346	if ((irsp->ulpStatus) ||
1347	    (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
1348		/* 1 sec timeout */
1349		mod_timer(&ndlp->nlp_delayfunc,
1350			  jiffies + msecs_to_jiffies(1000));
1351		spin_lock_irq(shost->host_lock);
1352		ndlp->nlp_flag |= NLP_DELAY_TMO;
1353		spin_unlock_irq(shost->host_lock);
1354		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1355
1356		memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
1357		memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));
1358
1359		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1360		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1361		lpfc_unreg_rpi(vport, ndlp);
1362		return ndlp->nlp_state;
1363	}
1364
1365	if (phba->sli_rev == LPFC_SLI_REV4) {
1366		rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
1367		if (rc) {
1368			/* Stay in state and retry. */
1369			ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1370			return ndlp->nlp_state;
1371		}
1372	}
1373
1374	if (ndlp->nlp_type & NLP_FCP_TARGET) {
1375		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1376		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1377	} else {
1378		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1379		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1380	}
1381
1382	return ndlp->nlp_state;
1383}
1384
1385static uint32_t
1386lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1387			   void *arg, uint32_t evt)
1388{
1389	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1390
1391	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1392		spin_lock_irq(shost->host_lock);
1393		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1394		spin_unlock_irq(shost->host_lock);
1395		return ndlp->nlp_state;
1396	} else {
1397		/* software abort outstanding ADISC */
1398		lpfc_els_abort(vport->phba, ndlp);
1399
1400		lpfc_drop_node(vport, ndlp);
1401		return NLP_STE_FREED_NODE;
1402	}
1403}
1404
1405static uint32_t
1406lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
1407			      struct lpfc_nodelist *ndlp,
1408			      void *arg,
1409			      uint32_t evt)
1410{
1411	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1412	struct lpfc_hba  *phba = vport->phba;
1413
1414	/* Don't do anything that will mess up processing of the
1415	 * previous RSCN.
1416	 */
1417	if (vport->fc_flag & FC_RSCN_DEFERRED)
1418		return ndlp->nlp_state;
1419
1420	/* software abort outstanding ADISC */
1421	lpfc_els_abort(phba, ndlp);
1422
1423	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1424	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1425	spin_lock_irq(shost->host_lock);
1426	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1427	spin_unlock_irq(shost->host_lock);
1428	lpfc_disc_set_adisc(vport, ndlp);
1429	return ndlp->nlp_state;
1430}
1431
1432static uint32_t
1433lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
1434			      struct lpfc_nodelist *ndlp,
1435			      void *arg,
1436			      uint32_t evt)
1437{
1438	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1439
1440	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1441	return ndlp->nlp_state;
1442}
1443
1444static uint32_t
1445lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
1446			     struct lpfc_nodelist *ndlp,
1447			     void *arg,
1448			     uint32_t evt)
1449{
1450	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1451
1452	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1453	return ndlp->nlp_state;
1454}
1455
1456static uint32_t
1457lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1458			     struct lpfc_nodelist *ndlp,
1459			     void *arg,
1460			     uint32_t evt)
1461{
1462	struct lpfc_hba   *phba = vport->phba;
1463	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1464	LPFC_MBOXQ_t	  *mb;
1465	LPFC_MBOXQ_t	  *nextmb;
1466	struct lpfc_dmabuf *mp;
1467
1470	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1471	if ((mb = phba->sli.mbox_active)) {
1472		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1473		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1474			lpfc_nlp_put(ndlp);
1475			mb->context2 = NULL;
1476			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1477		}
1478	}
1479
1480	spin_lock_irq(&phba->hbalock);
1481	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1482		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1483		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1484			mp = (struct lpfc_dmabuf *) (mb->context1);
1485			if (mp) {
1486				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
1487				kfree(mp);
1488			}
1489			lpfc_nlp_put(ndlp);
1490			list_del(&mb->list);
1491			phba->sli.mboxq_cnt--;
1492			mempool_free(mb, phba->mbox_mem_pool);
1493		}
1494	}
1495	spin_unlock_irq(&phba->hbalock);
1496
1497	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1498	return ndlp->nlp_state;
1499}
1500
1501static uint32_t
1502lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
1503			       struct lpfc_nodelist *ndlp,
1504			       void *arg,
1505			       uint32_t evt)
1506{
1507	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1508
1509	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1510	return ndlp->nlp_state;
1511}
1512
1513static uint32_t
1514lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
1515			     struct lpfc_nodelist *ndlp,
1516			     void *arg,
1517			     uint32_t evt)
1518{
1519	struct lpfc_iocbq *cmdiocb;
1520
1521	cmdiocb = (struct lpfc_iocbq *) arg;
1522	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1523	return ndlp->nlp_state;
1524}
1525
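/* Completion handler for the REG_LOGIN mailbox command.  On success, mark
 * the RPI registered and issue a PRLI (or move a fabric node to UNMAPPED);
 * on failure, drop back to NPR state, scheduling a delayed PLOGI retry
 * unless the HBA is out of RPI resources.
 */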
1526static uint32_t
1527lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1528				  struct lpfc_nodelist *ndlp,
1529				  void *arg,
1530				  uint32_t evt)
1531{
1532	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1533	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1534	MAILBOX_t *mb = &pmb->u.mb;
1535	uint32_t did  = mb->un.varWords[1];
1536
1537	if (mb->mbxStatus) {
1538		/* RegLogin failed */
1539		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1540				"0246 RegLogin failed Data: x%x x%x x%x x%x "
1541				 "x%x\n",
1542				 did, mb->mbxStatus, vport->port_state,
1543				 mb->un.varRegLogin.vpi,
1544				 mb->un.varRegLogin.rpi);
1545		/*
1546		 * If RegLogin failed due to lack of HBA resources do not
1547		 * retry discovery.
1548		 */
1549		if (mb->mbxStatus == MBXERR_RPI_FULL) {
1550			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1551			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1552			return ndlp->nlp_state;
1553		}
1554
		/* Put ndlp in NPR state; set the PLOGI timer for 1 sec */
1556		mod_timer(&ndlp->nlp_delayfunc,
1557			  jiffies + msecs_to_jiffies(1000 * 1));
1558		spin_lock_irq(shost->host_lock);
1559		ndlp->nlp_flag |= NLP_DELAY_TMO;
1560		spin_unlock_irq(shost->host_lock);
1561		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1562
1563		lpfc_issue_els_logo(vport, ndlp, 0);
1564		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1565		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1566		return ndlp->nlp_state;
1567	}
1568
1569	/* SLI4 ports have preallocated logical rpis. */
1570	if (vport->phba->sli_rev < LPFC_SLI_REV4)
1571		ndlp->nlp_rpi = mb->un.varWords[0];
1572
1573	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1574
1575	/* Only if we are not a fabric nport do we issue PRLI */
1576	if (!(ndlp->nlp_type & NLP_FABRIC)) {
1577		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1578		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
1579		lpfc_issue_els_prli(vport, ndlp, 0);
1580	} else {
1581		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1582		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1583	}
1584	return ndlp->nlp_state;
1585}
1586
1587static uint32_t
1588lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
1589			      struct lpfc_nodelist *ndlp,
1590			      void *arg,
1591			      uint32_t evt)
1592{
1593	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1594
1595	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1596		spin_lock_irq(shost->host_lock);
1597		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1598		spin_unlock_irq(shost->host_lock);
1599		return ndlp->nlp_state;
1600	} else {
1601		lpfc_drop_node(vport, ndlp);
1602		return NLP_STE_FREED_NODE;
1603	}
1604}
1605
1606static uint32_t
1607lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
1608				 struct lpfc_nodelist *ndlp,
1609				 void *arg,
1610				 uint32_t evt)
1611{
1612	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1613
1614	/* Don't do anything that will mess up processing of the
1615	 * previous RSCN.
1616	 */
1617	if (vport->fc_flag & FC_RSCN_DEFERRED)
1618		return ndlp->nlp_state;
1619
1620	ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1621	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1622	spin_lock_irq(shost->host_lock);
1623	ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;
1624	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1625	spin_unlock_irq(shost->host_lock);
1626	lpfc_disc_set_adisc(vport, ndlp);
1627	return ndlp->nlp_state;
1628}
1629
1630static uint32_t
1631lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1632			  void *arg, uint32_t evt)
1633{
1634	struct lpfc_iocbq *cmdiocb;
1635
1636	cmdiocb = (struct lpfc_iocbq *) arg;
1637
1638	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1639	return ndlp->nlp_state;
1640}
1641
1642static uint32_t
1643lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1644			 void *arg, uint32_t evt)
1645{
1646	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1647
1648	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1649	return ndlp->nlp_state;
1650}
1651
1652static uint32_t
1653lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1654			 void *arg, uint32_t evt)
1655{
1656	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1657
1658	/* Software abort outstanding PRLI before sending acc */
1659	lpfc_els_abort(vport->phba, ndlp);
1660
1661	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1662	return ndlp->nlp_state;
1663}
1664
1665static uint32_t
1666lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1667			   void *arg, uint32_t evt)
1668{
1669	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1670
1671	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1672	return ndlp->nlp_state;
1673}
1674
/* This routine is invoked when we rcv a PRLO request from an nport
1676 * we are logged into.  We should send back a PRLO rsp setting the
1677 * appropriate bits.
1678 * NEXT STATE = PRLI_ISSUE
1679 */
1680static uint32_t
1681lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1682			 void *arg, uint32_t evt)
1683{
1684	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1685
1686	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1687	return ndlp->nlp_state;
1688}
1689
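/* Completion handler for a PRLI we issued.  Records the remote port's FCP
 * roles and moves the node to MAPPED (target) or UNMAPPED state; on
 * restricted-login NPIV ports, nodes that are not FCP targets are logged
 * out instead.
 */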
1690static uint32_t
1691lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1692			  void *arg, uint32_t evt)
1693{
1694	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1695	struct lpfc_iocbq *cmdiocb, *rspiocb;
1696	struct lpfc_hba   *phba = vport->phba;
1697	IOCB_t *irsp;
1698	PRLI *npr;
1699
1700	cmdiocb = (struct lpfc_iocbq *) arg;
1701	rspiocb = cmdiocb->context_un.rsp_iocb;
1702	npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1703
1704	irsp = &rspiocb->iocb;
1705	if (irsp->ulpStatus) {
1706		if ((vport->port_type == LPFC_NPIV_PORT) &&
1707		    vport->cfg_restrict_login) {
1708			goto out;
1709		}
1710		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1711		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1712		return ndlp->nlp_state;
1713	}
1714
1715	/* Check out PRLI rsp */
1716	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1717	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
1718	ndlp->nlp_flag &= ~NLP_FIRSTBURST;
1719	if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
1720	    (npr->prliType == PRLI_FCP_TYPE)) {
1721		if (npr->initiatorFunc)
1722			ndlp->nlp_type |= NLP_FCP_INITIATOR;
1723		if (npr->targetFunc) {
1724			ndlp->nlp_type |= NLP_FCP_TARGET;
1725			if (npr->writeXferRdyDis)
1726				ndlp->nlp_flag |= NLP_FIRSTBURST;
1727		}
1728		if (npr->Retry)
1729			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
1730	}
1731	if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
1732	    (vport->port_type == LPFC_NPIV_PORT) &&
1733	     vport->cfg_restrict_login) {
1734out:
1735		spin_lock_irq(shost->host_lock);
1736		ndlp->nlp_flag |= NLP_TARGET_REMOVE;
1737		spin_unlock_irq(shost->host_lock);
1738		lpfc_issue_els_logo(vport, ndlp, 0);
1739
1740		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1741		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1742		return ndlp->nlp_state;
1743	}
1744
1745	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1746	if (ndlp->nlp_type & NLP_FCP_TARGET)
1747		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1748	else
1749		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1750	return ndlp->nlp_state;
1751}
1752
1753/*! lpfc_device_rm_prli_issue
1754 *
1755 * \pre
1756 * \post
1757 * \param   phba
1758 * \param   ndlp
1759 * \param   arg
1760 * \param   evt
1761 * \return  uint32_t
1762 *
1763 * \b Description:
1764 *    This routine is invoked when we receive a request to remove an NPort
1765 *    that we are in the process of PRLIing. We should software abort the
1766 *    outstanding PRLI, unreg login, and send a LOGO. We will change the node
1767 *    state to UNUSED_NODE and put it on the plogi list so it can be freed
1768 *    when the LOGO completes.
1769 */
1770
1771static uint32_t
1772lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1773			  void *arg, uint32_t evt)
1774{
1775	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1776
1777	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1778		spin_lock_irq(shost->host_lock);
1779		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1780		spin_unlock_irq(shost->host_lock);
1781		return ndlp->nlp_state;
1782	} else {
1783		/* software abort outstanding PRLI */
1784		lpfc_els_abort(vport->phba, ndlp);
1785
1786		lpfc_drop_node(vport, ndlp);
1787		return NLP_STE_FREED_NODE;
1788	}
1789}
1790
1791
1792/*! lpfc_device_recov_prli_issue
1793 *
1794 * \pre
1795 * \post
1796 * \param   vport
1797 * \param   ndlp
1798 * \param   arg
1799 * \param   evt
1800 * \return  uint32_t
1801 *
1802 * \b Description:
1803 *    This routine is invoked when the state of a device is unknown, such
1804 *    as during a link down. We should remove the nodelist entry from the
1805 *    unmapped list, issue an UNREG_LOGIN, do a software abort of the
1806 *    outstanding PRLI command, then free the node entry.
1807 */
1808static uint32_t
1809lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
1810			     struct lpfc_nodelist *ndlp,
1811			     void *arg,
1812			     uint32_t evt)
1813{
1814	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1815	struct lpfc_hba  *phba = vport->phba;
1816
1817	/* Don't do anything that will mess up processing of the
1818	 * previous RSCN.
1819	 */
1820	if (vport->fc_flag & FC_RSCN_DEFERRED)
1821		return ndlp->nlp_state;
1822
1823	/* software abort outstanding PRLI */
1824	lpfc_els_abort(phba, ndlp);
1825
1826	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1827	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1828	spin_lock_irq(shost->host_lock);
1829	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1830	spin_unlock_irq(shost->host_lock);
1831	lpfc_disc_set_adisc(vport, ndlp);
1832	return ndlp->nlp_state;
1833}
1834
1835static uint32_t
1836lpfc_rcv_plogi_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1837			  void *arg, uint32_t evt)
1838{
1839	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1840	struct ls_rjt     stat;
1841
1842	memset(&stat, 0, sizeof(struct ls_rjt));
1843	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1844	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1845	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1846	return ndlp->nlp_state;
1847}
1848
1849static uint32_t
1850lpfc_rcv_prli_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1851			 void *arg, uint32_t evt)
1852{
1853	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1854	struct ls_rjt     stat;
1855
1856	memset(&stat, 0, sizeof(struct ls_rjt));
1857	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1858	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1859	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1860	return ndlp->nlp_state;
1861}
1862
1863static uint32_t
1864lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1865			 void *arg, uint32_t evt)
1866{
1867	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1868	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1869
1870	spin_lock_irq(shost->host_lock);
1871	ndlp->nlp_flag |= NLP_LOGO_ACC;
1872	spin_unlock_irq(shost->host_lock);
1873	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
1874	return ndlp->nlp_state;
1875}
1876
1877static uint32_t
1878lpfc_rcv_padisc_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1879			   void *arg, uint32_t evt)
1880{
1881	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1882	struct ls_rjt     stat;
1883
1884	memset(&stat, 0, sizeof(struct ls_rjt));
1885	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1886	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1887	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1888	return ndlp->nlp_state;
1889}
1890
1891static uint32_t
1892lpfc_rcv_prlo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1893			 void *arg, uint32_t evt)
1894{
1895	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1896	struct ls_rjt     stat;
1897
1898	memset(&stat, 0, sizeof(struct ls_rjt));
1899	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1900	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1901	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1902	return ndlp->nlp_state;
1903}
1904
1905static uint32_t
1906lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1907			  void *arg, uint32_t evt)
1908{
1909	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1910
1911	ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE;
1912	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1913	spin_lock_irq(shost->host_lock);
1914	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1915	spin_unlock_irq(shost->host_lock);
1916	lpfc_disc_set_adisc(vport, ndlp);
1917	return ndlp->nlp_state;
1918}
1919
1920static uint32_t
1921lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1922			  void *arg, uint32_t evt)
1923{
1924	/*
1925	 * Take no action.  If a LOGO is outstanding, then possibly DevLoss has
1926	 * timed out and is calling for Device Remove.  In this case, the LOGO
1927	 * must be allowed to complete in state LOGO_ISSUE so that the rpi
1928	 * and other NLP flags are correctly cleaned up.
1929	 */
1930	return ndlp->nlp_state;
1931}
1932
1933static uint32_t
1934lpfc_device_recov_logo_issue(struct lpfc_vport *vport,
1935			     struct lpfc_nodelist *ndlp,
1936			     void *arg, uint32_t evt)
1937{
1938	/*
1939	 * Device Recovery events have no meaning for a node with a LOGO
1940	 * outstanding.  The LOGO has to complete first and handle the
1941	 * node from that point.
1942	 */
1943	return ndlp->nlp_state;
1944}
1945
1946static uint32_t
1947lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1948			  void *arg, uint32_t evt)
1949{
1950	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1951
1952	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1953	return ndlp->nlp_state;
1954}
1955
1956static uint32_t
1957lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1958			 void *arg, uint32_t evt)
1959{
1960	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1961
1962	lpfc_rcv_prli(vport, ndlp, cmdiocb);
1963	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1964	return ndlp->nlp_state;
1965}
1966
1967static uint32_t
1968lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1969			 void *arg, uint32_t evt)
1970{
1971	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1972
1973	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1974	return ndlp->nlp_state;
1975}
1976
1977static uint32_t
1978lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1979			   void *arg, uint32_t evt)
1980{
1981	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1982
1983	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1984	return ndlp->nlp_state;
1985}
1986
1987static uint32_t
1988lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1989			 void *arg, uint32_t evt)
1990{
1991	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1992
1993	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1994	return ndlp->nlp_state;
1995}
1996
1997static uint32_t
1998lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
1999			     struct lpfc_nodelist *ndlp,
2000			     void *arg,
2001			     uint32_t evt)
2002{
2003	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2004
2005	ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
2006	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2007	spin_lock_irq(shost->host_lock);
2008	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2009	spin_unlock_irq(shost->host_lock);
2010	lpfc_disc_set_adisc(vport, ndlp);
2011
2012	return ndlp->nlp_state;
2013}
2014
2015static uint32_t
2016lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2017			   void *arg, uint32_t evt)
2018{
2019	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2020
2021	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
2022	return ndlp->nlp_state;
2023}
2024
2025static uint32_t
2026lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2027			  void *arg, uint32_t evt)
2028{
2029	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2030
2031	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
2032	return ndlp->nlp_state;
2033}
2034
2035static uint32_t
2036lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2037			  void *arg, uint32_t evt)
2038{
2039	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2040
2041	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
2042	return ndlp->nlp_state;
2043}
2044
2045static uint32_t
2046lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
2047			    struct lpfc_nodelist *ndlp,
2048			    void *arg, uint32_t evt)
2049{
2050	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2051
2052	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
2053	return ndlp->nlp_state;
2054}
2055
2056static uint32_t
2057lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2058			  void *arg, uint32_t evt)
2059{
2060	struct lpfc_hba  *phba = vport->phba;
2061	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2062
2063	/* flush the target */
2064	lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
2065			    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
2066
2067	/* Treat like rcv logo */
2068	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
2069	return ndlp->nlp_state;
2070}
2071
2072static uint32_t
2073lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
2074			      struct lpfc_nodelist *ndlp,
2075			      void *arg,
2076			      uint32_t evt)
2077{
2078	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2079
2080	ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
2081	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2082	spin_lock_irq(shost->host_lock);
2083	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2084	spin_unlock_irq(shost->host_lock);
2085	lpfc_disc_set_adisc(vport, ndlp);
2086	return ndlp->nlp_state;
2087}
2088
2089static uint32_t
2090lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2091			void *arg, uint32_t evt)
2092{
2093	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2094	struct lpfc_iocbq *cmdiocb  = (struct lpfc_iocbq *) arg;
2095
2096	/* Ignore PLOGI if we have an outstanding LOGO */
2097	if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
2098		return ndlp->nlp_state;
2099	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
2100		lpfc_cancel_retry_delay_tmo(vport, ndlp);
2101		spin_lock_irq(shost->host_lock);
2102		ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
2103		spin_unlock_irq(shost->host_lock);
2104	} else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
2105		/* send PLOGI immediately, move to PLOGI issue state */
2106		if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2107			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2108			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2109			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2110		}
2111	}
2112	return ndlp->nlp_state;
2113}
2114
2115static uint32_t
2116lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2117		       void *arg, uint32_t evt)
2118{
2119	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
2120	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2121	struct ls_rjt     stat;
2122
2123	memset(&stat, 0, sizeof (struct ls_rjt));
2124	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2125	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
2126	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
2127
2128	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2129		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
2130			spin_lock_irq(shost->host_lock);
2131			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2132			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2133			spin_unlock_irq(shost->host_lock);
2134			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2135			lpfc_issue_els_adisc(vport, ndlp, 0);
2136		} else {
2137			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2138			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2139			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2140		}
2141	}
2142	return ndlp->nlp_state;
2143}
2144
2145static uint32_t
2146lpfc_rcv_logo_npr_node(struct lpfc_vport *vport,  struct lpfc_nodelist *ndlp,
2147		       void *arg, uint32_t evt)
2148{
2149	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2150
2151	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
2152	return ndlp->nlp_state;
2153}
2154
2155static uint32_t
2156lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2157			 void *arg, uint32_t evt)
2158{
2159	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2160
2161	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
2162	/*
2163	 * Do not start discovery if discovery is about to start
2164	 * or discovery in progress for this node. Starting discovery
2165	 * here will affect the counting of discovery threads.
2166	 */
2167	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
2168	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
2169		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
2170			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2171			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2172			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2173			lpfc_issue_els_adisc(vport, ndlp, 0);
2174		} else {
2175			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2176			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2177			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2178		}
2179	}
2180	return ndlp->nlp_state;
2181}
2182
2183static uint32_t
2184lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2185		       void *arg, uint32_t evt)
2186{
2187	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2188	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2189
2190	spin_lock_irq(shost->host_lock);
2191	ndlp->nlp_flag |= NLP_LOGO_ACC;
2192	spin_unlock_irq(shost->host_lock);
2193
2194	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
2195
2196	if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
2197		mod_timer(&ndlp->nlp_delayfunc,
2198			  jiffies + msecs_to_jiffies(1000 * 1));
2199		spin_lock_irq(shost->host_lock);
2200		ndlp->nlp_flag |= NLP_DELAY_TMO;
2201		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2202		spin_unlock_irq(shost->host_lock);
2203		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
2204	} else {
2205		spin_lock_irq(shost->host_lock);
2206		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2207		spin_unlock_irq(shost->host_lock);
2208	}
2209	return ndlp->nlp_state;
2210}
2211
2212static uint32_t
2213lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2214			 void *arg, uint32_t evt)
2215{
2216	struct lpfc_iocbq *cmdiocb, *rspiocb;
2217	IOCB_t *irsp;
2218	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2219
2220	cmdiocb = (struct lpfc_iocbq *) arg;
2221	rspiocb = cmdiocb->context_un.rsp_iocb;
2222
2223	irsp = &rspiocb->iocb;
2224	if (irsp->ulpStatus) {
2225		spin_lock_irq(shost->host_lock);
2226		ndlp->nlp_flag |= NLP_DEFER_RM;
2227		spin_unlock_irq(shost->host_lock);
2228		return NLP_STE_FREED_NODE;
2229	}
2230	return ndlp->nlp_state;
2231}
2232
2233static uint32_t
2234lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2235			void *arg, uint32_t evt)
2236{
2237	struct lpfc_iocbq *cmdiocb, *rspiocb;
2238	IOCB_t *irsp;
2239
2240	cmdiocb = (struct lpfc_iocbq *) arg;
2241	rspiocb = cmdiocb->context_un.rsp_iocb;
2242
2243	irsp = &rspiocb->iocb;
2244	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
2245		lpfc_drop_node(vport, ndlp);
2246		return NLP_STE_FREED_NODE;
2247	}
2248	return ndlp->nlp_state;
2249}
2250
2251static uint32_t
2252lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2253			void *arg, uint32_t evt)
2254{
2255	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2256
2257	/* For the fabric port just clear the fc flags. */
2258	if (ndlp->nlp_DID == Fabric_DID) {
2259		spin_lock_irq(shost->host_lock);
2260		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
2261		spin_unlock_irq(shost->host_lock);
2262	}
2263	lpfc_unreg_rpi(vport, ndlp);
2264	return ndlp->nlp_state;
2265}
2266
2267static uint32_t
2268lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2269			 void *arg, uint32_t evt)
2270{
2271	struct lpfc_iocbq *cmdiocb, *rspiocb;
2272	IOCB_t *irsp;
2273
2274	cmdiocb = (struct lpfc_iocbq *) arg;
2275	rspiocb = cmdiocb->context_un.rsp_iocb;
2276
2277	irsp = &rspiocb->iocb;
2278	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
2279		lpfc_drop_node(vport, ndlp);
2280		return NLP_STE_FREED_NODE;
2281	}
2282	return ndlp->nlp_state;
2283}
2284
2285static uint32_t
2286lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
2287			    struct lpfc_nodelist *ndlp,
2288			    void *arg, uint32_t evt)
2289{
2290	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
2291	MAILBOX_t    *mb = &pmb->u.mb;
2292
2293	if (!mb->mbxStatus) {
2294		/* SLI4 ports have preallocated logical rpis. */
2295		if (vport->phba->sli_rev < LPFC_SLI_REV4)
2296			ndlp->nlp_rpi = mb->un.varWords[0];
2297		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
2298	} else {
2299		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
2300			lpfc_drop_node(vport, ndlp);
2301			return NLP_STE_FREED_NODE;
2302		}
2303	}
2304	return ndlp->nlp_state;
2305}
2306
2307static uint32_t
2308lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2309			void *arg, uint32_t evt)
2310{
2311	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2312
2313	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
2314		spin_lock_irq(shost->host_lock);
2315		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
2316		spin_unlock_irq(shost->host_lock);
2317		return ndlp->nlp_state;
2318	}
2319	lpfc_drop_node(vport, ndlp);
2320	return NLP_STE_FREED_NODE;
2321}
2322
2323static uint32_t
2324lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2325			   void *arg, uint32_t evt)
2326{
2327	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2328
2329	/* Don't do anything that will mess up processing of the
2330	 * previous RSCN.
2331	 */
2332	if (vport->fc_flag & FC_RSCN_DEFERRED)
2333		return ndlp->nlp_state;
2334
2335	lpfc_cancel_retry_delay_tmo(vport, ndlp);
2336	spin_lock_irq(shost->host_lock);
2337	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2338	spin_unlock_irq(shost->host_lock);
2339	return ndlp->nlp_state;
2340}
2341
2342
2343/* This next section defines the NPort Discovery State Machine */
2344
2345/* There are four different doubly linked lists that nodelist entries can
2346 * reside on. The plogi list and adisc list are used when Link Up discovery
2347 * or RSCN processing is needed. Each list holds the nodes that we will send
2348 * PLOGI or ADISC on. These lists keep track of which nodes will be affected
2349 * by an RSCN or a Link Up (typically, all nodes are affected on Link Up).
2350 * The unmapped_list contains all nodes that we have successfully logged
2351 * into at the Fibre Channel level. The mapped_list contains all nodes
2352 * that are mapped FCP targets.
2353 */
2354/*
2355 * The bind list is a list of undiscovered (potentially non-existent) nodes
2356 * that we have saved binding information on. This information is used when
2357 * nodes transition from the unmapped to the mapped list.
2358 */
2359/* For UNUSED_NODE state, the node has just been allocated.
2360 * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
2361 * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
2362 * and put on the unmapped list. For ADISC processing, the node is taken off
2363 * the ADISC list and placed on either the mapped or unmapped list (depending
2364 * on its previous state). Once on the unmapped list, a PRLI is issued and the
2365 * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
2366 * changed to UNMAPPED_NODE. If the completion indicates a mapped
2367 * node, the node is taken off the unmapped list. The binding list is checked
2368 * for a valid binding, or a binding is automatically assigned. If binding
2369 * assignment is unsuccessful, the node is left on the unmapped list. If
2370 * binding assignment is successful, the associated binding list entry (if
2371 * any) is removed, and the node is placed on the mapped list.
2372 */
2373/*
2374 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
2375 * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
2376 * expire, all affected nodes will receive a DEVICE_RM event.
2377 */
2378/*
2379 * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
2380 * to either the ADISC or PLOGI list.  After a Nameserver query or ALPA loopmap
2381 * check, additional nodes may be added or removed (via DEVICE_RM) to / from
2382 * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
2383 * we will first process the ADISC list.  32 entries are processed initially and
2384 * ADISC is initiated for each one.  Completions / Events for each node are
2385 * funnelled through the state machine.  As each node finishes ADISC processing, it
2386 * starts ADISC for any nodes waiting for ADISC processing. If no nodes are
2387 * waiting, and the ADISC list count is identically 0, then we are done. For
2388 * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
2389 * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
2390 * list.  32 entries are processed initially and PLOGI is initiated for each one.
2391 * Completions / Events for each node are funnelled through the state machine.  As
2392 * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
2393 * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
2394 * identically 0, then we are done. We have now completed discovery / RSCN
2395 * handling. Upon completion, ALL nodes should be on either the mapped or
2396 * unmapped lists.
2397 */
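/*
 * Illustrative sketch (an assumption for clarity, not additional driver
 * logic): the action table below is a flat array of NLP_STE_MAX_STATE rows,
 * each holding NLP_EVT_MAX_EVENT entries, so a (state, event) pair selects
 * exactly one handler.  For example, a lookup for a PLOGI received while a
 * node sits in NPR state would resolve roughly as:
 *
 *	uint32_t (*fn)(struct lpfc_vport *, struct lpfc_nodelist *,
 *		       void *, uint32_t);
 *	fn = lpfc_disc_action[(NLP_STE_NPR_NODE * NLP_EVT_MAX_EVENT) +
 *			      NLP_EVT_RCV_PLOGI];
 *	rc = fn(vport, ndlp, cmdiocb, NLP_EVT_RCV_PLOGI);
 *
 * which, per the table, dispatches to lpfc_rcv_plogi_npr_node.
 */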
2398
2399static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
2400     (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
2401	/* Action routine                  Event       Current State  */
2402	lpfc_rcv_plogi_unused_node,	/* RCV_PLOGI   UNUSED_NODE    */
2403	lpfc_rcv_els_unused_node,	/* RCV_PRLI        */
2404	lpfc_rcv_logo_unused_node,	/* RCV_LOGO        */
2405	lpfc_rcv_els_unused_node,	/* RCV_ADISC       */
2406	lpfc_rcv_els_unused_node,	/* RCV_PDISC       */
2407	lpfc_rcv_els_unused_node,	/* RCV_PRLO        */
2408	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2409	lpfc_disc_illegal,		/* CMPL_PRLI       */
2410	lpfc_cmpl_logo_unused_node,	/* CMPL_LOGO       */
2411	lpfc_disc_illegal,		/* CMPL_ADISC      */
2412	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2413	lpfc_device_rm_unused_node,	/* DEVICE_RM       */
2414	lpfc_device_recov_unused_node,	/* DEVICE_RECOVERY */
2415
2416	lpfc_rcv_plogi_plogi_issue,	/* RCV_PLOGI   PLOGI_ISSUE    */
2417	lpfc_rcv_prli_plogi_issue,	/* RCV_PRLI        */
2418	lpfc_rcv_logo_plogi_issue,	/* RCV_LOGO        */
2419	lpfc_rcv_els_plogi_issue,	/* RCV_ADISC       */
2420	lpfc_rcv_els_plogi_issue,	/* RCV_PDISC       */
2421	lpfc_rcv_els_plogi_issue,	/* RCV_PRLO        */
2422	lpfc_cmpl_plogi_plogi_issue,	/* CMPL_PLOGI      */
2423	lpfc_disc_illegal,		/* CMPL_PRLI       */
2424	lpfc_cmpl_logo_plogi_issue,	/* CMPL_LOGO       */
2425	lpfc_disc_illegal,		/* CMPL_ADISC      */
2426	lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN  */
2427	lpfc_device_rm_plogi_issue,	/* DEVICE_RM       */
2428	lpfc_device_recov_plogi_issue,	/* DEVICE_RECOVERY */
2429
2430	lpfc_rcv_plogi_adisc_issue,	/* RCV_PLOGI   ADISC_ISSUE    */
2431	lpfc_rcv_prli_adisc_issue,	/* RCV_PRLI        */
2432	lpfc_rcv_logo_adisc_issue,	/* RCV_LOGO        */
2433	lpfc_rcv_padisc_adisc_issue,	/* RCV_ADISC       */
2434	lpfc_rcv_padisc_adisc_issue,	/* RCV_PDISC       */
2435	lpfc_rcv_prlo_adisc_issue,	/* RCV_PRLO        */
2436	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2437	lpfc_disc_illegal,		/* CMPL_PRLI       */
2438	lpfc_disc_illegal,		/* CMPL_LOGO       */
2439	lpfc_cmpl_adisc_adisc_issue,	/* CMPL_ADISC      */
2440	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2441	lpfc_device_rm_adisc_issue,	/* DEVICE_RM       */
2442	lpfc_device_recov_adisc_issue,	/* DEVICE_RECOVERY */
2443
2444	lpfc_rcv_plogi_reglogin_issue,	/* RCV_PLOGI  REG_LOGIN_ISSUE */
2445	lpfc_rcv_prli_reglogin_issue,	/* RCV_PRLI        */
2446	lpfc_rcv_logo_reglogin_issue,	/* RCV_LOGO        */
2447	lpfc_rcv_padisc_reglogin_issue,	/* RCV_ADISC       */
2448	lpfc_rcv_padisc_reglogin_issue,	/* RCV_PDISC       */
2449	lpfc_rcv_prlo_reglogin_issue,	/* RCV_PRLO        */
2450	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
2451	lpfc_disc_illegal,		/* CMPL_PRLI       */
2452	lpfc_disc_illegal,		/* CMPL_LOGO       */
2453	lpfc_disc_illegal,		/* CMPL_ADISC      */
2454	lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN  */
2455	lpfc_device_rm_reglogin_issue,	/* DEVICE_RM       */
2456	lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */
2457
2458	lpfc_rcv_plogi_prli_issue,	/* RCV_PLOGI   PRLI_ISSUE     */
2459	lpfc_rcv_prli_prli_issue,	/* RCV_PRLI        */
2460	lpfc_rcv_logo_prli_issue,	/* RCV_LOGO        */
2461	lpfc_rcv_padisc_prli_issue,	/* RCV_ADISC       */
2462	lpfc_rcv_padisc_prli_issue,	/* RCV_PDISC       */
2463	lpfc_rcv_prlo_prli_issue,	/* RCV_PRLO        */
2464	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
2465	lpfc_cmpl_prli_prli_issue,	/* CMPL_PRLI       */
2466	lpfc_disc_illegal,		/* CMPL_LOGO       */
2467	lpfc_disc_illegal,		/* CMPL_ADISC      */
2468	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2469	lpfc_device_rm_prli_issue,	/* DEVICE_RM       */
2470	lpfc_device_recov_prli_issue,	/* DEVICE_RECOVERY */
2471
2472	lpfc_rcv_plogi_logo_issue,	/* RCV_PLOGI   LOGO_ISSUE     */
2473	lpfc_rcv_prli_logo_issue,	/* RCV_PRLI        */
2474	lpfc_rcv_logo_logo_issue,	/* RCV_LOGO        */
2475	lpfc_rcv_padisc_logo_issue,	/* RCV_ADISC       */
2476	lpfc_rcv_padisc_logo_issue,	/* RCV_PDISC       */
2477	lpfc_rcv_prlo_logo_issue,	/* RCV_PRLO        */
2478	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
2479	lpfc_disc_illegal,		/* CMPL_PRLI       */
2480	lpfc_cmpl_logo_logo_issue,	/* CMPL_LOGO       */
2481	lpfc_disc_illegal,		/* CMPL_ADISC      */
2482	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2483	lpfc_device_rm_logo_issue,	/* DEVICE_RM       */
2484	lpfc_device_recov_logo_issue,	/* DEVICE_RECOVERY */
2485
2486	lpfc_rcv_plogi_unmap_node,	/* RCV_PLOGI   UNMAPPED_NODE  */
2487	lpfc_rcv_prli_unmap_node,	/* RCV_PRLI        */
2488	lpfc_rcv_logo_unmap_node,	/* RCV_LOGO        */
2489	lpfc_rcv_padisc_unmap_node,	/* RCV_ADISC       */
2490	lpfc_rcv_padisc_unmap_node,	/* RCV_PDISC       */
2491	lpfc_rcv_prlo_unmap_node,	/* RCV_PRLO        */
2492	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2493	lpfc_disc_illegal,		/* CMPL_PRLI       */
2494	lpfc_disc_illegal,		/* CMPL_LOGO       */
2495	lpfc_disc_illegal,		/* CMPL_ADISC      */
2496	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2497	lpfc_disc_illegal,		/* DEVICE_RM       */
2498	lpfc_device_recov_unmap_node,	/* DEVICE_RECOVERY */
2499
2500	lpfc_rcv_plogi_mapped_node,	/* RCV_PLOGI   MAPPED_NODE    */
2501	lpfc_rcv_prli_mapped_node,	/* RCV_PRLI        */
2502	lpfc_rcv_logo_mapped_node,	/* RCV_LOGO        */
2503	lpfc_rcv_padisc_mapped_node,	/* RCV_ADISC       */
2504	lpfc_rcv_padisc_mapped_node,	/* RCV_PDISC       */
2505	lpfc_rcv_prlo_mapped_node,	/* RCV_PRLO        */
2506	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2507	lpfc_disc_illegal,		/* CMPL_PRLI       */
2508	lpfc_disc_illegal,		/* CMPL_LOGO       */
2509	lpfc_disc_illegal,		/* CMPL_ADISC      */
2510	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2511	lpfc_disc_illegal,		/* DEVICE_RM       */
2512	lpfc_device_recov_mapped_node,	/* DEVICE_RECOVERY */
2513
2514	lpfc_rcv_plogi_npr_node,        /* RCV_PLOGI   NPR_NODE    */
2515	lpfc_rcv_prli_npr_node,         /* RCV_PRLI        */
2516	lpfc_rcv_logo_npr_node,         /* RCV_LOGO        */
2517	lpfc_rcv_padisc_npr_node,       /* RCV_ADISC       */
2518	lpfc_rcv_padisc_npr_node,       /* RCV_PDISC       */
2519	lpfc_rcv_prlo_npr_node,         /* RCV_PRLO        */
2520	lpfc_cmpl_plogi_npr_node,	/* CMPL_PLOGI      */
2521	lpfc_cmpl_prli_npr_node,	/* CMPL_PRLI       */
2522	lpfc_cmpl_logo_npr_node,        /* CMPL_LOGO       */
2523	lpfc_cmpl_adisc_npr_node,       /* CMPL_ADISC      */
2524	lpfc_cmpl_reglogin_npr_node,    /* CMPL_REG_LOGIN  */
2525	lpfc_device_rm_npr_node,        /* DEVICE_RM       */
2526	lpfc_device_recov_npr_node,     /* DEVICE_RECOVERY */
2527};
2528
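/*
 * Usage sketch (assumed call-site shape for illustration; the actual ELS
 * receive and completion paths live outside this file): callers are expected
 * to drive a node through the table above by invoking the dispatcher below
 * with the node, its iocb or mailbox context, and the event, roughly as:
 *
 *	rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
 *				     NLP_EVT_CMPL_PRLI);
 *
 * The return value is the node's resulting state, or NLP_STE_FREED_NODE if
 * the action routine dropped the node.
 */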
2529int
2530lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2531			void *arg, uint32_t evt)
2532{
2533	uint32_t cur_state, rc;
2534	uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
2535			 uint32_t);
2536	uint32_t got_ndlp = 0;
2537
2538	if (lpfc_nlp_get(ndlp))
2539		got_ndlp = 1;
2540
2541	cur_state = ndlp->nlp_state;
2542
2543	/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
2544	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2545			 "0211 DSM in event x%x on NPort x%x in "
2546			 "state %d Data: x%x\n",
2547			 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
2548
2549	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2550		 "DSM in:          evt:%d ste:%d did:x%x",
2551		evt, cur_state, ndlp->nlp_DID);
2552
2553	func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
2554	rc = (func) (vport, ndlp, arg, evt);
2555
2556	/* DSM out state <rc> on NPort <nlp_DID> */
2557	if (got_ndlp) {
2558		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2559			 "0212 DSM out state %d on NPort x%x Data: x%x\n",
2560			 rc, ndlp->nlp_DID, ndlp->nlp_flag);
2561
2562		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2563			"DSM out:         ste:%d did:x%x flg:x%x",
2564			rc, ndlp->nlp_DID, ndlp->nlp_flag);
2565		/* Decrement the ndlp reference count held for this function */
2566		lpfc_nlp_put(ndlp);
2567	} else {
2568		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2569			"0213 DSM out state %d on NPort free\n", rc);
2570
2571		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2572			"DSM out:         ste:%d did:x%x flg:x%x",
2573			rc, 0, 0);
2574	}
2575
2576	return rc;
2577}
2578