/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

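/*
 * qla2x00_vp_stop_timer
 *	Stop the per-vport timer if this host is a vport and the timer
 *	is currently running.
 */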
void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}

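/*
 * qla24xx_allocate_vp_id
 *	Find a free vp_id slot, claim it, and link the vport onto the
 *	adapter's vp_list. Returns the allocated vp_id, or a value greater
 *	than ha->max_npiv_vports when no slot is available.
 */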
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Find an empty slot and assign a vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa000,
		    "vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports);
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_add_tail(&vha->list, &ha->vp_list);

	qlt_update_vp_map(vha, SET_VP_IDX);

	spin_unlock_irqrestore(&ha->vport_slock, flags);

	mutex_unlock(&ha->vport_lock);
	return vp_id;
}

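/*
 * qla24xx_deallocate_vp_id
 *	Wait for outstanding references to drain, unlink the vport from
 *	the adapter's vp_list, and release its vp_id slot.
 */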
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing vport
	 * from the list.
	 * Lock needs to be held for safe removal from the list (it
	 * ensures no active vp_list traversal while the vport is removed
	 * from the queue).
	 */
	spin_lock_irqsave(&ha->vport_slock, flags);
	while (atomic_read(&vha->vref_count)) {
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		msleep(500);

		spin_lock_irqsave(&ha->vport_slock, flags);
	}
	list_del(&vha->list);
	qlt_update_vp_map(vha, RESET_VP_IDX);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);

	mutex_unlock(&ha->vport_lock);
}

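/*
 * qla24xx_find_vhost_by_name
 *	Search the adapter's vp_list for a vport whose WWPN matches
 *	port_name. Returns the matching vport, or NULL if none exists.
 */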
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
	return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when a vport goes offline.
 *
 * Input:
 *	vha = virtual host adapter block pointer.
 *
 * Return:
 *	None.
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * !!! NOTE !!!
	 * If this function is called from contexts other than vp create,
	 * disable, or delete, make sure it is synchronized with the vport
	 * delete thread.
	 */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		ql_dbg(ql_dbg_vport, vha, 0xa001,
		    "Marking port dead, loop_id=0x%04x : %x.\n",
		    fcport->loop_id, fcport->vha->vp_idx);

		qla2x00_mark_device_lost(vha, fcport, 0, 0);
		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	}
}

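/*
 * qla24xx_disable_vp
 *	Log out and disable the vport, mark its devices lost, and update
 *	the fc_vport state. Returns 0 on success, -1 on failure.
 */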
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	unsigned long flags;
	int ret;

	ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	/* Remove port id from vp target map */
	spin_lock_irqsave(&vha->hw->vport_slock, flags);
	qlt_update_vp_map(vha, RESET_AL_PA);
	spin_unlock_irqrestore(&vha->hw->vport_slock, flags);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}

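/*
 * qla24xx_enable_vp
 *	Enable the vport once the physical port is up on a fabric
 *	topology. Returns 0 on success, 1 on failure.
 */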
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
	    !(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state = VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801a,
	    "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
	return 0;

enable_failed:
	ql_dbg(ql_dbg_taskm, vha, 0x801b,
	    "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
	return 1;
}

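/*
 * qla24xx_configure_vp
 *	Enable RSCN reception for the vport and complete vport
 *	configuration, moving it to the ACTIVE state on success.
 */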
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	ql_dbg(ql_dbg_vport, vha, 0xa002,
	    "%s: change request #3.\n", __func__);
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
		    "receiving of RSCN requests: 0x%x.\n", ret);
		return;
	}

	/* Corresponds to SCR enabled */
	clear_bit(VP_SCR_NEEDED, &vha->vp_flags);

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

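/*
 * qla2x00_alert_all_vps
 *	Fan out an asynchronous event to every vport on the adapter.
 *	Each vport's reference count is held while the event is delivered
 *	so the vport cannot be deleted underneath us.
 */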
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vha, &ha->vp_list, list) {
		if (vha->vp_idx) {
			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				ql_dbg(ql_dbg_async, vha, 0x5024,
				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
				    i, *mb, vha);
				qla2x00_async_event(vha, rsp, mb);
				break;
			}

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
		}
		i++;
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

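/*
 * qla2x00_vp_abort_isp
 *	Handle an ISP abort on behalf of a vport: treat it as a loop down,
 *	log the vport out, and schedule it for re-enable.
 */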
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down.
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	/*
	 * To exclusively reset vport, we need to log it out first.
	 * Note: this control_vp can fail if an ISP reset is already issued;
	 * this is expected, as the vp would already be logged out due to the
	 * ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	ql_dbg(ql_dbg_taskm, vha, 0x801d,
	    "Scheduling enable of Vport %d.\n", vha->vp_idx);
	return qla24xx_enable_vp(vha);
}

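/*
 * qla2x00_do_dpc_vp
 *	Per-vport DPC processing: complete deferred work, configure the
 *	vport once firmware is ready, and handle fcport updates, relogins,
 *	reset markers, and loop resyncs flagged in dpc_flags.
 */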
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
	    "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

	qla2x00_do_work(vha);

	/* Check if FW is ready to configure VP first */
	if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
		if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
			/* VP acquired; complete port configuration */
			ql_dbg(ql_dbg_dpc, vha, 0x4014,
			    "Configure VP scheduled.\n");
			qla24xx_configure_vp(vha);
			ql_dbg(ql_dbg_dpc, vha, 0x4015,
			    "Configure VP end.\n");
			return 0;
		}
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		ql_dbg(ql_dbg_dpc, vha, 0x4016,
		    "FCPort update scheduled.\n");
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
		ql_dbg(ql_dbg_dpc, vha, 0x4017,
		    "FCPort update end.\n");
	}

	if (test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {

		ql_dbg(ql_dbg_dpc, vha, 0x4018,
		    "Relogin needed scheduled.\n");
		qla2x00_relogin(vha);
		ql_dbg(ql_dbg_dpc, vha, 0x4019,
		    "Relogin needed end.\n");
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    !test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags)) {
			ql_dbg(ql_dbg_dpc, vha, 0x401a,
			    "Loop resync scheduled.\n");
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, vha, 0x401b,
			    "Loop resync end.\n");
		}
	}

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
	    "Exiting %s.\n", __func__);
	return 0;
}

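/*
 * qla2x00_do_dpc_all_vps
 *	Run DPC processing for every vport on the adapter. Only runs on
 *	the physical port, and only on fabric topologies.
 */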
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	unsigned long flags = 0;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			ret = qla2x00_do_dpc_vp(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

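/*
 * qla24xx_vport_create_req_sanity_check
 *	Validate a vport-create request: role, NPIV support in F/W, H/W,
 *	and switch, WWPN uniqueness, and the NPIV vport limit. Returns 0
 *	on success or a VPCERR_* code on failure.
 */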
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check that the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check whether an NPIV-capable switch is present */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check for a unique WWPN */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check against the max NPIV vport limit */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa004,
		    "num_vhosts %u is bigger "
		    "than max_npiv_vports %u.\n",
		    ha->num_vhosts, ha->max_npiv_vports);
		return VPCERR_UNSUPPORTED;
	}
	return 0;
}

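/*
 * qla24xx_create_vhost
 *	Allocate and initialize a new virtual host for the given fc_vport:
 *	assign a vp_id, start the vport timer, and set up SCSI host limits.
 *	Returns the new vport, or NULL on failure.
 */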
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		ql_log(ql_log_warn, vha, 0xa005,
		    "scsi_host_alloc() failed for vport.\n");
		return NULL;
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa006,
		    "Couldn't allocate vp_id.\n");
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = 10 + vha->vp_idx;

	vha->dpc_flags = 0L;

	/*
	 * To fix the issue of processing a parent's RSCN for the vport before
	 * its SCR is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);

	vha->req = base_vha->req;
	host->can_queue = base_vha->req->length + 128;
	host->cmd_per_lun = 3;
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = ql2xmaxlun;
	host->unique_id = host->host_no;
	host->max_id = ha->max_fibre_devices;
	host->transportt = qla2xxx_transport_vport_template;

	ql_dbg(ql_dbg_vport, vha, 0xa007,
	    "Detected vport hba %ld at address = %p.\n",
	    vha->host_no, vha);

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}

static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
		sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req->outstanding_cmds);
	kfree(req);
}

static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp);
		rsp->msix->have_irq = 0;
		rsp->msix->rsp = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
		sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
}

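/*
 * qla25xx_delete_req_que
 *	Request shutdown of a request queue by re-initializing it with
 *	BIT_0 set in its options, then free it on success.
 */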
int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = -1;

	if (req) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
	}
	if (ret == QLA_SUCCESS)
		qla25xx_free_req_que(vha, req);

	return ret;
}

static int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = -1;

	if (rsp) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
	}
	if (ret == QLA_SUCCESS)
		qla25xx_free_rsp_que(vha, rsp);

	return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;

	/* Delete request queues */
	for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
		req = ha->req_q_map[cnt];
		if (req && test_bit(cnt, ha->req_qid_map)) {
			ret = qla25xx_delete_req_que(vha, req);
			if (ret != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x00ea,
				    "Couldn't delete req que %d.\n",
				    req->id);
				return ret;
			}
		}
	}

	/* Delete response queues */
	for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
		rsp = ha->rsp_q_map[cnt];
		if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
			ret = qla25xx_delete_rsp_que(vha, rsp);
			if (ret != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x00eb,
				    "Couldn't delete rsp que %d.\n",
				    rsp->id);
				return ret;
			}
		}
	}
	return ret;
}

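/*
 * qla25xx_create_req_que
 *	Allocate and initialize an additional request queue: DMA ring,
 *	outstanding-command array, queue-id slot, and firmware setup.
 *	Returns the new queue id, or 0 on failure.
 */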
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
	uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00d9,
		    "Failed to allocate memory for request queue.\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
			(req->length + 1) * sizeof(request_t),
			&req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00da,
		    "Failed to allocate memory for request_ring.\n");
		goto que_failed;
	}

	ret = qla2x00_alloc_outstanding_cmds(ha, req);
	if (ret != QLA_SUCCESS)
		goto que_failed;

	mutex_lock(&ha->vport_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->vport_lock);
		ql_log(ql_log_warn, base_vha, 0x00db,
		    "No resources to create additional request queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	ql_dbg(ql_dbg_init, base_vha, 0x00dc,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
	    "options=0x%x.\n", req->options);
	ql_dbg(ql_dbg_init, base_vha, 0x00dd,
	    "options=0x%x.\n", req->options);
	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->req_q_in = &reg->isp25mq.req_q_in;
	req->req_q_out = &reg->isp25mq.req_q_out;
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	req->out_ptr = (void *)(req->ring + req->length);
	mutex_unlock(&ha->vport_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index,
	    req->cnt, req->id, req->max_q_depth);
	ql_dbg(ql_dbg_init, base_vha, 0x00de,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index, req->cnt,
	    req->id, req->max_q_depth);

	ret = qla25xx_init_req_que(base_vha, req);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_fatal, base_vha, 0x00df,
		    "%s failed.\n", __func__);
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
		goto que_failed;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}

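/*
 * qla_do_work
 *	Deferred-work handler that drains a response queue under the
 *	hardware lock.
 */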
static void qla_do_work(struct work_struct *work)
{
	unsigned long flags;
	struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;

	spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
	spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
}

/* Create a response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
	uint8_t vp_idx, uint16_t rid, int req)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		ql_log(ql_log_warn, base_vha, 0x0066,
		    "Failed to allocate memory for response queue.\n");
		goto failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
			(rsp->length + 1) * sizeof(response_t),
			&rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		ql_log(ql_log_warn, base_vha, 0x00e1,
		    "Failed to allocate memory for response ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->vport_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->vport_lock);
		ql_log(ql_log_warn, base_vha, 0x00e2,
		    "No resources to create additional response queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	if (ha->flags.msix_enabled)
		rsp->msix = &ha->msix_entries[que_id + 1];
	else
		ql_log(ql_log_warn, base_vha, 0x00e3,
		    "MSIX not enabled.\n");

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	ql_dbg(ql_dbg_init, base_vha, 0x00e4,
	    "queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
	    que_id, rsp->rid, rsp->vp_idx, rsp->hw);
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSIX handshake mode on adapters that lack NACK capability */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	rsp->in_ptr = (void *)(rsp->ring + rsp->length);
	mutex_unlock(&ha->vport_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p.\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x00e5,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p.\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);

	ret = qla25xx_request_irq(rsp);
	if (ret)
		goto que_failed;

	ret = qla25xx_init_rsp_que(base_vha, rsp);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_fatal, base_vha, 0x00e7,
		    "%s failed.\n", __func__);
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
		goto que_failed;
	}
	if (req >= 0)
		rsp->req = ha->req_q_map[req];
	else
		rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);
	if (rsp->hw->wq)
		INIT_WORK(&rsp->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}