1/*
2 * This is the Fusion MPT base driver providing common API layer interface
3 * for access to MPT (Message Passing Technology) firmware.
4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
6 * Copyright (C) 2007-2014  LSI Corporation
 * Copyright (C) 2013-2014 Avago Technologies
8 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18 * GNU General Public License for more details.
19 *
20 * NO WARRANTY
21 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
22 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
23 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
24 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
25 * solely responsible for determining the appropriateness of using and
26 * distributing the Program and assumes all risks associated with its
27 * exercise of rights under this Agreement, including but not limited to
28 * the risks and costs of program errors, damage to or loss of data,
29 * programs or equipment, and unavailability or interruption of operations.
30
31 * DISCLAIMER OF LIABILITY
32 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
33 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
35 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
36 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
37 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
38 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39
40 * You should have received a copy of the GNU General Public License
41 * along with this program; if not, write to the Free Software
42 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
43 * USA.
44 */
45
46#include <linux/kernel.h>
47#include <linux/module.h>
48#include <linux/errno.h>
49#include <linux/init.h>
50#include <linux/slab.h>
51#include <linux/types.h>
52#include <linux/pci.h>
53#include <linux/kdev_t.h>
54#include <linux/blkdev.h>
55#include <linux/delay.h>
56#include <linux/interrupt.h>
57#include <linux/dma-mapping.h>
58#include <linux/sort.h>
59#include <linux/io.h>
60#include <linux/time.h>
61#include <linux/kthread.h>
62#include <linux/aer.h>
63
64#include "mpt2sas_base.h"
65
66static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];
67
68#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
69
70#define MAX_HBA_QUEUE_DEPTH	30000
71#define MAX_CHAIN_DEPTH		100000
72static int max_queue_depth = -1;
73module_param(max_queue_depth, int, 0);
74MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
75
76static int max_sgl_entries = -1;
77module_param(max_sgl_entries, int, 0);
78MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
79
80static int msix_disable = -1;
81module_param(msix_disable, int, 0);
82MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
83
84static int max_msix_vectors = -1;
85module_param(max_msix_vectors, int, 0);
86MODULE_PARM_DESC(max_msix_vectors, " max msix vectors ");
87
88static int mpt2sas_fwfault_debug;
89MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
90	"and halt firmware - (default=0)");
91
92static int disable_discovery = -1;
93module_param(disable_discovery, int, 0);
94MODULE_PARM_DESC(disable_discovery, " disable discovery ");
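
/*
 * Illustrative only (not part of the driver): these module parameters are
 * typically supplied at load time or on the kernel command line, e.g.
 * (assuming the module is named mpt2sas):
 *
 *	modprobe mpt2sas max_queue_depth=128 max_msix_vectors=4
 *	mpt2sas.max_sgl_entries=32        (kernel command line form)
 */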
95
96static int
97_base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag);
98
99static int
100_base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag);
101
102/**
103 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
104 *
105 */
106static int
107_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
108{
109	int ret = param_set_int(val, kp);
110	struct MPT2SAS_ADAPTER *ioc;
111
112	if (ret)
113		return ret;
114
115	printk(KERN_INFO "setting fwfault_debug(%d)\n", mpt2sas_fwfault_debug);
116	list_for_each_entry(ioc, &mpt2sas_ioc_list, list)
117		ioc->fwfault_debug = mpt2sas_fwfault_debug;
118	return 0;
119}
120
121module_param_call(mpt2sas_fwfault_debug, _scsih_set_fwfault_debug,
122    param_get_int, &mpt2sas_fwfault_debug, 0644);
123
124/**
 * mpt2sas_remove_dead_ioc_func - kthread context to remove dead ioc
 * @arg: input argument, used to derive ioc
 *
 * Return 0 if the controller is removed from the pci subsystem.
 * Return -1 otherwise.
130 */
131static int mpt2sas_remove_dead_ioc_func(void *arg)
132{
	struct MPT2SAS_ADAPTER *ioc = (struct MPT2SAS_ADAPTER *)arg;
	struct pci_dev *pdev;

	if (!ioc)
		return -1;

	pdev = ioc->pdev;
	if (!pdev)
		return -1;
	pci_stop_and_remove_bus_device_locked(pdev);
	return 0;
144}
145
146
147/**
148 * _base_fault_reset_work - workq handling ioc fault conditions
149 * @work: input argument, used to derive ioc
150 * Context: sleep.
151 *
152 * Return nothing.
153 */
154static void
155_base_fault_reset_work(struct work_struct *work)
156{
157	struct MPT2SAS_ADAPTER *ioc =
158	    container_of(work, struct MPT2SAS_ADAPTER, fault_reset_work.work);
159	unsigned long	 flags;
160	u32 doorbell;
161	int rc;
162	struct task_struct *p;
163
164	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
165	if (ioc->shost_recovery || ioc->pci_error_recovery)
166		goto rearm_timer;
167	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
168
169	doorbell = mpt2sas_base_get_iocstate(ioc, 0);
170	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
171		printk(MPT2SAS_INFO_FMT "%s : SAS host is non-operational !!!!\n",
172			ioc->name, __func__);
173
		/* It may be possible that EEH recovery can resolve some of
		 * the pci bus failure issues rather than removing the dead
		 * ioc function outright when the controller is found in a
		 * non-operational state. So here priority is given to EEH
		 * recovery. If EEH does not resolve this issue, the mpt2sas
		 * driver will consider this controller non-operational and
		 * remove the dead ioc function.
181		 */
182		if (ioc->non_operational_loop++ < 5) {
183			spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
184							 flags);
185			goto rearm_timer;
186		}
187
188		/*
		 * Call _scsih_flush_pending_cmds callback so that we flush all
		 * pending commands back to the OS. This call is required to
		 * avoid a deadlock at the block layer. A dead IOC will fail to
		 * do a diag reset, and this call is safe since a dead ioc will
		 * never return any command back from HW.
194		 */
195		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
196		/*
197		 * Set remove_host flag early since kernel thread will
198		 * take some time to execute.
199		 */
200		ioc->remove_host = 1;
201		/*Remove the Dead Host */
202		p = kthread_run(mpt2sas_remove_dead_ioc_func, ioc,
203		    "mpt2sas_dead_ioc_%d", ioc->id);
204		if (IS_ERR(p)) {
205			printk(MPT2SAS_ERR_FMT
206			"%s: Running mpt2sas_dead_ioc thread failed !!!!\n",
207			ioc->name, __func__);
208		} else {
209		    printk(MPT2SAS_ERR_FMT
210			"%s: Running mpt2sas_dead_ioc thread success !!!!\n",
211			ioc->name, __func__);
212		}
213
214		return; /* don't rearm timer */
215	}
216
217	ioc->non_operational_loop = 0;
218
219	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
220		rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
221		    FORCE_BIG_HAMMER);
222		printk(MPT2SAS_WARN_FMT "%s: hard reset: %s\n", ioc->name,
223		    __func__, (rc == 0) ? "success" : "failed");
224		doorbell = mpt2sas_base_get_iocstate(ioc, 0);
225		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
226			mpt2sas_base_fault_info(ioc, doorbell &
227			    MPI2_DOORBELL_DATA_MASK);
228	}
229
230	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
231 rearm_timer:
232	if (ioc->fault_reset_work_q)
233		queue_delayed_work(ioc->fault_reset_work_q,
234		    &ioc->fault_reset_work,
235		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
236	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
237}
238
239/**
240 * mpt2sas_base_start_watchdog - start the fault_reset_work_q
241 * @ioc: per adapter object
242 * Context: sleep.
243 *
244 * Return nothing.
245 */
246void
247mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc)
248{
249	unsigned long	 flags;
250
251	if (ioc->fault_reset_work_q)
252		return;
253
254	/* initialize fault polling */
255	INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
256	snprintf(ioc->fault_reset_work_q_name,
257	    sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
258	ioc->fault_reset_work_q =
259		create_singlethread_workqueue(ioc->fault_reset_work_q_name);
260	if (!ioc->fault_reset_work_q) {
261		printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n",
262		    ioc->name, __func__, __LINE__);
		return;
264	}
265	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
266	if (ioc->fault_reset_work_q)
267		queue_delayed_work(ioc->fault_reset_work_q,
268		    &ioc->fault_reset_work,
269		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
270	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
271}
272
273/**
274 * mpt2sas_base_stop_watchdog - stop the fault_reset_work_q
275 * @ioc: per adapter object
276 * Context: sleep.
277 *
278 * Return nothing.
279 */
280void
281mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc)
282{
283	unsigned long	 flags;
284	struct workqueue_struct *wq;
285
286	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
287	wq = ioc->fault_reset_work_q;
288	ioc->fault_reset_work_q = NULL;
289	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
290	if (wq) {
291		if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
292			flush_workqueue(wq);
293		destroy_workqueue(wq);
294	}
295}
296
297/**
298 * mpt2sas_base_fault_info - verbose translation of firmware FAULT code
299 * @ioc: per adapter object
300 * @fault_code: fault code
301 *
302 * Return nothing.
303 */
304void
305mpt2sas_base_fault_info(struct MPT2SAS_ADAPTER *ioc , u16 fault_code)
306{
307	printk(MPT2SAS_ERR_FMT "fault_state(0x%04x)!\n",
308	    ioc->name, fault_code);
309}
310
311/**
 * mpt2sas_halt_firmware - halts the mpt controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues.  Writing 0xC0FFEE00
 * to the doorbell register will halt the controller firmware. The
 * intent is to stop both the driver and the firmware so the end user
 * can obtain a ring buffer from the controller UART.
319 */
320void
321mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc)
322{
323	u32 doorbell;
324
325	if (!ioc->fwfault_debug)
326		return;
327
328	dump_stack();
329
330	doorbell = readl(&ioc->chip->Doorbell);
331	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
332		mpt2sas_base_fault_info(ioc , doorbell);
333	else {
334		writel(0xC0FFEE00, &ioc->chip->Doorbell);
335		printk(MPT2SAS_ERR_FMT "Firmware is halted due to command "
336		    "timeout\n", ioc->name);
337	}
338
339	panic("panic in %s\n", __func__);
340}
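
/*
 * Illustrative only: since mpt2sas_fwfault_debug is exposed through
 * module_param_call() above with 0644 permissions, it can be toggled at
 * runtime, e.g.:
 *
 *	echo 1 > /sys/module/mpt2sas/parameters/mpt2sas_fwfault_debug
 *
 * The SCSI error-handling paths can then call mpt2sas_halt_firmware() on a
 * command timeout, halting the firmware so a UART ring buffer can be taken.
 */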
341
342#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
343/**
344 * _base_sas_ioc_info - verbose translation of the ioc status
345 * @ioc: per adapter object
346 * @mpi_reply: reply mf payload returned from firmware
347 * @request_hdr: request mf
348 *
349 * Return nothing.
350 */
351static void
352_base_sas_ioc_info(struct MPT2SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
353     MPI2RequestHeader_t *request_hdr)
354{
355	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
356	    MPI2_IOCSTATUS_MASK;
357	char *desc = NULL;
358	u16 frame_sz;
359	char *func_str = NULL;
360
361	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
362	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
363	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
364	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
365		return;
366
367	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
368		return;
369
370	switch (ioc_status) {
371
372/****************************************************************************
373*  Common IOCStatus values for all replies
374****************************************************************************/
375
376	case MPI2_IOCSTATUS_INVALID_FUNCTION:
377		desc = "invalid function";
378		break;
379	case MPI2_IOCSTATUS_BUSY:
380		desc = "busy";
381		break;
382	case MPI2_IOCSTATUS_INVALID_SGL:
383		desc = "invalid sgl";
384		break;
385	case MPI2_IOCSTATUS_INTERNAL_ERROR:
386		desc = "internal error";
387		break;
388	case MPI2_IOCSTATUS_INVALID_VPID:
389		desc = "invalid vpid";
390		break;
391	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
392		desc = "insufficient resources";
393		break;
394	case MPI2_IOCSTATUS_INVALID_FIELD:
395		desc = "invalid field";
396		break;
397	case MPI2_IOCSTATUS_INVALID_STATE:
398		desc = "invalid state";
399		break;
400	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
401		desc = "op state not supported";
402		break;
403
404/****************************************************************************
405*  Config IOCStatus values
406****************************************************************************/
407
408	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
409		desc = "config invalid action";
410		break;
411	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
412		desc = "config invalid type";
413		break;
414	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
415		desc = "config invalid page";
416		break;
417	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
418		desc = "config invalid data";
419		break;
420	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
421		desc = "config no defaults";
422		break;
423	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
424		desc = "config cant commit";
425		break;
426
427/****************************************************************************
428*  SCSI IO Reply
429****************************************************************************/
430
431	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
432	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
433	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
434	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
435	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
436	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
437	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
438	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
439	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
440	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
441	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
442	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
443		break;
444
445/****************************************************************************
446*  For use by SCSI Initiator and SCSI Target end-to-end data protection
447****************************************************************************/
448
449	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
450		desc = "eedp guard error";
451		break;
452	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
453		desc = "eedp ref tag error";
454		break;
455	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
456		desc = "eedp app tag error";
457		break;
458
459/****************************************************************************
460*  SCSI Target values
461****************************************************************************/
462
463	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
464		desc = "target invalid io index";
465		break;
466	case MPI2_IOCSTATUS_TARGET_ABORTED:
467		desc = "target aborted";
468		break;
469	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
470		desc = "target no conn retryable";
471		break;
472	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
473		desc = "target no connection";
474		break;
475	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
476		desc = "target xfer count mismatch";
477		break;
478	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
479		desc = "target data offset error";
480		break;
481	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
482		desc = "target too much write data";
483		break;
484	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
485		desc = "target iu too short";
486		break;
487	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
488		desc = "target ack nak timeout";
489		break;
490	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
491		desc = "target nak received";
492		break;
493
494/****************************************************************************
495*  Serial Attached SCSI values
496****************************************************************************/
497
498	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
499		desc = "smp request failed";
500		break;
501	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
502		desc = "smp data overrun";
503		break;
504
505/****************************************************************************
506*  Diagnostic Buffer Post / Diagnostic Release values
507****************************************************************************/
508
509	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
510		desc = "diagnostic released";
511		break;
512	default:
513		break;
514	}
515
516	if (!desc)
517		return;
518
519	switch (request_hdr->Function) {
520	case MPI2_FUNCTION_CONFIG:
521		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
522		func_str = "config_page";
523		break;
524	case MPI2_FUNCTION_SCSI_TASK_MGMT:
525		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
526		func_str = "task_mgmt";
527		break;
528	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
529		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
530		func_str = "sas_iounit_ctl";
531		break;
532	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
533		frame_sz = sizeof(Mpi2SepRequest_t);
534		func_str = "enclosure";
535		break;
536	case MPI2_FUNCTION_IOC_INIT:
537		frame_sz = sizeof(Mpi2IOCInitRequest_t);
538		func_str = "ioc_init";
539		break;
540	case MPI2_FUNCTION_PORT_ENABLE:
541		frame_sz = sizeof(Mpi2PortEnableRequest_t);
542		func_str = "port_enable";
543		break;
544	case MPI2_FUNCTION_SMP_PASSTHROUGH:
545		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
546		func_str = "smp_passthru";
547		break;
548	default:
549		frame_sz = 32;
550		func_str = "unknown";
551		break;
552	}
553
554	printk(MPT2SAS_WARN_FMT "ioc_status: %s(0x%04x), request(0x%p),"
555	    " (%s)\n", ioc->name, desc, ioc_status, request_hdr, func_str);
556
557	_debug_dump_mf(request_hdr, frame_sz/4);
558}
559
560/**
 * _base_display_event_data - verbose translation of firmware async events
562 * @ioc: per adapter object
563 * @mpi_reply: reply mf payload returned from firmware
564 *
565 * Return nothing.
566 */
567static void
568_base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
569    Mpi2EventNotificationReply_t *mpi_reply)
570{
571	char *desc = NULL;
572	u16 event;
573
574	if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
575		return;
576
577	event = le16_to_cpu(mpi_reply->Event);
578
579	switch (event) {
580	case MPI2_EVENT_LOG_DATA:
581		desc = "Log Data";
582		break;
583	case MPI2_EVENT_STATE_CHANGE:
584		desc = "Status Change";
585		break;
586	case MPI2_EVENT_HARD_RESET_RECEIVED:
587		desc = "Hard Reset Received";
588		break;
589	case MPI2_EVENT_EVENT_CHANGE:
590		desc = "Event Change";
591		break;
592	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
593		desc = "Device Status Change";
594		break;
595	case MPI2_EVENT_IR_OPERATION_STATUS:
596		if (!ioc->hide_ir_msg)
597			desc = "IR Operation Status";
598		break;
599	case MPI2_EVENT_SAS_DISCOVERY:
600	{
601		Mpi2EventDataSasDiscovery_t *event_data =
602		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
603		printk(MPT2SAS_INFO_FMT "Discovery: (%s)", ioc->name,
604		    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
605		    "start" : "stop");
606		if (event_data->DiscoveryStatus)
607			printk("discovery_status(0x%08x)",
608			    le32_to_cpu(event_data->DiscoveryStatus));
609		printk("\n");
610		return;
611	}
612	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
613		desc = "SAS Broadcast Primitive";
614		break;
615	case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
616		desc = "SAS Init Device Status Change";
617		break;
618	case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
619		desc = "SAS Init Table Overflow";
620		break;
621	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
622		desc = "SAS Topology Change List";
623		break;
624	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
625		desc = "SAS Enclosure Device Status Change";
626		break;
627	case MPI2_EVENT_IR_VOLUME:
628		if (!ioc->hide_ir_msg)
629			desc = "IR Volume";
630		break;
631	case MPI2_EVENT_IR_PHYSICAL_DISK:
632		if (!ioc->hide_ir_msg)
633			desc = "IR Physical Disk";
634		break;
635	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
636		if (!ioc->hide_ir_msg)
637			desc = "IR Configuration Change List";
638		break;
639	case MPI2_EVENT_LOG_ENTRY_ADDED:
640		if (!ioc->hide_ir_msg)
641			desc = "Log Entry Added";
642		break;
643	case MPI2_EVENT_TEMP_THRESHOLD:
644		desc = "Temperature Threshold";
645		break;
646	}
647
648	if (!desc)
649		return;
650
651	printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, desc);
652}
653#endif
654
655/**
656 * _base_sas_log_info - verbose translation of firmware log info
657 * @ioc: per adapter object
658 * @log_info: log info
659 *
660 * Return nothing.
661 */
662static void
663_base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info)
664{
665	union loginfo_type {
666		u32	loginfo;
667		struct {
668			u32	subcode:16;
669			u32	code:8;
670			u32	originator:4;
671			u32	bus_type:4;
672		} dw;
673	};
674	union loginfo_type sas_loginfo;
675	char *originator_str = NULL;
676
677	sas_loginfo.loginfo = log_info;
678	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
679		return;
680
681	/* each nexus loss loginfo */
682	if (log_info == 0x31170000)
683		return;
684
685	/* eat the loginfos associated with task aborts */
686	if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
687	    0x31140000 || log_info == 0x31130000))
688		return;
689
690	switch (sas_loginfo.dw.originator) {
691	case 0:
692		originator_str = "IOP";
693		break;
694	case 1:
695		originator_str = "PL";
696		break;
697	case 2:
698		if (!ioc->hide_ir_msg)
699			originator_str = "IR";
700		else
701			originator_str = "WarpDrive";
702		break;
703	}
704
705	printk(MPT2SAS_WARN_FMT "log_info(0x%08x): originator(%s), "
706	    "code(0x%02x), sub_code(0x%04x)\n", ioc->name, log_info,
707	     originator_str, sas_loginfo.dw.code,
708	     sas_loginfo.dw.subcode);
709}
710
711/**
 * _base_display_reply_info - verbose translation of reply ioc status and log info
713 * @ioc: per adapter object
714 * @smid: system request message index
715 * @msix_index: MSIX table index supplied by the OS
716 * @reply: reply message frame(lower 32bit addr)
717 *
718 * Return nothing.
719 */
720static void
721_base_display_reply_info(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
722    u32 reply)
723{
724	MPI2DefaultReply_t *mpi_reply;
725	u16 ioc_status;
726
727	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
728	if (unlikely(!mpi_reply)) {
729		printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
730			ioc->name, __FILE__, __LINE__, __func__);
731		return;
732	}
733	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
734#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
735	if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
736	    (ioc->logging_level & MPT_DEBUG_REPLY)) {
737		_base_sas_ioc_info(ioc , mpi_reply,
738		   mpt2sas_base_get_msg_frame(ioc, smid));
739	}
740#endif
741	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
742		_base_sas_log_info(ioc, le32_to_cpu(mpi_reply->IOCLogInfo));
743}
744
745/**
746 * mpt2sas_base_done - base internal command completion routine
747 * @ioc: per adapter object
748 * @smid: system request message index
749 * @msix_index: MSIX table index supplied by the OS
750 * @reply: reply message frame(lower 32bit addr)
751 *
752 * Return 1 meaning mf should be freed from _base_interrupt
753 *        0 means the mf is freed from this function.
754 */
755u8
756mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
757    u32 reply)
758{
759	MPI2DefaultReply_t *mpi_reply;
760
761	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
762	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
763		return 1;
764
765	if (ioc->base_cmds.status == MPT2_CMD_NOT_USED)
766		return 1;
767
768	ioc->base_cmds.status |= MPT2_CMD_COMPLETE;
769	if (mpi_reply) {
770		ioc->base_cmds.status |= MPT2_CMD_REPLY_VALID;
771		memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
772	}
773	ioc->base_cmds.status &= ~MPT2_CMD_PENDING;
774
775	complete(&ioc->base_cmds.done);
776	return 1;
777}
778
779/**
 * _base_async_event - main callback handler for firmware async events
781 * @ioc: per adapter object
782 * @msix_index: MSIX table index supplied by the OS
783 * @reply: reply message frame(lower 32bit addr)
784 *
785 * Returns void.
786 */
787static void
788_base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
789{
790	Mpi2EventNotificationReply_t *mpi_reply;
791	Mpi2EventAckRequest_t *ack_request;
792	u16 smid;
793
794	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
795	if (!mpi_reply)
796		return;
797	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
798		return;
799#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
800	_base_display_event_data(ioc, mpi_reply);
801#endif
802	if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
803		goto out;
804	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
805	if (!smid) {
806		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
807		    ioc->name, __func__);
808		goto out;
809	}
810
811	ack_request = mpt2sas_base_get_msg_frame(ioc, smid);
812	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
813	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
814	ack_request->Event = mpi_reply->Event;
815	ack_request->EventContext = mpi_reply->EventContext;
816	ack_request->VF_ID = 0;  /* TODO */
817	ack_request->VP_ID = 0;
818	mpt2sas_base_put_smid_default(ioc, smid);
819
820 out:
821
822	/* scsih callback handler */
823	mpt2sas_scsih_event_callback(ioc, msix_index, reply);
824
825	/* ctl callback handler */
826	mpt2sas_ctl_event_callback(ioc, msix_index, reply);
827
828	return;
829}
830
831/**
832 * _base_get_cb_idx - obtain the callback index
833 * @ioc: per adapter object
834 * @smid: system request message index
835 *
836 * Return callback index.
837 */
838static u8
839_base_get_cb_idx(struct MPT2SAS_ADAPTER *ioc, u16 smid)
840{
841	int i;
842	u8 cb_idx;
843
844	if (smid < ioc->hi_priority_smid) {
845		i = smid - 1;
846		cb_idx = ioc->scsi_lookup[i].cb_idx;
847	} else if (smid < ioc->internal_smid) {
848		i = smid - ioc->hi_priority_smid;
849		cb_idx = ioc->hpr_lookup[i].cb_idx;
850	} else if (smid <= ioc->hba_queue_depth) {
851		i = smid - ioc->internal_smid;
852		cb_idx = ioc->internal_lookup[i].cb_idx;
853	} else
854		cb_idx = 0xFF;
855	return cb_idx;
856}
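
/*
 * Note (illustrative): the smid ranges decoded above mirror how the request
 * trackers are partitioned elsewhere in this file:
 *
 *	1 .. hi_priority_smid-1              -> scsi_lookup[]     (SCSI IO)
 *	hi_priority_smid .. internal_smid-1  -> hpr_lookup[]      (hi-priority)
 *	internal_smid .. hba_queue_depth     -> internal_lookup[] (internal)
 *
 * Anything outside these ranges yields the 0xFF "no callback" index.
 */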
857
858/**
859 * _base_mask_interrupts - disable interrupts
860 * @ioc: per adapter object
861 *
862 * Disabling ResetIRQ, Reply and Doorbell Interrupts
863 *
864 * Return nothing.
865 */
866static void
867_base_mask_interrupts(struct MPT2SAS_ADAPTER *ioc)
868{
869	u32 him_register;
870
871	ioc->mask_interrupts = 1;
872	him_register = readl(&ioc->chip->HostInterruptMask);
873	him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
874	writel(him_register, &ioc->chip->HostInterruptMask);
875	readl(&ioc->chip->HostInterruptMask);
876}
877
878/**
879 * _base_unmask_interrupts - enable interrupts
880 * @ioc: per adapter object
881 *
882 * Enabling only Reply Interrupts
883 *
884 * Return nothing.
885 */
886static void
887_base_unmask_interrupts(struct MPT2SAS_ADAPTER *ioc)
888{
889	u32 him_register;
890
891	him_register = readl(&ioc->chip->HostInterruptMask);
892	him_register &= ~MPI2_HIM_RIM;
893	writel(him_register, &ioc->chip->HostInterruptMask);
894	ioc->mask_interrupts = 0;
895}
896
897union reply_descriptor {
898	u64 word;
899	struct {
900		u32 low;
901		u32 high;
902	} u;
903};
904
905/**
906 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
907 * @irq: irq number (not used)
 * @bus_id: bus identifier cookie == pointer to the adapter_reply_queue
 *
 * Return IRQ_HANDLED if processed, else IRQ_NONE.
912 */
913static irqreturn_t
914_base_interrupt(int irq, void *bus_id)
915{
916	struct adapter_reply_queue *reply_q = bus_id;
917	union reply_descriptor rd;
918	u32 completed_cmds;
919	u8 request_desript_type;
920	u16 smid;
921	u8 cb_idx;
922	u32 reply;
923	u8 msix_index = reply_q->msix_index;
924	struct MPT2SAS_ADAPTER *ioc = reply_q->ioc;
925	Mpi2ReplyDescriptorsUnion_t *rpf;
926	u8 rc;
927
928	if (ioc->mask_interrupts)
929		return IRQ_NONE;
930
931	if (!atomic_add_unless(&reply_q->busy, 1, 1))
932		return IRQ_NONE;
933
934	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
935	request_desript_type = rpf->Default.ReplyFlags
936	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
937	if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
938		atomic_dec(&reply_q->busy);
939		return IRQ_NONE;
940	}
941
942	completed_cmds = 0;
943	cb_idx = 0xFF;
944	do {
945		rd.word = le64_to_cpu(rpf->Words);
946		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
947			goto out;
948		reply = 0;
949		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
950		if (request_desript_type ==
951		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
952			reply = le32_to_cpu
953				(rpf->AddressReply.ReplyFrameAddress);
954			if (reply > ioc->reply_dma_max_address ||
955			    reply < ioc->reply_dma_min_address)
956				reply = 0;
957		} else if (request_desript_type ==
958		    MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER)
959			goto next;
960		else if (request_desript_type ==
961		    MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS)
962			goto next;
		if (smid) {
			cb_idx = _base_get_cb_idx(ioc, smid);
			if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
			    (likely(mpt_callbacks[cb_idx] != NULL))) {
				rc = mpt_callbacks[cb_idx](ioc, smid,
				    msix_index, reply);
				if (reply)
					_base_display_reply_info(ioc, smid,
					    msix_index, reply);
				if (rc)
					mpt2sas_base_free_smid(ioc, smid);
			}
		}
976		if (!smid)
977			_base_async_event(ioc, msix_index, reply);
978
979		/* reply free queue handling */
980		if (reply) {
981			ioc->reply_free_host_index =
982			    (ioc->reply_free_host_index ==
983			    (ioc->reply_free_queue_depth - 1)) ?
984			    0 : ioc->reply_free_host_index + 1;
985			ioc->reply_free[ioc->reply_free_host_index] =
986			    cpu_to_le32(reply);
987			wmb();
988			writel(ioc->reply_free_host_index,
989			    &ioc->chip->ReplyFreeHostIndex);
990		}
991
992 next:
993
994		rpf->Words = cpu_to_le64(ULLONG_MAX);
995		reply_q->reply_post_host_index =
996		    (reply_q->reply_post_host_index ==
997		    (ioc->reply_post_queue_depth - 1)) ? 0 :
998		    reply_q->reply_post_host_index + 1;
999		request_desript_type =
1000		    reply_q->reply_post_free[reply_q->reply_post_host_index].
1001		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1002		completed_cmds++;
1003		if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1004			goto out;
1005		if (!reply_q->reply_post_host_index)
1006			rpf = reply_q->reply_post_free;
1007		else
1008			rpf++;
1009	} while (1);
1010
1011 out:
1012
1013	if (!completed_cmds) {
1014		atomic_dec(&reply_q->busy);
1015		return IRQ_NONE;
1016	}
1017	wmb();
1018	if (ioc->is_warpdrive) {
1019		writel(reply_q->reply_post_host_index,
1020		ioc->reply_post_host_index[msix_index]);
1021		atomic_dec(&reply_q->busy);
1022		return IRQ_HANDLED;
1023	}
1024	writel(reply_q->reply_post_host_index | (msix_index <<
1025	    MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex);
1026	atomic_dec(&reply_q->busy);
1027	return IRQ_HANDLED;
1028}
1029
1030/**
 * _base_is_controller_msix_enabled - does the controller support multi-reply queues
1032 * @ioc: per adapter object
1033 *
1034 */
1035static inline int
1036_base_is_controller_msix_enabled(struct MPT2SAS_ADAPTER *ioc)
1037{
1038	return (ioc->facts.IOCCapabilities &
1039	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1040}
1041
1042/**
1043 * mpt2sas_base_flush_reply_queues - flushing the MSIX reply queues
1044 * @ioc: per adapter object
 * Context: ISR context
 *
 * Called when a Task Management request has completed. We want
 * to flush the other reply queues so all the outstanding IO has been
 * completed back to the OS before we process the TM completion.
1050 *
1051 * Return nothing.
1052 */
1053void
1054mpt2sas_base_flush_reply_queues(struct MPT2SAS_ADAPTER *ioc)
1055{
1056	struct adapter_reply_queue *reply_q;
1057
1058	/* If MSIX capability is turned off
1059	 * then multi-queues are not enabled
1060	 */
1061	if (!_base_is_controller_msix_enabled(ioc))
1062		return;
1063
1064	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1065		if (ioc->shost_recovery)
1066			return;
1067		/* TMs are on msix_index == 0 */
1068		if (reply_q->msix_index == 0)
1069			continue;
1070		_base_interrupt(reply_q->vector, (void *)reply_q);
1071	}
1072}
1073
1074/**
1075 * mpt2sas_base_release_callback_handler - clear interrupt callback handler
1076 * @cb_idx: callback index
1077 *
1078 * Return nothing.
1079 */
1080void
1081mpt2sas_base_release_callback_handler(u8 cb_idx)
1082{
1083	mpt_callbacks[cb_idx] = NULL;
1084}
1085
1086/**
1087 * mpt2sas_base_register_callback_handler - obtain index for the interrupt callback handler
1088 * @cb_func: callback function
1089 *
 * Returns cb_idx (the assigned callback index).
1091 */
1092u8
1093mpt2sas_base_register_callback_handler(MPT_CALLBACK cb_func)
1094{
1095	u8 cb_idx;
1096
1097	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
1098		if (mpt_callbacks[cb_idx] == NULL)
1099			break;
1100
1101	mpt_callbacks[cb_idx] = cb_func;
1102	return cb_idx;
1103}
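
/*
 * Illustrative usage (names hypothetical): a protocol driver registers its
 * completion routine once at init time, keeps the returned index, and
 * releases it on unload:
 *
 *	scsih_cb_idx = mpt2sas_base_register_callback_handler(_scsih_io_done);
 *	...
 *	mpt2sas_base_release_callback_handler(scsih_cb_idx);
 *
 * The saved index is later passed to the smid allocation routines so that
 * _base_interrupt() can route replies back to the owning driver.
 */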
1104
1105/**
1106 * mpt2sas_base_initialize_callback_handler - initialize the interrupt callback handler
1107 *
1108 * Return nothing.
1109 */
1110void
1111mpt2sas_base_initialize_callback_handler(void)
1112{
1113	u8 cb_idx;
1114
1115	for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
1116		mpt2sas_base_release_callback_handler(cb_idx);
1117}
1118
1119/**
1120 * mpt2sas_base_build_zero_len_sge - build zero length sg entry
1121 * @ioc: per adapter object
1122 * @paddr: virtual address for SGE
1123 *
 * Create a zero length scatter gather entry to ensure the IOC's hardware has
1125 * something to use if the target device goes brain dead and tries
1126 * to send data even when none is asked for.
1127 *
1128 * Return nothing.
1129 */
1130void
1131mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr)
1132{
1133	u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
1134	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
1135	    MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
1136	    MPI2_SGE_FLAGS_SHIFT);
1137	ioc->base_add_sg_single(paddr, flags_length, -1);
1138}
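
/*
 * Illustrative call site (hedged): for requests that carry no data, callers
 * elsewhere in the driver typically point this at the request's SGL, e.g.:
 *
 *	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request->SGL);
 */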
1139
1140/**
 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address paddr.
1142 * @paddr: virtual address for SGE
1143 * @flags_length: SGE flags and data transfer length
1144 * @dma_addr: Physical address
1145 *
1146 * Return nothing.
1147 */
1148static void
1149_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1150{
1151	Mpi2SGESimple32_t *sgel = paddr;
1152
1153	flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1154	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1155	sgel->FlagsLength = cpu_to_le32(flags_length);
1156	sgel->Address = cpu_to_le32(dma_addr);
1157}
1158
1159
1160/**
 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address paddr.
1162 * @paddr: virtual address for SGE
1163 * @flags_length: SGE flags and data transfer length
1164 * @dma_addr: Physical address
1165 *
1166 * Return nothing.
1167 */
1168static void
1169_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1170{
1171	Mpi2SGESimple64_t *sgel = paddr;
1172
1173	flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
1174	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1175	sgel->FlagsLength = cpu_to_le32(flags_length);
1176	sgel->Address = cpu_to_le64(dma_addr);
1177}
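
/*
 * Illustrative only: ioc->base_add_sg_single is pointed at one of the two
 * helpers above by _base_config_dma_addressing() below, so SGE construction
 * elsewhere in the driver can stay address-width agnostic, e.g.:
 *
 *	ioc->base_add_sg_single(psge, sgl_flags | data_length, dma_addr);
 *
 * where sgl_flags is an MPI2_SGE_FLAGS_* mask already shifted by
 * MPI2_SGE_FLAGS_SHIFT and dma_addr is a mapped DMA address.
 */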
1178
1179#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
1180
1181/**
1182 * _base_config_dma_addressing - set dma addressing
1183 * @ioc: per adapter object
1184 * @pdev: PCI device struct
1185 *
1186 * Returns 0 for success, non-zero for failure.
1187 */
1188static int
1189_base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
1190{
1191	struct sysinfo s;
1192	u64 consistent_dma_mask;
1193
1194	if (ioc->dma_mask)
1195		consistent_dma_mask = DMA_BIT_MASK(64);
1196	else
1197		consistent_dma_mask = DMA_BIT_MASK(32);
1198
1199	if (sizeof(dma_addr_t) > 4) {
1200		const uint64_t required_mask =
1201		    dma_get_required_mask(&pdev->dev);
1202		if ((required_mask > DMA_BIT_MASK(32)) &&
1203		    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1204		    !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
1205			ioc->base_add_sg_single = &_base_add_sg_single_64;
1206			ioc->sge_size = sizeof(Mpi2SGESimple64_t);
1207			ioc->dma_mask = 64;
1208			goto out;
1209		}
1210	}
1211
1212	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1213	    && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1214		ioc->base_add_sg_single = &_base_add_sg_single_32;
1215		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
1216		ioc->dma_mask = 32;
1217	} else
1218		return -ENODEV;
1219
1220 out:
1221	si_meminfo(&s);
1222	printk(MPT2SAS_INFO_FMT
1223	    "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
1224	    ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));
1225
1226	return 0;
1227}
1228
1229static int
1230_base_change_consistent_dma_mask(struct MPT2SAS_ADAPTER *ioc,
1231				  struct pci_dev *pdev)
1232{
1233	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
1234		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1235			return -ENODEV;
1236	}
1237	return 0;
1238}
1239/**
 * _base_check_enable_msix - checks whether the controller is MSI-X capable.
 * @ioc: per adapter object
 *
 * Check to see if the card is capable of MSI-X, and set the number
 * of available msix vectors.
1245 */
1246static int
1247_base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
1248{
1249	int base;
1250	u16 message_control;
1251
1252
	/* Check whether the controller is a SAS2008 B0 controller;
	   if so, use IO-APIC instead of MSIX */
1255	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
1256	    ioc->pdev->revision == 0x01) {
1257		return -EINVAL;
1258	}
1259
1260	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
1261	if (!base) {
1262		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not "
1263		    "supported\n", ioc->name));
1264		return -EINVAL;
1265	}
1266
1267	/* get msix vector count */
1268	/* NUMA_IO not supported for older controllers */
1269	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
1270	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
1271	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
1272	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
1273	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
1274	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
1275	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
1276		ioc->msix_vector_count = 1;
1277	else {
1278		pci_read_config_word(ioc->pdev, base + 2, &message_control);
1279		ioc->msix_vector_count = (message_control & 0x3FF) + 1;
1280	}
1281	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
1282	    "vector_count(%d)\n", ioc->name, ioc->msix_vector_count));
1283
1284	return 0;
1285}
1286
1287/**
1288 * _base_free_irq - free irq
1289 * @ioc: per adapter object
1290 *
1291 * Freeing respective reply_queue from the list.
1292 */
1293static void
1294_base_free_irq(struct MPT2SAS_ADAPTER *ioc)
1295{
1296	struct adapter_reply_queue *reply_q, *next;
1297
1298	if (list_empty(&ioc->reply_queue_list))
1299		return;
1300
1301	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1302		list_del(&reply_q->list);
1303		irq_set_affinity_hint(reply_q->vector, NULL);
1304		free_cpumask_var(reply_q->affinity_hint);
1305		synchronize_irq(reply_q->vector);
1306		free_irq(reply_q->vector, reply_q);
1307		kfree(reply_q);
1308	}
1309}
1310
1311/**
1312 * _base_request_irq - request irq
1313 * @ioc: per adapter object
1314 * @index: msix index into vector table
1315 * @vector: irq vector
1316 *
1317 * Inserting respective reply_queue into the list.
1318 */
1319static int
1320_base_request_irq(struct MPT2SAS_ADAPTER *ioc, u8 index, u32 vector)
1321{
1322	struct adapter_reply_queue *reply_q;
1323	int r;
1324
1325	reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
1326	if (!reply_q) {
1327		printk(MPT2SAS_ERR_FMT "unable to allocate memory %d!\n",
1328		    ioc->name, (int)sizeof(struct adapter_reply_queue));
1329		return -ENOMEM;
1330	}
1331	reply_q->ioc = ioc;
1332	reply_q->msix_index = index;
1333	reply_q->vector = vector;
1334
	if (!alloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL)) {
		kfree(reply_q);
		return -ENOMEM;
	}
	cpumask_clear(reply_q->affinity_hint);
1338
1339	atomic_set(&reply_q->busy, 0);
1340	if (ioc->msix_enable)
1341		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
1342		    MPT2SAS_DRIVER_NAME, ioc->id, index);
1343	else
1344		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
1345		    MPT2SAS_DRIVER_NAME, ioc->id);
1346	r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
1347	    reply_q);
	if (r) {
		printk(MPT2SAS_ERR_FMT "unable to allocate interrupt %d!\n",
		    reply_q->name, vector);
		free_cpumask_var(reply_q->affinity_hint);
		kfree(reply_q);
		return -EBUSY;
	}
1354
1355	INIT_LIST_HEAD(&reply_q->list);
1356	list_add_tail(&reply_q->list, &ioc->reply_queue_list);
1357	return 0;
1358}
1359
1360/**
1361 * _base_assign_reply_queues - assigning msix index for each cpu
1362 * @ioc: per adapter object
1363 *
 * The end user would need to set the affinity via /proc/irq/#/smp_affinity
 *
 * It would be nice if we could call irq_set_affinity, however it is not
 * an exported symbol
1368 */
1369static void
1370_base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc)
1371{
1372	unsigned int cpu, nr_cpus, nr_msix, index = 0;
1373	struct adapter_reply_queue *reply_q;
1374
1375	if (!_base_is_controller_msix_enabled(ioc))
1376		return;
1377
1378	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
1379
1380	nr_cpus = num_online_cpus();
1381	nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
1382					       ioc->facts.MaxMSIxVectors);
1383	if (!nr_msix)
1384		return;
1385
1386	cpu = cpumask_first(cpu_online_mask);
1387
1388	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1389
1390		unsigned int i, group = nr_cpus / nr_msix;
1391
1392		if (cpu >= nr_cpus)
1393			break;
1394
1395		if (index < nr_cpus % nr_msix)
1396			group++;
1397
1398		for (i = 0 ; i < group ; i++) {
1399			ioc->cpu_msix_table[cpu] = index;
1400			cpumask_or(reply_q->affinity_hint,
1401				   reply_q->affinity_hint, get_cpu_mask(cpu));
1402			cpu = cpumask_next(cpu, cpu_online_mask);
1403		}
1404
1405		if (irq_set_affinity_hint(reply_q->vector,
1406					   reply_q->affinity_hint))
1407			dinitprintk(ioc, pr_info(MPT2SAS_FMT
1408			    "error setting affinity hint for irq vector %d\n",
1409			    ioc->name, reply_q->vector));
1410		index++;
1411	}
1412}
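
/*
 * Worked example (illustrative): with 8 online CPUs and 3 MSI-X vectors,
 * group = 8 / 3 = 2 and 8 % 3 = 2, so the first two reply queues each take
 * one extra CPU:
 *
 *	msix index 0 -> CPUs 0-2
 *	msix index 1 -> CPUs 3-5
 *	msix index 2 -> CPUs 6-7
 *
 * cpu_msix_table[] then lets _base_get_msix_index() pick the reply queue
 * matching the submitting CPU.
 */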
1413
1414/**
1415 * _base_disable_msix - disables msix
1416 * @ioc: per adapter object
1417 *
1418 */
1419static void
1420_base_disable_msix(struct MPT2SAS_ADAPTER *ioc)
1421{
1422	if (ioc->msix_enable) {
1423		pci_disable_msix(ioc->pdev);
1424		ioc->msix_enable = 0;
1425	}
1426}
1427
1428/**
 * _base_enable_msix - enables msix, fall back to io_apic
1430 * @ioc: per adapter object
1431 *
1432 */
1433static int
1434_base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
1435{
1436	struct msix_entry *entries, *a;
1437	int r;
1438	int i;
1439	u8 try_msix = 0;
1440
1441	if (msix_disable == -1 || msix_disable == 0)
1442		try_msix = 1;
1443
1444	if (!try_msix)
1445		goto try_ioapic;
1446
1447	if (_base_check_enable_msix(ioc) != 0)
1448		goto try_ioapic;
1449
1450	ioc->reply_queue_count = min_t(int, ioc->cpu_count,
1451	    ioc->msix_vector_count);
1452
1453	if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
1454		max_msix_vectors = 8;
1455
1456	if (max_msix_vectors > 0) {
1457		ioc->reply_queue_count = min_t(int, max_msix_vectors,
1458		    ioc->reply_queue_count);
1459		ioc->msix_vector_count = ioc->reply_queue_count;
1460	} else if (max_msix_vectors == 0)
1461		goto try_ioapic;
1462
1463	printk(MPT2SAS_INFO_FMT
1464	"MSI-X vectors supported: %d, no of cores: %d, max_msix_vectors: %d\n",
1465	 ioc->name, ioc->msix_vector_count, ioc->cpu_count, max_msix_vectors);
1466
1467	entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
1468	    GFP_KERNEL);
1469	if (!entries) {
1470		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "kcalloc "
1471		    "failed @ at %s:%d/%s() !!!\n", ioc->name, __FILE__,
1472		    __LINE__, __func__));
1473		goto try_ioapic;
1474	}
1475
1476	for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
1477		a->entry = i;
1478
1479	r = pci_enable_msix_exact(ioc->pdev, entries, ioc->reply_queue_count);
1480	if (r) {
1481		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT
1482		    "pci_enable_msix_exact failed (r=%d) !!!\n", ioc->name, r));
1483		kfree(entries);
1484		goto try_ioapic;
1485	}
1486
1487	ioc->msix_enable = 1;
1488	for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
1489		r = _base_request_irq(ioc, i, a->vector);
1490		if (r) {
1491			_base_free_irq(ioc);
1492			_base_disable_msix(ioc);
1493			kfree(entries);
1494			goto try_ioapic;
1495		}
1496	}
1497
1498	kfree(entries);
1499	return 0;
1500
/* fall back to io_apic interrupt routing */
1502 try_ioapic:
1503
1504	ioc->reply_queue_count = 1;
1505	r = _base_request_irq(ioc, 0, ioc->pdev->irq);
1506
1507	return r;
1508}
1509
1510/**
1511 * mpt2sas_base_map_resources - map in controller resources (io/irq/memap)
1512 * @ioc: per adapter object
1513 *
1514 * Returns 0 for success, non-zero for failure.
1515 */
1516int
1517mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1518{
1519	struct pci_dev *pdev = ioc->pdev;
1520	u32 memap_sz;
1521	u32 pio_sz;
1522	int i, r = 0;
1523	u64 pio_chip = 0;
1524	u64 chip_phys = 0;
1525	struct adapter_reply_queue *reply_q;
1526
1527	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n",
1528	    ioc->name, __func__));
1529
1530	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
1531	if (pci_enable_device_mem(pdev)) {
1532		printk(MPT2SAS_WARN_FMT "pci_enable_device_mem: "
1533		    "failed\n", ioc->name);
1534		ioc->bars = 0;
1535		return -ENODEV;
1536	}
1537
1538
1539	if (pci_request_selected_regions(pdev, ioc->bars,
1540	    MPT2SAS_DRIVER_NAME)) {
1541		printk(MPT2SAS_WARN_FMT "pci_request_selected_regions: "
1542		    "failed\n", ioc->name);
1543		ioc->bars = 0;
1544		r = -ENODEV;
1545		goto out_fail;
1546	}
1547
1548	/* AER (Advanced Error Reporting) hooks */
1549	pci_enable_pcie_error_reporting(pdev);
1550
1551	pci_set_master(pdev);
1552
1553	if (_base_config_dma_addressing(ioc, pdev) != 0) {
1554		printk(MPT2SAS_WARN_FMT "no suitable DMA mask for %s\n",
1555		    ioc->name, pci_name(pdev));
1556		r = -ENODEV;
1557		goto out_fail;
1558	}
1559
1560	for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
1561		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1562			if (pio_sz)
1563				continue;
1564			pio_chip = (u64)pci_resource_start(pdev, i);
1565			pio_sz = pci_resource_len(pdev, i);
1566		} else {
1567			if (memap_sz)
1568				continue;
1569			/* verify memory resource is valid before using */
1570			if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
1571				ioc->chip_phys = pci_resource_start(pdev, i);
1572				chip_phys = (u64)ioc->chip_phys;
1573				memap_sz = pci_resource_len(pdev, i);
1574				ioc->chip = ioremap(ioc->chip_phys, memap_sz);
1575				if (ioc->chip == NULL) {
1576					printk(MPT2SAS_ERR_FMT "unable to map "
1577					    "adapter memory!\n", ioc->name);
1578					r = -EINVAL;
1579					goto out_fail;
1580				}
1581			}
1582		}
1583	}
1584
1585	_base_mask_interrupts(ioc);
1586
1587	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
1588	if (r)
1589		goto out_fail;
1590
1591	if (!ioc->rdpq_array_enable_assigned) {
1592		ioc->rdpq_array_enable = ioc->rdpq_array_capable;
1593		ioc->rdpq_array_enable_assigned = 1;
1594	}
1595
1596	r = _base_enable_msix(ioc);
1597	if (r)
1598		goto out_fail;
1599
1600	list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
1601		printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n",
1602		    reply_q->name,  ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
1603		    "IO-APIC enabled"), reply_q->vector);
1604
1605	printk(MPT2SAS_INFO_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
1606	    ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
1607	printk(MPT2SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n",
1608	    ioc->name, (unsigned long long)pio_chip, pio_sz);
1609
1610	/* Save PCI configuration state for recovery from PCI AER/EEH errors */
1611	pci_save_state(pdev);
1612
1613	return 0;
1614
1615 out_fail:
1616	if (ioc->chip_phys)
1617		iounmap(ioc->chip);
1618	ioc->chip_phys = 0;
1619	pci_release_selected_regions(ioc->pdev, ioc->bars);
1620	pci_disable_pcie_error_reporting(pdev);
1621	pci_disable_device(pdev);
1622	return r;
1623}
1624
1625/**
1626 * mpt2sas_base_get_msg_frame - obtain request mf pointer
1627 * @ioc: per adapter object
1628 * @smid: system request message index(smid zero is invalid)
1629 *
1630 * Returns virt pointer to message frame.
1631 */
1632void *
1633mpt2sas_base_get_msg_frame(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1634{
1635	return (void *)(ioc->request + (smid * ioc->request_sz));
1636}
1637
1638/**
1639 * mpt2sas_base_get_sense_buffer - obtain a sense buffer assigned to a mf request
1640 * @ioc: per adapter object
1641 * @smid: system request message index
1642 *
1643 * Returns virt pointer to sense buffer.
1644 */
1645void *
1646mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1647{
1648	return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
1649}
1650
1651/**
1652 * mpt2sas_base_get_sense_buffer_dma - obtain a sense buffer assigned to a mf request
1653 * @ioc: per adapter object
1654 * @smid: system request message index
1655 *
1656 * Returns phys pointer to the low 32bit address of the sense buffer.
1657 */
1658__le32
1659mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1660{
1661	return cpu_to_le32(ioc->sense_dma +
1662			((smid - 1) * SCSI_SENSE_BUFFERSIZE));
1663}
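
/*
 * Illustrative pairing (hedged): SCSI IO submitters typically stash the DMA
 * sense address in the request frame for the smid they were issued, e.g.:
 *
 *	mpi_request->SenseBufferLowAddress =
 *	    mpt2sas_base_get_sense_buffer_dma(ioc, smid);
 */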
1664
1665/**
1666 * mpt2sas_base_get_reply_virt_addr - obtain reply frames virt address
1667 * @ioc: per adapter object
1668 * @phys_addr: lower 32 physical addr of the reply
1669 *
1670 * Converts 32bit lower physical addr into a virt address.
1671 */
1672void *
1673mpt2sas_base_get_reply_virt_addr(struct MPT2SAS_ADAPTER *ioc, u32 phys_addr)
1674{
1675	if (!phys_addr)
1676		return NULL;
1677	return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
1678}
1679
1680/**
1681 * mpt2sas_base_get_smid - obtain a free smid from internal queue
1682 * @ioc: per adapter object
1683 * @cb_idx: callback index
1684 *
1685 * Returns smid (zero is invalid)
1686 */
1687u16
1688mpt2sas_base_get_smid(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx)
1689{
1690	unsigned long flags;
1691	struct request_tracker *request;
1692	u16 smid;
1693
1694	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1695	if (list_empty(&ioc->internal_free_list)) {
1696		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1697		printk(MPT2SAS_ERR_FMT "%s: smid not available\n",
1698		    ioc->name, __func__);
1699		return 0;
1700	}
1701
1702	request = list_entry(ioc->internal_free_list.next,
1703	    struct request_tracker, tracker_list);
1704	request->cb_idx = cb_idx;
1705	smid = request->smid;
1706	list_del(&request->tracker_list);
1707	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1708	return smid;
1709}
1710
1711/**
1712 * mpt2sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
1713 * @ioc: per adapter object
1714 * @cb_idx: callback index
1715 * @scmd: pointer to scsi command object
1716 *
1717 * Returns smid (zero is invalid)
1718 */
1719u16
1720mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx,
1721    struct scsi_cmnd *scmd)
1722{
1723	unsigned long flags;
1724	struct scsiio_tracker *request;
1725	u16 smid;
1726
1727	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1728	if (list_empty(&ioc->free_list)) {
1729		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1730		printk(MPT2SAS_ERR_FMT "%s: smid not available\n",
1731		    ioc->name, __func__);
1732		return 0;
1733	}
1734
1735	request = list_entry(ioc->free_list.next,
1736	    struct scsiio_tracker, tracker_list);
1737	request->scmd = scmd;
1738	request->cb_idx = cb_idx;
1739	smid = request->smid;
1740	list_del(&request->tracker_list);
1741	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1742	return smid;
1743}
1744
1745/**
1746 * mpt2sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
1747 * @ioc: per adapter object
1748 * @cb_idx: callback index
1749 *
1750 * Returns smid (zero is invalid)
1751 */
1752u16
1753mpt2sas_base_get_smid_hpr(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx)
1754{
1755	unsigned long flags;
1756	struct request_tracker *request;
1757	u16 smid;
1758
1759	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1760	if (list_empty(&ioc->hpr_free_list)) {
1761		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1762		return 0;
1763	}
1764
1765	request = list_entry(ioc->hpr_free_list.next,
1766	    struct request_tracker, tracker_list);
1767	request->cb_idx = cb_idx;
1768	smid = request->smid;
1769	list_del(&request->tracker_list);
1770	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1771	return smid;
1772}
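
/*
 * Illustrative flow (hedged, simplified): internal commands issued through
 * this file generally follow the pattern below, with completion signalled by
 * mpt2sas_base_done():
 *
 *	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
 *	if (!smid)
 *		return -EAGAIN;
 *	request = mpt2sas_base_get_msg_frame(ioc, smid);
 *	memset(request, 0, ioc->request_sz);
 *	... fill in the MPI request ...
 *	init_completion(&ioc->base_cmds.done);
 *	mpt2sas_base_put_smid_default(ioc, smid);
 *	wait_for_completion_timeout(&ioc->base_cmds.done, timeout*HZ);
 */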
1773
1774
1775/**
1776 * mpt2sas_base_free_smid - put smid back on free_list
1777 * @ioc: per adapter object
1778 * @smid: system request message index
1779 *
1780 * Return nothing.
1781 */
1782void
1783mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1784{
1785	unsigned long flags;
1786	int i;
1787	struct chain_tracker *chain_req, *next;
1788
1789	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1790	if (smid < ioc->hi_priority_smid) {
1791		/* scsiio queue */
1792		i = smid - 1;
1793		if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
1794			list_for_each_entry_safe(chain_req, next,
1795			    &ioc->scsi_lookup[i].chain_list, tracker_list) {
1796				list_del_init(&chain_req->tracker_list);
1797				list_add(&chain_req->tracker_list,
1798				    &ioc->free_chain_list);
1799			}
1800		}
1801		ioc->scsi_lookup[i].cb_idx = 0xFF;
1802		ioc->scsi_lookup[i].scmd = NULL;
1803		ioc->scsi_lookup[i].direct_io = 0;
1804		list_add(&ioc->scsi_lookup[i].tracker_list,
1805		    &ioc->free_list);
1806		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1807
1808		/*
1809		 * See _wait_for_commands_to_complete() call with regards
1810		 * to this code.
1811		 */
1812		if (ioc->shost_recovery && ioc->pending_io_count) {
1813			if (ioc->pending_io_count == 1)
1814				wake_up(&ioc->reset_wq);
1815			ioc->pending_io_count--;
1816		}
1817		return;
1818	} else if (smid < ioc->internal_smid) {
1819		/* hi-priority */
1820		i = smid - ioc->hi_priority_smid;
1821		ioc->hpr_lookup[i].cb_idx = 0xFF;
1822		list_add(&ioc->hpr_lookup[i].tracker_list,
1823		    &ioc->hpr_free_list);
1824	} else if (smid <= ioc->hba_queue_depth) {
1825		/* internal queue */
1826		i = smid - ioc->internal_smid;
1827		ioc->internal_lookup[i].cb_idx = 0xFF;
1828		list_add(&ioc->internal_lookup[i].tracker_list,
1829		    &ioc->internal_free_list);
1830	}
1831	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1832}
1833
1834/**
1835 * _base_writeq - 64 bit write to MMIO
1837 * @b: data payload
1838 * @addr: address in MMIO space
1839 * @writeq_lock: spin lock
1840 *
1841 * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
 * care of 32 bit environments where it is not guaranteed that the entire word
 * is sent in one transfer.
1844 */
1845#ifndef writeq
1846static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
1847    spinlock_t *writeq_lock)
1848{
1849	unsigned long flags;
1850	__u64 data_out = cpu_to_le64(b);
1851
1852	spin_lock_irqsave(writeq_lock, flags);
1853	writel((u32)(data_out), addr);
1854	writel((u32)(data_out >> 32), (addr + 4));
1855	spin_unlock_irqrestore(writeq_lock, flags);
1856}
1857#else
1858static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
1859    spinlock_t *writeq_lock)
1860{
1861	writeq(cpu_to_le64(b), addr);
1862}
1863#endif
1864
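/**
 * _base_get_msix_index - get the msix index
 * @ioc: per adapter object
 *
 * Returns the per-cpu MSI-X index from cpu_msix_table, used to select the
 * reply queue for requests issued on the current cpu.
 */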
1865static inline u8
1866_base_get_msix_index(struct MPT2SAS_ADAPTER *ioc)
1867{
1868	return ioc->cpu_msix_table[raw_smp_processor_id()];
1869}
1870
1871/**
1872 * mpt2sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
1873 * @ioc: per adapter object
1874 * @smid: system request message index
1875 * @handle: device handle
1876 *
1877 * Return nothing.
1878 */
1879void
1880mpt2sas_base_put_smid_scsi_io(struct MPT2SAS_ADAPTER *ioc, u16 smid, u16 handle)
1881{
1882	Mpi2RequestDescriptorUnion_t descriptor;
1883	u64 *request = (u64 *)&descriptor;
1884
1885
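	/*
	 * Build the SCSI IO request descriptor and post it to the IOC
	 * through the request descriptor post registers.
	 */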
1886	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1887	descriptor.SCSIIO.MSIxIndex =  _base_get_msix_index(ioc);
1888	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
1889	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
1890	descriptor.SCSIIO.LMID = 0;
1891	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1892	    &ioc->scsi_lookup_lock);
1893}
1894
1895
1896/**
1897 * mpt2sas_base_put_smid_hi_priority - send Task Management request to firmware
1898 * @ioc: per adapter object
1899 * @smid: system request message index
1900 *
1901 * Return nothing.
1902 */
1903void
1904mpt2sas_base_put_smid_hi_priority(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1905{
1906	Mpi2RequestDescriptorUnion_t descriptor;
1907	u64 *request = (u64 *)&descriptor;
1908
1909	descriptor.HighPriority.RequestFlags =
1910	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1911	descriptor.HighPriority.MSIxIndex =  0;
1912	descriptor.HighPriority.SMID = cpu_to_le16(smid);
1913	descriptor.HighPriority.LMID = 0;
1914	descriptor.HighPriority.Reserved1 = 0;
1915	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1916	    &ioc->scsi_lookup_lock);
1917}
1918
1919/**
1920 * mpt2sas_base_put_smid_default - Default, primarily used for config pages
1921 * @ioc: per adapter object
1922 * @smid: system request message index
1923 *
1924 * Return nothing.
1925 */
1926void
1927mpt2sas_base_put_smid_default(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1928{
1929	Mpi2RequestDescriptorUnion_t descriptor;
1930	u64 *request = (u64 *)&descriptor;
1931
1932	descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
1933	descriptor.Default.MSIxIndex =  _base_get_msix_index(ioc);
1934	descriptor.Default.SMID = cpu_to_le16(smid);
1935	descriptor.Default.LMID = 0;
1936	descriptor.Default.DescriptorTypeDependent = 0;
1937	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1938	    &ioc->scsi_lookup_lock);
1939}
1940
1941/**
1942 * mpt2sas_base_put_smid_target_assist - send Target Assist/Status to firmware
1943 * @ioc: per adapter object
1944 * @smid: system request message index
1945 * @io_index: value used to track the IO
1946 *
1947 * Return nothing.
1948 */
1949void
1950mpt2sas_base_put_smid_target_assist(struct MPT2SAS_ADAPTER *ioc, u16 smid,
1951    u16 io_index)
1952{
1953	Mpi2RequestDescriptorUnion_t descriptor;
1954	u64 *request = (u64 *)&descriptor;
1955
1956	descriptor.SCSITarget.RequestFlags =
1957	    MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET;
1958	descriptor.SCSITarget.MSIxIndex =  _base_get_msix_index(ioc);
1959	descriptor.SCSITarget.SMID = cpu_to_le16(smid);
1960	descriptor.SCSITarget.LMID = 0;
1961	descriptor.SCSITarget.IoIndex = cpu_to_le16(io_index);
1962	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1963	    &ioc->scsi_lookup_lock);
1964}
1965
1966/**
 * _base_display_dell_branding - Display branding string
1968 * @ioc: per adapter object
1969 *
1970 * Return nothing.
1971 */
1972static void
1973_base_display_dell_branding(struct MPT2SAS_ADAPTER *ioc)
1974{
1975	char dell_branding[MPT2SAS_DELL_BRANDING_SIZE];
1976
1977	if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_DELL)
1978		return;
1979
1980	memset(dell_branding, 0, MPT2SAS_DELL_BRANDING_SIZE);
1981	switch (ioc->pdev->subsystem_device) {
1982	case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
1983		strncpy(dell_branding, MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING,
1984		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1985		break;
1986	case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
1987		strncpy(dell_branding, MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING,
1988		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1989		break;
1990	case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
1991		strncpy(dell_branding,
1992		    MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING,
1993		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1994		break;
1995	case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
1996		strncpy(dell_branding,
1997		    MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING,
1998		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1999		break;
2000	case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
2001		strncpy(dell_branding,
2002		    MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING,
2003		    MPT2SAS_DELL_BRANDING_SIZE - 1);
2004		break;
2005	case MPT2SAS_DELL_PERC_H200_SSDID:
2006		strncpy(dell_branding, MPT2SAS_DELL_PERC_H200_BRANDING,
2007		    MPT2SAS_DELL_BRANDING_SIZE - 1);
2008		break;
2009	case MPT2SAS_DELL_6GBPS_SAS_SSDID:
2010		strncpy(dell_branding, MPT2SAS_DELL_6GBPS_SAS_BRANDING,
2011		    MPT2SAS_DELL_BRANDING_SIZE - 1);
2012		break;
2013	default:
2014		sprintf(dell_branding, "0x%4X", ioc->pdev->subsystem_device);
2015		break;
2016	}
2017
2018	printk(MPT2SAS_INFO_FMT "%s: Vendor(0x%04X), Device(0x%04X),"
2019	    " SSVID(0x%04X), SSDID(0x%04X)\n", ioc->name, dell_branding,
2020	    ioc->pdev->vendor, ioc->pdev->device, ioc->pdev->subsystem_vendor,
2021	    ioc->pdev->subsystem_device);
2022}
2023
2024/**
2025 * _base_display_intel_branding - Display branding string
2026 * @ioc: per adapter object
2027 *
2028 * Return nothing.
2029 */
2030static void
2031_base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
2032{
2033	if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
2034		return;
2035
2036	switch (ioc->pdev->device) {
2037	case MPI2_MFGPAGE_DEVID_SAS2008:
2038		switch (ioc->pdev->subsystem_device) {
2039		case MPT2SAS_INTEL_RMS2LL080_SSDID:
2040			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2041			    MPT2SAS_INTEL_RMS2LL080_BRANDING);
2042			break;
2043		case MPT2SAS_INTEL_RMS2LL040_SSDID:
2044			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2045			    MPT2SAS_INTEL_RMS2LL040_BRANDING);
2046			break;
2047		case MPT2SAS_INTEL_SSD910_SSDID:
2048			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2049			    MPT2SAS_INTEL_SSD910_BRANDING);
2050			break;
2051		default:
2052			break;
		}
		break;
	case MPI2_MFGPAGE_DEVID_SAS2308_2:
2055		switch (ioc->pdev->subsystem_device) {
2056		case MPT2SAS_INTEL_RS25GB008_SSDID:
2057			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2058			    MPT2SAS_INTEL_RS25GB008_BRANDING);
2059			break;
2060		case MPT2SAS_INTEL_RMS25JB080_SSDID:
2061			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2062			    MPT2SAS_INTEL_RMS25JB080_BRANDING);
2063			break;
2064		case MPT2SAS_INTEL_RMS25JB040_SSDID:
2065			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2066			    MPT2SAS_INTEL_RMS25JB040_BRANDING);
2067			break;
2068		case MPT2SAS_INTEL_RMS25KB080_SSDID:
2069			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2070			    MPT2SAS_INTEL_RMS25KB080_BRANDING);
2071			break;
2072		case MPT2SAS_INTEL_RMS25KB040_SSDID:
2073			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2074			    MPT2SAS_INTEL_RMS25KB040_BRANDING);
2075			break;
2076		case MPT2SAS_INTEL_RMS25LB040_SSDID:
2077			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2078			    MPT2SAS_INTEL_RMS25LB040_BRANDING);
2079			break;
2080		case MPT2SAS_INTEL_RMS25LB080_SSDID:
2081			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2082			    MPT2SAS_INTEL_RMS25LB080_BRANDING);
2083			break;
2084		default:
2085			break;
2086		}
2087	default:
2088		break;
2089	}
2090}
2091
2092/**
2093 * _base_display_hp_branding - Display branding string
2094 * @ioc: per adapter object
2095 *
2096 * Return nothing.
2097 */
2098static void
2099_base_display_hp_branding(struct MPT2SAS_ADAPTER *ioc)
2100{
2101	if (ioc->pdev->subsystem_vendor != MPT2SAS_HP_3PAR_SSVID)
2102		return;
2103
2104	switch (ioc->pdev->device) {
2105	case MPI2_MFGPAGE_DEVID_SAS2004:
2106		switch (ioc->pdev->subsystem_device) {
2107		case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
2108			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2109			    MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
2110			break;
2111		default:
2112			break;
		}
		break;
	case MPI2_MFGPAGE_DEVID_SAS2308_2:
2115		switch (ioc->pdev->subsystem_device) {
2116		case MPT2SAS_HP_2_4_INTERNAL_SSDID:
2117			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2118			    MPT2SAS_HP_2_4_INTERNAL_BRANDING);
2119			break;
2120		case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
2121			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2122			    MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
2123			break;
2124		case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
2125			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2126			    MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
2127			break;
2128		case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
2129			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2130			    MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
2131			break;
2132		default:
2133			break;
2134		}
2135	default:
2136		break;
2137	}
2138}
2139
2140/**
 * _base_display_ioc_capabilities - Display IOC's capabilities.
2142 * @ioc: per adapter object
2143 *
2144 * Return nothing.
2145 */
2146static void
2147_base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
2148{
2149	int i = 0;
2150	char desc[16];
2151	u32 iounit_pg1_flags;
2152	u32 bios_version;
2153
2154	bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
	strncpy(desc, ioc->manu_pg0.ChipName, sizeof(desc) - 1);
	desc[sizeof(desc) - 1] = '\0';
2156	printk(MPT2SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "
2157	   "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
2158	    ioc->name, desc,
2159	   (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
2160	   (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
2161	   (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
2162	   ioc->facts.FWVersion.Word & 0x000000FF,
2163	   ioc->pdev->revision,
2164	   (bios_version & 0xFF000000) >> 24,
2165	   (bios_version & 0x00FF0000) >> 16,
2166	   (bios_version & 0x0000FF00) >> 8,
2167	    bios_version & 0x000000FF);
2168
2169	_base_display_dell_branding(ioc);
2170	_base_display_intel_branding(ioc);
2171	_base_display_hp_branding(ioc);
2172
2173	printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name);
2174
2175	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
2176		printk("Initiator");
2177		i++;
2178	}
2179
2180	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
2181		printk("%sTarget", i ? "," : "");
2182		i++;
2183	}
2184
2185	i = 0;
2186	printk("), ");
2187	printk("Capabilities=(");
2188
2189	if (!ioc->hide_ir_msg) {
2190		if (ioc->facts.IOCCapabilities &
2191		    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
2192			printk("Raid");
2193			i++;
2194		}
2195	}
2196
2197	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
2198		printk("%sTLR", i ? "," : "");
2199		i++;
2200	}
2201
2202	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
2203		printk("%sMulticast", i ? "," : "");
2204		i++;
2205	}
2206
2207	if (ioc->facts.IOCCapabilities &
2208	    MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
2209		printk("%sBIDI Target", i ? "," : "");
2210		i++;
2211	}
2212
2213	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
2214		printk("%sEEDP", i ? "," : "");
2215		i++;
2216	}
2217
2218	if (ioc->facts.IOCCapabilities &
2219	    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
2220		printk("%sSnapshot Buffer", i ? "," : "");
2221		i++;
2222	}
2223
2224	if (ioc->facts.IOCCapabilities &
2225	    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
2226		printk("%sDiag Trace Buffer", i ? "," : "");
2227		i++;
2228	}
2229
2230	if (ioc->facts.IOCCapabilities &
2231	    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
		printk("%sDiag Extended Buffer", i ? "," : "");
2233		i++;
2234	}
2235
2236	if (ioc->facts.IOCCapabilities &
2237	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
2238		printk("%sTask Set Full", i ? "," : "");
2239		i++;
2240	}
2241
2242	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
2243	if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
2244		printk("%sNCQ", i ? "," : "");
2245		i++;
2246	}
2247
2248	printk(")\n");
2249}
2250
2251/**
2252 * mpt2sas_base_update_missing_delay - change the missing delay timers
2253 * @ioc: per adapter object
2254 * @device_missing_delay: amount of time till device is reported missing
2255 * @io_missing_delay: interval IO is returned when there is a missing device
2256 *
2257 * Return nothing.
2258 *
 * The delays are passed in on the command line. This function modifies the
 * device missing delay, as well as the io missing delay, and should be
 * called at driver load time.
2262 */
2263void
2264mpt2sas_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
2265	u16 device_missing_delay, u8 io_missing_delay)
2266{
	u16 dmd, dmd_new, dmd_original;
2268	u8 io_missing_delay_original;
2269	u16 sz;
2270	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
2271	Mpi2ConfigReply_t mpi_reply;
2272	u8 num_phys = 0;
2273	u16 ioc_status;
2274
2275	mpt2sas_config_get_number_hba_phys(ioc, &num_phys);
2276	if (!num_phys)
2277		return;
2278
2279	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
2280	    sizeof(Mpi2SasIOUnit1PhyData_t));
2281	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
2282	if (!sas_iounit_pg1) {
2283		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
2284		    ioc->name, __FILE__, __LINE__, __func__);
2285		goto out;
2286	}
2287	if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
2288	    sas_iounit_pg1, sz))) {
2289		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
2290		    ioc->name, __FILE__, __LINE__, __func__);
2291		goto out;
2292	}
2293	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2294	    MPI2_IOCSTATUS_MASK;
2295	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2296		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
2297		    ioc->name, __FILE__, __LINE__, __func__);
2298		goto out;
2299	}
2300
2301	/* device missing delay */
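	/*
	 * When the UNIT_16 flag is set the delay is stored in units of
	 * 16 seconds, otherwise in units of 1 second.
	 */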
2302	dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
2303	if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
2304		dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
2305	else
2306		dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
	dmd_original = dmd;
2308	if (device_missing_delay > 0x7F) {
2309		dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
2310		    device_missing_delay;
2311		dmd = dmd / 16;
2312		dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
2313	} else
2314		dmd = device_missing_delay;
2315	sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
2316
2317	/* io missing delay */
2318	io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
2319	sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
2320
2321	if (!mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
2322	    sz)) {
2323		if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
2324			dmd_new = (dmd &
2325			    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
2326		else
2327			dmd_new =
2328		    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
2329		printk(MPT2SAS_INFO_FMT "device_missing_delay: old(%d), "
	    "new(%d)\n", ioc->name, dmd_original, dmd_new);
2331		printk(MPT2SAS_INFO_FMT "ioc_missing_delay: old(%d), "
2332		    "new(%d)\n", ioc->name, io_missing_delay_original,
2333		    io_missing_delay);
2334		ioc->device_missing_delay = dmd_new;
2335		ioc->io_missing_delay = io_missing_delay;
2336	}
2337
2338out:
2339	kfree(sas_iounit_pg1);
2340}
2341
2342/**
2343 * _base_static_config_pages - static start of day config pages
2344 * @ioc: per adapter object
2345 *
2346 * Return nothing.
2347 */
2348static void
2349_base_static_config_pages(struct MPT2SAS_ADAPTER *ioc)
2350{
2351	Mpi2ConfigReply_t mpi_reply;
2352	u32 iounit_pg1_flags;
2353
2354	mpt2sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
2355	if (ioc->ir_firmware)
2356		mpt2sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
2357		    &ioc->manu_pg10);
2358	mpt2sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
2359	mpt2sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
2360	mpt2sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
2361	mpt2sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
2362	mpt2sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
2363	mpt2sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
2364	_base_display_ioc_capabilities(ioc);
2365
2366	/*
2367	 * Enable task_set_full handling in iounit_pg1 when the
	 * facts capabilities indicate that it is supported.
2369	 */
2370	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
2371	if ((ioc->facts.IOCCapabilities &
2372	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
2373		iounit_pg1_flags &=
2374		    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
2375	else
2376		iounit_pg1_flags |=
2377		    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
2378	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
2379	mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
2380
2381	if (ioc->iounit_pg8.NumSensors)
2382		ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
2383}
2384
2385/**
2386 * _base_release_memory_pools - release memory
2387 * @ioc: per adapter object
2388 *
2389 * Free memory allocated from _base_allocate_memory_pools.
2390 *
2391 * Return nothing.
2392 */
2393static void
2394_base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
2395{
2396	int i = 0;
2397	struct reply_post_struct *rps;
2398
2399	dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2400	    __func__));
2401
2402	if (ioc->request) {
2403		pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
2404		    ioc->request,  ioc->request_dma);
2405		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "request_pool(0x%p)"
2406		    ": free\n", ioc->name, ioc->request));
2407		ioc->request = NULL;
2408	}
2409
2410	if (ioc->sense) {
2411		pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
2412		if (ioc->sense_dma_pool)
2413			pci_pool_destroy(ioc->sense_dma_pool);
2414		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "sense_pool(0x%p)"
2415		    ": free\n", ioc->name, ioc->sense));
2416		ioc->sense = NULL;
2417	}
2418
2419	if (ioc->reply) {
2420		pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
2421		if (ioc->reply_dma_pool)
2422			pci_pool_destroy(ioc->reply_dma_pool);
2423		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_pool(0x%p)"
2424		     ": free\n", ioc->name, ioc->reply));
2425		ioc->reply = NULL;
2426	}
2427
2428	if (ioc->reply_free) {
2429		pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
2430		    ioc->reply_free_dma);
2431		if (ioc->reply_free_dma_pool)
2432			pci_pool_destroy(ioc->reply_free_dma_pool);
2433		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free_pool"
2434		    "(0x%p): free\n", ioc->name, ioc->reply_free));
2435		ioc->reply_free = NULL;
2436	}
2437
2438	if (ioc->reply_post) {
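		/*
		 * With the RDPQ array enabled there is one reply post free
		 * pool per reply queue; otherwise only the first entry is
		 * used.
		 */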
2439		do {
2440			rps = &ioc->reply_post[i];
2441			if (rps->reply_post_free) {
2442				pci_pool_free(
2443				    ioc->reply_post_free_dma_pool,
2444				    rps->reply_post_free,
2445				    rps->reply_post_free_dma);
2446				dexitprintk(ioc, printk(MPT2SAS_INFO_FMT
2447				    "reply_post_free_pool(0x%p): free\n",
2448				    ioc->name, rps->reply_post_free));
2449				rps->reply_post_free = NULL;
2450			}
2451		} while (ioc->rdpq_array_enable &&
2452			   (++i < ioc->reply_queue_count));
2453
2454		if (ioc->reply_post_free_dma_pool)
2455			pci_pool_destroy(ioc->reply_post_free_dma_pool);
2456		kfree(ioc->reply_post);
2457	}
2458
2459	if (ioc->config_page) {
2460		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT
2461		    "config_page(0x%p): free\n", ioc->name,
2462		    ioc->config_page));
2463		pci_free_consistent(ioc->pdev, ioc->config_page_sz,
2464		    ioc->config_page, ioc->config_page_dma);
2465	}
2466
2467	if (ioc->scsi_lookup) {
2468		free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
2469		ioc->scsi_lookup = NULL;
2470	}
2471	kfree(ioc->hpr_lookup);
2472	kfree(ioc->internal_lookup);
2473	if (ioc->chain_lookup) {
2474		for (i = 0; i < ioc->chain_depth; i++) {
2475			if (ioc->chain_lookup[i].chain_buffer)
2476				pci_pool_free(ioc->chain_dma_pool,
2477				    ioc->chain_lookup[i].chain_buffer,
2478				    ioc->chain_lookup[i].chain_buffer_dma);
2479		}
2480		if (ioc->chain_dma_pool)
2481			pci_pool_destroy(ioc->chain_dma_pool);
2482		free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
2483		ioc->chain_lookup = NULL;
2484	}
2485}
2486
2487
2488/**
2489 * _base_allocate_memory_pools - allocate start of day memory pools
2490 * @ioc: per adapter object
2491 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2492 *
2493 * Returns 0 success, anything else error
2494 */
2495static int
2496_base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
2497{
2498	struct mpt2sas_facts *facts;
2499	u16 max_sge_elements;
2500	u16 chains_needed_per_io;
2501	u32 sz, total_sz, reply_post_free_sz;
2502	u32 retry_sz;
2503	u16 max_request_credit;
2504	int i;
2505
2506	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2507	    __func__));
2508
2509	retry_sz = 0;
2510	facts = &ioc->facts;
2511
2512	/* command line tunables  for max sgl entries */
2513	if (max_sgl_entries != -1) {
2514		ioc->shost->sg_tablesize =  min_t(unsigned short,
2515			     max_sgl_entries, SCSI_MAX_SG_CHAIN_SEGMENTS);
2516		if (ioc->shost->sg_tablesize > MPT2SAS_SG_DEPTH)
2517			printk(MPT2SAS_WARN_FMT
2518			 "sg_tablesize(%u) is bigger than kernel defined"
2519			 " SCSI_MAX_SG_SEGMENTS(%u)\n", ioc->name,
2520			  ioc->shost->sg_tablesize, MPT2SAS_SG_DEPTH);
2521	} else {
2522		ioc->shost->sg_tablesize = MPT2SAS_SG_DEPTH;
2523	}
2524
2525	/* command line tunables  for max controller queue depth */
2526	if (max_queue_depth != -1 && max_queue_depth != 0) {
2527		max_request_credit = min_t(u16, max_queue_depth +
2528			ioc->hi_priority_depth + ioc->internal_depth,
2529			facts->RequestCredit);
2530		if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
2531			max_request_credit =  MAX_HBA_QUEUE_DEPTH;
2532	} else
2533		max_request_credit = min_t(u16, facts->RequestCredit,
2534		    MAX_HBA_QUEUE_DEPTH);
2535
2536	ioc->hba_queue_depth = max_request_credit;
2537	ioc->hi_priority_depth = facts->HighPriorityCredit;
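	/* internal queue depth: frames reserved for driver internal commands */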
2538	ioc->internal_depth = ioc->hi_priority_depth + 5;
2539
2540	/* request frame size */
2541	ioc->request_sz = facts->IOCRequestFrameSize * 4;
2542
2543	/* reply frame size */
2544	ioc->reply_sz = facts->ReplyFrameSize * 4;
2545
2546 retry_allocation:
2547	total_sz = 0;
2548	/* calculate number of sg elements left over in the 1st frame */
2549	max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
2550	    sizeof(Mpi2SGEIOUnion_t)) + ioc->sge_size);
2551	ioc->max_sges_in_main_message = max_sge_elements/ioc->sge_size;
2552
2553	/* now do the same for a chain buffer */
2554	max_sge_elements = ioc->request_sz - ioc->sge_size;
2555	ioc->max_sges_in_chain_message = max_sge_elements/ioc->sge_size;
2556
2557	ioc->chain_offset_value_for_main_message =
2558	    ((sizeof(Mpi2SCSIIORequest_t) - sizeof(Mpi2SGEIOUnion_t)) +
2559	     (ioc->max_sges_in_chain_message * ioc->sge_size)) / 4;
2560
2561	/*
2562	 *  MPT2SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
2563	 */
2564	chains_needed_per_io = ((ioc->shost->sg_tablesize -
2565	   ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
2566	    + 1;
2567	if (chains_needed_per_io > facts->MaxChainDepth) {
2568		chains_needed_per_io = facts->MaxChainDepth;
2569		ioc->shost->sg_tablesize = min_t(u16,
2570		ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
2571		* chains_needed_per_io), ioc->shost->sg_tablesize);
2572	}
2573	ioc->chains_needed_per_io = chains_needed_per_io;
2574
2575	/* reply free queue sizing - taking into account for 64 FW events */
2576	ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
2577
2578	/* calculate reply descriptor post queue depth */
2579	ioc->reply_post_queue_depth = ioc->hba_queue_depth +
2580					ioc->reply_free_queue_depth +  1;
2581	/* align the reply post queue on the next 16 count boundary */
2582	if (ioc->reply_post_queue_depth % 16)
2583		ioc->reply_post_queue_depth += 16 -
2584			(ioc->reply_post_queue_depth % 16);
2585
2586
2587	if (ioc->reply_post_queue_depth >
2588	    facts->MaxReplyDescriptorPostQueueDepth) {
2589		ioc->reply_post_queue_depth =
2590			facts->MaxReplyDescriptorPostQueueDepth -
2591		    (facts->MaxReplyDescriptorPostQueueDepth % 16);
2592		ioc->hba_queue_depth =
2593			((ioc->reply_post_queue_depth - 64) / 2) - 1;
2594		ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
2595	}
2596
2597	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
2598	    "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
2599	    "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
2600	    ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
2601	    ioc->chains_needed_per_io));
2602
2603	/* reply post queue, 16 byte align */
2604	reply_post_free_sz = ioc->reply_post_queue_depth *
2605	    sizeof(Mpi2DefaultReplyDescriptor_t);
2606
2607	sz = reply_post_free_sz;
2608	if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
2609		sz *= ioc->reply_queue_count;
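	/*
	 * Without the RDPQ array a single contiguous pool backs every reply
	 * queue, so it must be sized for all of them; with RDPQ each reply
	 * queue gets its own pool of reply_post_free_sz bytes.
	 */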
2610
2611	ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
2612	    (ioc->reply_queue_count):1,
2613	    sizeof(struct reply_post_struct), GFP_KERNEL);
2614
2615	if (!ioc->reply_post) {
2616		printk(MPT2SAS_ERR_FMT "reply_post_free pool: kcalloc failed\n",
2617			ioc->name);
2618		goto out;
2619	}
2620	ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
2621	    ioc->pdev, sz, 16, 0);
2622	if (!ioc->reply_post_free_dma_pool) {
2623		printk(MPT2SAS_ERR_FMT
2624		 "reply_post_free pool: pci_pool_create failed\n",
2625		 ioc->name);
2626		goto out;
2627	}
2628	i = 0;
2629	do {
2630		ioc->reply_post[i].reply_post_free =
2631		    pci_pool_alloc(ioc->reply_post_free_dma_pool,
2632		    GFP_KERNEL,
2633		    &ioc->reply_post[i].reply_post_free_dma);
2634		if (!ioc->reply_post[i].reply_post_free) {
2635			printk(MPT2SAS_ERR_FMT
2636			"reply_post_free pool: pci_pool_alloc failed\n",
2637			ioc->name);
2638			goto out;
2639		}
2640		memset(ioc->reply_post[i].reply_post_free, 0, sz);
2641		dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
2642		    "reply post free pool (0x%p): depth(%d),"
2643		    "element_size(%d), pool_size(%d kB)\n", ioc->name,
2644		    ioc->reply_post[i].reply_post_free,
2645		    ioc->reply_post_queue_depth, 8, sz/1024));
2646		dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
2647		    "reply_post_free_dma = (0x%llx)\n", ioc->name,
2648		    (unsigned long long)
2649		    ioc->reply_post[i].reply_post_free_dma));
2650		total_sz += sz;
2651	} while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
2652
2653	if (ioc->dma_mask == 64) {
2654		if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
2655			printk(MPT2SAS_WARN_FMT
2656			    "no suitable consistent DMA mask for %s\n",
2657			    ioc->name, pci_name(ioc->pdev));
2658			goto out;
2659		}
2660	}
2661
2662	ioc->scsiio_depth = ioc->hba_queue_depth -
2663	    ioc->hi_priority_depth - ioc->internal_depth;
2664
2665	/* set the scsi host can_queue depth
2666	 * with some internal commands that could be outstanding
2667	 */
2668	ioc->shost->can_queue = ioc->scsiio_depth;
2669	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsi host: "
2670	    "can_queue depth (%d)\n", ioc->name, ioc->shost->can_queue));
2671
	/* contiguous pool for request and chains, 16 byte align, one extra
	 * frame for smid=0
	 */
2675	ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
2676	sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
2677
2678	/* hi-priority queue */
2679	sz += (ioc->hi_priority_depth * ioc->request_sz);
2680
2681	/* internal queue */
2682	sz += (ioc->internal_depth * ioc->request_sz);
2683
2684	ioc->request_dma_sz = sz;
2685	ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
2686	if (!ioc->request) {
2687		printk(MPT2SAS_ERR_FMT "request pool: pci_alloc_consistent "
2688		    "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
2689		    "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
2690		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
2691		if (ioc->scsiio_depth < MPT2SAS_SAS_QUEUE_DEPTH)
2692			goto out;
2693		retry_sz += 64;
2694		ioc->hba_queue_depth = max_request_credit - retry_sz;
2695		goto retry_allocation;
2696	}
2697
2698	if (retry_sz)
2699		printk(MPT2SAS_ERR_FMT "request pool: pci_alloc_consistent "
		    "succeeded: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
2701		    "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
2702		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
2703
2704
2705	/* hi-priority queue */
2706	ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
2707	    ioc->request_sz);
2708	ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
2709	    ioc->request_sz);
2710
2711	/* internal queue */
2712	ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
2713	    ioc->request_sz);
2714	ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
2715	    ioc->request_sz);
2716
2717
2718	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool(0x%p): "
2719	    "depth(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name,
2720	    ioc->request, ioc->hba_queue_depth, ioc->request_sz,
2721	    (ioc->hba_queue_depth * ioc->request_sz)/1024));
2722	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool: dma(0x%llx)\n",
2723	    ioc->name, (unsigned long long) ioc->request_dma));
2724	total_sz += sz;
2725
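	/* scsi io tracker table (scsi_lookup), one entry per scsiio smid */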
2726	sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
2727	ioc->scsi_lookup_pages = get_order(sz);
2728	ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
2729	    GFP_KERNEL, ioc->scsi_lookup_pages);
2730	if (!ioc->scsi_lookup) {
2731		printk(MPT2SAS_ERR_FMT "scsi_lookup: get_free_pages failed, "
2732		    "sz(%d)\n", ioc->name, (int)sz);
2733		goto out;
2734	}
2735
2736	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsiio(0x%p): "
2737	    "depth(%d)\n", ioc->name, ioc->request,
2738	    ioc->scsiio_depth));
2739
2740	ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
2741	sz = ioc->chain_depth * sizeof(struct chain_tracker);
2742	ioc->chain_pages = get_order(sz);
2743
2744	ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
2745	    GFP_KERNEL, ioc->chain_pages);
2746	if (!ioc->chain_lookup) {
2747		printk(MPT2SAS_ERR_FMT "chain_lookup: get_free_pages failed, "
2748		    "sz(%d)\n", ioc->name, (int)sz);
2749		goto out;
2750	}
2751	ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
2752	    ioc->request_sz, 16, 0);
2753	if (!ioc->chain_dma_pool) {
2754		printk(MPT2SAS_ERR_FMT "chain_dma_pool: pci_pool_create "
2755		    "failed\n", ioc->name);
2756		goto out;
2757	}
2758	for (i = 0; i < ioc->chain_depth; i++) {
2759		ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
2760		    ioc->chain_dma_pool , GFP_KERNEL,
2761		    &ioc->chain_lookup[i].chain_buffer_dma);
2762		if (!ioc->chain_lookup[i].chain_buffer) {
2763			ioc->chain_depth = i;
2764			goto chain_done;
2765		}
2766		total_sz += ioc->request_sz;
2767	}
2768chain_done:
2769	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "chain pool depth"
2770	    "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name,
2771	    ioc->chain_depth, ioc->request_sz, ((ioc->chain_depth *
2772	    ioc->request_sz))/1024));
2773
2774	/* initialize hi-priority queue smid's */
2775	ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
2776	    sizeof(struct request_tracker), GFP_KERNEL);
2777	if (!ioc->hpr_lookup) {
2778		printk(MPT2SAS_ERR_FMT "hpr_lookup: kcalloc failed\n",
2779		    ioc->name);
2780		goto out;
2781	}
2782	ioc->hi_priority_smid = ioc->scsiio_depth + 1;
2783	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "hi_priority(0x%p): "
2784	    "depth(%d), start smid(%d)\n", ioc->name, ioc->hi_priority,
2785	    ioc->hi_priority_depth, ioc->hi_priority_smid));
2786
2787	/* initialize internal queue smid's */
2788	ioc->internal_lookup = kcalloc(ioc->internal_depth,
2789	    sizeof(struct request_tracker), GFP_KERNEL);
2790	if (!ioc->internal_lookup) {
2791		printk(MPT2SAS_ERR_FMT "internal_lookup: kcalloc failed\n",
2792		    ioc->name);
2793		goto out;
2794	}
2795	ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
2796	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "internal(0x%p): "
2797	    "depth(%d), start smid(%d)\n", ioc->name, ioc->internal,
2798	     ioc->internal_depth, ioc->internal_smid));
2799
2800	/* sense buffers, 4 byte align */
2801	sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
2802	ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
2803	    0);
2804	if (!ioc->sense_dma_pool) {
2805		printk(MPT2SAS_ERR_FMT "sense pool: pci_pool_create failed\n",
2806		    ioc->name);
2807		goto out;
2808	}
2809	ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
2810	    &ioc->sense_dma);
2811	if (!ioc->sense) {
2812		printk(MPT2SAS_ERR_FMT "sense pool: pci_pool_alloc failed\n",
2813		    ioc->name);
2814		goto out;
2815	}
2816	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
2817	    "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
2818	    "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
2819	    SCSI_SENSE_BUFFERSIZE, sz/1024));
2820	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "sense_dma(0x%llx)\n",
2821	    ioc->name, (unsigned long long)ioc->sense_dma));
2822	total_sz += sz;
2823
2824	/* reply pool, 4 byte align */
2825	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
2826	ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
2827	    0);
2828	if (!ioc->reply_dma_pool) {
2829		printk(MPT2SAS_ERR_FMT "reply pool: pci_pool_create failed\n",
2830		    ioc->name);
2831		goto out;
2832	}
2833	ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL,
2834	    &ioc->reply_dma);
2835	if (!ioc->reply) {
2836		printk(MPT2SAS_ERR_FMT "reply pool: pci_pool_alloc failed\n",
2837		    ioc->name);
2838		goto out;
2839	}
2840	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
2841	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
2842	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply pool(0x%p): depth"
2843	    "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, ioc->reply,
2844	    ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
2845	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_dma(0x%llx)\n",
2846	    ioc->name, (unsigned long long)ioc->reply_dma));
2847	total_sz += sz;
2848
2849	/* reply free queue, 16 byte align */
2850	sz = ioc->reply_free_queue_depth * 4;
2851	ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
2852	    ioc->pdev, sz, 16, 0);
2853	if (!ioc->reply_free_dma_pool) {
2854		printk(MPT2SAS_ERR_FMT "reply_free pool: pci_pool_create "
2855		    "failed\n", ioc->name);
2856		goto out;
2857	}
2858	ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL,
2859	    &ioc->reply_free_dma);
2860	if (!ioc->reply_free) {
2861		printk(MPT2SAS_ERR_FMT "reply_free pool: pci_pool_alloc "
2862		    "failed\n", ioc->name);
2863		goto out;
2864	}
2865	memset(ioc->reply_free, 0, sz);
2866	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free pool(0x%p): "
2867	    "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
2868	    ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
2869	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free_dma"
2870	    "(0x%llx)\n", ioc->name, (unsigned long long)ioc->reply_free_dma));
2871	total_sz += sz;
2872
2873	ioc->config_page_sz = 512;
2874	ioc->config_page = pci_alloc_consistent(ioc->pdev,
2875	    ioc->config_page_sz, &ioc->config_page_dma);
2876	if (!ioc->config_page) {
		printk(MPT2SAS_ERR_FMT "config page: pci_alloc_consistent "
2878		    "failed\n", ioc->name);
2879		goto out;
2880	}
2881	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "config page(0x%p): size"
2882	    "(%d)\n", ioc->name, ioc->config_page, ioc->config_page_sz));
2883	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "config_page_dma"
2884	    "(0x%llx)\n", ioc->name, (unsigned long long)ioc->config_page_dma));
2885	total_sz += ioc->config_page_sz;
2886
2887	printk(MPT2SAS_INFO_FMT "Allocated physical memory: size(%d kB)\n",
2888	    ioc->name, total_sz/1024);
2889	printk(MPT2SAS_INFO_FMT "Current Controller Queue Depth(%d), "
2890	    "Max Controller Queue Depth(%d)\n",
2891	    ioc->name, ioc->shost->can_queue, facts->RequestCredit);
2892	printk(MPT2SAS_INFO_FMT "Scatter Gather Elements per IO(%d)\n",
2893	    ioc->name, ioc->shost->sg_tablesize);
2894	return 0;
2895
2896 out:
2897	return -ENOMEM;
2898}
2899
2900
2901/**
2902 * mpt2sas_base_get_iocstate - Get the current state of a MPT adapter.
2903 * @ioc: Pointer to MPT_ADAPTER structure
2904 * @cooked: Request raw or cooked IOC state
2905 *
 * Returns all IOC Doorbell register bits if cooked==0, else just the
 * Doorbell bits in MPI2_IOC_STATE_MASK.
2908 */
2909u32
2910mpt2sas_base_get_iocstate(struct MPT2SAS_ADAPTER *ioc, int cooked)
2911{
2912	u32 s, sc;
2913
2914	s = readl(&ioc->chip->Doorbell);
2915	sc = s & MPI2_IOC_STATE_MASK;
2916	return cooked ? sc : s;
2917}
2918
2919/**
 * _base_wait_on_iocstate - waiting on a particular ioc state
 * @ioc: per adapter object
 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
 * @timeout: timeout in seconds
2923 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2924 *
2925 * Returns 0 for success, non-zero for failure.
2926 */
2927static int
2928_base_wait_on_iocstate(struct MPT2SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
2929    int sleep_flag)
2930{
2931	u32 count, cntdn;
2932	u32 current_state;
2933
2934	count = 0;
2935	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
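	/* poll every 1 msec when sleeping is allowed, else every 500 usec */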
2936	do {
2937		current_state = mpt2sas_base_get_iocstate(ioc, 1);
2938		if (current_state == ioc_state)
2939			return 0;
2940		if (count && current_state == MPI2_IOC_STATE_FAULT)
2941			break;
2942		if (sleep_flag == CAN_SLEEP)
2943			msleep(1);
2944		else
2945			udelay(500);
2946		count++;
2947	} while (--cntdn);
2948
2949	return current_state;
2950}
2951
2952/**
 * _base_wait_for_doorbell_int - waiting for controller interrupt (generated
 * by a write to the doorbell)
 * @ioc: per adapter object
 * @timeout: timeout in seconds
2957 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2958 *
2959 * Returns 0 for success, non-zero for failure.
2960 *
2961 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
2962 */
2963static int
2964_base_wait_for_doorbell_int(struct MPT2SAS_ADAPTER *ioc, int timeout,
2965    int sleep_flag)
2966{
2967	u32 cntdn, count;
2968	u32 int_status;
2969
2970	count = 0;
2971	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2972	do {
2973		int_status = readl(&ioc->chip->HostInterruptStatus);
2974		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
2975			dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
2976			    "successful count(%d), timeout(%d)\n", ioc->name,
2977			    __func__, count, timeout));
2978			return 0;
2979		}
2980		if (sleep_flag == CAN_SLEEP)
2981			msleep(1);
2982		else
2983			udelay(500);
2984		count++;
2985	} while (--cntdn);
2986
2987	printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
2988	    "int_status(%x)!\n", ioc->name, __func__, count, int_status);
2989	return -EFAULT;
2990}
2991
2992/**
2993 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
2994 * @ioc: per adapter object
 * @timeout: timeout in seconds
2996 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2997 *
2998 * Returns 0 for success, non-zero for failure.
2999 *
3000 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
3001 * doorbell.
3002 */
3003static int
3004_base_wait_for_doorbell_ack(struct MPT2SAS_ADAPTER *ioc, int timeout,
3005    int sleep_flag)
3006{
3007	u32 cntdn, count;
3008	u32 int_status;
3009	u32 doorbell;
3010
3011	count = 0;
3012	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
3013	do {
3014		int_status = readl(&ioc->chip->HostInterruptStatus);
3015		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
3016			dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
3017			    "successful count(%d), timeout(%d)\n", ioc->name,
3018			    __func__, count, timeout));
3019			return 0;
3020		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
3021			doorbell = readl(&ioc->chip->Doorbell);
3022			if ((doorbell & MPI2_IOC_STATE_MASK) ==
3023			    MPI2_IOC_STATE_FAULT) {
3024				mpt2sas_base_fault_info(ioc , doorbell);
3025				return -EFAULT;
3026			}
3027		} else if (int_status == 0xFFFFFFFF)
3028			goto out;
3029
3030		if (sleep_flag == CAN_SLEEP)
3031			msleep(1);
3032		else
3033			udelay(500);
3034		count++;
3035	} while (--cntdn);
3036
3037 out:
3038	printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
3039	    "int_status(%x)!\n", ioc->name, __func__, count, int_status);
3040	return -EFAULT;
3041}
3042
3043/**
3044 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
3045 * @ioc: per adapter object
 * @timeout: timeout in seconds
3047 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3048 *
3049 * Returns 0 for success, non-zero for failure.
3050 *
3051 */
3052static int
3053_base_wait_for_doorbell_not_used(struct MPT2SAS_ADAPTER *ioc, int timeout,
3054    int sleep_flag)
3055{
3056	u32 cntdn, count;
3057	u32 doorbell_reg;
3058
3059	count = 0;
3060	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
3061	do {
3062		doorbell_reg = readl(&ioc->chip->Doorbell);
3063		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
3064			dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
3065			    "successful count(%d), timeout(%d)\n", ioc->name,
3066			    __func__, count, timeout));
3067			return 0;
3068		}
3069		if (sleep_flag == CAN_SLEEP)
3070			msleep(1);
3071		else
3072			udelay(500);
3073		count++;
3074	} while (--cntdn);
3075
3076	printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
3077	    "doorbell_reg(%x)!\n", ioc->name, __func__, count, doorbell_reg);
3078	return -EFAULT;
3079}
3080
3081/**
3082 * _base_send_ioc_reset - send doorbell reset
3083 * @ioc: per adapter object
3084 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
 * @timeout: timeout in seconds
3086 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3087 *
3088 * Returns 0 for success, non-zero for failure.
3089 */
3090static int
3091_base_send_ioc_reset(struct MPT2SAS_ADAPTER *ioc, u8 reset_type, int timeout,
3092    int sleep_flag)
3093{
3094	u32 ioc_state;
3095	int r = 0;
3096
3097	if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
3098		printk(MPT2SAS_ERR_FMT "%s: unknown reset_type\n",
3099		    ioc->name, __func__);
3100		return -EFAULT;
3101	}
3102
3103	if (!(ioc->facts.IOCCapabilities &
3104	   MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
3105		return -EFAULT;
3106
3107	printk(MPT2SAS_INFO_FMT "sending message unit reset !!\n", ioc->name);
3108
3109	writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
3110	    &ioc->chip->Doorbell);
3111	if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) {
3112		r = -EFAULT;
3113		goto out;
3114	}
3115	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
3116	    timeout, sleep_flag);
3117	if (ioc_state) {
3118		printk(MPT2SAS_ERR_FMT "%s: failed going to ready state "
3119		    " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state);
3120		r = -EFAULT;
3121		goto out;
3122	}
3123 out:
3124	printk(MPT2SAS_INFO_FMT "message unit reset: %s\n",
3125	    ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
3126	return r;
3127}
3128
3129/**
 * _base_handshake_req_reply_wait - send request through the doorbell interface
3131 * @ioc: per adapter object
3132 * @request_bytes: request length
3133 * @request: pointer having request payload
3134 * @reply_bytes: reply length
3135 * @reply: pointer to reply payload
 * @timeout: timeout in seconds
3137 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3138 *
3139 * Returns 0 for success, non-zero for failure.
3140 */
3141static int
3142_base_handshake_req_reply_wait(struct MPT2SAS_ADAPTER *ioc, int request_bytes,
3143    u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag)
3144{
3145	MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
3146	int i;
3147	u8 failed;
3148	u16 dummy;
3149	__le32 *mfp;
3150
3151	/* make sure doorbell is not in use */
3152	if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
3153		printk(MPT2SAS_ERR_FMT "doorbell is in use "
3154		    " (line=%d)\n", ioc->name, __LINE__);
3155		return -EFAULT;
3156	}
3157
3158	/* clear pending doorbell interrupts from previous state changes */
3159	if (readl(&ioc->chip->HostInterruptStatus) &
3160	    MPI2_HIS_IOC2SYS_DB_STATUS)
3161		writel(0, &ioc->chip->HostInterruptStatus);
3162
3163	/* send message to ioc */
3164	writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
3165	    ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
3166	    &ioc->chip->Doorbell);
3167
3168	if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) {
3169		printk(MPT2SAS_ERR_FMT "doorbell handshake "
3170		   "int failed (line=%d)\n", ioc->name, __LINE__);
3171		return -EFAULT;
3172	}
3173	writel(0, &ioc->chip->HostInterruptStatus);
3174
3175	if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) {
3176		printk(MPT2SAS_ERR_FMT "doorbell handshake "
3177		    "ack failed (line=%d)\n", ioc->name, __LINE__);
3178		return -EFAULT;
3179	}
3180
3181	/* send message 32-bits at a time */
3182	for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
3183		writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
3184		if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag)))
3185			failed = 1;
3186	}
3187
3188	if (failed) {
3189		printk(MPT2SAS_ERR_FMT "doorbell handshake "
3190		    "sending request failed (line=%d)\n", ioc->name, __LINE__);
3191		return -EFAULT;
3192	}
3193
3194	/* now wait for the reply */
3195	if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) {
3196		printk(MPT2SAS_ERR_FMT "doorbell handshake "
3197		   "int failed (line=%d)\n", ioc->name, __LINE__);
3198		return -EFAULT;
3199	}
3200
	/*
	 * read the first two 16-bit words of the reply; the MsgLength field
	 * in the second word gives the total reply length in dwords
	 */
3202	reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3203	    & MPI2_DOORBELL_DATA_MASK);
3204	writel(0, &ioc->chip->HostInterruptStatus);
3205	if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
3206		printk(MPT2SAS_ERR_FMT "doorbell handshake "
3207		   "int failed (line=%d)\n", ioc->name, __LINE__);
3208		return -EFAULT;
3209	}
3210	reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3211	    & MPI2_DOORBELL_DATA_MASK);
3212	writel(0, &ioc->chip->HostInterruptStatus);
3213
3214	for (i = 2; i < default_reply->MsgLength * 2; i++)  {
3215		if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
3216			printk(MPT2SAS_ERR_FMT "doorbell "
3217			    "handshake int failed (line=%d)\n", ioc->name,
3218			    __LINE__);
3219			return -EFAULT;
3220		}
3221		if (i >=  reply_bytes/2) /* overflow case */
3222			dummy = readl(&ioc->chip->Doorbell);
3223		else
3224			reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3225			    & MPI2_DOORBELL_DATA_MASK);
3226		writel(0, &ioc->chip->HostInterruptStatus);
3227	}
3228
3229	_base_wait_for_doorbell_int(ioc, 5, sleep_flag);
3230	if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) {
3231		dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "doorbell is in use "
3232		    " (line=%d)\n", ioc->name, __LINE__));
3233	}
3234	writel(0, &ioc->chip->HostInterruptStatus);
3235
3236	if (ioc->logging_level & MPT_DEBUG_INIT) {
3237		mfp = (__le32 *)reply;
3238		printk(KERN_INFO "\toffset:data\n");
3239		for (i = 0; i < reply_bytes/4; i++)
3240			printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4,
3241			    le32_to_cpu(mfp[i]));
3242	}
3243	return 0;
3244}
3245
3246/**
3247 * mpt2sas_base_sas_iounit_control - send sas iounit control to FW
3248 * @ioc: per adapter object
3249 * @mpi_reply: the reply payload from FW
3250 * @mpi_request: the request payload sent to FW
3251 *
 * The SAS IO Unit Control Request message allows the host to perform
 * low-level operations such as resets on the PHYs of the IO Unit. It also
 * allows the host to obtain the IOC-assigned device handle for a device,
 * given other identifying information about the device, and to remove IOC
 * resources associated with the device.
3257 *
3258 * Returns 0 for success, non-zero for failure.
3259 */
3260int
3261mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc,
3262    Mpi2SasIoUnitControlReply_t *mpi_reply,
3263    Mpi2SasIoUnitControlRequest_t *mpi_request)
3264{
3265	u16 smid;
3266	u32 ioc_state;
3267	unsigned long timeleft;
3268	bool issue_reset = false;
3269	int rc;
3270	void *request;
3271	u16 wait_state_count;
3272
3273	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3274	    __func__));
3275
3276	mutex_lock(&ioc->base_cmds.mutex);
3277
3278	if (ioc->base_cmds.status != MPT2_CMD_NOT_USED) {
3279		printk(MPT2SAS_ERR_FMT "%s: base_cmd in use\n",
3280		    ioc->name, __func__);
3281		rc = -EAGAIN;
3282		goto out;
3283	}
3284
3285	wait_state_count = 0;
3286	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
3287	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3288		if (wait_state_count++ == 10) {
3289			printk(MPT2SAS_ERR_FMT
3290			    "%s: failed due to ioc not operational\n",
3291			    ioc->name, __func__);
3292			rc = -EFAULT;
3293			goto out;
3294		}
3295		ssleep(1);
3296		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
3297		printk(MPT2SAS_INFO_FMT "%s: waiting for "
3298		    "operational state(count=%d)\n", ioc->name,
3299		    __func__, wait_state_count);
3300	}
3301
3302	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
3303	if (!smid) {
3304		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3305		    ioc->name, __func__);
3306		rc = -EAGAIN;
3307		goto out;
3308	}
3309
3310	rc = 0;
3311	ioc->base_cmds.status = MPT2_CMD_PENDING;
3312	request = mpt2sas_base_get_msg_frame(ioc, smid);
3313	ioc->base_cmds.smid = smid;
3314	memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
3315	if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
3316	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
3317		ioc->ioc_link_reset_in_progress = 1;
3318	init_completion(&ioc->base_cmds.done);
3319	mpt2sas_base_put_smid_default(ioc, smid);
3320	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
3321	    msecs_to_jiffies(10000));
3322	if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
3323	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
3324	    ioc->ioc_link_reset_in_progress)
3325		ioc->ioc_link_reset_in_progress = 0;
3326	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
3327		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
3328		    ioc->name, __func__);
3329		_debug_dump_mf(mpi_request,
3330		    sizeof(Mpi2SasIoUnitControlRequest_t)/4);
3331		if (!(ioc->base_cmds.status & MPT2_CMD_RESET))
3332			issue_reset = true;
3333		goto issue_host_reset;
3334	}
3335	if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID)
3336		memcpy(mpi_reply, ioc->base_cmds.reply,
3337		    sizeof(Mpi2SasIoUnitControlReply_t));
3338	else
3339		memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
3340	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3341	goto out;
3342
3343 issue_host_reset:
3344	if (issue_reset)
3345		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
3346		    FORCE_BIG_HAMMER);
3347	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3348	rc = -EFAULT;
3349 out:
3350	mutex_unlock(&ioc->base_cmds.mutex);
3351	return rc;
3352}
3353
3354
3355/**
3356 * mpt2sas_base_scsi_enclosure_processor - sending request to sep device
3357 * @ioc: per adapter object
3358 * @mpi_reply: the reply payload from FW
3359 * @mpi_request: the request payload sent to FW
3360 *
3361 * The SCSI Enclosure Processor request message causes the IOC to
3362 * communicate with SES devices to control LED status signals.
3363 *
3364 * Returns 0 for success, non-zero for failure.
3365 */
3366int
3367mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
3368    Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
3369{
3370	u16 smid;
3371	u32 ioc_state;
3372	unsigned long timeleft;
3373	bool issue_reset = false;
3374	int rc;
3375	void *request;
3376	u16 wait_state_count;
3377
3378	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3379	    __func__));
3380
3381	mutex_lock(&ioc->base_cmds.mutex);
3382
3383	if (ioc->base_cmds.status != MPT2_CMD_NOT_USED) {
3384		printk(MPT2SAS_ERR_FMT "%s: base_cmd in use\n",
3385		    ioc->name, __func__);
3386		rc = -EAGAIN;
3387		goto out;
3388	}
3389
3390	wait_state_count = 0;
3391	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
3392	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3393		if (wait_state_count++ == 10) {
3394			printk(MPT2SAS_ERR_FMT
3395			    "%s: failed due to ioc not operational\n",
3396			    ioc->name, __func__);
3397			rc = -EFAULT;
3398			goto out;
3399		}
3400		ssleep(1);
3401		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
3402		printk(MPT2SAS_INFO_FMT "%s: waiting for "
3403		    "operational state(count=%d)\n", ioc->name,
3404		    __func__, wait_state_count);
3405	}
3406
3407	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
3408	if (!smid) {
3409		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3410		    ioc->name, __func__);
3411		rc = -EAGAIN;
3412		goto out;
3413	}
3414
3415	rc = 0;
3416	ioc->base_cmds.status = MPT2_CMD_PENDING;
3417	request = mpt2sas_base_get_msg_frame(ioc, smid);
3418	ioc->base_cmds.smid = smid;
	memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t));
3420	init_completion(&ioc->base_cmds.done);
3421	mpt2sas_base_put_smid_default(ioc, smid);
3422	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
3423	    msecs_to_jiffies(10000));
3424	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
3425		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
3426		    ioc->name, __func__);
3427		_debug_dump_mf(mpi_request,
3428		    sizeof(Mpi2SepRequest_t)/4);
3429		if (!(ioc->base_cmds.status & MPT2_CMD_RESET))
3430			issue_reset = true;
3431		goto issue_host_reset;
3432	}
3433	if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID)
3434		memcpy(mpi_reply, ioc->base_cmds.reply,
3435		    sizeof(Mpi2SepReply_t));
3436	else
3437		memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
3438	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3439	goto out;
3440
3441 issue_host_reset:
3442	if (issue_reset)
3443		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
3444		    FORCE_BIG_HAMMER);
3445	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3446	rc = -EFAULT;
3447 out:
3448	mutex_unlock(&ioc->base_cmds.mutex);
3449	return rc;
3450}
3451
3452/**
3453 * _base_get_port_facts - obtain port facts reply and save in ioc
 * @ioc: per adapter object
 * @port: port number
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3456 *
3457 * Returns 0 for success, non-zero for failure.
3458 */
3459static int
3460_base_get_port_facts(struct MPT2SAS_ADAPTER *ioc, int port, int sleep_flag)
3461{
3462	Mpi2PortFactsRequest_t mpi_request;
3463	Mpi2PortFactsReply_t mpi_reply;
3464	struct mpt2sas_port_facts *pfacts;
3465	int mpi_reply_sz, mpi_request_sz, r;
3466
3467	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3468	    __func__));
3469
3470	mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
3471	mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
3472	memset(&mpi_request, 0, mpi_request_sz);
3473	mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
3474	mpi_request.PortNumber = port;
3475	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
3476	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
3477
3478	if (r != 0) {
3479		printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
3480		    ioc->name, __func__, r);
3481		return r;
3482	}
3483
3484	pfacts = &ioc->pfacts[port];
3485	memset(pfacts, 0, sizeof(struct mpt2sas_port_facts));
3486	pfacts->PortNumber = mpi_reply.PortNumber;
3487	pfacts->VP_ID = mpi_reply.VP_ID;
3488	pfacts->VF_ID = mpi_reply.VF_ID;
3489	pfacts->MaxPostedCmdBuffers =
3490	    le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
3491
3492	return 0;
3493}
3494
3495/**
3496 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
3497 * @ioc: per adapter object
 * @timeout: timeout in seconds
3499 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3500 *
3501 * Returns 0 for success, non-zero for failure.
3502 */
3503static int
3504_base_wait_for_iocstate(struct MPT2SAS_ADAPTER *ioc, int timeout,
3505	int sleep_flag)
3506{
3507	u32 ioc_state, doorbell;
3508	int rc;
3509
3510	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3511	    __func__));
3512
3513	if (ioc->pci_error_recovery)
3514		return 0;
3515
3516	doorbell = mpt2sas_base_get_iocstate(ioc, 0);
3517	ioc_state = doorbell & MPI2_IOC_STATE_MASK;
3518	dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: ioc_state(0x%08x)\n",
3519	    ioc->name, __func__, ioc_state));
3520
3521	switch (ioc_state) {
3522	case MPI2_IOC_STATE_READY:
3523	case MPI2_IOC_STATE_OPERATIONAL:
3524		return 0;
3525	}
3526
3527	if (doorbell & MPI2_DOORBELL_USED) {
3528		dhsprintk(ioc, printk(MPT2SAS_INFO_FMT
3529		    "unexpected doorbell active!\n", ioc->name));
3530		goto issue_diag_reset;
3531	}
3532
3533	if (ioc_state == MPI2_IOC_STATE_FAULT) {
3534		mpt2sas_base_fault_info(ioc, doorbell &
3535		    MPI2_DOORBELL_DATA_MASK);
3536		goto issue_diag_reset;
3537	}
3538
3539	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
3540	    timeout, sleep_flag);
3541	if (ioc_state) {
3542		printk(MPT2SAS_ERR_FMT
3543		    "%s: failed going to ready state (ioc_state=0x%x)\n",
3544		    ioc->name, __func__, ioc_state);
3545		return -EFAULT;
3546	}
3547
3548 issue_diag_reset:
3549	rc = _base_diag_reset(ioc, sleep_flag);
3550	return rc;
3551}
3552
3553/**
3554 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
3555 * @ioc: per adapter object
3556 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3557 *
3558 * Returns 0 for success, non-zero for failure.
3559 */
3560static int
3561_base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3562{
3563	Mpi2IOCFactsRequest_t mpi_request;
3564	Mpi2IOCFactsReply_t mpi_reply;
3565	struct mpt2sas_facts *facts;
3566	int mpi_reply_sz, mpi_request_sz, r;
3567
3568	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3569	    __func__));
3570
3571	r = _base_wait_for_iocstate(ioc, 10, sleep_flag);
3572	if (r) {
3573		printk(MPT2SAS_ERR_FMT "%s: failed getting to correct state\n",
3574			ioc->name, __func__);
3575		return r;
3576	}
3577
3578	mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
3579	mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
3580	memset(&mpi_request, 0, mpi_request_sz);
3581	mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
3582	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
3583	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
3584
3585	if (r != 0) {
3586		printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
3587		    ioc->name, __func__, r);
3588		return r;
3589	}
3590
3591	facts = &ioc->facts;
3592	memset(facts, 0, sizeof(struct mpt2sas_facts));
3593	facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
3594	facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
3595	facts->VP_ID = mpi_reply.VP_ID;
3596	facts->VF_ID = mpi_reply.VF_ID;
3597	facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
3598	facts->MaxChainDepth = mpi_reply.MaxChainDepth;
3599	facts->WhoInit = mpi_reply.WhoInit;
3600	facts->NumberOfPorts = mpi_reply.NumberOfPorts;
3601	facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
3602	facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
3603	facts->MaxReplyDescriptorPostQueueDepth =
3604	    le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
3605	facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
3606	facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
3607	if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
3608		ioc->ir_firmware = 1;
3609	if ((facts->IOCCapabilities &
3610	      MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE))
3611		ioc->rdpq_array_capable = 1;
3612	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
3613	facts->IOCRequestFrameSize =
3614	    le16_to_cpu(mpi_reply.IOCRequestFrameSize);
3615	facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
3616	facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
3617	ioc->shost->max_id = -1;
3618	facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
3619	facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
3620	facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
3621	facts->HighPriorityCredit =
3622	    le16_to_cpu(mpi_reply.HighPriorityCredit);
3623	facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
3624	facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
3625
3626	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "hba queue depth(%d), "
3627	    "max chains per io(%d)\n", ioc->name, facts->RequestCredit,
3628	    facts->MaxChainDepth));
3629	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request frame size(%d), "
3630	    "reply frame size(%d)\n", ioc->name,
3631	    facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
3632	return 0;
3633}
3634
3635/**
3636 * _base_send_ioc_init - send ioc_init to firmware
3637 * @ioc: per adapter object
3638 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3639 *
3640 * Returns 0 for success, non-zero for failure.
3641 */
3642static int
3643_base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3644{
3645	Mpi2IOCInitRequest_t mpi_request;
3646	Mpi2IOCInitReply_t mpi_reply;
3647	int i, r = 0;
3648	struct timeval current_time;
3649	u16 ioc_status;
3650	u32 reply_post_free_array_sz = 0;
3651	Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL;
3652	dma_addr_t reply_post_free_array_dma;
3653
3654	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3655	    __func__));
3656
3657	memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
3658	mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
3659	mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
3660	mpi_request.VF_ID = 0; /* TODO */
3661	mpi_request.VP_ID = 0;
3662	mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
3663	mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
3664
3665	if (_base_is_controller_msix_enabled(ioc))
3666		mpi_request.HostMSIxVectors = ioc->reply_queue_count;
3667	mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
3668	mpi_request.ReplyDescriptorPostQueueDepth =
3669	    cpu_to_le16(ioc->reply_post_queue_depth);
3670	mpi_request.ReplyFreeQueueDepth =
3671	    cpu_to_le16(ioc->reply_free_queue_depth);
3672
3673	mpi_request.SenseBufferAddressHigh =
3674	    cpu_to_le32((u64)ioc->sense_dma >> 32);
3675	mpi_request.SystemReplyAddressHigh =
3676	    cpu_to_le32((u64)ioc->reply_dma >> 32);
3677	mpi_request.SystemRequestFrameBaseAddress =
3678	    cpu_to_le64((u64)ioc->request_dma);
3679	mpi_request.ReplyFreeQueueAddress =
3680	    cpu_to_le64((u64)ioc->reply_free_dma);
3681
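	/*
	 * In RDPQ array mode each reply descriptor post queue gets its own
	 * DMA allocation, so IOC_INIT is handed an array of queue base
	 * addresses; otherwise a single base address describes one
	 * contiguous region holding all the queues.
	 */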
3682	if (ioc->rdpq_array_enable) {
3683		reply_post_free_array_sz = ioc->reply_queue_count *
3684		    sizeof(Mpi2IOCInitRDPQArrayEntry);
3685		reply_post_free_array = pci_alloc_consistent(ioc->pdev,
3686			reply_post_free_array_sz, &reply_post_free_array_dma);
3687		if (!reply_post_free_array) {
3688			printk(MPT2SAS_ERR_FMT
3689			"reply_post_free_array: pci_alloc_consistent failed\n",
3690			ioc->name);
3691			r = -ENOMEM;
3692			goto out;
3693		}
3694		memset(reply_post_free_array, 0, reply_post_free_array_sz);
3695		for (i = 0; i < ioc->reply_queue_count; i++)
3696			reply_post_free_array[i].RDPQBaseAddress =
3697			    cpu_to_le64(
3698				(u64)ioc->reply_post[i].reply_post_free_dma);
3699		mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
3700		mpi_request.ReplyDescriptorPostQueueAddress =
3701		    cpu_to_le64((u64)reply_post_free_array_dma);
3702	} else {
3703		mpi_request.ReplyDescriptorPostQueueAddress =
3704		    cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
3705	}
3706
3707	/* This time stamp specifies the number of milliseconds
3708	 * since the epoch (midnight, January 1, 1970 UTC).
3709	 */
3710	do_gettimeofday(&current_time);
3711	mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 +
3712	    (current_time.tv_usec / 1000));
3713
3714	if (ioc->logging_level & MPT_DEBUG_INIT) {
3715		__le32 *mfp;
3716		int i;
3717
3718		mfp = (__le32 *)&mpi_request;
3719		printk(KERN_INFO "\toffset:data\n");
3720		for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
3721			printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4,
3722			    le32_to_cpu(mfp[i]));
3723	}
3724
3725	r = _base_handshake_req_reply_wait(ioc,
3726	    sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
3727	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10,
3728	    sleep_flag);
3729
3730	if (r != 0) {
3731		printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
3732		    ioc->name, __func__, r);
3733		goto out;
3734	}
3735
3736	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
3737	if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
3738	    mpi_reply.IOCLogInfo) {
3739		printk(MPT2SAS_ERR_FMT "%s: failed\n", ioc->name, __func__);
3740		r = -EIO;
3741	}
3742
3743out:
3744	if (reply_post_free_array)
3745		pci_free_consistent(ioc->pdev, reply_post_free_array_sz,
3746				    reply_post_free_array,
3747				    reply_post_free_array_dma);
3748	return r;
3749}
3750
3751/**
3752 * mpt2sas_port_enable_done - command completion routine for port enable
3753 * @ioc: per adapter object
3754 * @smid: system request message index
3755 * @msix_index: MSIX table index supplied by the OS
3756 * @reply: reply message frame(lower 32bit addr)
3757 *
3758 * Return 1 meaning mf should be freed from _base_interrupt
3759 *        0 means the mf is freed from this function.
3760 */
3761u8
3762mpt2sas_port_enable_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
3763	u32 reply)
3764{
3765	MPI2DefaultReply_t *mpi_reply;
3766	u16 ioc_status;
3767
3768	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
3769	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
3770		return 1;
3771
3772	if (ioc->port_enable_cmds.status == MPT2_CMD_NOT_USED)
3773		return 1;
3774
3775	ioc->port_enable_cmds.status |= MPT2_CMD_COMPLETE;
3776	if (mpi_reply) {
3777		ioc->port_enable_cmds.status |= MPT2_CMD_REPLY_VALID;
3778		memcpy(ioc->port_enable_cmds.reply, mpi_reply,
3779		    mpi_reply->MsgLength*4);
3780	}
3781	ioc->port_enable_cmds.status &= ~MPT2_CMD_PENDING;
3782
3783	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
3784
3785	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
3786		ioc->port_enable_failed = 1;
3787
3788	if (ioc->is_driver_loading) {
3789		if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
3790			mpt2sas_port_enable_complete(ioc);
3791			return 1;
3792		} else {
3793			ioc->start_scan_failed = ioc_status;
3794			ioc->start_scan = 0;
3795			return 1;
3796		}
3797	}
3798	complete(&ioc->port_enable_cmds.done);
3799	return 1;
3800}
3801
3802
3803/**
3804 * _base_send_port_enable - send port_enable(discovery stuff) to firmware
3805 * @ioc: per adapter object
3806 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3807 *
3808 * Returns 0 for success, non-zero for failure.
3809 */
3810static int
3811_base_send_port_enable(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3812{
3813	Mpi2PortEnableRequest_t *mpi_request;
3814	Mpi2PortEnableReply_t *mpi_reply;
3815	unsigned long timeleft;
3816	int r = 0;
3817	u16 smid;
3818	u16 ioc_status;
3819
3820	printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name);
3821
3822	if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
3823		printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
3824		    ioc->name, __func__);
3825		return -EAGAIN;
3826	}
3827
3828	smid = mpt2sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
3829	if (!smid) {
3830		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3831		    ioc->name, __func__);
3832		return -EAGAIN;
3833	}
3834
3835	ioc->port_enable_cmds.status = MPT2_CMD_PENDING;
3836	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
3837	ioc->port_enable_cmds.smid = smid;
3838	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
3839	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
3840
3841	init_completion(&ioc->port_enable_cmds.done);
3842	mpt2sas_base_put_smid_default(ioc, smid);
3843	timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done,
3844	    300*HZ);
3845	if (!(ioc->port_enable_cmds.status & MPT2_CMD_COMPLETE)) {
3846		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
3847		    ioc->name, __func__);
3848		_debug_dump_mf(mpi_request,
3849		    sizeof(Mpi2PortEnableRequest_t)/4);
3850		if (ioc->port_enable_cmds.status & MPT2_CMD_RESET)
3851			r = -EFAULT;
3852		else
3853			r = -ETIME;
3854		goto out;
3855	}
3856	mpi_reply = ioc->port_enable_cmds.reply;
3857
3858	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
3859	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
3860		printk(MPT2SAS_ERR_FMT "%s: failed with (ioc_status=0x%08x)\n",
3861		    ioc->name, __func__, ioc_status);
3862		r = -EFAULT;
3863		goto out;
3864	}
3865 out:
3866	ioc->port_enable_cmds.status = MPT2_CMD_NOT_USED;
3867	printk(MPT2SAS_INFO_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
3868	    "SUCCESS" : "FAILED"));
3869	return r;
3870}
3871
3872/**
3873 * mpt2sas_port_enable - initiate firmware discovery (don't wait for reply)
3874 * @ioc: per adapter object
3875 *
3876 * Returns 0 for success, non-zero for failure.
3877 */
3878int
3879mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc)
3880{
3881	Mpi2PortEnableRequest_t *mpi_request;
3882	u16 smid;
3883
3884	printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name);
3885
3886	if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
3887		printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
3888		    ioc->name, __func__);
3889		return -EAGAIN;
3890	}
3891
3892	smid = mpt2sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
3893	if (!smid) {
3894		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3895		    ioc->name, __func__);
3896		return -EAGAIN;
3897	}
3898
3899	ioc->port_enable_cmds.status = MPT2_CMD_PENDING;
3900	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
3901	ioc->port_enable_cmds.smid = smid;
3902	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
3903	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
3904
3905	mpt2sas_base_put_smid_default(ioc, smid);
3906	return 0;
3907}
3908
3909/**
3910 * _base_determine_wait_on_discovery - decide whether to wait for discovery
3911 * @ioc: per adapter object
3912 *
3913 * Decide whether to wait on discovery to complete. Used to either
3914 * locate boot device, or report volumes ahead of physical devices.
3915 *
3916 * Returns 1 for wait, 0 for don't wait
3917 */
3918static int
3919_base_determine_wait_on_discovery(struct MPT2SAS_ADAPTER *ioc)
3920{
3921	/* We wait for discovery to complete if IR firmware is loaded.
3922	 * The sas topology events arrive before PD events, so we need time to
3923	 * turn on the bit in ioc->pd_handles that marks the PD.
3924	 * Also, it may be required to report Volumes ahead of physical
3925	 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
3926	 */
3927	if (ioc->ir_firmware)
3928		return 1;
3929
3930	/* if no Bios, then we don't need to wait */
3931	if (!ioc->bios_pg3.BiosVersion)
3932		return 0;
3933
3934	/* The Bios is present, so we drop down here.
3935	 *
3936	 * If there are any entries in Bios Page 2, then we wait
3937	 * for discovery to complete.
3938	 */
3939
3940	/* Current Boot Device */
3941	if ((ioc->bios_pg2.CurrentBootDeviceForm &
3942	    MPI2_BIOSPAGE2_FORM_MASK) ==
3943	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
3944	/* Request Boot Device */
3945	   (ioc->bios_pg2.ReqBootDeviceForm &
3946	    MPI2_BIOSPAGE2_FORM_MASK) ==
3947	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
3948	/* Alternate Request Boot Device */
3949	   (ioc->bios_pg2.ReqAltBootDeviceForm &
3950	    MPI2_BIOSPAGE2_FORM_MASK) ==
3951	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
3952		return 0;
3953
3954	return 1;
3955}
3956
3957
3958/**
3959 * _base_unmask_events - turn on notification for this event
3960 * @ioc: per adapter object
3961 * @event: firmware event
3962 *
3963 * The mask is stored in ioc->event_masks.
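 * For example: _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);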
3964 */
3965static void
3966_base_unmask_events(struct MPT2SAS_ADAPTER *ioc, u16 event)
3967{
3968	u32 desired_event;
3969
3970	if (event >= 128)
3971		return;
3972
3973	desired_event = (1 << (event % 32));
3974
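	/*
	 * ioc->event_masks[] is four 32-bit words covering event codes
	 * 0-127: word = event / 32, bit = event % 32.  Clearing the bit
	 * unmasks (enables) notification for that event.
	 */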
3975	if (event < 32)
3976		ioc->event_masks[0] &= ~desired_event;
3977	else if (event < 64)
3978		ioc->event_masks[1] &= ~desired_event;
3979	else if (event < 96)
3980		ioc->event_masks[2] &= ~desired_event;
3981	else if (event < 128)
3982		ioc->event_masks[3] &= ~desired_event;
3983}
3984
3985/**
3986 * _base_event_notification - send event notification
3987 * @ioc: per adapter object
3988 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3989 *
3990 * Returns 0 for success, non-zero for failure.
3991 */
3992static int
3993_base_event_notification(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3994{
3995	Mpi2EventNotificationRequest_t *mpi_request;
3996	unsigned long timeleft;
3997	u16 smid;
3998	int r = 0;
3999	int i;
4000
4001	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4002	    __func__));
4003
4004	if (ioc->base_cmds.status & MPT2_CMD_PENDING) {
4005		printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
4006		    ioc->name, __func__);
4007		return -EAGAIN;
4008	}
4009
4010	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
4011	if (!smid) {
4012		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
4013		    ioc->name, __func__);
4014		return -EAGAIN;
4015	}
4016	ioc->base_cmds.status = MPT2_CMD_PENDING;
4017	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
4018	ioc->base_cmds.smid = smid;
4019	memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
4020	mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
4021	mpi_request->VF_ID = 0; /* TODO */
4022	mpi_request->VP_ID = 0;
4023	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4024		mpi_request->EventMasks[i] =
4025		    cpu_to_le32(ioc->event_masks[i]);
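	/* a set bit in EventMasks disables that event; the bits cleared by
	 * _base_unmask_events() select the notifications we want */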
4026	init_completion(&ioc->base_cmds.done);
4027	mpt2sas_base_put_smid_default(ioc, smid);
4028	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
4029	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
4030		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
4031		    ioc->name, __func__);
4032		_debug_dump_mf(mpi_request,
4033		    sizeof(Mpi2EventNotificationRequest_t)/4);
4034		if (ioc->base_cmds.status & MPT2_CMD_RESET)
4035			r = -EFAULT;
4036		else
4037			r = -ETIME;
4038	} else
4039		dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: complete\n",
4040		    ioc->name, __func__));
4041	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
4042	return r;
4043}
4044
4045/**
4046 * mpt2sas_base_validate_event_type - validating event types
4047 * @ioc: per adapter object
4048 * @event: firmware event
4049 *
4050 * This will turn on firmware event notification when an application
4051 * asks for that event. We don't mask events that are already enabled.
4052 */
4053void
4054mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_type)
4055{
4056	int i, j;
4057	u32 event_mask, desired_event;
4058	u8 send_update_to_fw;
4059
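	/*
	 * event_type[] is the application's desired-event bitmap; inverting
	 * it gives the events the application does not want.  For any event
	 * the application wants that is still masked locally, clear the mask
	 * bit and remember to push the updated masks to the firmware.
	 */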
4060	for (i = 0, send_update_to_fw = 0; i <
4061	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
4062		event_mask = ~event_type[i];
4063		desired_event = 1;
4064		for (j = 0; j < 32; j++) {
4065			if (!(event_mask & desired_event) &&
4066			    (ioc->event_masks[i] & desired_event)) {
4067				ioc->event_masks[i] &= ~desired_event;
4068				send_update_to_fw = 1;
4069			}
4070			desired_event = (desired_event << 1);
4071		}
4072	}
4073
4074	if (!send_update_to_fw)
4075		return;
4076
4077	mutex_lock(&ioc->base_cmds.mutex);
4078	_base_event_notification(ioc, CAN_SLEEP);
4079	mutex_unlock(&ioc->base_cmds.mutex);
4080}
4081
4082/**
4083 * _base_diag_reset - the "big hammer" start of day reset
4084 * @ioc: per adapter object
4085 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4086 *
4087 * Returns 0 for success, non-zero for failure.
4088 */
4089static int
4090_base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
4091{
4092	u32 host_diagnostic;
4093	u32 ioc_state;
4094	u32 count;
4095	u32 hcb_size;
4096
4097	printk(MPT2SAS_INFO_FMT "sending diag reset !!\n", ioc->name);
4098	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "clear interrupts\n",
4099	    ioc->name));
4100
4101	count = 0;
4102	do {
4103		/* Write magic sequence to WriteSequence register
4104		 * Loop until in diagnostic mode
4105		 */
4106		drsprintk(ioc, printk(MPT2SAS_INFO_FMT "write magic "
4107		    "sequence\n", ioc->name));
4108		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
4109		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
4110		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
4111		writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
4112		writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
4113		writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
4114		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
4115
4116		/* wait 100 msec */
4117		if (sleep_flag == CAN_SLEEP)
4118			msleep(100);
4119		else
4120			mdelay(100);
4121
4122		if (count++ > 20)
4123			goto out;
4124
4125		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
4126		drsprintk(ioc, printk(MPT2SAS_INFO_FMT "wrote magic "
4127		    "sequence: count(%d), host_diagnostic(0x%08x)\n",
4128		    ioc->name, count, host_diagnostic));
4129
4130	} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
4131
4132	hcb_size = readl(&ioc->chip->HCBSize);
4133
4134	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "diag reset: issued\n",
4135	    ioc->name));
4136	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
4137	     &ioc->chip->HostDiagnostic);
4138
4139	/* This delay allows the chip PCIe hardware time to finish reset tasks */
4140	if (sleep_flag == CAN_SLEEP)
4141		msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
4142	else
4143		mdelay(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
4144
4145	/* Approximately 300 second max wait */
4146	for (count = 0; count < (300000000 /
4147	    MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
4148
4149		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
4150
4151		if (host_diagnostic == 0xFFFFFFFF)
4152			goto out;
4153		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
4154			break;
4155
4156		/* Wait to pass the second read delay window */
4157		if (sleep_flag == CAN_SLEEP)
4158			msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
4159			       /1000);
4160		else
4161			mdelay(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
4162			       /1000);
4163	}
4164
4165	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
4166
4167		drsprintk(ioc, printk(MPT2SAS_INFO_FMT "restart the adapter "
4168		    "assuming the HCB Address points to good F/W\n",
4169		    ioc->name));
4170		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
4171		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
4172		writel(host_diagnostic, &ioc->chip->HostDiagnostic);
4173
4174		drsprintk(ioc, printk(MPT2SAS_INFO_FMT
4175		    "re-enable the HCDW\n", ioc->name));
4176		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
4177		    &ioc->chip->HCBSize);
4178	}
4179
4180	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "restart the adapter\n",
4181	    ioc->name));
4182	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
4183	    &ioc->chip->HostDiagnostic);
4184
4185	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "disable writes to the "
4186	    "diagnostic register\n", ioc->name));
4187	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
4188
4189	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "Wait for FW to go to the "
4190	    "READY state\n", ioc->name));
4191	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
4192	    sleep_flag);
4193	if (ioc_state) {
4194		printk(MPT2SAS_ERR_FMT "%s: failed going to ready state "
4195		    "(ioc_state=0x%x)\n", ioc->name, __func__, ioc_state);
4196		goto out;
4197	}
4198
4199	printk(MPT2SAS_INFO_FMT "diag reset: SUCCESS\n", ioc->name);
4200	return 0;
4201
4202 out:
4203	printk(MPT2SAS_ERR_FMT "diag reset: FAILED\n", ioc->name);
4204	return -EFAULT;
4205}
4206
4207/**
4208 * _base_make_ioc_ready - put controller in READY state
4209 * @ioc: per adapter object
4210 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4211 * @type: FORCE_BIG_HAMMER or SOFT_RESET
4212 *
4213 * Returns 0 for success, non-zero for failure.
4214 */
4215static int
4216_base_make_ioc_ready(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
4217    enum reset_type type)
4218{
4219	u32 ioc_state;
4220	int rc;
4221
4222	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4223	    __func__));
4224
4225	if (ioc->pci_error_recovery)
4226		return 0;
4227
4228	ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
4229	dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: ioc_state(0x%08x)\n",
4230	    ioc->name, __func__, ioc_state));
4231
4232	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
4233		return 0;
4234
4235	if (ioc_state & MPI2_DOORBELL_USED) {
4236		dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "unexpected doorbell "
4237		    "active!\n", ioc->name));
4238		goto issue_diag_reset;
4239	}
4240
4241	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
4242		mpt2sas_base_fault_info(ioc, ioc_state &
4243		    MPI2_DOORBELL_DATA_MASK);
4244		goto issue_diag_reset;
4245	}
4246
4247	if (type == FORCE_BIG_HAMMER)
4248		goto issue_diag_reset;
4249
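	/* when the IOC is OPERATIONAL, try a message unit (soft) reset first
	 * and fall back to the diag (hard) reset only if that fails */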
4250	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
4251		if (!(_base_send_ioc_reset(ioc,
4252		    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) {
4253			ioc->ioc_reset_count++;
4254			return 0;
4255	}
4256
4257 issue_diag_reset:
4258	rc = _base_diag_reset(ioc, CAN_SLEEP);
4259	ioc->ioc_reset_count++;
4260	return rc;
4261}
4262
4263/**
4264 * _base_make_ioc_operational - put controller in OPERATIONAL state
4265 * @ioc: per adapter object
4266 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4267 *
4268 * Returns 0 for success, non-zero for failure.
4269 */
4270static int
4271_base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
4272{
4273	int r, i;
4274	unsigned long	flags;
4275	u32 reply_address;
4276	u16 smid;
4277	struct _tr_list *delayed_tr, *delayed_tr_next;
4278	u8 hide_flag;
4279	struct adapter_reply_queue *reply_q;
4280	long reply_post_free;
4281	u32 reply_post_free_sz, index = 0;
4282
4283	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4284	    __func__));
4285
4286	/* clean the delayed target reset list */
4287	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
4288	    &ioc->delayed_tr_list, list) {
4289		list_del(&delayed_tr->list);
4290		kfree(delayed_tr);
4291	}
4292
4293	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
4294	    &ioc->delayed_tr_volume_list, list) {
4295		list_del(&delayed_tr->list);
4296		kfree(delayed_tr);
4297	}
4298
4299	/* initialize the scsi lookup free list */
4300	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4301	INIT_LIST_HEAD(&ioc->free_list);
4302	smid = 1;
4303	for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
4304		INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
4305		ioc->scsi_lookup[i].cb_idx = 0xFF;
4306		ioc->scsi_lookup[i].smid = smid;
4307		ioc->scsi_lookup[i].scmd = NULL;
4308		ioc->scsi_lookup[i].direct_io = 0;
4309		list_add_tail(&ioc->scsi_lookup[i].tracker_list,
4310		    &ioc->free_list);
4311	}
4312
4313	/* hi-priority queue */
4314	INIT_LIST_HEAD(&ioc->hpr_free_list);
4315	smid = ioc->hi_priority_smid;
4316	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
4317		ioc->hpr_lookup[i].cb_idx = 0xFF;
4318		ioc->hpr_lookup[i].smid = smid;
4319		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
4320		    &ioc->hpr_free_list);
4321	}
4322
4323	/* internal queue */
4324	INIT_LIST_HEAD(&ioc->internal_free_list);
4325	smid = ioc->internal_smid;
4326	for (i = 0; i < ioc->internal_depth; i++, smid++) {
4327		ioc->internal_lookup[i].cb_idx = 0xFF;
4328		ioc->internal_lookup[i].smid = smid;
4329		list_add_tail(&ioc->internal_lookup[i].tracker_list,
4330		    &ioc->internal_free_list);
4331	}
4332
4333	/* chain pool */
4334	INIT_LIST_HEAD(&ioc->free_chain_list);
4335	for (i = 0; i < ioc->chain_depth; i++)
4336		list_add_tail(&ioc->chain_lookup[i].tracker_list,
4337		    &ioc->free_chain_list);
4338
4339	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4340
4341	/* initialize Reply Free Queue */
4342	for (i = 0, reply_address = (u32)ioc->reply_dma ;
4343	    i < ioc->reply_free_queue_depth ; i++, reply_address +=
4344	    ioc->reply_sz)
4345		ioc->reply_free[i] = cpu_to_le32(reply_address);
4346
4347	/* initialize reply queues */
4348	if (ioc->is_driver_loading)
4349		_base_assign_reply_queues(ioc);
4350
4351	/* initialize Reply Post Free Queue */
4352	reply_post_free_sz = ioc->reply_post_queue_depth *
4353	    sizeof(Mpi2DefaultReplyDescriptor_t);
4354	reply_post_free = (long)ioc->reply_post[index].reply_post_free;
4355	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
4356		reply_q->reply_post_host_index = 0;
4357		reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
4358		    reply_post_free;
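		/* mark every descriptor slot as unused (all 1s); the IOC
		 * overwrites a slot only when posting a real descriptor */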
4359		for (i = 0; i < ioc->reply_post_queue_depth; i++)
4360			reply_q->reply_post_free[i].Words =
4361						     cpu_to_le64(ULLONG_MAX);
4362		if (!_base_is_controller_msix_enabled(ioc))
4363			goto skip_init_reply_post_free_queue;
4364		/*
4365		 * If RDPQ is enabled, switch to the next allocation.
4366		 * Otherwise advance within the contiguous region.
4367		 */
4368		if (ioc->rdpq_array_enable)
4369			reply_post_free = (long)
4370			    ioc->reply_post[++index].reply_post_free;
4371		else
4372			reply_post_free += reply_post_free_sz;
4373	}
4374 skip_init_reply_post_free_queue:
4375
4376	r = _base_send_ioc_init(ioc, sleep_flag);
4377	if (r)
4378		return r;
4379
4380	/* initialize reply free host index */
4381	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
4382	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
4383
4384	/* initialize reply post host index */
4385	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
4386		writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT,
4387		    &ioc->chip->ReplyPostHostIndex);
4388		if (!_base_is_controller_msix_enabled(ioc))
4389			goto skip_init_reply_post_host_index;
4390	}
4391
4392 skip_init_reply_post_host_index:
4393
4394	_base_unmask_interrupts(ioc);
4395
4396	r = _base_event_notification(ioc, sleep_flag);
4397	if (r)
4398		return r;
4399
4400	if (sleep_flag == CAN_SLEEP)
4401		_base_static_config_pages(ioc);
4402
4403
4404	if (ioc->is_driver_loading) {
4405		if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
4406		    == 0x80) {
4407			hide_flag = (u8) (
4408			    le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
4409			    MFG_PAGE10_HIDE_SSDS_MASK);
4410			if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
4411				ioc->mfg_pg10_hide_flag = hide_flag;
4412		}
4413		ioc->wait_for_discovery_to_complete =
4414		    _base_determine_wait_on_discovery(ioc);
4415		return r; /* scan_start and scan_finished support */
4416	}
4417	r = _base_send_port_enable(ioc, sleep_flag);
4418	if (r)
4419		return r;
4420
4421	return r;
4422}
4423
4424/**
4425 * mpt2sas_base_free_resources - free controller resources (io/irq/memmap)
4426 * @ioc: per adapter object
4427 *
4428 * Return nothing.
4429 */
4430void
4431mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
4432{
4433	struct pci_dev *pdev = ioc->pdev;
4434
4435	dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4436	    __func__));
4437
4438	if (ioc->chip_phys && ioc->chip) {
4439		_base_mask_interrupts(ioc);
4440		ioc->shost_recovery = 1;
4441		_base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
4442		ioc->shost_recovery = 0;
4443	}
4444
4445	_base_free_irq(ioc);
4446	_base_disable_msix(ioc);
4447
4448	if (ioc->chip_phys && ioc->chip)
4449		iounmap(ioc->chip);
4450	ioc->chip_phys = 0;
4451
4452	if (pci_is_enabled(pdev)) {
4453		pci_release_selected_regions(ioc->pdev, ioc->bars);
4454		pci_disable_pcie_error_reporting(pdev);
4455		pci_disable_device(pdev);
4456	}
4457	return;
4458}
4459
4460/**
4461 * mpt2sas_base_attach - attach controller instance
4462 * @ioc: per adapter object
4463 *
4464 * Returns 0 for success, non-zero for failure.
4465 */
4466int
4467mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
4468{
4469	int r, i;
4470	int cpu_id, last_cpu_id = 0;
4471
4472	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4473	    __func__));
4474
4475	/* setup cpu_msix_table */
4476	ioc->cpu_count = num_online_cpus();
4477	for_each_online_cpu(cpu_id)
4478		last_cpu_id = cpu_id;
4479	ioc->cpu_msix_table_sz = last_cpu_id + 1;
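	/* one byte per possible CPU id, mapping each CPU to an MSI-X index */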
4480	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
4481	ioc->reply_queue_count = 1;
4482	if (!ioc->cpu_msix_table) {
4483		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for "
4484		    "cpu_msix_table failed!!!\n", ioc->name));
4485		r = -ENOMEM;
4486		goto out_free_resources;
4487	}
4488
4489	if (ioc->is_warpdrive) {
4490		ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
4491		    sizeof(resource_size_t *), GFP_KERNEL);
4492		if (!ioc->reply_post_host_index) {
4493			dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation "
4494				"for reply_post_host_index failed!!!\n", ioc->name));
4495			r = -ENOMEM;
4496			goto out_free_resources;
4497		}
4498	}
4499
4500	ioc->rdpq_array_enable_assigned = 0;
4501	ioc->dma_mask = 0;
4502	r = mpt2sas_base_map_resources(ioc);
4503	if (r)
4504		goto out_free_resources;
4505
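	/*
	 * WarpDrive controllers provide additional ReplyPostHostIndex
	 * registers: index 0 uses the standard register, while the remaining
	 * entries map to fixed offsets past the Doorbell register (set up
	 * below).
	 */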
4506	if (ioc->is_warpdrive) {
4507		ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
4508		    &ioc->chip->ReplyPostHostIndex;
4509
4510		for (i = 1; i < ioc->cpu_msix_table_sz; i++)
4511			ioc->reply_post_host_index[i] =
4512			(resource_size_t __iomem *)
4513			((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
4514			* 4)));
4515	}
4516
4517	pci_set_drvdata(ioc->pdev, ioc->shost);
4518	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
4519	if (r)
4520		goto out_free_resources;
4521
4522	r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
4523	if (r)
4524		goto out_free_resources;
4525
4526	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
4527	    sizeof(struct mpt2sas_port_facts), GFP_KERNEL);
4528	if (!ioc->pfacts) {
4529		r = -ENOMEM;
4530		goto out_free_resources;
4531	}
4532
4533	for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
4534		r = _base_get_port_facts(ioc, i, CAN_SLEEP);
4535		if (r)
4536			goto out_free_resources;
4537	}
4538
4539	r = _base_allocate_memory_pools(ioc, CAN_SLEEP);
4540	if (r)
4541		goto out_free_resources;
4542
4543	init_waitqueue_head(&ioc->reset_wq);
4544	/* allocate memory for the pd handle bitmask list
	 * (one bit per device handle, rounded up to whole bytes) */
4545	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
4546	if (ioc->facts.MaxDevHandle % 8)
4547		ioc->pd_handles_sz++;
4548	ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
4549	    GFP_KERNEL);
4550	if (!ioc->pd_handles) {
4551		r = -ENOMEM;
4552		goto out_free_resources;
4553	}
4554	ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
4555	    GFP_KERNEL);
4556	if (!ioc->blocking_handles) {
4557		r = -ENOMEM;
4558		goto out_free_resources;
4559	}
4560	ioc->fwfault_debug = mpt2sas_fwfault_debug;
4561
4562	/* base internal command bits */
4563	mutex_init(&ioc->base_cmds.mutex);
4564	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4565	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
4566
4567	/* port_enable command bits */
4568	ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4569	ioc->port_enable_cmds.status = MPT2_CMD_NOT_USED;
4570
4571	/* transport internal command bits */
4572	ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4573	ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
4574	mutex_init(&ioc->transport_cmds.mutex);
4575
4576	/* scsih internal command bits */
4577	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4578	ioc->scsih_cmds.status = MPT2_CMD_NOT_USED;
4579	mutex_init(&ioc->scsih_cmds.mutex);
4580
4581	/* task management internal command bits */
4582	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4583	ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
4584	mutex_init(&ioc->tm_cmds.mutex);
4585
4586	/* config page internal command bits */
4587	ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4588	ioc->config_cmds.status = MPT2_CMD_NOT_USED;
4589	mutex_init(&ioc->config_cmds.mutex);
4590
4591	/* ctl module internal command bits */
4592	ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4593	ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
4594	ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
4595	mutex_init(&ioc->ctl_cmds.mutex);
4596
4597	if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
4598	    !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
4599	    !ioc->config_cmds.reply || !ioc->ctl_cmds.reply ||
4600	    !ioc->ctl_cmds.sense) {
4601		r = -ENOMEM;
4602		goto out_free_resources;
4603	}
4611
4612	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4613		ioc->event_masks[i] = -1;
4614
4615	/* here we enable the events we care about */
4616	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
4617	_base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
4618	_base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
4619	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
4620	_base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
4621	_base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
4622	_base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
4623	_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
4624	_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
4625	_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
4626	_base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
4627	r = _base_make_ioc_operational(ioc, CAN_SLEEP);
4628	if (r)
4629		goto out_free_resources;
4630
4631	ioc->non_operational_loop = 0;
4632
4633	return 0;
4634
4635 out_free_resources:
4636
4637	ioc->remove_host = 1;
4638	mpt2sas_base_free_resources(ioc);
4639	_base_release_memory_pools(ioc);
4640	pci_set_drvdata(ioc->pdev, NULL);
4641	kfree(ioc->cpu_msix_table);
4642	if (ioc->is_warpdrive)
4643		kfree(ioc->reply_post_host_index);
4644	kfree(ioc->pd_handles);
4645	kfree(ioc->blocking_handles);
4646	kfree(ioc->tm_cmds.reply);
4647	kfree(ioc->transport_cmds.reply);
4648	kfree(ioc->scsih_cmds.reply);
4649	kfree(ioc->config_cmds.reply);
4650	kfree(ioc->base_cmds.reply);
4651	kfree(ioc->port_enable_cmds.reply);
4652	kfree(ioc->ctl_cmds.reply);
4653	kfree(ioc->ctl_cmds.sense);
4654	kfree(ioc->pfacts);
4655	ioc->ctl_cmds.reply = NULL;
4656	ioc->base_cmds.reply = NULL;
4657	ioc->tm_cmds.reply = NULL;
4658	ioc->scsih_cmds.reply = NULL;
4659	ioc->transport_cmds.reply = NULL;
4660	ioc->config_cmds.reply = NULL;
4661	ioc->pfacts = NULL;
4662	return r;
4663}
4664
4665
4666/**
4667 * mpt2sas_base_detach - remove controller instance
4668 * @ioc: per adapter object
4669 *
4670 * Return nothing.
4671 */
4672void
4673mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
4674{
4675
4676	dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4677	    __func__));
4678
4679	mpt2sas_base_stop_watchdog(ioc);
4680	mpt2sas_base_free_resources(ioc);
4681	_base_release_memory_pools(ioc);
4682	pci_set_drvdata(ioc->pdev, NULL);
4683	kfree(ioc->cpu_msix_table);
4684	if (ioc->is_warpdrive)
4685		kfree(ioc->reply_post_host_index);
4686	kfree(ioc->pd_handles);
4687	kfree(ioc->blocking_handles);
4688	kfree(ioc->pfacts);
4689	kfree(ioc->ctl_cmds.reply);
4690	kfree(ioc->ctl_cmds.sense);
4691	kfree(ioc->base_cmds.reply);
4692	kfree(ioc->port_enable_cmds.reply);
4693	kfree(ioc->tm_cmds.reply);
4694	kfree(ioc->transport_cmds.reply);
4695	kfree(ioc->scsih_cmds.reply);
4696	kfree(ioc->config_cmds.reply);
4697}
4698
4699/**
4700 * _base_reset_handler - reset callback handler (for base)
4701 * @ioc: per adapter object
4702 * @reset_phase: phase
4703 *
4704 * The handler for doing any required cleanup or initialization.
4705 *
4706 * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET,
4707 * MPT2_IOC_DONE_RESET
4708 *
4709 * Return nothing.
4710 */
4711static void
4712_base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
4713{
4714	mpt2sas_scsih_reset_handler(ioc, reset_phase);
4715	mpt2sas_ctl_reset_handler(ioc, reset_phase);
4716	switch (reset_phase) {
4717	case MPT2_IOC_PRE_RESET:
4718		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
4719		    "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
4720		break;
4721	case MPT2_IOC_AFTER_RESET:
4722		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
4723		    "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
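		/* fail back any internal commands that were in flight when
		 * the reset hit: flag them with MPT2_CMD_RESET, free their
		 * message frames and complete or fail them so the waiters
		 * can proceed */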
4724		if (ioc->transport_cmds.status & MPT2_CMD_PENDING) {
4725			ioc->transport_cmds.status |= MPT2_CMD_RESET;
4726			mpt2sas_base_free_smid(ioc, ioc->transport_cmds.smid);
4727			complete(&ioc->transport_cmds.done);
4728		}
4729		if (ioc->base_cmds.status & MPT2_CMD_PENDING) {
4730			ioc->base_cmds.status |= MPT2_CMD_RESET;
4731			mpt2sas_base_free_smid(ioc, ioc->base_cmds.smid);
4732			complete(&ioc->base_cmds.done);
4733		}
4734		if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
4735			ioc->port_enable_failed = 1;
4736			ioc->port_enable_cmds.status |= MPT2_CMD_RESET;
4737			mpt2sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
4738			if (ioc->is_driver_loading) {
4739				ioc->start_scan_failed =
4740				    MPI2_IOCSTATUS_INTERNAL_ERROR;
4741				ioc->start_scan = 0;
4742				ioc->port_enable_cmds.status =
4743						MPT2_CMD_NOT_USED;
4744			} else
4745				complete(&ioc->port_enable_cmds.done);
4746
4747		}
4748		if (ioc->config_cmds.status & MPT2_CMD_PENDING) {
4749			ioc->config_cmds.status |= MPT2_CMD_RESET;
4750			mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid);
4751			ioc->config_cmds.smid = USHRT_MAX;
4752			complete(&ioc->config_cmds.done);
4753		}
4754		break;
4755	case MPT2_IOC_DONE_RESET:
4756		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
4757		    "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
4758		break;
4759	}
4760}
4761
4762/**
4763 * _wait_for_commands_to_complete - wait for pending commands to complete
4764 * @ioc: Pointer to MPT_ADAPTER structure
4765 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4766 *
4767 * This function waits (up to 10 seconds) for all pending commands to
4768 * complete prior to putting the controller into reset.
4769 */
4770static void
4771_wait_for_commands_to_complete(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
4772{
4773	u32 ioc_state;
4774	unsigned long flags;
4775	u16 i;
4776
4777	ioc->pending_io_count = 0;
4778	if (sleep_flag != CAN_SLEEP)
4779		return;
4780
4781	ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
4782	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
4783		return;
4784
4785	/* pending command count */
4786	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4787	for (i = 0; i < ioc->scsiio_depth; i++)
4788		if (ioc->scsi_lookup[i].cb_idx != 0xFF)
4789			ioc->pending_io_count++;
4790	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4791
4792	if (!ioc->pending_io_count)
4793		return;
4794
4795	/* wait for pending commands to complete */
4796	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
4797}
4798
4799/**
4800 * mpt2sas_base_hard_reset_handler - reset controller
4801 * @ioc: Pointer to MPT_ADAPTER structure
4802 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4803 * @type: FORCE_BIG_HAMMER or SOFT_RESET
4804 *
4805 * Returns 0 for success, non-zero for failure.
4806 */
4807int
4808mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
4809    enum reset_type type)
4810{
4811	int r;
4812	unsigned long flags;
4813
4814	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
4815	    __func__));
4816
4817	if (ioc->pci_error_recovery) {
4818		printk(MPT2SAS_ERR_FMT "%s: pci error recovery reset\n",
4819		    ioc->name, __func__);
4820		r = 0;
4821		goto out_unlocked;
4822	}
4823
4824	if (mpt2sas_fwfault_debug)
4825		mpt2sas_halt_firmware(ioc);
4826
4827	/* TODO - What we really should be doing is pulling
4828	 * out all the code associated with NO_SLEEP; it's never used.
4829	 * That is legacy code from the mpt fusion driver, ported over.
4830	 * I will leave this BUG_ON here for now till it's been resolved.
4831	 */
4832	BUG_ON(sleep_flag == NO_SLEEP);
4833
4834	/* wait for an active reset in progress to complete */
4835	if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
4836		do {
4837			ssleep(1);
4838		} while (ioc->shost_recovery == 1);
4839		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit\n", ioc->name,
4840		    __func__));
4841		return ioc->ioc_reset_in_progress_status;
4842	}
4843
4844	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
4845	ioc->shost_recovery = 1;
4846	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
4847
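	/* reset sequence: notify PRE_RESET, drain outstanding I/O, mask
	 * interrupts, bring the IOC back to READY, notify AFTER_RESET,
	 * re-read the IOC facts and make the IOC operational again */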
4848	_base_reset_handler(ioc, MPT2_IOC_PRE_RESET);
4849	_wait_for_commands_to_complete(ioc, sleep_flag);
4850	_base_mask_interrupts(ioc);
4851	r = _base_make_ioc_ready(ioc, sleep_flag, type);
4852	if (r)
4853		goto out;
4854	_base_reset_handler(ioc, MPT2_IOC_AFTER_RESET);
4855
4856	/* If this hard reset is called while port enable is active, then
4857	 * there is no reason to call make_ioc_operational
4858	 */
4859	if (ioc->is_driver_loading && ioc->port_enable_failed) {
4860		ioc->remove_host = 1;
4861		r = -EFAULT;
4862		goto out;
4863	}
4864
4865	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
4866	if (r)
4867		goto out;
4868
4869	if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
4870		panic("%s: Issue occurred with flashing controller firmware. "
4871		      "Please reboot the system and ensure that the correct"
4872		      " firmware version is running\n", ioc->name);
4873
4874	r = _base_make_ioc_operational(ioc, sleep_flag);
4875	if (!r)
4876		_base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
4877 out:
4878	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: %s\n",
4879	    ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
4880
4881	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
4882	ioc->ioc_reset_in_progress_status = r;
4883	ioc->shost_recovery = 0;
4884	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
4885	mutex_unlock(&ioc->reset_in_progress_mutex);
4886
4887 out_unlocked:
4888	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit\n", ioc->name,
4889	    __func__));
4890	return r;
4891}
4892