1 /***************************************************************************
2                           dpti.c  -  description
3                              -------------------
4     begin                : Thu Sep 7 2000
5     copyright            : (C) 2000 by Adaptec
6 
7 			   July 30, 2001 First version being submitted
8 			   for inclusion in the kernel.  V2.4
9 
10     See Documentation/scsi/dpti.txt for history, notes, license info
11     and credits
12  ***************************************************************************/
13 
14 /***************************************************************************
15  *                                                                         *
16  *   This program is free software; you can redistribute it and/or modify  *
17  *   it under the terms of the GNU General Public License as published by  *
18  *   the Free Software Foundation; either version 2 of the License, or     *
19  *   (at your option) any later version.                                   *
20  *                                                                         *
21  ***************************************************************************/
22 /***************************************************************************
23  * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
24  - Support 2.6 kernel and DMA-mapping
25  - ioctl fix for raid tools
26  - use schedule_timeout in long long loop
27  **************************************************************************/
28 
29 /*#define DEBUG 1 */
30 /*#define UARTDELAY 1 */
31 
32 #include <linux/module.h>
33 
34 MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
35 MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
36 
37 ////////////////////////////////////////////////////////////////
38 
39 #include <linux/ioctl.h>	/* For SCSI-Passthrough */
40 #include <asm/uaccess.h>
41 
42 #include <linux/stat.h>
43 #include <linux/slab.h>		/* for kmalloc() */
44 #include <linux/pci.h>		/* for PCI support */
45 #include <linux/proc_fs.h>
46 #include <linux/blkdev.h>
47 #include <linux/delay.h>	/* for udelay */
48 #include <linux/interrupt.h>
49 #include <linux/kernel.h>	/* for printk */
50 #include <linux/sched.h>
51 #include <linux/reboot.h>
52 #include <linux/spinlock.h>
53 #include <linux/dma-mapping.h>
54 
55 #include <linux/timer.h>
56 #include <linux/string.h>
57 #include <linux/ioport.h>
58 #include <linux/mutex.h>
59 
60 #include <asm/processor.h>	/* for boot_cpu_data */
61 #include <asm/pgtable.h>
62 #include <asm/io.h>		/* for virt_to_bus, etc. */
63 
64 #include <scsi/scsi.h>
65 #include <scsi/scsi_cmnd.h>
66 #include <scsi/scsi_device.h>
67 #include <scsi/scsi_host.h>
68 #include <scsi/scsi_tcq.h>
69 
70 #include "dpt/dptsig.h"
71 #include "dpti.h"
72 
73 /*============================================================================
74  * Create a binary signature - this is read by dptsig
75  * Needed for our management apps
76  *============================================================================
77  */
78 static DEFINE_MUTEX(adpt_mutex);
79 static dpt_sig_S DPTI_sig = {
80 	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
81 #ifdef __i386__
82 	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
83 #elif defined(__ia64__)
84 	PROC_INTEL, PROC_IA64,
85 #elif defined(__sparc__)
86 	PROC_ULTRASPARC, PROC_ULTRASPARC,
87 #elif defined(__alpha__)
88 	PROC_ALPHA, PROC_ALPHA,
89 #else
90 	(-1),(-1),
91 #endif
92 	 FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
93 	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
94 	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
95 };
96 
97 
98 
99 
100 /*============================================================================
101  * Globals
102  *============================================================================
103  */
104 
105 static DEFINE_MUTEX(adpt_configuration_lock);
106 
107 static struct i2o_sys_tbl *sys_tbl;
108 static dma_addr_t sys_tbl_pa;
109 static int sys_tbl_ind;
110 static int sys_tbl_len;
111 
112 static adpt_hba* hba_chain = NULL;
113 static int hba_count = 0;
114 
115 static struct class *adpt_sysfs_class;
116 
117 static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
118 #ifdef CONFIG_COMPAT
119 static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
120 #endif
121 
122 static const struct file_operations adpt_fops = {
123 	.unlocked_ioctl	= adpt_unlocked_ioctl,
124 	.open		= adpt_open,
125 	.release	= adpt_close,
126 #ifdef CONFIG_COMPAT
127 	.compat_ioctl	= compat_adpt_ioctl,
128 #endif
129 	.llseek		= noop_llseek,
130 };
131 
132 /* Structures and definitions for synchronous message posting.
133  * See adpt_i2o_post_wait() for description
134  * */
135 struct adpt_i2o_post_wait_data
136 {
137 	int status;
138 	u32 id;
139 	adpt_wait_queue_head_t *wq;
140 	struct adpt_i2o_post_wait_data *next;
141 };
142 
143 static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
144 static u32 adpt_post_wait_id = 0;
145 static DEFINE_SPINLOCK(adpt_post_wait_lock);
146 
147 
148 /*============================================================================
149  * 				Functions
150  *============================================================================
151  */
152 
dpt_dma64(adpt_hba * pHba)153 static inline int dpt_dma64(adpt_hba *pHba)
154 {
155 	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
156 }
157 
dma_high(dma_addr_t addr)158 static inline u32 dma_high(dma_addr_t addr)
159 {
160 	return upper_32_bits(addr);
161 }
162 
dma_low(dma_addr_t addr)163 static inline u32 dma_low(dma_addr_t addr)
164 {
165 	return (u32)addr;
166 }
167 
adpt_read_blink_led(adpt_hba * host)168 static u8 adpt_read_blink_led(adpt_hba* host)
169 {
170 	if (host->FwDebugBLEDflag_P) {
171 		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
172 			return readb(host->FwDebugBLEDvalue_P);
173 		}
174 	}
175 	return 0;
176 }
177 
178 /*============================================================================
179  * Scsi host template interface functions
180  *============================================================================
181  */
182 
183 static struct pci_device_id dptids[] = {
184 	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
185 	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
186 	{ 0, }
187 };
188 MODULE_DEVICE_TABLE(pci,dptids);
189 
adpt_detect(struct scsi_host_template * sht)190 static int adpt_detect(struct scsi_host_template* sht)
191 {
192 	struct pci_dev *pDev = NULL;
193 	adpt_hba *pHba;
194 	adpt_hba *next;
195 
196 	PINFO("Detecting Adaptec I2O RAID controllers...\n");
197 
198         /* search for all Adatpec I2O RAID cards */
199 	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
200 		if(pDev->device == PCI_DPT_DEVICE_ID ||
201 		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
202 			if(adpt_install_hba(sht, pDev) ){
203 				PERROR("Could not Init an I2O RAID device\n");
204 				PERROR("Will not try to detect others.\n");
205 				return hba_count-1;
206 			}
207 			pci_dev_get(pDev);
208 		}
209 	}
210 
211 	/* In INIT state, Activate IOPs */
212 	for (pHba = hba_chain; pHba; pHba = next) {
213 		next = pHba->next;
214 		// Activate does get status , init outbound, and get hrt
215 		if (adpt_i2o_activate_hba(pHba) < 0) {
216 			adpt_i2o_delete_hba(pHba);
217 		}
218 	}
219 
220 
221 	/* Active IOPs in HOLD state */
222 
223 rebuild_sys_tab:
224 	if (hba_chain == NULL)
225 		return 0;
226 
227 	/*
228 	 * If build_sys_table fails, we kill everything and bail
229 	 * as we can't init the IOPs w/o a system table
230 	 */
231 	if (adpt_i2o_build_sys_table() < 0) {
232 		adpt_i2o_sys_shutdown();
233 		return 0;
234 	}
235 
236 	PDEBUG("HBA's in HOLD state\n");
237 
238 	/* If IOP don't get online, we need to rebuild the System table */
239 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
240 		if (adpt_i2o_online_hba(pHba) < 0) {
241 			adpt_i2o_delete_hba(pHba);
242 			goto rebuild_sys_tab;
243 		}
244 	}
245 
246 	/* Active IOPs now in OPERATIONAL state */
247 	PDEBUG("HBA's in OPERATIONAL state\n");
248 
249 	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
250 	for (pHba = hba_chain; pHba; pHba = next) {
251 		next = pHba->next;
252 		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
253 		if (adpt_i2o_lct_get(pHba) < 0){
254 			adpt_i2o_delete_hba(pHba);
255 			continue;
256 		}
257 
258 		if (adpt_i2o_parse_lct(pHba) < 0){
259 			adpt_i2o_delete_hba(pHba);
260 			continue;
261 		}
262 		adpt_inquiry(pHba);
263 	}
264 
265 	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
266 	if (IS_ERR(adpt_sysfs_class)) {
267 		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
268 		adpt_sysfs_class = NULL;
269 	}
270 
271 	for (pHba = hba_chain; pHba; pHba = next) {
272 		next = pHba->next;
273 		if (adpt_scsi_host_alloc(pHba, sht) < 0){
274 			adpt_i2o_delete_hba(pHba);
275 			continue;
276 		}
277 		pHba->initialized = TRUE;
278 		pHba->state &= ~DPTI_STATE_RESET;
279 		if (adpt_sysfs_class) {
280 			struct device *dev = device_create(adpt_sysfs_class,
281 				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
282 				"dpti%d", pHba->unit);
283 			if (IS_ERR(dev)) {
284 				printk(KERN_WARNING"dpti%d: unable to "
285 					"create device in dpt_i2o class\n",
286 					pHba->unit);
287 			}
288 		}
289 	}
290 
291 	// Register our control device node
292 	// nodes will need to be created in /dev to access this
293 	// the nodes can not be created from within the driver
294 	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
295 		adpt_i2o_sys_shutdown();
296 		return 0;
297 	}
298 	return hba_count;
299 }
300 
301 
302 /*
303  * scsi_unregister will be called AFTER we return.
304  */
adpt_release(struct Scsi_Host * host)305 static int adpt_release(struct Scsi_Host *host)
306 {
307 	adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
308 //	adpt_i2o_quiesce_hba(pHba);
309 	adpt_i2o_delete_hba(pHba);
310 	scsi_unregister(host);
311 	return 0;
312 }
313 
314 
adpt_inquiry(adpt_hba * pHba)315 static void adpt_inquiry(adpt_hba* pHba)
316 {
317 	u32 msg[17];
318 	u32 *mptr;
319 	u32 *lenptr;
320 	int direction;
321 	int scsidir;
322 	u32 len;
323 	u32 reqlen;
324 	u8* buf;
325 	dma_addr_t addr;
326 	u8  scb[16];
327 	s32 rcode;
328 
329 	memset(msg, 0, sizeof(msg));
330 	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
331 	if(!buf){
332 		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
333 		return;
334 	}
335 	memset((void*)buf, 0, 36);
336 
337 	len = 36;
338 	direction = 0x00000000;
339 	scsidir  =0x40000000;	// DATA IN  (iop<--dev)
340 
341 	if (dpt_dma64(pHba))
342 		reqlen = 17;		// SINGLE SGE, 64 bit
343 	else
344 		reqlen = 14;		// SINGLE SGE, 32 bit
345 	/* Stick the headers on */
346 	msg[0] = reqlen<<16 | SGL_OFFSET_12;
347 	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
348 	msg[2] = 0;
349 	msg[3]  = 0;
350 	// Adaptec/DPT Private stuff
351 	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
352 	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
353 	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
354 	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
355 	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
356 	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
357 	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;
358 
359 	mptr=msg+7;
360 
361 	memset(scb, 0, sizeof(scb));
362 	// Write SCSI command into the message - always 16 byte block
363 	scb[0] = INQUIRY;
364 	scb[1] = 0;
365 	scb[2] = 0;
366 	scb[3] = 0;
367 	scb[4] = 36;
368 	scb[5] = 0;
369 	// Don't care about the rest of scb
370 
371 	memcpy(mptr, scb, sizeof(scb));
372 	mptr+=4;
373 	lenptr=mptr++;		/* Remember me - fill in when we know */
374 
375 	/* Now fill in the SGList and command */
376 	*lenptr = len;
377 	if (dpt_dma64(pHba)) {
378 		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
379 		*mptr++ = 1 << PAGE_SHIFT;
380 		*mptr++ = 0xD0000000|direction|len;
381 		*mptr++ = dma_low(addr);
382 		*mptr++ = dma_high(addr);
383 	} else {
384 		*mptr++ = 0xD0000000|direction|len;
385 		*mptr++ = addr;
386 	}
387 
388 	// Send it on it's way
389 	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
390 	if (rcode != 0) {
391 		sprintf(pHba->detail, "Adaptec I2O RAID");
392 		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
393 		if (rcode != -ETIME && rcode != -EINTR)
394 			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
395 	} else {
396 		memset(pHba->detail, 0, sizeof(pHba->detail));
397 		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
398 		memcpy(&(pHba->detail[16]), " Model: ", 8);
399 		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
400 		memcpy(&(pHba->detail[40]), " FW: ", 4);
401 		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
402 		pHba->detail[48] = '\0';	/* precautionary */
403 		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
404 	}
405 	adpt_i2o_status_get(pHba);
406 	return ;
407 }
408 
409 
adpt_slave_configure(struct scsi_device * device)410 static int adpt_slave_configure(struct scsi_device * device)
411 {
412 	struct Scsi_Host *host = device->host;
413 	adpt_hba* pHba;
414 
415 	pHba = (adpt_hba *) host->hostdata[0];
416 
417 	if (host->can_queue && device->tagged_supported) {
418 		scsi_change_queue_depth(device,
419 				host->can_queue - 1);
420 	}
421 	return 0;
422 }
423 
adpt_queue_lck(struct scsi_cmnd * cmd,void (* done)(struct scsi_cmnd *))424 static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
425 {
426 	adpt_hba* pHba = NULL;
427 	struct adpt_device* pDev = NULL;	/* dpt per device information */
428 
429 	cmd->scsi_done = done;
430 	/*
431 	 * SCSI REQUEST_SENSE commands will be executed automatically by the
432 	 * Host Adapter for any errors, so they should not be executed
433 	 * explicitly unless the Sense Data is zero indicating that no error
434 	 * occurred.
435 	 */
436 
437 	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
438 		cmd->result = (DID_OK << 16);
439 		cmd->scsi_done(cmd);
440 		return 0;
441 	}
442 
443 	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
444 	if (!pHba) {
445 		return FAILED;
446 	}
447 
448 	rmb();
449 	if ((pHba->state) & DPTI_STATE_RESET)
450 		return SCSI_MLQUEUE_HOST_BUSY;
451 
452 	// TODO if the cmd->device if offline then I may need to issue a bus rescan
453 	// followed by a get_lct to see if the device is there anymore
454 	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
455 		/*
456 		 * First command request for this device.  Set up a pointer
457 		 * to the device structure.  This should be a TEST_UNIT_READY
458 		 * command from scan_scsis_single.
459 		 */
460 		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
461 			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
462 			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
463 			cmd->result = (DID_NO_CONNECT << 16);
464 			cmd->scsi_done(cmd);
465 			return 0;
466 		}
467 		cmd->device->hostdata = pDev;
468 	}
469 	pDev->pScsi_dev = cmd->device;
470 
471 	/*
472 	 * If we are being called from when the device is being reset,
473 	 * delay processing of the command until later.
474 	 */
475 	if (pDev->state & DPTI_DEV_RESET ) {
476 		return FAILED;
477 	}
478 	return adpt_scsi_to_i2o(pHba, cmd, pDev);
479 }
480 
DEF_SCSI_QCMD(adpt_queue)481 static DEF_SCSI_QCMD(adpt_queue)
482 
483 static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
484 		sector_t capacity, int geom[])
485 {
486 	int heads=-1;
487 	int sectors=-1;
488 	int cylinders=-1;
489 
490 	// *** First lets set the default geometry ****
491 
492 	// If the capacity is less than ox2000
493 	if (capacity < 0x2000 ) {	// floppy
494 		heads = 18;
495 		sectors = 2;
496 	}
497 	// else if between 0x2000 and 0x20000
498 	else if (capacity < 0x20000) {
499 		heads = 64;
500 		sectors = 32;
501 	}
502 	// else if between 0x20000 and 0x40000
503 	else if (capacity < 0x40000) {
504 		heads = 65;
505 		sectors = 63;
506 	}
507 	// else if between 0x4000 and 0x80000
508 	else if (capacity < 0x80000) {
509 		heads = 128;
510 		sectors = 63;
511 	}
512 	// else if greater than 0x80000
513 	else {
514 		heads = 255;
515 		sectors = 63;
516 	}
517 	cylinders = sector_div(capacity, heads * sectors);
518 
519 	// Special case if CDROM
520 	if(sdev->type == 5) {  // CDROM
521 		heads = 252;
522 		sectors = 63;
523 		cylinders = 1111;
524 	}
525 
526 	geom[0] = heads;
527 	geom[1] = sectors;
528 	geom[2] = cylinders;
529 
530 	PDEBUG("adpt_bios_param: exit\n");
531 	return 0;
532 }
533 
534 
adpt_info(struct Scsi_Host * host)535 static const char *adpt_info(struct Scsi_Host *host)
536 {
537 	adpt_hba* pHba;
538 
539 	pHba = (adpt_hba *) host->hostdata[0];
540 	return (char *) (pHba->detail);
541 }
542 
adpt_show_info(struct seq_file * m,struct Scsi_Host * host)543 static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
544 {
545 	struct adpt_device* d;
546 	int id;
547 	int chan;
548 	adpt_hba* pHba;
549 	int unit;
550 
551 	// Find HBA (host bus adapter) we are looking for
552 	mutex_lock(&adpt_configuration_lock);
553 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
554 		if (pHba->host == host) {
555 			break;	/* found adapter */
556 		}
557 	}
558 	mutex_unlock(&adpt_configuration_lock);
559 	if (pHba == NULL) {
560 		return 0;
561 	}
562 	host = pHba->host;
563 
564 	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
565 	seq_printf(m, "%s\n", pHba->detail);
566 	seq_printf(m, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
567 			pHba->host->host_no, pHba->name, host->irq);
568 	seq_printf(m, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
569 			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);
570 
571 	seq_puts(m, "Devices:\n");
572 	for(chan = 0; chan < MAX_CHANNEL; chan++) {
573 		for(id = 0; id < MAX_ID; id++) {
574 			d = pHba->channel[chan].device[id];
575 			while(d) {
576 				seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
577 				seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);
578 
579 				unit = d->pI2o_dev->lct_data.tid;
580 				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu)  (%s)\n\n",
581 					       unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
582 					       scsi_device_online(d->pScsi_dev)? "online":"offline");
583 				d = d->next_lun;
584 			}
585 		}
586 	}
587 	return 0;
588 }
589 
590 /*
591  *	Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
592  */
adpt_cmd_to_context(struct scsi_cmnd * cmd)593 static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
594 {
595 	return (u32)cmd->serial_number;
596 }
597 
598 /*
599  *	Go from a u32 'context' to a struct scsi_cmnd * .
600  *	This could probably be made more efficient.
601  */
602 static struct scsi_cmnd *
adpt_cmd_from_context(adpt_hba * pHba,u32 context)603 	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
604 {
605 	struct scsi_cmnd * cmd;
606 	struct scsi_device * d;
607 
608 	if (context == 0)
609 		return NULL;
610 
611 	spin_unlock(pHba->host->host_lock);
612 	shost_for_each_device(d, pHba->host) {
613 		unsigned long flags;
614 		spin_lock_irqsave(&d->list_lock, flags);
615 		list_for_each_entry(cmd, &d->cmd_list, list) {
616 			if (((u32)cmd->serial_number == context)) {
617 				spin_unlock_irqrestore(&d->list_lock, flags);
618 				scsi_device_put(d);
619 				spin_lock(pHba->host->host_lock);
620 				return cmd;
621 			}
622 		}
623 		spin_unlock_irqrestore(&d->list_lock, flags);
624 	}
625 	spin_lock(pHba->host->host_lock);
626 
627 	return NULL;
628 }
629 
630 /*
631  *	Turn a pointer to ioctl reply data into an u32 'context'
632  */
adpt_ioctl_to_context(adpt_hba * pHba,void * reply)633 static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
634 {
635 #if BITS_PER_LONG == 32
636 	return (u32)(unsigned long)reply;
637 #else
638 	ulong flags = 0;
639 	u32 nr, i;
640 
641 	spin_lock_irqsave(pHba->host->host_lock, flags);
642 	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
643 	for (i = 0; i < nr; i++) {
644 		if (pHba->ioctl_reply_context[i] == NULL) {
645 			pHba->ioctl_reply_context[i] = reply;
646 			break;
647 		}
648 	}
649 	spin_unlock_irqrestore(pHba->host->host_lock, flags);
650 	if (i >= nr) {
651 		kfree (reply);
652 		printk(KERN_WARNING"%s: Too many outstanding "
653 				"ioctl commands\n", pHba->name);
654 		return (u32)-1;
655 	}
656 
657 	return i;
658 #endif
659 }
660 
661 /*
662  *	Go from an u32 'context' to a pointer to ioctl reply data.
663  */
adpt_ioctl_from_context(adpt_hba * pHba,u32 context)664 static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
665 {
666 #if BITS_PER_LONG == 32
667 	return (void *)(unsigned long)context;
668 #else
669 	void *p = pHba->ioctl_reply_context[context];
670 	pHba->ioctl_reply_context[context] = NULL;
671 
672 	return p;
673 #endif
674 }
675 
676 /*===========================================================================
677  * Error Handling routines
678  *===========================================================================
679  */
680 
adpt_abort(struct scsi_cmnd * cmd)681 static int adpt_abort(struct scsi_cmnd * cmd)
682 {
683 	adpt_hba* pHba = NULL;	/* host bus adapter structure */
684 	struct adpt_device* dptdevice;	/* dpt per device information */
685 	u32 msg[5];
686 	int rcode;
687 
688 	if(cmd->serial_number == 0){
689 		return FAILED;
690 	}
691 	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
692 	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
693 	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
694 		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
695 		return FAILED;
696 	}
697 
698 	memset(msg, 0, sizeof(msg));
699 	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
700 	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
701 	msg[2] = 0;
702 	msg[3]= 0;
703 	msg[4] = adpt_cmd_to_context(cmd);
704 	if (pHba->host)
705 		spin_lock_irq(pHba->host->host_lock);
706 	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
707 	if (pHba->host)
708 		spin_unlock_irq(pHba->host->host_lock);
709 	if (rcode != 0) {
710 		if(rcode == -EOPNOTSUPP ){
711 			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
712 			return FAILED;
713 		}
714 		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
715 		return FAILED;
716 	}
717 	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
718 	return SUCCESS;
719 }
720 
721 
722 #define I2O_DEVICE_RESET 0x27
723 // This is the same for BLK and SCSI devices
724 // NOTE this is wrong in the i2o.h definitions
725 // This is not currently supported by our adapter but we issue it anyway
adpt_device_reset(struct scsi_cmnd * cmd)726 static int adpt_device_reset(struct scsi_cmnd* cmd)
727 {
728 	adpt_hba* pHba;
729 	u32 msg[4];
730 	u32 rcode;
731 	int old_state;
732 	struct adpt_device* d = cmd->device->hostdata;
733 
734 	pHba = (void*) cmd->device->host->hostdata[0];
735 	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
736 	if (!d) {
737 		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
738 		return FAILED;
739 	}
740 	memset(msg, 0, sizeof(msg));
741 	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
742 	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
743 	msg[2] = 0;
744 	msg[3] = 0;
745 
746 	if (pHba->host)
747 		spin_lock_irq(pHba->host->host_lock);
748 	old_state = d->state;
749 	d->state |= DPTI_DEV_RESET;
750 	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
751 	d->state = old_state;
752 	if (pHba->host)
753 		spin_unlock_irq(pHba->host->host_lock);
754 	if (rcode != 0) {
755 		if(rcode == -EOPNOTSUPP ){
756 			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
757 			return FAILED;
758 		}
759 		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
760 		return FAILED;
761 	} else {
762 		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
763 		return SUCCESS;
764 	}
765 }
766 
767 
768 #define I2O_HBA_BUS_RESET 0x87
769 // This version of bus reset is called by the eh_error handler
adpt_bus_reset(struct scsi_cmnd * cmd)770 static int adpt_bus_reset(struct scsi_cmnd* cmd)
771 {
772 	adpt_hba* pHba;
773 	u32 msg[4];
774 	u32 rcode;
775 
776 	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
777 	memset(msg, 0, sizeof(msg));
778 	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
779 	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
780 	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
781 	msg[2] = 0;
782 	msg[3] = 0;
783 	if (pHba->host)
784 		spin_lock_irq(pHba->host->host_lock);
785 	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
786 	if (pHba->host)
787 		spin_unlock_irq(pHba->host->host_lock);
788 	if (rcode != 0) {
789 		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
790 		return FAILED;
791 	} else {
792 		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
793 		return SUCCESS;
794 	}
795 }
796 
797 // This version of reset is called by the eh_error_handler
__adpt_reset(struct scsi_cmnd * cmd)798 static int __adpt_reset(struct scsi_cmnd* cmd)
799 {
800 	adpt_hba* pHba;
801 	int rcode;
802 	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
803 	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
804 	rcode =  adpt_hba_reset(pHba);
805 	if(rcode == 0){
806 		printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
807 		return SUCCESS;
808 	} else {
809 		printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
810 		return FAILED;
811 	}
812 }
813 
adpt_reset(struct scsi_cmnd * cmd)814 static int adpt_reset(struct scsi_cmnd* cmd)
815 {
816 	int rc;
817 
818 	spin_lock_irq(cmd->device->host->host_lock);
819 	rc = __adpt_reset(cmd);
820 	spin_unlock_irq(cmd->device->host->host_lock);
821 
822 	return rc;
823 }
824 
825 // This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
adpt_hba_reset(adpt_hba * pHba)826 static int adpt_hba_reset(adpt_hba* pHba)
827 {
828 	int rcode;
829 
830 	pHba->state |= DPTI_STATE_RESET;
831 
832 	// Activate does get status , init outbound, and get hrt
833 	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
834 		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
835 		adpt_i2o_delete_hba(pHba);
836 		return rcode;
837 	}
838 
839 	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
840 		adpt_i2o_delete_hba(pHba);
841 		return rcode;
842 	}
843 	PDEBUG("%s: in HOLD state\n",pHba->name);
844 
845 	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
846 		adpt_i2o_delete_hba(pHba);
847 		return rcode;
848 	}
849 	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);
850 
851 	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
852 		adpt_i2o_delete_hba(pHba);
853 		return rcode;
854 	}
855 
856 	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
857 		adpt_i2o_delete_hba(pHba);
858 		return rcode;
859 	}
860 	pHba->state &= ~DPTI_STATE_RESET;
861 
862 	adpt_fail_posted_scbs(pHba);
863 	return 0;	/* return success */
864 }
865 
866 /*===========================================================================
867  *
868  *===========================================================================
869  */
870 
871 
adpt_i2o_sys_shutdown(void)872 static void adpt_i2o_sys_shutdown(void)
873 {
874 	adpt_hba *pHba, *pNext;
875 	struct adpt_i2o_post_wait_data *p1, *old;
876 
877 	 printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
878 	 printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
879 	/* Delete all IOPs from the controller chain */
880 	/* They should have already been released by the
881 	 * scsi-core
882 	 */
883 	for (pHba = hba_chain; pHba; pHba = pNext) {
884 		pNext = pHba->next;
885 		adpt_i2o_delete_hba(pHba);
886 	}
887 
888 	/* Remove any timedout entries from the wait queue.  */
889 //	spin_lock_irqsave(&adpt_post_wait_lock, flags);
890 	/* Nothing should be outstanding at this point so just
891 	 * free them
892 	 */
893 	for(p1 = adpt_post_wait_queue; p1;) {
894 		old = p1;
895 		p1 = p1->next;
896 		kfree(old);
897 	}
898 //	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
899 	adpt_post_wait_queue = NULL;
900 
901 	 printk(KERN_INFO "Adaptec I2O controllers down.\n");
902 }
903 
adpt_install_hba(struct scsi_host_template * sht,struct pci_dev * pDev)904 static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
905 {
906 
907 	adpt_hba* pHba = NULL;
908 	adpt_hba* p = NULL;
909 	ulong base_addr0_phys = 0;
910 	ulong base_addr1_phys = 0;
911 	u32 hba_map0_area_size = 0;
912 	u32 hba_map1_area_size = 0;
913 	void __iomem *base_addr_virt = NULL;
914 	void __iomem *msg_addr_virt = NULL;
915 	int dma64 = 0;
916 
917 	int raptorFlag = FALSE;
918 
919 	if(pci_enable_device(pDev)) {
920 		return -EINVAL;
921 	}
922 
923 	if (pci_request_regions(pDev, "dpt_i2o")) {
924 		PERROR("dpti: adpt_config_hba: pci request region failed\n");
925 		return -EINVAL;
926 	}
927 
928 	pci_set_master(pDev);
929 
930 	/*
931 	 *	See if we should enable dma64 mode.
932 	 */
933 	if (sizeof(dma_addr_t) > 4 &&
934 	    pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
935 		if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
936 			dma64 = 1;
937 	}
938 	if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
939 		return -EINVAL;
940 
941 	/* adapter only supports message blocks below 4GB */
942 	pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));
943 
944 	base_addr0_phys = pci_resource_start(pDev,0);
945 	hba_map0_area_size = pci_resource_len(pDev,0);
946 
947 	// Check if standard PCI card or single BAR Raptor
948 	if(pDev->device == PCI_DPT_DEVICE_ID){
949 		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
950 			// Raptor card with this device id needs 4M
951 			hba_map0_area_size = 0x400000;
952 		} else { // Not Raptor - it is a PCI card
953 			if(hba_map0_area_size > 0x100000 ){
954 				hba_map0_area_size = 0x100000;
955 			}
956 		}
957 	} else {// Raptor split BAR config
958 		// Use BAR1 in this configuration
959 		base_addr1_phys = pci_resource_start(pDev,1);
960 		hba_map1_area_size = pci_resource_len(pDev,1);
961 		raptorFlag = TRUE;
962 	}
963 
964 #if BITS_PER_LONG == 64
965 	/*
966 	 *	The original Adaptec 64 bit driver has this comment here:
967 	 *	"x86_64 machines need more optimal mappings"
968 	 *
969 	 *	I assume some HBAs report ridiculously large mappings
970 	 *	and we need to limit them on platforms with IOMMUs.
971 	 */
972 	if (raptorFlag == TRUE) {
973 		if (hba_map0_area_size > 128)
974 			hba_map0_area_size = 128;
975 		if (hba_map1_area_size > 524288)
976 			hba_map1_area_size = 524288;
977 	} else {
978 		if (hba_map0_area_size > 524288)
979 			hba_map0_area_size = 524288;
980 	}
981 #endif
982 
983 	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
984 	if (!base_addr_virt) {
985 		pci_release_regions(pDev);
986 		PERROR("dpti: adpt_config_hba: io remap failed\n");
987 		return -EINVAL;
988 	}
989 
990         if(raptorFlag == TRUE) {
991 		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
992 		if (!msg_addr_virt) {
993 			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
994 			iounmap(base_addr_virt);
995 			pci_release_regions(pDev);
996 			return -EINVAL;
997 		}
998 	} else {
999 		msg_addr_virt = base_addr_virt;
1000 	}
1001 
1002 	// Allocate and zero the data structure
1003 	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
1004 	if (!pHba) {
1005 		if (msg_addr_virt != base_addr_virt)
1006 			iounmap(msg_addr_virt);
1007 		iounmap(base_addr_virt);
1008 		pci_release_regions(pDev);
1009 		return -ENOMEM;
1010 	}
1011 
1012 	mutex_lock(&adpt_configuration_lock);
1013 
1014 	if(hba_chain != NULL){
1015 		for(p = hba_chain; p->next; p = p->next);
1016 		p->next = pHba;
1017 	} else {
1018 		hba_chain = pHba;
1019 	}
1020 	pHba->next = NULL;
1021 	pHba->unit = hba_count;
1022 	sprintf(pHba->name, "dpti%d", hba_count);
1023 	hba_count++;
1024 
1025 	mutex_unlock(&adpt_configuration_lock);
1026 
1027 	pHba->pDev = pDev;
1028 	pHba->base_addr_phys = base_addr0_phys;
1029 
1030 	// Set up the Virtual Base Address of the I2O Device
1031 	pHba->base_addr_virt = base_addr_virt;
1032 	pHba->msg_addr_virt = msg_addr_virt;
1033 	pHba->irq_mask = base_addr_virt+0x30;
1034 	pHba->post_port = base_addr_virt+0x40;
1035 	pHba->reply_port = base_addr_virt+0x44;
1036 
1037 	pHba->hrt = NULL;
1038 	pHba->lct = NULL;
1039 	pHba->lct_size = 0;
1040 	pHba->status_block = NULL;
1041 	pHba->post_count = 0;
1042 	pHba->state = DPTI_STATE_RESET;
1043 	pHba->pDev = pDev;
1044 	pHba->devices = NULL;
1045 	pHba->dma64 = dma64;
1046 
1047 	// Initializing the spinlocks
1048 	spin_lock_init(&pHba->state_lock);
1049 	spin_lock_init(&adpt_post_wait_lock);
1050 
1051 	if(raptorFlag == 0){
1052 		printk(KERN_INFO "Adaptec I2O RAID controller"
1053 				 " %d at %p size=%x irq=%d%s\n",
1054 			hba_count-1, base_addr_virt,
1055 			hba_map0_area_size, pDev->irq,
1056 			dma64 ? " (64-bit DMA)" : "");
1057 	} else {
1058 		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
1059 			hba_count-1, pDev->irq,
1060 			dma64 ? " (64-bit DMA)" : "");
1061 		printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
1062 		printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
1063 	}
1064 
1065 	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
1066 		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
1067 		adpt_i2o_delete_hba(pHba);
1068 		return -EINVAL;
1069 	}
1070 
1071 	return 0;
1072 }
1073 
1074 
adpt_i2o_delete_hba(adpt_hba * pHba)1075 static void adpt_i2o_delete_hba(adpt_hba* pHba)
1076 {
1077 	adpt_hba* p1;
1078 	adpt_hba* p2;
1079 	struct i2o_device* d;
1080 	struct i2o_device* next;
1081 	int i;
1082 	int j;
1083 	struct adpt_device* pDev;
1084 	struct adpt_device* pNext;
1085 
1086 
1087 	mutex_lock(&adpt_configuration_lock);
1088 	// scsi_unregister calls our adpt_release which
1089 	// does a quiese
1090 	if(pHba->host){
1091 		free_irq(pHba->host->irq, pHba);
1092 	}
1093 	p2 = NULL;
1094 	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
1095 		if(p1 == pHba) {
1096 			if(p2) {
1097 				p2->next = p1->next;
1098 			} else {
1099 				hba_chain = p1->next;
1100 			}
1101 			break;
1102 		}
1103 	}
1104 
1105 	hba_count--;
1106 	mutex_unlock(&adpt_configuration_lock);
1107 
1108 	iounmap(pHba->base_addr_virt);
1109 	pci_release_regions(pHba->pDev);
1110 	if(pHba->msg_addr_virt != pHba->base_addr_virt){
1111 		iounmap(pHba->msg_addr_virt);
1112 	}
1113 	if(pHba->FwDebugBuffer_P)
1114 	   	iounmap(pHba->FwDebugBuffer_P);
1115 	if(pHba->hrt) {
1116 		dma_free_coherent(&pHba->pDev->dev,
1117 			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
1118 			pHba->hrt, pHba->hrt_pa);
1119 	}
1120 	if(pHba->lct) {
1121 		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
1122 			pHba->lct, pHba->lct_pa);
1123 	}
1124 	if(pHba->status_block) {
1125 		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
1126 			pHba->status_block, pHba->status_block_pa);
1127 	}
1128 	if(pHba->reply_pool) {
1129 		dma_free_coherent(&pHba->pDev->dev,
1130 			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
1131 			pHba->reply_pool, pHba->reply_pool_pa);
1132 	}
1133 
1134 	for(d = pHba->devices; d ; d = next){
1135 		next = d->next;
1136 		kfree(d);
1137 	}
1138 	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
1139 		for(j = 0; j < MAX_ID; j++){
1140 			if(pHba->channel[i].device[j] != NULL){
1141 				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
1142 					pNext = pDev->next_lun;
1143 					kfree(pDev);
1144 				}
1145 			}
1146 		}
1147 	}
1148 	pci_dev_put(pHba->pDev);
1149 	if (adpt_sysfs_class)
1150 		device_destroy(adpt_sysfs_class,
1151 				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
1152 	kfree(pHba);
1153 
1154 	if(hba_count <= 0){
1155 		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
1156 		if (adpt_sysfs_class) {
1157 			class_destroy(adpt_sysfs_class);
1158 			adpt_sysfs_class = NULL;
1159 		}
1160 	}
1161 }
1162 
adpt_find_device(adpt_hba * pHba,u32 chan,u32 id,u64 lun)1163 static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
1164 {
1165 	struct adpt_device* d;
1166 
1167 	if(chan < 0 || chan >= MAX_CHANNEL)
1168 		return NULL;
1169 
1170 	if( pHba->channel[chan].device == NULL){
1171 		printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
1172 		return NULL;
1173 	}
1174 
1175 	d = pHba->channel[chan].device[id];
1176 	if(!d || d->tid == 0) {
1177 		return NULL;
1178 	}
1179 
1180 	/* If it is the only lun at that address then this should match*/
1181 	if(d->scsi_lun == lun){
1182 		return d;
1183 	}
1184 
1185 	/* else we need to look through all the luns */
1186 	for(d=d->next_lun ; d ; d = d->next_lun){
1187 		if(d->scsi_lun == lun){
1188 			return d;
1189 		}
1190 	}
1191 	return NULL;
1192 }
1193 
1194 
adpt_i2o_post_wait(adpt_hba * pHba,u32 * msg,int len,int timeout)1195 static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
1196 {
1197 	// I used my own version of the WAIT_QUEUE_HEAD
1198 	// to handle some version differences
1199 	// When embedded in the kernel this could go back to the vanilla one
1200 	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
1201 	int status = 0;
1202 	ulong flags = 0;
1203 	struct adpt_i2o_post_wait_data *p1, *p2;
1204 	struct adpt_i2o_post_wait_data *wait_data =
1205 		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
1206 	DECLARE_WAITQUEUE(wait, current);
1207 
1208 	if (!wait_data)
1209 		return -ENOMEM;
1210 
1211 	/*
1212 	 * The spin locking is needed to keep anyone from playing
1213 	 * with the queue pointers and id while we do the same
1214 	 */
1215 	spin_lock_irqsave(&adpt_post_wait_lock, flags);
1216        // TODO we need a MORE unique way of getting ids
1217        // to support async LCT get
1218 	wait_data->next = adpt_post_wait_queue;
1219 	adpt_post_wait_queue = wait_data;
1220 	adpt_post_wait_id++;
1221 	adpt_post_wait_id &= 0x7fff;
1222 	wait_data->id =  adpt_post_wait_id;
1223 	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1224 
1225 	wait_data->wq = &adpt_wq_i2o_post;
1226 	wait_data->status = -ETIMEDOUT;
1227 
1228 	add_wait_queue(&adpt_wq_i2o_post, &wait);
1229 
1230 	msg[2] |= 0x80000000 | ((u32)wait_data->id);
1231 	timeout *= HZ;
1232 	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
1233 		set_current_state(TASK_INTERRUPTIBLE);
1234 		if(pHba->host)
1235 			spin_unlock_irq(pHba->host->host_lock);
1236 		if (!timeout)
1237 			schedule();
1238 		else{
1239 			timeout = schedule_timeout(timeout);
1240 			if (timeout == 0) {
1241 				// I/O issued, but cannot get result in
1242 				// specified time. Freeing resorces is
1243 				// dangerous.
1244 				status = -ETIME;
1245 			}
1246 		}
1247 		if(pHba->host)
1248 			spin_lock_irq(pHba->host->host_lock);
1249 	}
1250 	remove_wait_queue(&adpt_wq_i2o_post, &wait);
1251 
1252 	if(status == -ETIMEDOUT){
1253 		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
1254 		// We will have to free the wait_data memory during shutdown
1255 		return status;
1256 	}
1257 
1258 	/* Remove the entry from the queue.  */
1259 	p2 = NULL;
1260 	spin_lock_irqsave(&adpt_post_wait_lock, flags);
1261 	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
1262 		if(p1 == wait_data) {
1263 			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
1264 				status = -EOPNOTSUPP;
1265 			}
1266 			if(p2) {
1267 				p2->next = p1->next;
1268 			} else {
1269 				adpt_post_wait_queue = p1->next;
1270 			}
1271 			break;
1272 		}
1273 	}
1274 	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1275 
1276 	kfree(wait_data);
1277 
1278 	return status;
1279 }
1280 
1281 
adpt_i2o_post_this(adpt_hba * pHba,u32 * data,int len)1282 static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
1283 {
1284 
1285 	u32 m = EMPTY_QUEUE;
1286 	u32 __iomem *msg;
1287 	ulong timeout = jiffies + 30*HZ;
1288 	do {
1289 		rmb();
1290 		m = readl(pHba->post_port);
1291 		if (m != EMPTY_QUEUE) {
1292 			break;
1293 		}
1294 		if(time_after(jiffies,timeout)){
1295 			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
1296 			return -ETIMEDOUT;
1297 		}
1298 		schedule_timeout_uninterruptible(1);
1299 	} while(m == EMPTY_QUEUE);
1300 
1301 	msg = pHba->msg_addr_virt + m;
1302 	memcpy_toio(msg, data, len);
1303 	wmb();
1304 
1305 	//post message
1306 	writel(m, pHba->post_port);
1307 	wmb();
1308 
1309 	return 0;
1310 }
1311 
1312 
adpt_i2o_post_wait_complete(u32 context,int status)1313 static void adpt_i2o_post_wait_complete(u32 context, int status)
1314 {
1315 	struct adpt_i2o_post_wait_data *p1 = NULL;
1316 	/*
1317 	 * We need to search through the adpt_post_wait
1318 	 * queue to see if the given message is still
1319 	 * outstanding.  If not, it means that the IOP
1320 	 * took longer to respond to the message than we
1321 	 * had allowed and timer has already expired.
1322 	 * Not much we can do about that except log
1323 	 * it for debug purposes, increase timeout, and recompile
1324 	 *
1325 	 * Lock needed to keep anyone from moving queue pointers
1326 	 * around while we're looking through them.
1327 	 */
1328 
1329 	context &= 0x7fff;
1330 
1331 	spin_lock(&adpt_post_wait_lock);
1332 	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1333 		if(p1->id == context) {
1334 			p1->status = status;
1335 			spin_unlock(&adpt_post_wait_lock);
1336 			wake_up_interruptible(p1->wq);
1337 			return;
1338 		}
1339 	}
1340 	spin_unlock(&adpt_post_wait_lock);
1341         // If this happens we lose commands that probably really completed
1342 	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
1343 	printk(KERN_DEBUG"      Tasks in wait queue:\n");
1344 	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1345 		printk(KERN_DEBUG"           %d\n",p1->id);
1346 	}
1347 	return;
1348 }
1349 
adpt_i2o_reset_hba(adpt_hba * pHba)1350 static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1351 {
1352 	u32 msg[8];
1353 	u8* status;
1354 	dma_addr_t addr;
1355 	u32 m = EMPTY_QUEUE ;
1356 	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
1357 
1358 	if(pHba->initialized  == FALSE) {	// First time reset should be quick
1359 		timeout = jiffies + (25*HZ);
1360 	} else {
1361 		adpt_i2o_quiesce_hba(pHba);
1362 	}
1363 
1364 	do {
1365 		rmb();
1366 		m = readl(pHba->post_port);
1367 		if (m != EMPTY_QUEUE) {
1368 			break;
1369 		}
1370 		if(time_after(jiffies,timeout)){
1371 			printk(KERN_WARNING"Timeout waiting for message!\n");
1372 			return -ETIMEDOUT;
1373 		}
1374 		schedule_timeout_uninterruptible(1);
1375 	} while (m == EMPTY_QUEUE);
1376 
1377 	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
1378 	if(status == NULL) {
1379 		adpt_send_nop(pHba, m);
1380 		printk(KERN_ERR"IOP reset failed - no free memory.\n");
1381 		return -ENOMEM;
1382 	}
1383 	memset(status,0,4);
1384 
1385 	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1386 	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
1387 	msg[2]=0;
1388 	msg[3]=0;
1389 	msg[4]=0;
1390 	msg[5]=0;
1391 	msg[6]=dma_low(addr);
1392 	msg[7]=dma_high(addr);
1393 
1394 	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
1395 	wmb();
1396 	writel(m, pHba->post_port);
1397 	wmb();
1398 
1399 	while(*status == 0){
1400 		if(time_after(jiffies,timeout)){
1401 			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
1402 			/* We lose 4 bytes of "status" here, but we cannot
1403 			   free these because controller may awake and corrupt
1404 			   those bytes at any time */
1405 			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
1406 			return -ETIMEDOUT;
1407 		}
1408 		rmb();
1409 		schedule_timeout_uninterruptible(1);
1410 	}
1411 
1412 	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
1413 		PDEBUG("%s: Reset in progress...\n", pHba->name);
1414 		// Here we wait for message frame to become available
1415 		// indicated that reset has finished
1416 		do {
1417 			rmb();
1418 			m = readl(pHba->post_port);
1419 			if (m != EMPTY_QUEUE) {
1420 				break;
1421 			}
1422 			if(time_after(jiffies,timeout)){
1423 				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
1424 				/* We lose 4 bytes of "status" here, but we
1425 				   cannot free these because controller may
1426 				   awake and corrupt those bytes at any time */
1427 				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
1428 				return -ETIMEDOUT;
1429 			}
1430 			schedule_timeout_uninterruptible(1);
1431 		} while (m == EMPTY_QUEUE);
1432 		// Flush the offset
1433 		adpt_send_nop(pHba, m);
1434 	}
1435 	adpt_i2o_status_get(pHba);
1436 	if(*status == 0x02 ||
1437 			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
1438 		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
1439 				pHba->name);
1440 	} else {
1441 		PDEBUG("%s: Reset completed.\n", pHba->name);
1442 	}
1443 
1444 	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
1445 #ifdef UARTDELAY
1446 	// This delay is to allow someone attached to the card through the debug UART to
1447 	// set up the dump levels that they want before the rest of the initialization sequence
1448 	adpt_delay(20000);
1449 #endif
1450 	return 0;
1451 }
1452 
1453 
adpt_i2o_parse_lct(adpt_hba * pHba)1454 static int adpt_i2o_parse_lct(adpt_hba* pHba)
1455 {
1456 	int i;
1457 	int max;
1458 	int tid;
1459 	struct i2o_device *d;
1460 	i2o_lct *lct = pHba->lct;
1461 	u8 bus_no = 0;
1462 	s16 scsi_id;
1463 	u64 scsi_lun;
1464 	u32 buf[10]; // larger than 7, or 8 ...
1465 	struct adpt_device* pDev;
1466 
1467 	if (lct == NULL) {
1468 		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
1469 		return -1;
1470 	}
1471 
1472 	max = lct->table_size;
1473 	max -= 3;
1474 	max /= 9;
1475 
1476 	for(i=0;i<max;i++) {
1477 		if( lct->lct_entry[i].user_tid != 0xfff){
1478 			/*
1479 			 * If we have hidden devices, we need to inform the upper layers about
1480 			 * the possible maximum id reference to handle device access when
1481 			 * an array is disassembled. This code has no other purpose but to
1482 			 * allow us future access to devices that are currently hidden
1483 			 * behind arrays, hotspares or have not been configured (JBOD mode).
1484 			 */
1485 			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
1486 			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
1487 			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1488 			    	continue;
1489 			}
1490 			tid = lct->lct_entry[i].tid;
1491 			// I2O_DPT_DEVICE_INFO_GROUP_NO;
1492 			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1493 				continue;
1494 			}
1495 			bus_no = buf[0]>>16;
1496 			scsi_id = buf[1];
1497 			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
1498 			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
1499 				printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
1500 				continue;
1501 			}
1502 			if (scsi_id >= MAX_ID){
1503 				printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
1504 				continue;
1505 			}
1506 			if(bus_no > pHba->top_scsi_channel){
1507 				pHba->top_scsi_channel = bus_no;
1508 			}
1509 			if(scsi_id > pHba->top_scsi_id){
1510 				pHba->top_scsi_id = scsi_id;
1511 			}
1512 			if(scsi_lun > pHba->top_scsi_lun){
1513 				pHba->top_scsi_lun = scsi_lun;
1514 			}
1515 			continue;
1516 		}
1517 		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1518 		if(d==NULL)
1519 		{
1520 			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
1521 			return -ENOMEM;
1522 		}
1523 
1524 		d->controller = pHba;
1525 		d->next = NULL;
1526 
1527 		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1528 
1529 		d->flags = 0;
1530 		tid = d->lct_data.tid;
1531 		adpt_i2o_report_hba_unit(pHba, d);
1532 		adpt_i2o_install_device(pHba, d);
1533 	}
1534 	bus_no = 0;
1535 	for(d = pHba->devices; d ; d = d->next) {
1536 		if(d->lct_data.class_id  == I2O_CLASS_BUS_ADAPTER_PORT ||
1537 		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PORT){
1538 			tid = d->lct_data.tid;
1539 			// TODO get the bus_no from hrt-but for now they are in order
1540 			//bus_no =
1541 			if(bus_no > pHba->top_scsi_channel){
1542 				pHba->top_scsi_channel = bus_no;
1543 			}
1544 			pHba->channel[bus_no].type = d->lct_data.class_id;
1545 			pHba->channel[bus_no].tid = tid;
1546 			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1547 			{
1548 				pHba->channel[bus_no].scsi_id = buf[1];
1549 				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1550 			}
1551 			// TODO remove - this is just until we get from hrt
1552 			bus_no++;
1553 			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
1554 				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
1555 				break;
1556 			}
1557 		}
1558 	}
1559 
1560 	// Setup adpt_device table
1561 	for(d = pHba->devices; d ; d = d->next) {
1562 		if(d->lct_data.class_id  == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
1563 		   d->lct_data.class_id  == I2O_CLASS_SCSI_PERIPHERAL ||
1564 		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1565 
1566 			tid = d->lct_data.tid;
1567 			scsi_id = -1;
1568 			// I2O_DPT_DEVICE_INFO_GROUP_NO;
1569 			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1570 				bus_no = buf[0]>>16;
1571 				scsi_id = buf[1];
1572 				scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
1573 				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
1574 					continue;
1575 				}
1576 				if (scsi_id >= MAX_ID) {
1577 					continue;
1578 				}
1579 				if( pHba->channel[bus_no].device[scsi_id] == NULL){
1580 					pDev =  kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1581 					if(pDev == NULL) {
1582 						return -ENOMEM;
1583 					}
1584 					pHba->channel[bus_no].device[scsi_id] = pDev;
1585 				} else {
1586 					for( pDev = pHba->channel[bus_no].device[scsi_id];
1587 							pDev->next_lun; pDev = pDev->next_lun){
1588 					}
1589 					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1590 					if(pDev->next_lun == NULL) {
1591 						return -ENOMEM;
1592 					}
1593 					pDev = pDev->next_lun;
1594 				}
1595 				pDev->tid = tid;
1596 				pDev->scsi_channel = bus_no;
1597 				pDev->scsi_id = scsi_id;
1598 				pDev->scsi_lun = scsi_lun;
1599 				pDev->pI2o_dev = d;
1600 				d->owner = pDev;
1601 				pDev->type = (buf[0])&0xff;
1602 				pDev->flags = (buf[0]>>8)&0xff;
1603 				if(scsi_id > pHba->top_scsi_id){
1604 					pHba->top_scsi_id = scsi_id;
1605 				}
1606 				if(scsi_lun > pHba->top_scsi_lun){
1607 					pHba->top_scsi_lun = scsi_lun;
1608 				}
1609 			}
1610 			if(scsi_id == -1){
1611 				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
1612 						d->lct_data.identity_tag);
1613 			}
1614 		}
1615 	}
1616 	return 0;
1617 }
1618 
1619 
1620 /*
1621  *	Each I2O controller has a chain of devices on it - these match
1622  *	the useful parts of the LCT of the board.
1623  */
1624 
adpt_i2o_install_device(adpt_hba * pHba,struct i2o_device * d)1625 static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1626 {
1627 	mutex_lock(&adpt_configuration_lock);
1628 	d->controller=pHba;
1629 	d->owner=NULL;
1630 	d->next=pHba->devices;
1631 	d->prev=NULL;
1632 	if (pHba->devices != NULL){
1633 		pHba->devices->prev=d;
1634 	}
1635 	pHba->devices=d;
1636 	*d->dev_name = 0;
1637 
1638 	mutex_unlock(&adpt_configuration_lock);
1639 	return 0;
1640 }
1641 
adpt_open(struct inode * inode,struct file * file)1642 static int adpt_open(struct inode *inode, struct file *file)
1643 {
1644 	int minor;
1645 	adpt_hba* pHba;
1646 
1647 	mutex_lock(&adpt_mutex);
1648 	//TODO check for root access
1649 	//
1650 	minor = iminor(inode);
1651 	if (minor >= hba_count) {
1652 		mutex_unlock(&adpt_mutex);
1653 		return -ENXIO;
1654 	}
1655 	mutex_lock(&adpt_configuration_lock);
1656 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1657 		if (pHba->unit == minor) {
1658 			break;	/* found adapter */
1659 		}
1660 	}
1661 	if (pHba == NULL) {
1662 		mutex_unlock(&adpt_configuration_lock);
1663 		mutex_unlock(&adpt_mutex);
1664 		return -ENXIO;
1665 	}
1666 
1667 //	if(pHba->in_use){
1668 	//	mutex_unlock(&adpt_configuration_lock);
1669 //		return -EBUSY;
1670 //	}
1671 
1672 	pHba->in_use = 1;
1673 	mutex_unlock(&adpt_configuration_lock);
1674 	mutex_unlock(&adpt_mutex);
1675 
1676 	return 0;
1677 }
1678 
adpt_close(struct inode * inode,struct file * file)1679 static int adpt_close(struct inode *inode, struct file *file)
1680 {
1681 	int minor;
1682 	adpt_hba* pHba;
1683 
1684 	minor = iminor(inode);
1685 	if (minor >= hba_count) {
1686 		return -ENXIO;
1687 	}
1688 	mutex_lock(&adpt_configuration_lock);
1689 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1690 		if (pHba->unit == minor) {
1691 			break;	/* found adapter */
1692 		}
1693 	}
1694 	mutex_unlock(&adpt_configuration_lock);
1695 	if (pHba == NULL) {
1696 		return -ENXIO;
1697 	}
1698 
1699 	pHba->in_use = 0;
1700 
1701 	return 0;
1702 }
1703 
1704 
adpt_i2o_passthru(adpt_hba * pHba,u32 __user * arg)1705 static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1706 {
1707 	u32 msg[MAX_MESSAGE_SIZE];
1708 	u32* reply = NULL;
1709 	u32 size = 0;
1710 	u32 reply_size = 0;
1711 	u32 __user *user_msg = arg;
1712 	u32 __user * user_reply = NULL;
1713 	void *sg_list[pHba->sg_tablesize];
1714 	u32 sg_offset = 0;
1715 	u32 sg_count = 0;
1716 	int sg_index = 0;
1717 	u32 i = 0;
1718 	u32 rcode = 0;
1719 	void *p = NULL;
1720 	dma_addr_t addr;
1721 	ulong flags = 0;
1722 
1723 	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1724 	// get user msg size in u32s
1725 	if(get_user(size, &user_msg[0])){
1726 		return -EFAULT;
1727 	}
1728 	size = size>>16;
1729 
1730 	user_reply = &user_msg[size];
1731 	if(size > MAX_MESSAGE_SIZE){
1732 		return -EFAULT;
1733 	}
1734 	size *= 4; // Convert to bytes
1735 
1736 	/* Copy in the user's I2O command */
1737 	if(copy_from_user(msg, user_msg, size)) {
1738 		return -EFAULT;
1739 	}
1740 	get_user(reply_size, &user_reply[0]);
1741 	reply_size = reply_size>>16;
1742 	if(reply_size > REPLY_FRAME_SIZE){
1743 		reply_size = REPLY_FRAME_SIZE;
1744 	}
1745 	reply_size *= 4;
1746 	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
1747 	if(reply == NULL) {
1748 		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
1749 		return -ENOMEM;
1750 	}
1751 	sg_offset = (msg[0]>>4)&0xf;
1752 	msg[2] = 0x40000000; // IOCTL context
1753 	msg[3] = adpt_ioctl_to_context(pHba, reply);
1754 	if (msg[3] == (u32)-1)
1755 		return -EBUSY;
1756 
1757 	memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
1758 	if(sg_offset) {
1759 		// TODO add 64 bit API
1760 		struct sg_simple_element *sg =  (struct sg_simple_element*) (msg+sg_offset);
1761 		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1762 		if (sg_count > pHba->sg_tablesize){
1763 			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1764 			kfree (reply);
1765 			return -EINVAL;
1766 		}
1767 
1768 		for(i = 0; i < sg_count; i++) {
1769 			int sg_size;
1770 
1771 			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
1772 				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i,  sg[i].flag_count);
1773 				rcode = -EINVAL;
1774 				goto cleanup;
1775 			}
1776 			sg_size = sg[i].flag_count & 0xffffff;
1777 			/* Allocate memory for the transfer */
1778 			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
1779 			if(!p) {
1780 				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1781 						pHba->name,sg_size,i,sg_count);
1782 				rcode = -ENOMEM;
1783 				goto cleanup;
1784 			}
1785 			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1786 			/* Copy in the user's SG buffer if necessary */
1787 			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
1788 				// sg_simple_element API is 32 bit
1789 				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
1790 					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1791 					rcode = -EFAULT;
1792 					goto cleanup;
1793 				}
1794 			}
1795 			/* sg_simple_element API is 32 bit, but addr < 4GB */
1796 			sg[i].addr_bus = addr;
1797 		}
1798 	}
1799 
1800 	do {
1801 		/*
1802 		 * Stop any new commands from entering the
1803 		 * controller while processing the ioctl
1804 		 */
1805 		if (pHba->host) {
1806 			scsi_block_requests(pHba->host);
1807 			spin_lock_irqsave(pHba->host->host_lock, flags);
1808 		}
1809 		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1810 		if (rcode != 0)
1811 			printk("adpt_i2o_passthru: post wait failed %d %p\n",
1812 					rcode, reply);
1813 		if (pHba->host) {
1814 			spin_unlock_irqrestore(pHba->host->host_lock, flags);
1815 			scsi_unblock_requests(pHba->host);
1816 		}
1817 	} while (rcode == -ETIMEDOUT);
1818 
1819 	if(rcode){
1820 		goto cleanup;
1821 	}
1822 
1823 	if(sg_offset) {
1824 	/* Copy the scatter/gather buffers back to user space */
1825 		u32 j;
1826 		// TODO add 64 bit API
1827 		struct sg_simple_element* sg;
1828 		int sg_size;
1829 
1830 		// re-acquire the original message to handle correctly the sg copy operation
1831 		memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1832 		// get user msg size in u32s
1833 		if(get_user(size, &user_msg[0])){
1834 			rcode = -EFAULT;
1835 			goto cleanup;
1836 		}
1837 		size = size>>16;
1838 		size *= 4;
1839 		if (size > MAX_MESSAGE_SIZE) {
1840 			rcode = -EINVAL;
1841 			goto cleanup;
1842 		}
1843 		/* Copy in the user's I2O command */
1844 		if (copy_from_user (msg, user_msg, size)) {
1845 			rcode = -EFAULT;
1846 			goto cleanup;
1847 		}
1848 		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1849 
1850 		// TODO add 64 bit API
1851 		sg 	 = (struct sg_simple_element*)(msg + sg_offset);
1852 		for (j = 0; j < sg_count; j++) {
1853 			/* Copy out the SG list to user's buffer if necessary */
1854 			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1855 				sg_size = sg[j].flag_count & 0xffffff;
1856 				// sg_simple_element API is 32 bit
1857 				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
1858 					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1859 					rcode = -EFAULT;
1860 					goto cleanup;
1861 				}
1862 			}
1863 		}
1864 	}
1865 
1866 	/* Copy back the reply to user space */
1867 	if (reply_size) {
1868 		// we wrote our own values for context - now restore the user supplied ones
1869 		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1870 			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1871 			rcode = -EFAULT;
1872 		}
1873 		if(copy_to_user(user_reply, reply, reply_size)) {
1874 			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1875 			rcode = -EFAULT;
1876 		}
1877 	}
1878 
1879 
1880 cleanup:
1881 	if (rcode != -ETIME && rcode != -EINTR) {
1882 		struct sg_simple_element *sg =
1883 				(struct sg_simple_element*) (msg +sg_offset);
1884 		kfree (reply);
1885 		while(sg_index) {
1886 			if(sg_list[--sg_index]) {
1887 				dma_free_coherent(&pHba->pDev->dev,
1888 					sg[sg_index].flag_count & 0xffffff,
1889 					sg_list[sg_index],
1890 					sg[sg_index].addr_bus);
1891 			}
1892 		}
1893 	}
1894 	return rcode;
1895 }
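
/*
 * Illustrative user-space sketch, not driver code: how a management
 * tool might frame the buffer adpt_i2o_passthru() above expects.  The
 * message length in u32s rides in the top 16 bits of word 0, and the
 * reply frame sits immediately after the message.  The device node
 * name is an assumption here; I2OUSRCMD comes from the DPT ioctl
 * headers shipped with the management tools.
 */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int dpti_passthru_sketch(uint32_t *buf, uint32_t msg_u32s,
				uint32_t reply_u32s)
{
	int fd, rc;

	buf[0] = msg_u32s << 16;		/* u32 count, SGL offset 0 */
	/* buf[2]/buf[3] get overwritten by the driver with its own context */
	buf[msg_u32s] = reply_u32s << 16;	/* reply frame follows the message */

	fd = open("/dev/dpti0", O_RDWR);	/* assumed node name */
	if (fd < 0)
		return -1;
	rc = ioctl(fd, I2OUSRCMD, buf);		/* reply lands at &buf[msg_u32s] */
	close(fd);
	return rc;
}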
1896 
1897 #if defined __ia64__
1898 static void adpt_ia64_info(sysInfo_S* si)
1899 {
1900 	// This is all the info we need for now
1901 	// We will add more info as our new
1902 	// management utility requires it
1903 	si->processorType = PROC_IA64;
1904 }
1905 #endif
1906 
1907 #if defined __sparc__
1908 static void adpt_sparc_info(sysInfo_S* si)
1909 {
1910 	// This is all the info we need for now
1911 	// We will add more info as our new
1912 	// management utility requires it
1913 	si->processorType = PROC_ULTRASPARC;
1914 }
1915 #endif
1916 #if defined __alpha__
1917 static void adpt_alpha_info(sysInfo_S* si)
1918 {
1919 	// This is all the info we need for now
1920 	// We will add more info as our new
1921 	// management utility requires it
1922 	si->processorType = PROC_ALPHA;
1923 }
1924 #endif
1925 
1926 #if defined __i386__
1927 
1928 #include <uapi/asm/vm86.h>
1929 
1930 static void adpt_i386_info(sysInfo_S* si)
1931 {
1932 	// This is all the info we need for now
1933 	// We will add more info as our new
1934 	// management utility requires it
1935 	switch (boot_cpu_data.x86) {
1936 	case CPU_386:
1937 		si->processorType = PROC_386;
1938 		break;
1939 	case CPU_486:
1940 		si->processorType = PROC_486;
1941 		break;
1942 	case CPU_586:
1943 		si->processorType = PROC_PENTIUM;
1944 		break;
1945 	default:  // Just in case
1946 		si->processorType = PROC_PENTIUM;
1947 		break;
1948 	}
1949 }
1950 #endif
1951 
1952 /*
1953  * This routine returns information about the system.  This does not affect
1954  * any logic, and if the info is wrong it doesn't matter.
1955  */
1956 
1957 /* Get all the info we can not get from kernel services */
1958 static int adpt_system_info(void __user *buffer)
1959 {
1960 	sysInfo_S si;
1961 
1962 	memset(&si, 0, sizeof(si));
1963 
1964 	si.osType = OS_LINUX;
1965 	si.osMajorVersion = 0;
1966 	si.osMinorVersion = 0;
1967 	si.osRevision = 0;
1968 	si.busType = SI_PCI_BUS;
1969 	si.processorFamily = DPTI_sig.dsProcessorFamily;
1970 
1971 #if defined __i386__
1972 	adpt_i386_info(&si);
1973 #elif defined (__ia64__)
1974 	adpt_ia64_info(&si);
1975 #elif defined(__sparc__)
1976 	adpt_sparc_info(&si);
1977 #elif defined (__alpha__)
1978 	adpt_alpha_info(&si);
1979 #else
1980 	si.processorType = 0xff ;
1981 #endif
1982 	if (copy_to_user(buffer, &si, sizeof(si))){
1983 		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
1984 		return -EFAULT;
1985 	}
1986 
1987 	return 0;
1988 }
1989 
1990 static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
1991 {
1992 	int minor;
1993 	int error = 0;
1994 	adpt_hba* pHba;
1995 	ulong flags = 0;
1996 	void __user *argp = (void __user *)arg;
1997 
1998 	minor = iminor(inode);
1999 	if (minor >= DPTI_MAX_HBA){
2000 		return -ENXIO;
2001 	}
2002 	mutex_lock(&adpt_configuration_lock);
2003 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
2004 		if (pHba->unit == minor) {
2005 			break;	/* found adapter */
2006 		}
2007 	}
2008 	mutex_unlock(&adpt_configuration_lock);
2009 	if(pHba == NULL){
2010 		return -ENXIO;
2011 	}
2012 
2013 	while((volatile u32) pHba->state & DPTI_STATE_RESET )
2014 		schedule_timeout_uninterruptible(2);
2015 
2016 	switch (cmd) {
2017 	// TODO: handle 3 cases
2018 	case DPT_SIGNATURE:
2019 		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
2020 			return -EFAULT;
2021 		}
2022 		break;
2023 	case I2OUSRCMD:
2024 		return adpt_i2o_passthru(pHba, argp);
2025 
2026 	case DPT_CTRLINFO:{
2027 		drvrHBAinfo_S HbaInfo;
2028 
2029 #define FLG_OSD_PCI_VALID 0x0001
2030 #define FLG_OSD_DMA	  0x0002
2031 #define FLG_OSD_I2O	  0x0004
2032 		memset(&HbaInfo, 0, sizeof(HbaInfo));
2033 		HbaInfo.drvrHBAnum = pHba->unit;
2034 		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
2035 		HbaInfo.blinkState = adpt_read_blink_led(pHba);
2036 		HbaInfo.pciBusNum =  pHba->pDev->bus->number;
2037 		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
2038 		HbaInfo.Interrupt = pHba->pDev->irq;
2039 		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
2040 		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
2041 			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
2042 			return -EFAULT;
2043 		}
2044 		break;
2045 		}
2046 	case DPT_SYSINFO:
2047 		return adpt_system_info(argp);
2048 	case DPT_BLINKLED:{
2049 		u32 value;
2050 		value = (u32)adpt_read_blink_led(pHba);
2051 		if (copy_to_user(argp, &value, sizeof(value))) {
2052 			return -EFAULT;
2053 		}
2054 		break;
2055 		}
2056 	case I2ORESETCMD:
2057 		if(pHba->host)
2058 			spin_lock_irqsave(pHba->host->host_lock, flags);
2059 		adpt_hba_reset(pHba);
2060 		if(pHba->host)
2061 			spin_unlock_irqrestore(pHba->host->host_lock, flags);
2062 		break;
2063 	case I2ORESCANCMD:
2064 		adpt_rescan(pHba);
2065 		break;
2066 	default:
2067 		return -EINVAL;
2068 	}
2069 
2070 	return error;
2071 }
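
/*
 * Minimal user-space sketch of the simplest command dispatched above:
 * DPT_BLINKLED copies back one u32 blink-LED code, where a non-zero
 * value conventionally encodes a firmware fault.  The node name is an
 * assumption; the ioctl number comes from the DPT headers.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int dpti_blinkled_sketch(void)
{
	uint32_t value = 0;
	int fd = open("/dev/dpti0", O_RDONLY);	/* assumed node name */

	if (fd < 0)
		return -1;
	if (ioctl(fd, DPT_BLINKLED, &value) == 0)
		printf("blink LED code: 0x%08x\n", (unsigned)value);
	close(fd);
	return 0;
}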
2072 
2073 static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
2074 {
2075 	struct inode *inode;
2076 	long ret;
2077 
2078 	inode = file_inode(file);
2079 
2080 	mutex_lock(&adpt_mutex);
2081 	ret = adpt_ioctl(inode, file, cmd, arg);
2082 	mutex_unlock(&adpt_mutex);
2083 
2084 	return ret;
2085 }
2086 
2087 #ifdef CONFIG_COMPAT
2088 static long compat_adpt_ioctl(struct file *file,
2089 				unsigned int cmd, unsigned long arg)
2090 {
2091 	struct inode *inode;
2092 	long ret;
2093 
2094 	inode = file_inode(file);
2095 
2096 	mutex_lock(&adpt_mutex);
2097 
2098 	switch(cmd) {
2099 		case DPT_SIGNATURE:
2100 		case I2OUSRCMD:
2101 		case DPT_CTRLINFO:
2102 		case DPT_SYSINFO:
2103 		case DPT_BLINKLED:
2104 		case I2ORESETCMD:
2105 		case I2ORESCANCMD:
2106 		case (DPT_TARGET_BUSY & 0xFFFF):
2107 		case DPT_TARGET_BUSY:
2108 			ret = adpt_ioctl(inode, file, cmd, arg);
2109 			break;
2110 		default:
2111 			ret =  -ENOIOCTLCMD;
2112 	}
2113 
2114 	mutex_unlock(&adpt_mutex);
2115 
2116 	return ret;
2117 }
2118 #endif
2119 
2120 static irqreturn_t adpt_isr(int irq, void *dev_id)
2121 {
2122 	struct scsi_cmnd* cmd;
2123 	adpt_hba* pHba = dev_id;
2124 	u32 m;
2125 	void __iomem *reply;
2126 	u32 status=0;
2127 	u32 context;
2128 	ulong flags = 0;
2129 	int handled = 0;
2130 
2131 	if (pHba == NULL){
2132 		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
2133 		return IRQ_NONE;
2134 	}
2135 	if(pHba->host)
2136 		spin_lock_irqsave(pHba->host->host_lock, flags);
2137 
2138 	while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2139 		m = readl(pHba->reply_port);
2140 		if(m == EMPTY_QUEUE){
2141 			// Try twice then give up
2142 			rmb();
2143 			m = readl(pHba->reply_port);
2144 			if(m == EMPTY_QUEUE){
2145 				// This really should not happen
2146 				printk(KERN_ERR"dpti: Could not get reply frame\n");
2147 				goto out;
2148 			}
2149 		}
2150 		if (pHba->reply_pool_pa <= m &&
2151 		    m < pHba->reply_pool_pa +
2152 			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2153 			reply = (u8 *)pHba->reply_pool +
2154 						(m - pHba->reply_pool_pa);
2155 		} else {
2156 			/* Ick, we should *never* be here */
2157 			printk(KERN_ERR "dpti: reply frame not from pool\n");
2158 			reply = (u8 *)bus_to_virt(m);
2159 		}
2160 
2161 		if (readl(reply) & MSG_FAIL) {
2162 			u32 old_m = readl(reply+28);
2163 			void __iomem *msg;
2164 			u32 old_context;
2165 			PDEBUG("%s: Failed message\n",pHba->name);
2166 			if(old_m >= 0x100000){
2167 				printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2168 				writel(m,pHba->reply_port);
2169 				continue;
2170 			}
2171 			// Transaction context is 0 in failed reply frame
2172 			msg = pHba->msg_addr_virt + old_m;
2173 			old_context = readl(msg+12);
2174 			writel(old_context, reply+12);
2175 			adpt_send_nop(pHba, old_m);
2176 		}
2177 		context = readl(reply+8);
2178 		if(context & 0x40000000){ // IOCTL
2179 			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
2180 			if( p != NULL) {
2181 				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
2182 			}
2183 			// All IOCTLs will also be post wait
2184 		}
2185 		if(context & 0x80000000){ // Post wait message
2186 			status = readl(reply+16);
2187 			if(status  >> 24){
2188 				status &=  0xffff; /* Get detail status */
2189 			} else {
2190 				status = I2O_POST_WAIT_OK;
2191 			}
2192 			if(!(context & 0x40000000)) {
2193 				cmd = adpt_cmd_from_context(pHba,
2194 							readl(reply+12));
2195 				if(cmd != NULL) {
2196 					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2197 				}
2198 			}
2199 			adpt_i2o_post_wait_complete(context, status);
2200 		} else { // SCSI message
2201 			cmd = adpt_cmd_from_context (pHba, readl(reply+12));
2202 			if(cmd != NULL){
2203 				scsi_dma_unmap(cmd);
2204 				if(cmd->serial_number != 0) { // If not timedout
2205 					adpt_i2o_to_scsi(reply, cmd);
2206 				}
2207 			}
2208 		}
2209 		writel(m, pHba->reply_port);
2210 		wmb();
2211 		rmb();
2212 	}
2213 	handled = 1;
2214 out:	if(pHba->host)
2215 		spin_unlock_irqrestore(pHba->host->host_lock, flags);
2216 	return IRQ_RETVAL(handled);
2217 }
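
/*
 * Sketch, with illustrative names, of the reply-routing rule the ISR
 * above applies to the initiator context word: bit 30 marks an ioctl
 * passthrough reply, bit 31 a post-wait completion, and the two are
 * not exclusive since passthrough ioctls set both.  A context with
 * neither bit is an ordinary SCSI command completion.
 */
#define ADPT_CTX_IOCTL_SK	0x40000000u	/* frame copied to ioctl buffer */
#define ADPT_CTX_POST_WAIT_SK	0x80000000u	/* wakes a post-wait sleeper */

static inline unsigned int adpt_reply_paths_sketch(unsigned int context)
{
	/* zero means: map reply word 3 back to a scsi_cmnd instead */
	return context & (ADPT_CTX_IOCTL_SK | ADPT_CTX_POST_WAIT_SK);
}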
2218 
2219 static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2220 {
2221 	int i;
2222 	u32 msg[MAX_MESSAGE_SIZE];
2223 	u32* mptr;
2224 	u32* lptr;
2225 	u32 *lenptr;
2226 	int direction;
2227 	int scsidir;
2228 	int nseg;
2229 	u32 len;
2230 	u32 reqlen;
2231 	s32 rcode;
2232 	dma_addr_t addr;
2233 
2234 	memset(msg, 0 , sizeof(msg));
2235 	len = scsi_bufflen(cmd);
2236 	direction = 0x00000000;
2237 
2238 	scsidir = 0x00000000;			// DATA NO XFER
2239 	if(len) {
2240 		/*
2241 		 * Set SCBFlags to indicate if data is being transferred
2242 		 * in or out, or no data transfer
2243 		 * Note:  Do not have to verify index is less than 0 since
2244 		 * cmd->cmnd[0] is an unsigned char
2245 		 */
2246 		switch(cmd->sc_data_direction){
2247 		case DMA_FROM_DEVICE:
2248 			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
2249 			break;
2250 		case DMA_TO_DEVICE:
2251 			direction=0x04000000;	// SGL OUT
2252 			scsidir  =0x80000000;	// DATA OUT (iop-->dev)
2253 			break;
2254 		case DMA_NONE:
2255 			break;
2256 		case DMA_BIDIRECTIONAL:
2257 			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
2258 			// Assume In - and continue;
2259 			break;
2260 		default:
2261 			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2262 			     pHba->name, cmd->cmnd[0]);
2263 			cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2264 			cmd->scsi_done(cmd);
2265 			return 	0;
2266 		}
2267 	}
2268 	// msg[0] is set later
2269 	// I2O_CMD_SCSI_EXEC
2270 	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2271 	msg[2] = 0;
2272 	msg[3] = adpt_cmd_to_context(cmd);  /* Want SCSI control block back */
2273 	// Our cards use the transaction context as the tag for queueing
2274 	// Adaptec/DPT Private stuff
2275 	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2276 	msg[5] = d->tid;
2277 	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
2278 	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
2279 	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2280 	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2281 	msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2282 
2283 	mptr=msg+7;
2284 
2285 	// Write SCSI command into the message - always 16 byte block
2286 	memset(mptr, 0,  16);
2287 	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2288 	mptr+=4;
2289 	lenptr=mptr++;		/* Remember me - fill in when we know */
2290 	if (dpt_dma64(pHba)) {
2291 		reqlen = 16;		// SINGLE SGE
2292 		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2293 		*mptr++ = 1 << PAGE_SHIFT;
2294 	} else {
2295 		reqlen = 14;		// SINGLE SGE
2296 	}
2297 	/* Now fill in the SGList and command */
2298 
2299 	nseg = scsi_dma_map(cmd);
2300 	BUG_ON(nseg < 0);
2301 	if (nseg) {
2302 		struct scatterlist *sg;
2303 
2304 		len = 0;
2305 		scsi_for_each_sg(cmd, sg, nseg, i) {
2306 			lptr = mptr;
2307 			*mptr++ = direction|0x10000000|sg_dma_len(sg);
2308 			len+=sg_dma_len(sg);
2309 			addr = sg_dma_address(sg);
2310 			*mptr++ = dma_low(addr);
2311 			if (dpt_dma64(pHba))
2312 				*mptr++ = dma_high(addr);
2313 			/* Make this an end of list */
2314 			if (i == nseg - 1)
2315 				*lptr = direction|0xD0000000|sg_dma_len(sg);
2316 		}
2317 		reqlen = mptr - msg;
2318 		*lenptr = len;
2319 
2320 		if(cmd->underflow && len != cmd->underflow){
2321 			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2322 				len, cmd->underflow);
2323 		}
2324 	} else {
2325 		*lenptr = len = 0;
2326 		reqlen = 12;
2327 	}
2328 
2329 	/* Stick the headers on */
2330 	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2331 
2332 	// Send it on its way
2333 	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2334 	if (rcode == 0) {
2335 		return 0;
2336 	}
2337 	return rcode;
2338 }
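
/*
 * Sketch of the 32-bit simple SG element encoding built in the loop
 * above: the low 24 bits of flag_count carry the byte count and the
 * top byte the flags (0x10 simple element, 0x04 data out, 0xD0 simple
 * plus end-of-buffer plus last element).  Helper name is illustrative.
 */
static unsigned int *adpt_pack_sge_sketch(unsigned int *mptr,
					  unsigned int direction,
					  unsigned int len,
					  unsigned int addr, int last)
{
	*mptr++ = direction | (last ? 0xD0000000u : 0x10000000u)
			    | (len & 0xffffff);
	*mptr++ = addr;				/* 32-bit bus address */
	return mptr;
}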
2339 
2340 
2341 static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2342 {
2343 	struct Scsi_Host *host;
2344 
2345 	host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2346 	if (host == NULL) {
2347 		printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
2348 		return -1;
2349 	}
2350 	host->hostdata[0] = (unsigned long)pHba;
2351 	pHba->host = host;
2352 
2353 	host->irq = pHba->pDev->irq;
2354 	/* no IO ports, so don't have to set host->io_port and
2355 	 * host->n_io_port
2356 	 */
2357 	host->io_port = 0;
2358 	host->n_io_port = 0;
2359 				/* see comments in scsi_host.h */
2360 	host->max_id = 16;
2361 	host->max_lun = 256;
2362 	host->max_channel = pHba->top_scsi_channel + 1;
2363 	host->cmd_per_lun = 1;
2364 	host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2365 	host->sg_tablesize = pHba->sg_tablesize;
2366 	host->can_queue = pHba->post_fifo_size;
2367 	host->use_cmd_list = 1;
2368 
2369 	return 0;
2370 }
2371 
2372 
2373 static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2374 {
2375 	adpt_hba* pHba;
2376 	u32 hba_status;
2377 	u32 dev_status;
2378 	u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2379 	// I know this would look cleaner if I just read bytes
2380 	// but the model I have been using for all the rest of the
2381 	// io is in 4 byte words - so I keep that model
2382 	u16 detailed_status = readl(reply+16) &0xffff;
2383 	dev_status = (detailed_status & 0xff);
2384 	hba_status = detailed_status >> 8;
2385 
2386 	// calculate resid for sg
2387 	scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
2388 
2389 	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2390 
2391 	cmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false
2392 
2393 	if(!(reply_flags & MSG_FAIL)) {
2394 		switch(detailed_status & I2O_SCSI_DSC_MASK) {
2395 		case I2O_SCSI_DSC_SUCCESS:
2396 			cmd->result = (DID_OK << 16);
2397 			// handle underflow
2398 			if (readl(reply+20) < cmd->underflow) {
2399 				cmd->result = (DID_ERROR <<16);
2400 				printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2401 			}
2402 			break;
2403 		case I2O_SCSI_DSC_REQUEST_ABORTED:
2404 			cmd->result = (DID_ABORT << 16);
2405 			break;
2406 		case I2O_SCSI_DSC_PATH_INVALID:
2407 		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2408 		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2409 		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2410 		case I2O_SCSI_DSC_NO_ADAPTER:
2411 		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2412 			printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2413 				pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2414 			cmd->result = (DID_TIME_OUT << 16);
2415 			break;
2416 		case I2O_SCSI_DSC_ADAPTER_BUSY:
2417 		case I2O_SCSI_DSC_BUS_BUSY:
2418 			cmd->result = (DID_BUS_BUSY << 16);
2419 			break;
2420 		case I2O_SCSI_DSC_SCSI_BUS_RESET:
2421 		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2422 			cmd->result = (DID_RESET << 16);
2423 			break;
2424 		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2425 			printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2426 			cmd->result = (DID_PARITY << 16);
2427 			break;
2428 		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2429 		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2430 		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2431 		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2432 		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2433 		case I2O_SCSI_DSC_DATA_OVERRUN:
2434 		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2435 		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2436 		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2437 		case I2O_SCSI_DSC_PROVIDE_FAILURE:
2438 		case I2O_SCSI_DSC_REQUEST_TERMINATED:
2439 		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2440 		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2441 		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2442 		case I2O_SCSI_DSC_INVALID_CDB:
2443 		case I2O_SCSI_DSC_LUN_INVALID:
2444 		case I2O_SCSI_DSC_SCSI_TID_INVALID:
2445 		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2446 		case I2O_SCSI_DSC_NO_NEXUS:
2447 		case I2O_SCSI_DSC_CDB_RECEIVED:
2448 		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2449 		case I2O_SCSI_DSC_QUEUE_FROZEN:
2450 		case I2O_SCSI_DSC_REQUEST_INVALID:
2451 		default:
2452 			printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2453 				pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2454 			       hba_status, dev_status, cmd->cmnd[0]);
2455 			cmd->result = (DID_ERROR << 16);
2456 			break;
2457 		}
2458 
2459 		// copy over the request sense data if it was a check
2460 		// condition status
2461 		if (dev_status == SAM_STAT_CHECK_CONDITION) {
2462 			u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
2463 			// Copy over the sense data
2464 			memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2465 			if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2466 			   cmd->sense_buffer[2] == DATA_PROTECT ){
2467 				/* This is to handle an array failed */
2468 				cmd->result = (DID_TIME_OUT << 16);
2469 				printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2470 					pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2471 					hba_status, dev_status, cmd->cmnd[0]);
2472 
2473 			}
2474 		}
2475 	} else {
2476 		/* In this condition we could not talk to the tid;
2477 		 * the card rejected it.  We should signal a retry
2478 		 * for a limited number of retries.
2479 		 */
2480 		cmd->result = (DID_TIME_OUT << 16);
2481 		printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
2482 			pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2483 			((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2484 	}
2485 
2486 	cmd->result |= (dev_status);
2487 
2488 	if(cmd->scsi_done != NULL){
2489 		cmd->scsi_done(cmd);
2490 	}
2491 	return cmd->result;
2492 }
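
/*
 * Trivial sketch of the detailed-status split used above: the low
 * byte of reply word 4 is the SCSI device status, the next byte the
 * adapter (HBA) status.  Helper name is illustrative only.
 */
static inline void adpt_split_status_sketch(unsigned short detailed_status,
					    unsigned int *hba_status,
					    unsigned int *dev_status)
{
	*dev_status = detailed_status & 0xff;
	*hba_status = detailed_status >> 8;
}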
2493 
2494 
2495 static s32 adpt_rescan(adpt_hba* pHba)
2496 {
2497 	s32 rcode;
2498 	ulong flags = 0;
2499 
2500 	if(pHba->host)
2501 		spin_lock_irqsave(pHba->host->host_lock, flags);
2502 	if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2503 		goto out;
2504 	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2505 		goto out;
2506 	rcode = 0;
2507 out:	if(pHba->host)
2508 		spin_unlock_irqrestore(pHba->host->host_lock, flags);
2509 	return rcode;
2510 }
2511 
2512 
2513 static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2514 {
2515 	int i;
2516 	int max;
2517 	int tid;
2518 	struct i2o_device *d;
2519 	i2o_lct *lct = pHba->lct;
2520 	u8 bus_no = 0;
2521 	s16 scsi_id;
2522 	u64 scsi_lun;
2523 	u32 buf[10]; // at least 8 u32's
2524 	struct adpt_device* pDev = NULL;
2525 	struct i2o_device* pI2o_dev = NULL;
2526 
2527 	if (lct == NULL) {
2528 		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2529 		return -1;
2530 	}
2531 
2532 	max = lct->table_size;
2533 	max -= 3;
2534 	max /= 9;
2535 
2536 	// Mark each drive as unscanned
2537 	for (d = pHba->devices; d; d = d->next) {
2538 		pDev =(struct adpt_device*) d->owner;
2539 		if(!pDev){
2540 			continue;
2541 		}
2542 		pDev->state |= DPTI_DEV_UNSCANNED;
2543 	}
2544 
2545 	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2546 
2547 	for(i=0;i<max;i++) {
2548 		if( lct->lct_entry[i].user_tid != 0xfff){
2549 			continue;
2550 		}
2551 
2552 		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2553 		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2554 		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2555 			tid = lct->lct_entry[i].tid;
2556 			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2557 				printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2558 				continue;
2559 			}
2560 			bus_no = buf[0]>>16;
2561 			if (bus_no >= MAX_CHANNEL) {	/* Something wrong skip it */
2562 				printk(KERN_WARNING
2563 					"%s: Channel number %d out of range\n",
2564 					pHba->name, bus_no);
2565 				continue;
2566 			}
2567 
2568 			scsi_id = buf[1];
2569 			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
2570 			pDev = pHba->channel[bus_no].device[scsi_id];
2571 			/* da lun */
2572 			while(pDev) {
2573 				if(pDev->scsi_lun == scsi_lun) {
2574 					break;
2575 				}
2576 				pDev = pDev->next_lun;
2577 			}
2578 			if(!pDev ) { // Something new add it
2579 				d = kmalloc(sizeof(struct i2o_device),
2580 					    GFP_ATOMIC);
2581 				if(d==NULL)
2582 				{
2583 					printk(KERN_CRIT "Out of memory for I2O device data.\n");
2584 					return -ENOMEM;
2585 				}
2586 
2587 				d->controller = pHba;
2588 				d->next = NULL;
2589 
2590 				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2591 
2592 				d->flags = 0;
2593 				adpt_i2o_report_hba_unit(pHba, d);
2594 				adpt_i2o_install_device(pHba, d);
2595 
2596 				pDev = pHba->channel[bus_no].device[scsi_id];
2597 				if( pDev == NULL){
2598 					pDev =
2599 					  kzalloc(sizeof(struct adpt_device),
2600 						  GFP_ATOMIC);
2601 					if(pDev == NULL) {
2602 						return -ENOMEM;
2603 					}
2604 					pHba->channel[bus_no].device[scsi_id] = pDev;
2605 				} else {
2606 					while (pDev->next_lun) {
2607 						pDev = pDev->next_lun;
2608 					}
2609 					pDev = pDev->next_lun =
2610 					  kzalloc(sizeof(struct adpt_device),
2611 						  GFP_ATOMIC);
2612 					if(pDev == NULL) {
2613 						return -ENOMEM;
2614 					}
2615 				}
2616 				pDev->tid = d->lct_data.tid;
2617 				pDev->scsi_channel = bus_no;
2618 				pDev->scsi_id = scsi_id;
2619 				pDev->scsi_lun = scsi_lun;
2620 				pDev->pI2o_dev = d;
2621 				d->owner = pDev;
2622 				pDev->type = (buf[0])&0xff;
2623 				pDev->flags = (buf[0]>>8)&0xff;
2624 				// Too late, SCSI system has made up its mind, but what the hey ...
2625 				if(scsi_id > pHba->top_scsi_id){
2626 					pHba->top_scsi_id = scsi_id;
2627 				}
2628 				if(scsi_lun > pHba->top_scsi_lun){
2629 					pHba->top_scsi_lun = scsi_lun;
2630 				}
2631 				continue;
2632 			} // end of new i2o device
2633 
2634 			// We found an old device - check it
2635 			while(pDev) {
2636 				if(pDev->scsi_lun == scsi_lun) {
2637 					if(!scsi_device_online(pDev->pScsi_dev)) {
2638 						printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
2639 								pHba->name,bus_no,scsi_id,scsi_lun);
2640 						if (pDev->pScsi_dev) {
2641 							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2642 						}
2643 					}
2644 					d = pDev->pI2o_dev;
2645 					if(d->lct_data.tid != tid) { // something changed
2646 						pDev->tid = tid;
2647 						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2648 						if (pDev->pScsi_dev) {
2649 							pDev->pScsi_dev->changed = TRUE;
2650 							pDev->pScsi_dev->removable = TRUE;
2651 						}
2652 					}
2653 					// Found it - mark it scanned
2654 					pDev->state = DPTI_DEV_ONLINE;
2655 					break;
2656 				}
2657 				pDev = pDev->next_lun;
2658 			}
2659 		}
2660 	}
2661 	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2662 		pDev =(struct adpt_device*) pI2o_dev->owner;
2663 		if(!pDev){
2664 			continue;
2665 		}
2666 		// Drive offline drives that previously existed but could not be found
2667 		// in the LCT table
2668 		if (pDev->state & DPTI_DEV_UNSCANNED){
2669 			pDev->state = DPTI_DEV_OFFLINE;
2670 			printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2671 			if (pDev->pScsi_dev) {
2672 				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2673 			}
2674 		}
2675 	}
2676 	return 0;
2677 }
2678 
2679 static void adpt_fail_posted_scbs(adpt_hba* pHba)
2680 {
2681 	struct scsi_cmnd* 	cmd = NULL;
2682 	struct scsi_device* 	d = NULL;
2683 
2684 	shost_for_each_device(d, pHba->host) {
2685 		unsigned long flags;
2686 		spin_lock_irqsave(&d->list_lock, flags);
2687 		list_for_each_entry(cmd, &d->cmd_list, list) {
2688 			if(cmd->serial_number == 0){
2689 				continue;
2690 			}
2691 			cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2692 			cmd->scsi_done(cmd);
2693 		}
2694 		spin_unlock_irqrestore(&d->list_lock, flags);
2695 	}
2696 }
2697 
2698 
2699 /*============================================================================
2700  *  Routines from i2o subsystem
2701  *============================================================================
2702  */
2703 
2704 
2705 
2706 /*
2707  *	Bring an I2O controller into HOLD state. See the spec.
2708  */
2709 static int adpt_i2o_activate_hba(adpt_hba* pHba)
2710 {
2711 	int rcode;
2712 
2713 	if(pHba->initialized ) {
2714 		if (adpt_i2o_status_get(pHba) < 0) {
2715 			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2716 				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2717 				return rcode;
2718 			}
2719 			if (adpt_i2o_status_get(pHba) < 0) {
2720 				printk(KERN_INFO "HBA not responding.\n");
2721 				return -1;
2722 			}
2723 		}
2724 
2725 		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2726 			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2727 			return -1;
2728 		}
2729 
2730 		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2731 		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2732 		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2733 		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2734 			adpt_i2o_reset_hba(pHba);
2735 			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2736 				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2737 				return -1;
2738 			}
2739 		}
2740 	} else {
2741 		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2742 			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2743 			return rcode;
2744 		}
2745 
2746 	}
2747 
2748 	if (adpt_i2o_init_outbound_q(pHba) < 0) {
2749 		return -1;
2750 	}
2751 
2752 	/* In HOLD state */
2753 
2754 	if (adpt_i2o_hrt_get(pHba) < 0) {
2755 		return -1;
2756 	}
2757 
2758 	return 0;
2759 }
2760 
2761 /*
2762  *	Bring a controller online into OPERATIONAL state.
2763  */
2764 
2765 static int adpt_i2o_online_hba(adpt_hba* pHba)
2766 {
2767 	if (adpt_i2o_systab_send(pHba) < 0) {
2768 		adpt_i2o_delete_hba(pHba);
2769 		return -1;
2770 	}
2771 	/* In READY state */
2772 
2773 	if (adpt_i2o_enable_hba(pHba) < 0) {
2774 		adpt_i2o_delete_hba(pHba);
2775 		return -1;
2776 	}
2777 
2778 	/* In OPERATIONAL state  */
2779 	return 0;
2780 }
2781 
2782 static s32 adpt_send_nop(adpt_hba *pHba, u32 m)
2783 {
2784 	u32 __iomem *msg;
2785 	ulong timeout = jiffies + 5*HZ;
2786 
2787 	while(m == EMPTY_QUEUE){
2788 		rmb();
2789 		m = readl(pHba->post_port);
2790 		if(m != EMPTY_QUEUE){
2791 			break;
2792 		}
2793 		if(time_after(jiffies,timeout)){
2794 			printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2795 			return 2;
2796 		}
2797 		schedule_timeout_uninterruptible(1);
2798 	}
2799 	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2800 	writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2801 	writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2802 	writel( 0,&msg[2]);
2803 	wmb();
2804 
2805 	writel(m, pHba->post_port);
2806 	wmb();
2807 	return 0;
2808 }
2809 
2810 static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2811 {
2812 	u8 *status;
2813 	dma_addr_t addr;
2814 	u32 __iomem *msg = NULL;
2815 	int i;
2816 	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2817 	u32 m;
2818 
2819 	do {
2820 		rmb();
2821 		m = readl(pHba->post_port);
2822 		if (m != EMPTY_QUEUE) {
2823 			break;
2824 		}
2825 
2826 		if(time_after(jiffies,timeout)){
2827 			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2828 			return -ETIMEDOUT;
2829 		}
2830 		schedule_timeout_uninterruptible(1);
2831 	} while(m == EMPTY_QUEUE);
2832 
2833 	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2834 
2835 	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
2836 	if (!status) {
2837 		adpt_send_nop(pHba, m);
2838 		printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2839 			pHba->name);
2840 		return -ENOMEM;
2841 	}
2842 	memset(status, 0, 4);
2843 
2844 	writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2845 	writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2846 	writel(0, &msg[2]);
2847 	writel(0x0106, &msg[3]);	/* Transaction context */
2848 	writel(4096, &msg[4]);		/* Host page frame size */
2849 	writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);	/* Outbound msg frame size and Initcode */
2850 	writel(0xD0000004, &msg[6]);		/* Simple SG LE, EOB */
2851 	writel((u32)addr, &msg[7]);
2852 
2853 	writel(m, pHba->post_port);
2854 	wmb();
2855 
2856 	// Wait for the reply status to come back
2857 	do {
2858 		if (*status) {
2859 			if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2860 				break;
2861 			}
2862 		}
2863 		rmb();
2864 		if(time_after(jiffies,timeout)){
2865 			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2866 			/* We lose 4 bytes of "status" here, but we
2867 			   cannot free them because the controller may
2868 			   wake up and corrupt those bytes at any time */
2869 			/* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
2870 			return -ETIMEDOUT;
2871 		}
2872 		schedule_timeout_uninterruptible(1);
2873 	} while (1);
2874 
2875 	// If the command was successful, fill the fifo with our reply
2876 	// message packets
2877 	if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2878 		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2879 		return -2;
2880 	}
2881 	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2882 
2883 	if(pHba->reply_pool != NULL) {
2884 		dma_free_coherent(&pHba->pDev->dev,
2885 			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2886 			pHba->reply_pool, pHba->reply_pool_pa);
2887 	}
2888 
2889 	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2890 				pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2891 				&pHba->reply_pool_pa, GFP_KERNEL);
2892 	if (!pHba->reply_pool) {
2893 		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2894 		return -ENOMEM;
2895 	}
2896 	memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
2897 
2898 	for(i = 0; i < pHba->reply_fifo_size; i++) {
2899 		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2900 			pHba->reply_port);
2901 		wmb();
2902 	}
2903 	adpt_i2o_status_get(pHba);
2904 	return 0;
2905 }
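
/*
 * Worked sketch of the priming loop above: reply frame i is posted at
 * reply_pool_pa + i * REPLY_FRAME_SIZE * 4 bytes.  With an
 * illustrative pool at 0x1000 and 17-u32 frames, frame 0 posts as
 * 0x1000 and frame 1 as 0x1044.
 */
static unsigned long long adpt_reply_frame_pa_sketch(unsigned long long pool_pa,
						     unsigned int frame_u32s,
						     unsigned int i)
{
	return pool_pa + (unsigned long long)i * frame_u32s * 4;
}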
2906 
2907 
2908 /*
2909  * I2O System Table.  Contains information about
2910  * all the IOPs in the system.  Used to inform IOPs
2911  * about each other's existence.
2912  *
2913  * sys_tbl_ver is the CurrentChangeIndicator that is
2914  * used by IOPs to track changes.
2915  */
2916 
2917 
2918 
2919 static s32 adpt_i2o_status_get(adpt_hba* pHba)
2920 {
2921 	ulong timeout;
2922 	u32 m;
2923 	u32 __iomem *msg;
2924 	u8 *status_block=NULL;
2925 
2926 	if(pHba->status_block == NULL) {
2927 		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
2928 					sizeof(i2o_status_block),
2929 					&pHba->status_block_pa, GFP_KERNEL);
2930 		if(pHba->status_block == NULL) {
2931 			printk(KERN_ERR
2932 			"dpti%d: Get Status Block failed; Out of memory. \n",
2933 			pHba->unit);
2934 			return -ENOMEM;
2935 		}
2936 	}
2937 	memset(pHba->status_block, 0, sizeof(i2o_status_block));
2938 	status_block = (u8*)(pHba->status_block);
2939 	timeout = jiffies+TMOUT_GETSTATUS*HZ;
2940 	do {
2941 		rmb();
2942 		m = readl(pHba->post_port);
2943 		if (m != EMPTY_QUEUE) {
2944 			break;
2945 		}
2946 		if(time_after(jiffies,timeout)){
2947 			printk(KERN_ERR "%s: Timeout waiting for message !\n",
2948 					pHba->name);
2949 			return -ETIMEDOUT;
2950 		}
2951 		schedule_timeout_uninterruptible(1);
2952 	} while(m==EMPTY_QUEUE);
2953 
2954 
2955 	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2956 
2957 	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
2958 	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
2959 	writel(1, &msg[2]);
2960 	writel(0, &msg[3]);
2961 	writel(0, &msg[4]);
2962 	writel(0, &msg[5]);
2963 	writel( dma_low(pHba->status_block_pa), &msg[6]);
2964 	writel( dma_high(pHba->status_block_pa), &msg[7]);
2965 	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
2966 
2967 	//post message
2968 	writel(m, pHba->post_port);
2969 	wmb();
2970 
2971 	while(status_block[87]!=0xff){
2972 		if(time_after(jiffies,timeout)){
2973 			printk(KERN_ERR"dpti%d: Get status timeout.\n",
2974 				pHba->unit);
2975 			return -ETIMEDOUT;
2976 		}
2977 		rmb();
2978 		schedule_timeout_uninterruptible(1);
2979 	}
2980 
2981 	// Set up our number of outbound and inbound messages
2982 	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
2983 	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
2984 		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
2985 	}
2986 
2987 	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
2988 	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
2989 		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
2990 	}
2991 
2992 	// Calculate the Scatter Gather list size
2993 	if (dpt_dma64(pHba)) {
2994 		pHba->sg_tablesize
2995 		  = ((pHba->status_block->inbound_frame_size * 4
2996 		  - 14 * sizeof(u32))
2997 		  / (sizeof(struct sg_simple_element) + sizeof(u32)));
2998 	} else {
2999 		pHba->sg_tablesize
3000 		  = ((pHba->status_block->inbound_frame_size * 4
3001 		  - 12 * sizeof(u32))
3002 		  / sizeof(struct sg_simple_element));
3003 	}
3004 	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
3005 		pHba->sg_tablesize = SG_LIST_ELEMENTS;
3006 	}
3007 
3008 
3009 #ifdef DEBUG
3010 	printk("dpti%d: State = ",pHba->unit);
3011 	switch(pHba->status_block->iop_state) {
3012 		case 0x01:
3013 			printk("INIT\n");
3014 			break;
3015 		case 0x02:
3016 			printk("RESET\n");
3017 			break;
3018 		case 0x04:
3019 			printk("HOLD\n");
3020 			break;
3021 		case 0x05:
3022 			printk("READY\n");
3023 			break;
3024 		case 0x08:
3025 			printk("OPERATIONAL\n");
3026 			break;
3027 		case 0x10:
3028 			printk("FAILED\n");
3029 			break;
3030 		case 0x11:
3031 			printk("FAULTED\n");
3032 			break;
3033 		default:
3034 			printk("%x (unknown!!)\n",pHba->status_block->iop_state);
3035 	}
3036 #endif
3037 	return 0;
3038 }
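
/*
 * Worked sketch of the sg_tablesize computation above, assuming the
 * two-u32 (8-byte) sg_simple_element and an illustrative 128-u32
 * inbound frame (512 bytes):
 *   64-bit SGEs: (512 - 14*4) / (8 + 4) = 456 / 12 = 38 elements
 *   32-bit SGEs: (512 - 12*4) / 8       = 464 / 8  = 58 elements
 * Either result is then clamped to SG_LIST_ELEMENTS.
 */
static int adpt_sg_tablesize_sketch(int inbound_frame_u32s, int dma64)
{
	int bytes = inbound_frame_u32s * 4;

	return dma64 ? (bytes - 14 * 4) / (8 + 4)
		     : (bytes - 12 * 4) / 8;
}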
3039 
3040 /*
3041  * Get the IOP's Logical Configuration Table
3042  */
3043 static int adpt_i2o_lct_get(adpt_hba* pHba)
3044 {
3045 	u32 msg[8];
3046 	int ret;
3047 	u32 buf[16];
3048 
3049 	if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
3050 		pHba->lct_size = pHba->status_block->expected_lct_size;
3051 	}
3052 	do {
3053 		if (pHba->lct == NULL) {
3054 			pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3055 					pHba->lct_size, &pHba->lct_pa,
3056 					GFP_ATOMIC);
3057 			if(pHba->lct == NULL) {
3058 				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3059 					pHba->name);
3060 				return -ENOMEM;
3061 			}
3062 		}
3063 		memset(pHba->lct, 0, pHba->lct_size);
3064 
3065 		msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3066 		msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3067 		msg[2] = 0;
3068 		msg[3] = 0;
3069 		msg[4] = 0xFFFFFFFF;	/* All devices */
3070 		msg[5] = 0x00000000;	/* Report now */
3071 		msg[6] = 0xD0000000|pHba->lct_size;
3072 		msg[7] = (u32)pHba->lct_pa;
3073 
3074 		if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3075 			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
3076 				pHba->name, ret);
3077 			printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3078 			return ret;
3079 		}
3080 
3081 		if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3082 			pHba->lct_size = pHba->lct->table_size << 2;
3083 			dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3084 					pHba->lct, pHba->lct_pa);
3085 			pHba->lct = NULL;
3086 		}
3087 	} while (pHba->lct == NULL);
3088 
3089 	PDEBUG("%s: Hardware resource table read.\n", pHba->name);
3090 
3091 
3092 	// I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3093 	if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3094 		pHba->FwDebugBufferSize = buf[1];
3095 		pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3096 						pHba->FwDebugBufferSize);
3097 		if (pHba->FwDebugBuffer_P) {
3098 			pHba->FwDebugFlags_P     = pHba->FwDebugBuffer_P +
3099 							FW_DEBUG_FLAGS_OFFSET;
3100 			pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3101 							FW_DEBUG_BLED_OFFSET;
3102 			pHba->FwDebugBLEDflag_P  = pHba->FwDebugBLEDvalue_P + 1;
3103 			pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3104 						FW_DEBUG_STR_LENGTH_OFFSET;
3105 			pHba->FwDebugBuffer_P += buf[2];
3106 			pHba->FwDebugFlags = 0;
3107 		}
3108 	}
3109 
3110 	return 0;
3111 }
3112 
3113 static int adpt_i2o_build_sys_table(void)
3114 {
3115 	adpt_hba* pHba = hba_chain;
3116 	int count = 0;
3117 
3118 	if (sys_tbl)
3119 		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
3120 					sys_tbl, sys_tbl_pa);
3121 
3122 	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
3123 				(hba_count) * sizeof(struct i2o_sys_tbl_entry);
3124 
3125 	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
3126 				sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
3127 	if (!sys_tbl) {
3128 		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
3129 		return -ENOMEM;
3130 	}
3131 	memset(sys_tbl, 0, sys_tbl_len);
3132 
3133 	sys_tbl->num_entries = hba_count;
3134 	sys_tbl->version = I2OVERSION;
3135 	sys_tbl->change_ind = sys_tbl_ind++;
3136 
3137 	for(pHba = hba_chain; pHba; pHba = pHba->next) {
3138 		u64 addr;
3139 		// Get updated Status Block so we have the latest information
3140 		if (adpt_i2o_status_get(pHba)) {
3141 			sys_tbl->num_entries--;
3142 			continue; // try next one
3143 		}
3144 
3145 		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
3146 		sys_tbl->iops[count].iop_id = pHba->unit + 2;
3147 		sys_tbl->iops[count].seg_num = 0;
3148 		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
3149 		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
3150 		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
3151 		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
3152 		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
3153 		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
3154 		addr = pHba->base_addr_phys + 0x40;
3155 		sys_tbl->iops[count].inbound_low = dma_low(addr);
3156 		sys_tbl->iops[count].inbound_high = dma_high(addr);
3157 
3158 		count++;
3159 	}
3160 
3161 #ifdef DEBUG
3162 {
3163 	u32 *table = (u32*)sys_tbl;
3164 	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
3165 	for(count = 0; count < (sys_tbl_len >>2); count++) {
3166 		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
3167 			count, table[count]);
3168 	}
3169 }
3170 #endif
3171 
3172 	return 0;
3173 }
3174 
3175 
3176 /*
3177  *	 Dump the information block associated with a given unit (TID)
3178  */
3179 
3180 static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
3181 {
3182 	char buf[64];
3183 	int unit = d->lct_data.tid;
3184 
3185 	printk(KERN_INFO "TID %3.3d ", unit);
3186 
3187 	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3188 	{
3189 		buf[16]=0;
3190 		printk(" Vendor: %-12.12s", buf);
3191 	}
3192 	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3193 	{
3194 		buf[16]=0;
3195 		printk(" Device: %-12.12s", buf);
3196 	}
3197 	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3198 	{
3199 		buf[8]=0;
3200 		printk(" Rev: %-12.12s\n", buf);
3201 	}
3202 #ifdef DEBUG
3203 	 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3204 	 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3205 	 printk(KERN_INFO "\tFlags: ");
3206 
3207 	 if(d->lct_data.device_flags&(1<<0))
3208 		  printk("C");	     // ConfigDialog requested
3209 	 if(d->lct_data.device_flags&(1<<1))
3210 		  printk("U");	     // Multi-user capable
3211 	 if(!(d->lct_data.device_flags&(1<<4)))
3212 		  printk("P");	     // Peer service enabled!
3213 	 if(!(d->lct_data.device_flags&(1<<5)))
3214 		  printk("M");	     // Mgmt service enabled!
3215 	 printk("\n");
3216 #endif
3217 }
3218 
3219 #ifdef DEBUG
3220 /*
3221  *	Do i2o class name lookup
3222  */
3223 static const char *adpt_i2o_get_class_name(int class)
3224 {
3225 	int idx = 16;
3226 	static char *i2o_class_name[] = {
3227 		"Executive",
3228 		"Device Driver Module",
3229 		"Block Device",
3230 		"Tape Device",
3231 		"LAN Interface",
3232 		"WAN Interface",
3233 		"Fibre Channel Port",
3234 		"Fibre Channel Device",
3235 		"SCSI Device",
3236 		"ATE Port",
3237 		"ATE Device",
3238 		"Floppy Controller",
3239 		"Floppy Device",
3240 		"Secondary Bus Port",
3241 		"Peer Transport Agent",
3242 		"Peer Transport",
3243 		"Unknown"
3244 	};
3245 
3246 	switch(class&0xFFF) {
3247 	case I2O_CLASS_EXECUTIVE:
3248 		idx = 0; break;
3249 	case I2O_CLASS_DDM:
3250 		idx = 1; break;
3251 	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3252 		idx = 2; break;
3253 	case I2O_CLASS_SEQUENTIAL_STORAGE:
3254 		idx = 3; break;
3255 	case I2O_CLASS_LAN:
3256 		idx = 4; break;
3257 	case I2O_CLASS_WAN:
3258 		idx = 5; break;
3259 	case I2O_CLASS_FIBRE_CHANNEL_PORT:
3260 		idx = 6; break;
3261 	case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3262 		idx = 7; break;
3263 	case I2O_CLASS_SCSI_PERIPHERAL:
3264 		idx = 8; break;
3265 	case I2O_CLASS_ATE_PORT:
3266 		idx = 9; break;
3267 	case I2O_CLASS_ATE_PERIPHERAL:
3268 		idx = 10; break;
3269 	case I2O_CLASS_FLOPPY_CONTROLLER:
3270 		idx = 11; break;
3271 	case I2O_CLASS_FLOPPY_DEVICE:
3272 		idx = 12; break;
3273 	case I2O_CLASS_BUS_ADAPTER_PORT:
3274 		idx = 13; break;
3275 	case I2O_CLASS_PEER_TRANSPORT_AGENT:
3276 		idx = 14; break;
3277 	case I2O_CLASS_PEER_TRANSPORT:
3278 		idx = 15; break;
3279 	}
3280 	return i2o_class_name[idx];
3281 }
3282 #endif
3283 
3284 
3285 static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3286 {
3287 	u32 msg[6];
3288 	int ret, size = sizeof(i2o_hrt);
3289 
3290 	do {
3291 		if (pHba->hrt == NULL) {
3292 			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
3293 					size, &pHba->hrt_pa, GFP_KERNEL);
3294 			if (pHba->hrt == NULL) {
3295 				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3296 				return -ENOMEM;
3297 			}
3298 		}
3299 
3300 		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3301 		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3302 		msg[2]= 0;
3303 		msg[3]= 0;
3304 		msg[4]= (0xD0000000 | size);    /* Simple transaction */
3305 		msg[5]= (u32)pHba->hrt_pa;	/* Dump it here */
3306 
3307 		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3308 			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3309 			return ret;
3310 		}
3311 
3312 		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3313 			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3314 			dma_free_coherent(&pHba->pDev->dev, size,
3315 				pHba->hrt, pHba->hrt_pa);
3316 			size = newsize;
3317 			pHba->hrt = NULL;
3318 		}
3319 	} while(pHba->hrt == NULL);
3320 	return 0;
3321 }
3322 
3323 /*
3324  *	 Query one scalar group value or a whole scalar group.
3325  */
3326 static int adpt_i2o_query_scalar(adpt_hba* pHba,
3327 			int group, int field, void *buf, int buflen)
3328 {
3329 	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3330 	u8 *opblk_va;
3331 	dma_addr_t opblk_pa;
3332 	u8 *resblk_va;
3333 	dma_addr_t resblk_pa;
3334 
3335 	int size;
3336 
3337 	/* 8 bytes for header */
3338 	resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3339 			sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3340 	if (resblk_va == NULL) {
3341 		printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3342 		return -ENOMEM;
3343 	}
3344 
3345 	opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3346 			sizeof(opblk), &opblk_pa, GFP_KERNEL);
3347 	if (opblk_va == NULL) {
3348 		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3349 			resblk_va, resblk_pa);
3350 		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
3351 			pHba->name);
3352 		return -ENOMEM;
3353 	}
3354 	if (field == -1)  		/* whole group */
3355 			opblk[4] = -1;
3356 
3357 	memcpy(opblk_va, opblk, sizeof(opblk));
3358 	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3359 		opblk_va, opblk_pa, sizeof(opblk),
3360 		resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3361 	dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
3362 	if (size == -ETIME) {
3363 		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3364 							resblk_va, resblk_pa);
3365 		printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3366 		return -ETIME;
3367 	} else if (size == -EINTR) {
3368 		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3369 							resblk_va, resblk_pa);
3370 		printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3371 		return -EINTR;
3372 	}
3373 
3374 	memcpy(buf, resblk_va+8, buflen);  /* cut off header */
3375 
3376 	dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3377 						resblk_va, resblk_pa);
3378 	if (size < 0)
3379 		return size;
3380 
3381 	return buflen;
3382 }
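
/*
 * Illustrative builder for the six-u16 operation block consumed
 * above; a field index of -1 widens the request to the whole scalar
 * group, mirroring the field == -1 branch.  Helper name is an
 * assumption.
 */
static void adpt_build_opblk_sketch(unsigned short opblk[6],
				    unsigned short group, int field)
{
	opblk[0] = 1;				/* operation count */
	opblk[1] = 0;				/* reserved */
	opblk[2] = I2O_PARAMS_FIELD_GET;	/* operation code */
	opblk[3] = group;			/* parameter group number */
	opblk[4] = (field == -1) ? 0xffff : 1;	/* field count, -1 = all */
	opblk[5] = (unsigned short)field;	/* field index */
}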
3383 
3384 
3385 /*	Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3386  *
3387  *	This function can be used for all UtilParamsGet/Set operations.
3388  *	The OperationBlock is given in opblk-buffer,
3389  *	and results are returned in resblk-buffer.
3390  *	Note that the minimum sized resblk is 8 bytes and contains
3391  *	ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3392  */
3393 static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3394 		  void *opblk_va,  dma_addr_t opblk_pa, int oplen,
3395 		void *resblk_va, dma_addr_t resblk_pa, int reslen)
3396 {
3397 	u32 msg[9];
3398 	u32 *res = (u32 *)resblk_va;
3399 	int wait_status;
3400 
3401 	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3402 	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
3403 	msg[2] = 0;
3404 	msg[3] = 0;
3405 	msg[4] = 0;
3406 	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
3407 	msg[6] = (u32)opblk_pa;
3408 	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
3409 	msg[8] = (u32)resblk_pa;
3410 
3411 	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3412 		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
3413    		return wait_status; 	/* -DetailedStatus */
3414 	}
3415 
3416 	if (res[1]&0x00FF0000) { 	/* BlockStatus != SUCCESS */
3417 		printk(KERN_WARNING "%s: %s - Error:\n  ErrorInfoSize = 0x%02x, "
3418 			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3419 			pHba->name,
3420 			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3421 							 : "PARAMS_GET",
3422 			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3423 		return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3424 	}
3425 
3426 	 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
3427 }
3428 
3429 
3430 static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3431 {
3432 	u32 msg[4];
3433 	int ret;
3434 
3435 	adpt_i2o_status_get(pHba);
3436 
3437 	/* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3438 
3439 	if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3440    	   (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3441 		return 0;
3442 	}
3443 
3444 	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3445 	msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3446 	msg[2] = 0;
3447 	msg[3] = 0;
3448 
3449 	if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3450 		printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3451 				pHba->unit, -ret);
3452 	} else {
3453 		printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3454 	}
3455 
3456 	adpt_i2o_status_get(pHba);
3457 	return ret;
3458 }
3459 
3460 
3461 /*
3462  * Enable IOP. Allows the IOP to resume external operations.
3463  */
3464 static int adpt_i2o_enable_hba(adpt_hba* pHba)
3465 {
3466 	u32 msg[4];
3467 	int ret;
3468 
3469 	adpt_i2o_status_get(pHba);
3470 	if(!pHba->status_block){
3471 		return -ENOMEM;
3472 	}
3473 	/* Enable only allowed on READY state */
3474 	if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3475 		return 0;
3476 
3477 	if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3478 		return -EINVAL;
3479 
3480 	msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3481 	msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3482 	msg[2]= 0;
3483 	msg[3]= 0;
3484 
3485 	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3486 		printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3487 			pHba->name, ret);
3488 	} else {
3489 		PDEBUG("%s: Enabled.\n", pHba->name);
3490 	}
3491 
3492 	adpt_i2o_status_get(pHba);
3493 	return ret;
3494 }
3495 
3496 
3497 static int adpt_i2o_systab_send(adpt_hba* pHba)
3498 {
3499 	 u32 msg[12];
3500 	 int ret;
3501 
3502 	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3503 	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3504 	msg[2] = 0;
3505 	msg[3] = 0;
3506 	msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3507 	msg[5] = 0;				   /* Segment 0 */
3508 
3509 	/*
3510 	 * Provide three SGL-elements:
3511 	 * System table (SysTab), Private memory space declaration and
3512 	 * Private i/o space declaration
3513 	 */
3514 	msg[6] = 0x54000000 | sys_tbl_len;
3515 	msg[7] = (u32)sys_tbl_pa;
3516 	msg[8] = 0x54000000 | 0;
3517 	msg[9] = 0;
3518 	msg[10] = 0xD4000000 | 0;
3519 	msg[11] = 0;
3520 
3521 	if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3522 		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3523 			pHba->name, ret);
3524 	}
3525 #ifdef DEBUG
3526 	else {
3527 		PINFO("%s: SysTab set.\n", pHba->name);
3528 	}
3529 #endif
3530 
3531 	return ret;
3532  }
3533 
3534 
3535 /*============================================================================
3536  *
3537  *============================================================================
3538  */
3539 
3540 
3541 #ifdef UARTDELAY
3542 
3543 static void adpt_delay(int millisec)
3544 {
3545 	int i;
3546 	for (i = 0; i < millisec; i++) {
3547 		udelay(1000);	/* delay for one millisecond */
3548 	}
3549 }
3550 
3551 #endif
3552 
3553 static struct scsi_host_template driver_template = {
3554 	.module			= THIS_MODULE,
3555 	.name			= "dpt_i2o",
3556 	.proc_name		= "dpt_i2o",
3557 	.show_info		= adpt_show_info,
3558 	.info			= adpt_info,
3559 	.queuecommand		= adpt_queue,
3560 	.eh_abort_handler	= adpt_abort,
3561 	.eh_device_reset_handler = adpt_device_reset,
3562 	.eh_bus_reset_handler	= adpt_bus_reset,
3563 	.eh_host_reset_handler	= adpt_reset,
3564 	.bios_param		= adpt_bios_param,
3565 	.slave_configure	= adpt_slave_configure,
3566 	.can_queue		= MAX_TO_IOP_MESSAGES,
3567 	.this_id		= 7,
3568 	.use_clustering		= ENABLE_CLUSTERING,
3569 };
3570 
3571 static int __init adpt_init(void)
3572 {
3573 	int		error;
3574 	adpt_hba	*pHba, *next;
3575 
3576 	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3577 
3578 	error = adpt_detect(&driver_template);
3579 	if (error < 0)
3580 		return error;
3581 	if (hba_chain == NULL)
3582 		return -ENODEV;
3583 
3584 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
3585 		error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3586 		if (error)
3587 			goto fail;
3588 		scsi_scan_host(pHba->host);
3589 	}
3590 	return 0;
3591 fail:
3592 	for (pHba = hba_chain; pHba; pHba = next) {
3593 		next = pHba->next;
3594 		scsi_remove_host(pHba->host);
3595 	}
3596 	return error;
3597 }
3598 
3599 static void __exit adpt_exit(void)
3600 {
3601 	adpt_hba	*pHba, *next;
3602 
3603 	for (pHba = hba_chain; pHba; pHba = pHba->next)
3604 		scsi_remove_host(pHba->host);
3605 	for (pHba = hba_chain; pHba; pHba = next) {
3606 		next = pHba->next;
3607 		adpt_release(pHba->host);
3608 	}
3609 }
3610 
3611 module_init(adpt_init);
3612 module_exit(adpt_exit);
3613 
3614 MODULE_LICENSE("GPL");
3615