1/* -*- mode: c; c-basic-offset: 8 -*- */
2
3/* NCR (or Symbios) 53c700 and 53c700-66 Driver
4 *
5 * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
6**-----------------------------------------------------------------------------
7**
8**  This program is free software; you can redistribute it and/or modify
9**  it under the terms of the GNU General Public License as published by
10**  the Free Software Foundation; either version 2 of the License, or
11**  (at your option) any later version.
12**
13**  This program is distributed in the hope that it will be useful,
14**  but WITHOUT ANY WARRANTY; without even the implied warranty of
15**  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16**  GNU General Public License for more details.
17**
18**  You should have received a copy of the GNU General Public License
19**  along with this program; if not, write to the Free Software
20**  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21**
22**-----------------------------------------------------------------------------
23 */
24
25/* Notes:
26 *
27 * This driver is designed exclusively for these chips (virtually the
28 * earliest of the scripts engine chips).  They need their own drivers
29 * because they are missing so many of the scripts and snazzy register
30 * features of their elder brothers (the 710, 720 and 770).
31 *
32 * The 700 is the lowliest of the line, it can only do async SCSI.
33 * The 700-66 can at least do synchronous SCSI up to 10MHz.
34 *
35 * The 700 chip has no host bus interface logic of its own.  However,
36 * it is usually mapped to a location with well defined register
37 * offsets.  Therefore, if you can determine the base address and the
38 * irq your board incorporating this chip uses, you can probably use
39 * this driver to run it (although you'll probably have to write a
40 * minimal wrapper for the purpose---see the NCR_D700 driver for
41 * details about how to do this).
42 *
43 *
44 * TODO List:
45 *
46 * 1. Better statistics in the proc fs
47 *
48 * 2. Implement message queue (queues SCSI messages like commands) and make
49 *    the abort and device reset functions use them.
50 * */
51
52/* CHANGELOG
53 *
54 * Version 2.8
55 *
56 * Fixed bad bug affecting tag starvation processing (previously the
57 * driver would hang the system if too many tags starved.  Also fixed
58 * bad bug having to do with 10 byte command processing and REQUEST
59 * SENSE (the command would loop forever getting a transfer length
60 * mismatch in the CMD phase).
61 *
62 * Version 2.7
63 *
64 * Fixed scripts problem which caused certain devices (notably CDRWs)
65 * to hang on initial INQUIRY.  Updated NCR_700_readl/writel to use
66 * __raw_readl/writel for parisc compatibility (Thomas
67 * Bogendoerfer). Added missing SCp->request_bufflen initialisation
68 * for sense requests (Ryan Bradetich).
69 *
70 * Version 2.6
71 *
72 * Following test of the 64 bit parisc kernel by Richard Hirst,
73 * several problems have now been corrected.  Also adds support for
74 * consistent memory allocation.
75 *
76 * Version 2.5
77 *
78 * More Compatibility changes for 710 (now actually works).  Enhanced
79 * support for odd clock speeds which constrain SDTR negotiations.
80 * correct cacheline separation for scsi messages and status for
81 * incoherent architectures.  Use of the pci mapping functions on
82 * buffers to begin support for 64 bit drivers.
83 *
84 * Version 2.4
85 *
86 * Added support for the 53c710 chip (in 53c700 emulation mode only---no
87 * special 53c710 instructions or registers are used).
88 *
89 * Version 2.3
90 *
91 * More endianness/cache coherency changes.
92 *
93 * Better bad device handling (handles devices lying about tag
94 * queueing support and devices which fail to provide sense data on
95 * contingent allegiance conditions)
96 *
97 * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently
98 * debugging this driver on the parisc architecture and suggesting
99 * many improvements and bug fixes.
100 *
101 * Thanks also go to Linuxcare Inc. for providing several PARISC
102 * machines for me to debug the driver on.
103 *
104 * Version 2.2
105 *
106 * Made the driver mem or io mapped; added endian invariance; added
107 * dma cache flushing operations for architectures which need it;
108 * added support for more varied clocking speeds.
109 *
110 * Version 2.1
111 *
112 * Initial modularisation from the D700.  See NCR_D700.c for the rest of
113 * the changelog.
114 * */
115#define NCR_700_VERSION "2.8"
116
117#include <linux/kernel.h>
118#include <linux/types.h>
119#include <linux/string.h>
120#include <linux/slab.h>
121#include <linux/ioport.h>
122#include <linux/delay.h>
123#include <linux/spinlock.h>
124#include <linux/completion.h>
125#include <linux/init.h>
126#include <linux/proc_fs.h>
127#include <linux/blkdev.h>
128#include <linux/module.h>
129#include <linux/interrupt.h>
130#include <linux/device.h>
131#include <asm/dma.h>
132#include <asm/io.h>
133#include <asm/pgtable.h>
134#include <asm/byteorder.h>
135
136#include <scsi/scsi.h>
137#include <scsi/scsi_cmnd.h>
138#include <scsi/scsi_dbg.h>
139#include <scsi/scsi_eh.h>
140#include <scsi/scsi_host.h>
141#include <scsi/scsi_tcq.h>
142#include <scsi/scsi_transport.h>
143#include <scsi/scsi_transport_spi.h>
144
145#include "53c700.h"
146
147/* NOTE: For 64 bit drivers there are points in the code where we use
148 * a non dereferenceable pointer to point to a structure in dma-able
149 * memory (which is 32 bits) so that we can use all of the structure
150 * operations but take the address at the end.  This macro allows us
151 * to truncate the 64 bit pointer down to 32 bits without the compiler
152 * complaining */
153#define to32bit(x)	((__u32)((unsigned long)(x)))
154
155#ifdef NCR_700_DEBUG
156#define STATIC
157#else
158#define STATIC static
159#endif
160
161MODULE_AUTHOR("James Bottomley");
162MODULE_DESCRIPTION("53c700 and 53c700-66 Driver");
163MODULE_LICENSE("GPL");
164
165/* This is the script */
166#include "53c700_d.h"
167
168
169STATIC int NCR_700_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *);
170STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
171STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt);
172STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
173STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
174STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
175STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
176STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
177STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
178static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
179
180STATIC struct device_attribute *NCR_700_dev_attrs[];
181
182STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
183
184static char *NCR_700_phase[] = {
185	"",
186	"after selection",
187	"before command phase",
188	"after command phase",
189	"after status phase",
190	"after data in phase",
191	"after data out phase",
192	"during data phase",
193};
194
195static char *NCR_700_condition[] = {
196	"",
197	"NOT MSG_OUT",
198	"UNEXPECTED PHASE",
199	"NOT MSG_IN",
200	"UNEXPECTED MSG",
201	"MSG_IN",
202	"SDTR_MSG RECEIVED",
203	"REJECT_MSG RECEIVED",
204	"DISCONNECT_MSG RECEIVED",
205	"MSG_OUT",
206	"DATA_IN",
207
208};
209
210static char *NCR_700_fatal_messages[] = {
211	"unexpected message after reselection",
212	"still MSG_OUT after message injection",
213	"not MSG_IN after selection",
214	"Illegal message length received",
215};
216
217static char *NCR_700_SBCL_bits[] = {
218	"IO ",
219	"CD ",
220	"MSG ",
221	"ATN ",
222	"SEL ",
223	"BSY ",
224	"ACK ",
225	"REQ ",
226};
227
228static char *NCR_700_SBCL_to_phase[] = {
229	"DATA_OUT",
230	"DATA_IN",
231	"CMD_OUT",
232	"STATE",
233	"ILLEGAL PHASE",
234	"ILLEGAL PHASE",
235	"MSG OUT",
236	"MSG IN",
237};
238
239/* This translates the SDTR message offset and period to a value
240 * which can be loaded into the SXFER_REG.
241 *
242 * NOTE: According to SCSI-2, the true transfer period (in ns) is
243 *       actually four times this period value */
244static inline __u8
245NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
246			       __u8 offset, __u8 period)
247{
248	int XFERP;
249
250	__u8 min_xferp = (hostdata->chip710
251			  ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
252	__u8 max_offset = (hostdata->chip710
253			   ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET);
254
255	if(offset == 0)
256		return 0;
257
258	if(period < hostdata->min_period) {
259		printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %d\n", period*4, NCR_700_MIN_PERIOD*4);
260		period = hostdata->min_period;
261	}
262	XFERP = (period*4 * hostdata->sync_clock)/1000 - 4;
263	if(offset > max_offset) {
264		printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n",
265		       offset, max_offset);
266		offset = max_offset;
267	}
268	if(XFERP < min_xferp) {
269		XFERP =  min_xferp;
270	}
271	return (offset & 0x0f) | (XFERP & 0x07)<<4;
272}
273
274static inline __u8
275NCR_700_get_SXFER(struct scsi_device *SDp)
276{
277	struct NCR_700_Host_Parameters *hostdata =
278		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
279
280	return NCR_700_offset_period_to_sxfer(hostdata,
281					      spi_offset(SDp->sdev_target),
282					      spi_period(SDp->sdev_target));
283}
284
285struct Scsi_Host *
286NCR_700_detect(struct scsi_host_template *tpnt,
287	       struct NCR_700_Host_Parameters *hostdata, struct device *dev)
288{
289	dma_addr_t pScript, pSlots;
290	__u8 *memory;
291	__u32 *script;
292	struct Scsi_Host *host;
293	static int banner = 0;
294	int j;
295
296	if(tpnt->sdev_attrs == NULL)
297		tpnt->sdev_attrs = NCR_700_dev_attrs;
298
299	memory = dma_alloc_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
300				       &pScript, GFP_KERNEL);
301	if(memory == NULL) {
302		printk(KERN_ERR "53c700: Failed to allocate memory for driver, detatching\n");
303		return NULL;
304	}
305
306	script = (__u32 *)memory;
307	hostdata->msgin = memory + MSGIN_OFFSET;
308	hostdata->msgout = memory + MSGOUT_OFFSET;
309	hostdata->status = memory + STATUS_OFFSET;
310	hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
311	hostdata->dev = dev;
312
313	pSlots = pScript + SLOTS_OFFSET;
314
315	/* Fill in the missing routines from the host template */
316	tpnt->queuecommand = NCR_700_queuecommand;
317	tpnt->eh_abort_handler = NCR_700_abort;
318	tpnt->eh_bus_reset_handler = NCR_700_bus_reset;
319	tpnt->eh_host_reset_handler = NCR_700_host_reset;
320	tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
321	tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
322	tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
323	tpnt->use_clustering = ENABLE_CLUSTERING;
324	tpnt->slave_configure = NCR_700_slave_configure;
325	tpnt->slave_destroy = NCR_700_slave_destroy;
326	tpnt->slave_alloc = NCR_700_slave_alloc;
327	tpnt->change_queue_depth = NCR_700_change_queue_depth;
328	tpnt->use_blk_tags = 1;
329
330	if(tpnt->name == NULL)
331		tpnt->name = "53c700";
332	if(tpnt->proc_name == NULL)
333		tpnt->proc_name = "53c700";
334
335	host = scsi_host_alloc(tpnt, 4);
336	if (!host)
337		return NULL;
338	memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot)
339	       * NCR_700_COMMAND_SLOTS_PER_HOST);
340	for (j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) {
341		dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0]
342					  - (unsigned long)&hostdata->slots[0].SG[0]);
343		hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset));
344		if(j == 0)
345			hostdata->free_list = &hostdata->slots[j];
346		else
347			hostdata->slots[j-1].ITL_forw = &hostdata->slots[j];
348		hostdata->slots[j].state = NCR_700_SLOT_FREE;
349	}
350
351	for (j = 0; j < ARRAY_SIZE(SCRIPT); j++)
352		script[j] = bS_to_host(SCRIPT[j]);
353
354	/* adjust all labels to be bus physical */
355	for (j = 0; j < PATCHES; j++)
356		script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
357	/* now patch up fixed addresses. */
358	script_patch_32(hostdata->dev, script, MessageLocation,
359			pScript + MSGOUT_OFFSET);
360	script_patch_32(hostdata->dev, script, StatusAddress,
361			pScript + STATUS_OFFSET);
362	script_patch_32(hostdata->dev, script, ReceiveMsgAddress,
363			pScript + MSGIN_OFFSET);
364
365	hostdata->script = script;
366	hostdata->pScript = pScript;
367	dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE);
368	hostdata->state = NCR_700_HOST_FREE;
369	hostdata->cmd = NULL;
370	host->max_id = 8;
371	host->max_lun = NCR_700_MAX_LUNS;
372	BUG_ON(NCR_700_transport_template == NULL);
373	host->transportt = NCR_700_transport_template;
374	host->unique_id = (unsigned long)hostdata->base;
375	hostdata->eh_complete = NULL;
376	host->hostdata[0] = (unsigned long)hostdata;
377	/* kick the chip */
378	NCR_700_writeb(0xff, host, CTEST9_REG);
379	if (hostdata->chip710)
380		hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f;
381	else
382		hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f;
383	hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0);
384	if (banner == 0) {
385		printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By James.Bottomley@HansenPartnership.com\n");
386		banner = 1;
387	}
388	printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no,
389	       hostdata->chip710 ? "53c710" :
390	       (hostdata->fast ? "53c700-66" : "53c700"),
391	       hostdata->rev, hostdata->differential ?
392	       "(Differential)" : "");
393	/* reset the chip */
394	NCR_700_chip_reset(host);
395
396	if (scsi_add_host(host, dev)) {
397		dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n");
398		scsi_host_put(host);
399		return NULL;
400	}
401
402	spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD :
403		SPI_SIGNAL_SE;
404
405	return host;
406}
407
408int
409NCR_700_release(struct Scsi_Host *host)
410{
411	struct NCR_700_Host_Parameters *hostdata =
412		(struct NCR_700_Host_Parameters *)host->hostdata[0];
413
414	dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
415			       hostdata->script, hostdata->pScript);
416	return 1;
417}
418
419static inline __u8
420NCR_700_identify(int can_disconnect, __u8 lun)
421{
422	return IDENTIFY_BASE |
423		((can_disconnect) ? 0x40 : 0) |
424		(lun & NCR_700_LUN_MASK);
425}
426
427/*
428 * Function : static int data_residual (Scsi_Host *host)
429 *
430 * Purpose : return residual data count of what's in the chip.  If you
431 * really want to know what this function is doing, it's almost a
432 * direct transcription of the algorithm described in the 53c710
433 * guide, except that the DBC and DFIFO registers are only 6 bits
434 * wide on a 53c700.
435 *
436 * Inputs : host - SCSI host */
437static inline int
438NCR_700_data_residual (struct Scsi_Host *host) {
439	struct NCR_700_Host_Parameters *hostdata =
440		(struct NCR_700_Host_Parameters *)host->hostdata[0];
441	int count, synchronous = 0;
442	unsigned int ddir;
443
444	if(hostdata->chip710) {
445		count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) -
446			 (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f;
447	} else {
448		count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) -
449			 (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f;
450	}
451
452	if(hostdata->fast)
453		synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f;
454
455	/* get the data direction */
456	ddir = NCR_700_readb(host, CTEST0_REG) & 0x01;
457
458	if (ddir) {
459		/* Receive */
460		if (synchronous)
461			count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4;
462		else
463			if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL)
464				++count;
465	} else {
466		/* Send */
467		__u8 sstat = NCR_700_readb(host, SSTAT1_REG);
468		if (sstat & SODL_REG_FULL)
469			++count;
470		if (synchronous && (sstat & SODR_REG_FULL))
471			++count;
472	}
473#ifdef NCR_700_DEBUG
474	if(count)
475		printk("RESIDUAL IS %d (ddir %d)\n", count, ddir);
476#endif
477	return count;
478}
479
480/* print out the SCSI wires and corresponding phase from the SBCL register
481 * in the chip */
482static inline char *
483sbcl_to_string(__u8 sbcl)
484{
485	int i;
486	static char ret[256];
487
488	ret[0]='\0';
489	for(i=0; i<8; i++) {
490		if((1<<i) & sbcl)
491			strcat(ret, NCR_700_SBCL_bits[i]);
492	}
493	strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]);
494	return ret;
495}
496
497static inline __u8
498bitmap_to_number(__u8 bitmap)
499{
500	__u8 i;
501
502	for(i=0; i<8 && !(bitmap &(1<<i)); i++)
503		;
504	return i;
505}
506
507/* Pull a slot off the free list */
508STATIC struct NCR_700_command_slot *
509find_empty_slot(struct NCR_700_Host_Parameters *hostdata)
510{
511	struct NCR_700_command_slot *slot = hostdata->free_list;
512
513	if(slot == NULL) {
514		/* sanity check */
515		if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST)
516			printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST);
517		return NULL;
518	}
519
520	if(slot->state != NCR_700_SLOT_FREE)
521		/* should panic! */
522		printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n");
523
524
525	hostdata->free_list = slot->ITL_forw;
526	slot->ITL_forw = NULL;
527
528
529	/* NOTE: set the state to busy here, not queued, since this
530	 * indicates the slot is in use and cannot be run by the IRQ
531	 * finish routine.  If we cannot queue the command when it
532	 * is properly build, we then change to NCR_700_SLOT_QUEUED */
533	slot->state = NCR_700_SLOT_BUSY;
534	slot->flags = 0;
535	hostdata->command_slot_count++;
536
537	return slot;
538}
539
540STATIC void
541free_slot(struct NCR_700_command_slot *slot,
542	  struct NCR_700_Host_Parameters *hostdata)
543{
544	if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) {
545		printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot);
546	}
547	if(slot->state == NCR_700_SLOT_FREE) {
548		printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot);
549	}
550
551	slot->resume_offset = 0;
552	slot->cmnd = NULL;
553	slot->state = NCR_700_SLOT_FREE;
554	slot->ITL_forw = hostdata->free_list;
555	hostdata->free_list = slot;
556	hostdata->command_slot_count--;
557}
558
559
560/* This routine really does very little.  The command is indexed on
561   the ITL and (if tagged) the ITLQ lists in _queuecommand */
562STATIC void
563save_for_reselection(struct NCR_700_Host_Parameters *hostdata,
564		     struct scsi_cmnd *SCp, __u32 dsp)
565{
566	/* Its just possible that this gets executed twice */
567	if(SCp != NULL) {
568		struct NCR_700_command_slot *slot =
569			(struct NCR_700_command_slot *)SCp->host_scribble;
570
571		slot->resume_offset = dsp;
572	}
573	hostdata->state = NCR_700_HOST_FREE;
574	hostdata->cmd = NULL;
575}
576
577STATIC inline void
578NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
579	      struct NCR_700_command_slot *slot)
580{
581	if(SCp->sc_data_direction != DMA_NONE &&
582	   SCp->sc_data_direction != DMA_BIDIRECTIONAL)
583		scsi_dma_unmap(SCp);
584}
585
586STATIC inline void
587NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
588	       struct scsi_cmnd *SCp, int result)
589{
590	hostdata->state = NCR_700_HOST_FREE;
591	hostdata->cmd = NULL;
592
593	if(SCp != NULL) {
594		struct NCR_700_command_slot *slot =
595			(struct NCR_700_command_slot *)SCp->host_scribble;
596
597		dma_unmap_single(hostdata->dev, slot->pCmd,
598				 MAX_COMMAND_SIZE, DMA_TO_DEVICE);
599		if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
600			char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
601
602			dma_unmap_single(hostdata->dev, slot->dma_handle,
603					 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
604			/* restore the old result if the request sense was
605			 * successful */
606			if (result == 0)
607				result = cmnd[7];
608			/* restore the original length */
609			SCp->cmd_len = cmnd[8];
610		} else
611			NCR_700_unmap(hostdata, SCp, slot);
612
613		free_slot(slot, hostdata);
614#ifdef NCR_700_DEBUG
615		if(NCR_700_get_depth(SCp->device) == 0 ||
616		   NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
617			printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n",
618			       NCR_700_get_depth(SCp->device));
619#endif /* NCR_700_DEBUG */
620		NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1);
621
622		SCp->host_scribble = NULL;
623		SCp->result = result;
624		SCp->scsi_done(SCp);
625	} else {
626		printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
627	}
628}
629
630
631STATIC void
632NCR_700_internal_bus_reset(struct Scsi_Host *host)
633{
634	/* Bus reset */
635	NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG);
636	udelay(50);
637	NCR_700_writeb(0, host, SCNTL1_REG);
638
639}
640
641STATIC void
642NCR_700_chip_setup(struct Scsi_Host *host)
643{
644	struct NCR_700_Host_Parameters *hostdata =
645		(struct NCR_700_Host_Parameters *)host->hostdata[0];
646	__u8 min_period;
647	__u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
648
649	if(hostdata->chip710) {
650		__u8 burst_disable = 0;
651		__u8 burst_length = 0;
652
653		switch (hostdata->burst_length) {
654			case 1:
655			        burst_length = BURST_LENGTH_1;
656			        break;
657			case 2:
658			        burst_length = BURST_LENGTH_2;
659			        break;
660			case 4:
661			        burst_length = BURST_LENGTH_4;
662			        break;
663			case 8:
664			        burst_length = BURST_LENGTH_8;
665			        break;
666			default:
667			        burst_disable = BURST_DISABLE;
668			        break;
669		}
670		hostdata->dcntl_extra |= COMPAT_700_MODE;
671
672		NCR_700_writeb(hostdata->dcntl_extra, host, DCNTL_REG);
673		NCR_700_writeb(burst_length | hostdata->dmode_extra,
674			       host, DMODE_710_REG);
675		NCR_700_writeb(burst_disable | hostdata->ctest7_extra |
676			       (hostdata->differential ? DIFF : 0),
677			       host, CTEST7_REG);
678		NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
679		NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
680			       | AUTO_ATN, host, SCNTL0_REG);
681	} else {
682		NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
683			       host, DMODE_700_REG);
684		NCR_700_writeb(hostdata->differential ?
685			       DIFF : 0, host, CTEST7_REG);
686		if(hostdata->fast) {
687			/* this is for 700-66, does nothing on 700 */
688			NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION
689				       | GENERATE_RECEIVE_PARITY, host,
690				       CTEST8_REG);
691		} else {
692			NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY
693				       | PARITY | AUTO_ATN, host, SCNTL0_REG);
694		}
695	}
696
697	NCR_700_writeb(1 << host->this_id, host, SCID_REG);
698	NCR_700_writeb(0, host, SBCL_REG);
699	NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG);
700
701	NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT | UX_DISC_INT
702	     | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG);
703
704	NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG);
705	NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG);
706	if(hostdata->clock > 75) {
707		printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75Mhz is the maximum this chip can be driven at\n", hostdata->clock);
708		/* do the best we can, but the async clock will be out
709		 * of spec: sync divider 2, async divider 3 */
710		DEBUG(("53c700: sync 2 async 3\n"));
711		NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
712		NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
713		hostdata->sync_clock = hostdata->clock/2;
714	} else	if(hostdata->clock > 50  && hostdata->clock <= 75) {
715		/* sync divider 1.5, async divider 3 */
716		DEBUG(("53c700: sync 1.5 async 3\n"));
717		NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
718		NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
719		hostdata->sync_clock = hostdata->clock*2;
720		hostdata->sync_clock /= 3;
721
722	} else if(hostdata->clock > 37 && hostdata->clock <= 50) {
723		/* sync divider 1, async divider 2 */
724		DEBUG(("53c700: sync 1 async 2\n"));
725		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
726		NCR_700_writeb(ASYNC_DIV_2_0 | hostdata->dcntl_extra, host, DCNTL_REG);
727		hostdata->sync_clock = hostdata->clock;
728	} else if(hostdata->clock > 25 && hostdata->clock <=37) {
729		/* sync divider 1, async divider 1.5 */
730		DEBUG(("53c700: sync 1 async 1.5\n"));
731		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
732		NCR_700_writeb(ASYNC_DIV_1_5 | hostdata->dcntl_extra, host, DCNTL_REG);
733		hostdata->sync_clock = hostdata->clock;
734	} else {
735		DEBUG(("53c700: sync 1 async 1\n"));
736		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
737		NCR_700_writeb(ASYNC_DIV_1_0 | hostdata->dcntl_extra, host, DCNTL_REG);
738		/* sync divider 1, async divider 1 */
739		hostdata->sync_clock = hostdata->clock;
740	}
741	/* Calculate the actual minimum period that can be supported
742	 * by our synchronous clock speed.  See the 710 manual for
743	 * exact details of this calculation which is based on a
744	 * setting of the SXFER register */
745	min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock);
746	hostdata->min_period = NCR_700_MIN_PERIOD;
747	if(min_period > NCR_700_MIN_PERIOD)
748		hostdata->min_period = min_period;
749}
750
751STATIC void
752NCR_700_chip_reset(struct Scsi_Host *host)
753{
754	struct NCR_700_Host_Parameters *hostdata =
755		(struct NCR_700_Host_Parameters *)host->hostdata[0];
756	if(hostdata->chip710) {
757		NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG);
758		udelay(100);
759
760		NCR_700_writeb(0, host, ISTAT_REG);
761	} else {
762		NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG);
763		udelay(100);
764
765		NCR_700_writeb(0, host, DCNTL_REG);
766	}
767
768	mdelay(1000);
769
770	NCR_700_chip_setup(host);
771}
772
773/* The heart of the message processing engine is that the instruction
774 * immediately after the INT is the normal case (and so must be CLEAR
775 * ACK).  If we want to do something else, we call that routine in
776 * scripts and set temp to be the normal case + 8 (skipping the CLEAR
777 * ACK) so that the routine returns correctly to resume its activity
778 * */
779STATIC __u32
780process_extended_message(struct Scsi_Host *host,
781			 struct NCR_700_Host_Parameters *hostdata,
782			 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
783{
784	__u32 resume_offset = dsp, temp = dsp + 8;
785	__u8 pun = 0xff, lun = 0xff;
786
787	if(SCp != NULL) {
788		pun = SCp->device->id;
789		lun = SCp->device->lun;
790	}
791
792	switch(hostdata->msgin[2]) {
793	case A_SDTR_MSG:
794		if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
795			struct scsi_target *starget = SCp->device->sdev_target;
796			__u8 period = hostdata->msgin[3];
797			__u8 offset = hostdata->msgin[4];
798
799			if(offset == 0 || period == 0) {
800				offset = 0;
801				period = 0;
802			}
803
804			spi_offset(starget) = offset;
805			spi_period(starget) = period;
806
807			if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) {
808				spi_display_xfer_agreement(starget);
809				NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
810			}
811
812			NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
813			NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
814
815			NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
816				       host, SXFER_REG);
817
818		} else {
819			/* SDTR message out of the blue, reject it */
820			shost_printk(KERN_WARNING, host,
821				"Unexpected SDTR msg\n");
822			hostdata->msgout[0] = A_REJECT_MSG;
823			dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
824			script_patch_16(hostdata->dev, hostdata->script,
825			                MessageCount, 1);
826			/* SendMsgOut returns, so set up the return
827			 * address */
828			resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
829		}
830		break;
831
832	case A_WDTR_MSG:
833		printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
834		       host->host_no, pun, lun);
835		hostdata->msgout[0] = A_REJECT_MSG;
836		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
837		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
838		                1);
839		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
840
841		break;
842
843	default:
844		printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
845		       host->host_no, pun, lun,
846		       NCR_700_phase[(dsps & 0xf00) >> 8]);
847		spi_print_msg(hostdata->msgin);
848		printk("\n");
849		/* just reject it */
850		hostdata->msgout[0] = A_REJECT_MSG;
851		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
852		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
853		                1);
854		/* SendMsgOut returns, so set up the return
855		 * address */
856		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
857	}
858	NCR_700_writel(temp, host, TEMP_REG);
859	return resume_offset;
860}
861
862STATIC __u32
863process_message(struct Scsi_Host *host,	struct NCR_700_Host_Parameters *hostdata,
864		struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
865{
866	/* work out where to return to */
867	__u32 temp = dsp + 8, resume_offset = dsp;
868	__u8 pun = 0xff, lun = 0xff;
869
870	if(SCp != NULL) {
871		pun = SCp->device->id;
872		lun = SCp->device->lun;
873	}
874
875#ifdef NCR_700_DEBUG
876	printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun,
877	       NCR_700_phase[(dsps & 0xf00) >> 8]);
878	spi_print_msg(hostdata->msgin);
879	printk("\n");
880#endif
881
882	switch(hostdata->msgin[0]) {
883
884	case A_EXTENDED_MSG:
885		resume_offset =  process_extended_message(host, hostdata, SCp,
886							  dsp, dsps);
887		break;
888
889	case A_REJECT_MSG:
890		if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
891			/* Rejected our sync negotiation attempt */
892			spi_period(SCp->device->sdev_target) =
893				spi_offset(SCp->device->sdev_target) = 0;
894			NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
895			NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
896		} else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) {
897			/* rejected our first simple tag message */
898			scmd_printk(KERN_WARNING, SCp,
899				"Rejected first tag queue attempt, turning off tag queueing\n");
900			/* we're done negotiating */
901			NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
902			hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
903
904			SCp->device->tagged_supported = 0;
905			SCp->device->simple_tags = 0;
906			scsi_change_queue_depth(SCp->device, host->cmd_per_lun);
907		} else {
908			shost_printk(KERN_WARNING, host,
909				"(%d:%d) Unexpected REJECT Message %s\n",
910			       pun, lun,
911			       NCR_700_phase[(dsps & 0xf00) >> 8]);
912			/* however, just ignore it */
913		}
914		break;
915
916	case A_PARITY_ERROR_MSG:
917		printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no,
918		       pun, lun);
919		NCR_700_internal_bus_reset(host);
920		break;
921	case A_SIMPLE_TAG_MSG:
922		printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no,
923		       pun, lun, hostdata->msgin[1],
924		       NCR_700_phase[(dsps & 0xf00) >> 8]);
925		/* just ignore it */
926		break;
927	default:
928		printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
929		       host->host_no, pun, lun,
930		       NCR_700_phase[(dsps & 0xf00) >> 8]);
931
932		spi_print_msg(hostdata->msgin);
933		printk("\n");
934		/* just reject it */
935		hostdata->msgout[0] = A_REJECT_MSG;
936		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
937		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
938		                1);
939		/* SendMsgOut returns, so set up the return
940		 * address */
941		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
942
943		break;
944	}
945	NCR_700_writel(temp, host, TEMP_REG);
946	/* set us up to receive another message */
947	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
948	return resume_offset;
949}
950
951STATIC __u32
952process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
953			 struct Scsi_Host *host,
954			 struct NCR_700_Host_Parameters *hostdata)
955{
956	__u32 resume_offset = 0;
957	__u8 pun = 0xff, lun=0xff;
958
959	if(SCp != NULL) {
960		pun = SCp->device->id;
961		lun = SCp->device->lun;
962	}
963
964	if(dsps == A_GOOD_STATUS_AFTER_STATUS) {
965		DEBUG(("  COMMAND COMPLETE, status=%02x\n",
966		       hostdata->status[0]));
967		/* OK, if TCQ still under negotiation, we now know it works */
968		if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
969			NCR_700_set_tag_neg_state(SCp->device,
970						  NCR_700_FINISHED_TAG_NEGOTIATION);
971
972		/* check for contingent allegiance contitions */
973		if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
974		   status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
975			struct NCR_700_command_slot *slot =
976				(struct NCR_700_command_slot *)SCp->host_scribble;
977			if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
978				/* OOPS: bad device, returning another
979				 * contingent allegiance condition */
980				scmd_printk(KERN_ERR, SCp,
981					"broken device is looping in contingent allegiance: ignoring\n");
982				NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
983			} else {
984				char *cmnd =
985					NCR_700_get_sense_cmnd(SCp->device);
986#ifdef NCR_DEBUG
987				scsi_print_command(SCp);
988				printk("  cmd %p has status %d, requesting sense\n",
989				       SCp, hostdata->status[0]);
990#endif
991				/* we can destroy the command here
992				 * because the contingent allegiance
993				 * condition will cause a retry which
994				 * will re-copy the command from the
995				 * saved data_cmnd.  We also unmap any
996				 * data associated with the command
997				 * here */
998				NCR_700_unmap(hostdata, SCp, slot);
999				dma_unmap_single(hostdata->dev, slot->pCmd,
1000						 MAX_COMMAND_SIZE,
1001						 DMA_TO_DEVICE);
1002
1003				cmnd[0] = REQUEST_SENSE;
1004				cmnd[1] = (lun & 0x7) << 5;
1005				cmnd[2] = 0;
1006				cmnd[3] = 0;
1007				cmnd[4] = SCSI_SENSE_BUFFERSIZE;
1008				cmnd[5] = 0;
1009				/* Here's a quiet hack: the
1010				 * REQUEST_SENSE command is six bytes,
1011				 * so store a flag indicating that
1012				 * this was an internal sense request
1013				 * and the original status at the end
1014				 * of the command */
1015				cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
1016				cmnd[7] = hostdata->status[0];
1017				cmnd[8] = SCp->cmd_len;
1018				SCp->cmd_len = 6; /* command length for
1019						   * REQUEST_SENSE */
1020				slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1021				slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1022				slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | SCSI_SENSE_BUFFERSIZE);
1023				slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
1024				slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
1025				slot->SG[1].pAddr = 0;
1026				slot->resume_offset = hostdata->pScript;
1027				dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
1028				dma_cache_sync(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1029
1030				/* queue the command for reissue */
1031				slot->state = NCR_700_SLOT_QUEUED;
1032				slot->flags = NCR_700_FLAG_AUTOSENSE;
1033				hostdata->state = NCR_700_HOST_FREE;
1034				hostdata->cmd = NULL;
1035			}
1036		} else {
1037			// Currently rely on the mid layer evaluation
1038			// of the tag queuing capability
1039			//
1040			//if(status_byte(hostdata->status[0]) == GOOD &&
1041			//   SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) {
1042			//	/* Piggy back the tag queueing support
1043			//	 * on this command */
1044			//	dma_sync_single_for_cpu(hostdata->dev,
1045			//			    slot->dma_handle,
1046			//			    SCp->request_bufflen,
1047			//			    DMA_FROM_DEVICE);
1048			//	if(((char *)SCp->request_buffer)[7] & 0x02) {
1049			//		scmd_printk(KERN_INFO, SCp,
1050			//		     "Enabling Tag Command Queuing\n");
1051			//		hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1052			//		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1053			//	} else {
1054			//		NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1055			//		hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1056			//	}
1057			//}
1058			NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
1059		}
1060	} else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) {
1061		__u8 i = (dsps & 0xf00) >> 8;
1062
1063		scmd_printk(KERN_ERR, SCp, "UNEXPECTED PHASE %s (%s)\n",
1064		       NCR_700_phase[i],
1065		       sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1066		scmd_printk(KERN_ERR, SCp, "         len = %d, cmd =",
1067			SCp->cmd_len);
1068		scsi_print_command(SCp);
1069
1070		NCR_700_internal_bus_reset(host);
1071	} else if((dsps & 0xfffff000) == A_FATAL) {
1072		int i = (dsps & 0xfff);
1073
1074		printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n",
1075		       host->host_no, pun, lun, NCR_700_fatal_messages[i]);
1076		if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) {
1077			printk(KERN_ERR "     msg begins %02x %02x\n",
1078			       hostdata->msgin[0], hostdata->msgin[1]);
1079		}
1080		NCR_700_internal_bus_reset(host);
1081	} else if((dsps & 0xfffff0f0) == A_DISCONNECT) {
1082#ifdef NCR_700_DEBUG
1083		__u8 i = (dsps & 0xf00) >> 8;
1084
1085		printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n",
1086		       host->host_no, pun, lun,
1087		       i, NCR_700_phase[i]);
1088#endif
1089		save_for_reselection(hostdata, SCp, dsp);
1090
1091	} else if(dsps == A_RESELECTION_IDENTIFIED) {
1092		__u8 lun;
1093		struct NCR_700_command_slot *slot;
1094		__u8 reselection_id = hostdata->reselection_id;
1095		struct scsi_device *SDp;
1096
1097		lun = hostdata->msgin[0] & 0x1f;
1098
1099		hostdata->reselection_id = 0xff;
1100		DEBUG(("scsi%d: (%d:%d) RESELECTED!\n",
1101		       host->host_no, reselection_id, lun));
1102		/* clear the reselection indicator */
1103		SDp = __scsi_device_lookup(host, 0, reselection_id, lun);
1104		if(unlikely(SDp == NULL)) {
1105			printk(KERN_ERR "scsi%d: (%d:%d) HAS NO device\n",
1106			       host->host_no, reselection_id, lun);
1107			BUG();
1108		}
1109		if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) {
1110			struct scsi_cmnd *SCp = scsi_find_tag(SDp, hostdata->msgin[2]);
1111			if(unlikely(SCp == NULL)) {
1112				printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n",
1113				       host->host_no, reselection_id, lun, hostdata->msgin[2]);
1114				BUG();
1115			}
1116
1117			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1118			DDEBUG(KERN_DEBUG, SDp,
1119				"reselection is tag %d, slot %p(%d)\n",
1120				hostdata->msgin[2], slot, slot->tag);
1121		} else {
1122			struct scsi_cmnd *SCp = scsi_find_tag(SDp, SCSI_NO_TAG);
1123			if(unlikely(SCp == NULL)) {
1124				sdev_printk(KERN_ERR, SDp,
1125					"no saved request for untagged cmd\n");
1126				BUG();
1127			}
1128			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1129		}
1130
1131		if(slot == NULL) {
1132			printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n",
1133			       host->host_no, reselection_id, lun,
1134			       hostdata->msgin[0], hostdata->msgin[1],
1135			       hostdata->msgin[2]);
1136		} else {
1137			if(hostdata->state != NCR_700_HOST_BUSY)
1138				printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n",
1139				       host->host_no);
1140			resume_offset = slot->resume_offset;
1141			hostdata->cmd = slot->cmnd;
1142
1143			/* re-patch for this command */
1144			script_patch_32_abs(hostdata->dev, hostdata->script,
1145			                    CommandAddress, slot->pCmd);
1146			script_patch_16(hostdata->dev, hostdata->script,
1147					CommandCount, slot->cmnd->cmd_len);
1148			script_patch_32_abs(hostdata->dev, hostdata->script,
1149			                    SGScriptStartAddress,
1150					    to32bit(&slot->pSG[0].ins));
1151
1152			/* Note: setting SXFER only works if we're
1153			 * still in the MESSAGE phase, so it is vital
1154			 * that ACK is still asserted when we process
1155			 * the reselection message.  The resume offset
1156			 * should therefore always clear ACK */
1157			NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
1158				       host, SXFER_REG);
1159			dma_cache_sync(hostdata->dev, hostdata->msgin,
1160				       MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
1161			dma_cache_sync(hostdata->dev, hostdata->msgout,
1162				       MSG_ARRAY_SIZE, DMA_TO_DEVICE);
1163			/* I'm just being paranoid here, the command should
1164			 * already have been flushed from the cache */
1165			dma_cache_sync(hostdata->dev, slot->cmnd->cmnd,
1166				       slot->cmnd->cmd_len, DMA_TO_DEVICE);
1167
1168
1169
1170		}
1171	} else if(dsps == A_RESELECTED_DURING_SELECTION) {
1172
1173		/* This section is full of debugging code because I've
1174		 * never managed to reach it.  I think what happens is
1175		 * that, because the 700 runs with selection
1176		 * interrupts enabled the whole time that we take a
1177		 * selection interrupt before we manage to get to the
1178		 * reselected script interrupt */
1179
1180		__u8 reselection_id = NCR_700_readb(host, SFBR_REG);
1181		struct NCR_700_command_slot *slot;
1182
1183		/* Take out our own ID */
1184		reselection_id &= ~(1<<host->this_id);
1185
1186		/* I've never seen this happen, so keep this as a printk rather
1187		 * than a debug */
1188		printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n",
1189		       host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count);
1190
1191		{
1192			/* FIXME: DEBUGGING CODE */
1193			__u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]);
1194			int i;
1195
1196			for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1197				if(SG >= to32bit(&hostdata->slots[i].pSG[0])
1198				   && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS]))
1199					break;
1200			}
1201			printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset);
1202			SCp =  hostdata->slots[i].cmnd;
1203		}
1204
1205		if(SCp != NULL) {
1206			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1207			/* change slot from busy to queued to redo command */
1208			slot->state = NCR_700_SLOT_QUEUED;
1209		}
1210		hostdata->cmd = NULL;
1211
1212		if(reselection_id == 0) {
1213			if(hostdata->reselection_id == 0xff) {
1214				printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no);
1215				return 0;
1216			} else {
1217				printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n",
1218				       host->host_no);
1219				reselection_id = hostdata->reselection_id;
1220			}
1221		} else {
1222
1223			/* convert to real ID */
1224			reselection_id = bitmap_to_number(reselection_id);
1225		}
1226		hostdata->reselection_id = reselection_id;
1227		/* just in case we have a stale simple tag message, clear it */
1228		hostdata->msgin[1] = 0;
1229		dma_cache_sync(hostdata->dev, hostdata->msgin,
1230			       MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
1231		if(hostdata->tag_negotiated & (1<<reselection_id)) {
1232			resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1233		} else {
1234			resume_offset = hostdata->pScript + Ent_GetReselectionData;
1235		}
1236	} else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) {
1237		/* we've just disconnected from the bus, do nothing since
1238		 * a return here will re-run the queued command slot
1239		 * that may have been interrupted by the initial selection */
1240		DEBUG((" SELECTION COMPLETED\n"));
1241	} else if((dsps & 0xfffff0f0) == A_MSG_IN) {
1242		resume_offset = process_message(host, hostdata, SCp,
1243						dsp, dsps);
1244	} else if((dsps &  0xfffff000) == 0) {
1245		__u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8;
1246		printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n",
1247		       host->host_no, pun, lun, NCR_700_condition[i],
1248		       NCR_700_phase[j], dsp - hostdata->pScript);
1249		if(SCp != NULL) {
1250			struct scatterlist *sg;
1251
1252			scsi_print_command(SCp);
1253			scsi_for_each_sg(SCp, sg, scsi_sg_count(SCp) + 1, i) {
1254				printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, sg->length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
1255			}
1256		}
1257		NCR_700_internal_bus_reset(host);
1258	} else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
1259		printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
1260		       host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript);
1261		resume_offset = dsp;
1262	} else {
1263		printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n",
1264		       host->host_no, pun, lun, dsps, dsp - hostdata->pScript);
1265		NCR_700_internal_bus_reset(host);
1266	}
1267	return resume_offset;
1268}
1269
1270/* We run the 53c700 with selection interrupts always enabled.  This
1271 * means that the chip may be selected as soon as the bus frees.  On a
1272 * busy bus, this can be before the scripts engine finishes its
1273 * processing.  Therefore, part of the selection processing has to be
1274 * to find out what the scripts engine is doing and complete the
1275 * function if necessary (i.e. process the pending disconnect or save
1276 * the interrupted initial selection */
1277STATIC inline __u32
1278process_selection(struct Scsi_Host *host, __u32 dsp)
1279{
1280	__u8 id = 0;	/* Squash compiler warning */
1281	int count = 0;
1282	__u32 resume_offset = 0;
1283	struct NCR_700_Host_Parameters *hostdata =
1284		(struct NCR_700_Host_Parameters *)host->hostdata[0];
1285	struct scsi_cmnd *SCp = hostdata->cmd;
1286	__u8 sbcl;
1287
1288	for(count = 0; count < 5; count++) {
1289		id = NCR_700_readb(host, hostdata->chip710 ?
1290				   CTEST9_REG : SFBR_REG);
1291
1292		/* Take out our own ID */
1293		id &= ~(1<<host->this_id);
1294		if(id != 0)
1295			break;
1296		udelay(5);
1297	}
1298	sbcl = NCR_700_readb(host, SBCL_REG);
1299	if((sbcl & SBCL_IO) == 0) {
1300		/* mark as having been selected rather than reselected */
1301		id = 0xff;
1302	} else {
1303		/* convert to real ID */
1304		hostdata->reselection_id = id = bitmap_to_number(id);
1305		DEBUG(("scsi%d:  Reselected by %d\n",
1306		       host->host_no, id));
1307	}
1308	if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) {
1309		struct NCR_700_command_slot *slot =
1310			(struct NCR_700_command_slot *)SCp->host_scribble;
1311		DEBUG(("  ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset));
1312
1313		switch(dsp - hostdata->pScript) {
1314		case Ent_Disconnect1:
1315		case Ent_Disconnect2:
1316			save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript);
1317			break;
1318		case Ent_Disconnect3:
1319		case Ent_Disconnect4:
1320			save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript);
1321			break;
1322		case Ent_Disconnect5:
1323		case Ent_Disconnect6:
1324			save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript);
1325			break;
1326		case Ent_Disconnect7:
1327		case Ent_Disconnect8:
1328			save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript);
1329			break;
1330		case Ent_Finish1:
1331		case Ent_Finish2:
1332			process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata);
1333			break;
1334
1335		default:
1336			slot->state = NCR_700_SLOT_QUEUED;
1337			break;
1338			}
1339	}
1340	hostdata->state = NCR_700_HOST_BUSY;
1341	hostdata->cmd = NULL;
1342	/* clear any stale simple tag message */
1343	hostdata->msgin[1] = 0;
1344	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
1345		       DMA_BIDIRECTIONAL);
1346
1347	if(id == 0xff) {
1348		/* Selected as target, Ignore */
1349		resume_offset = hostdata->pScript + Ent_SelectedAsTarget;
1350	} else if(hostdata->tag_negotiated & (1<<id)) {
1351		resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1352	} else {
1353		resume_offset = hostdata->pScript + Ent_GetReselectionData;
1354	}
1355	return resume_offset;
1356}
1357
1358static inline void
1359NCR_700_clear_fifo(struct Scsi_Host *host) {
1360	const struct NCR_700_Host_Parameters *hostdata
1361		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
1362	if(hostdata->chip710) {
1363		NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG);
1364	} else {
1365		NCR_700_writeb(CLR_FIFO, host, DFIFO_REG);
1366	}
1367}
1368
1369static inline void
1370NCR_700_flush_fifo(struct Scsi_Host *host) {
1371	const struct NCR_700_Host_Parameters *hostdata
1372		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
1373	if(hostdata->chip710) {
1374		NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG);
1375		udelay(10);
1376		NCR_700_writeb(0, host, CTEST8_REG);
1377	} else {
1378		NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG);
1379		udelay(10);
1380		NCR_700_writeb(0, host, DFIFO_REG);
1381	}
1382}
1383
1384
1385/* The queue lock with interrupts disabled must be held on entry to
1386 * this function */
1387STATIC int
1388NCR_700_start_command(struct scsi_cmnd *SCp)
1389{
1390	struct NCR_700_command_slot *slot =
1391		(struct NCR_700_command_slot *)SCp->host_scribble;
1392	struct NCR_700_Host_Parameters *hostdata =
1393		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1394	__u16 count = 1;	/* for IDENTIFY message */
1395	u8 lun = SCp->device->lun;
1396
1397	if(hostdata->state != NCR_700_HOST_FREE) {
1398		/* keep this inside the lock to close the race window where
1399		 * the running command finishes on another CPU while we don't
1400		 * change the state to queued on this one */
1401		slot->state = NCR_700_SLOT_QUEUED;
1402
1403		DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n",
1404		       SCp->device->host->host_no, slot->cmnd, slot));
1405		return 0;
1406	}
1407	hostdata->state = NCR_700_HOST_BUSY;
1408	hostdata->cmd = SCp;
1409	slot->state = NCR_700_SLOT_BUSY;
1410	/* keep interrupts disabled until we have the command correctly
1411	 * set up so we cannot take a selection interrupt */
1412
1413	hostdata->msgout[0] = NCR_700_identify((SCp->cmnd[0] != REQUEST_SENSE &&
1414						slot->flags != NCR_700_FLAG_AUTOSENSE),
1415					       lun);
1416	/* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
1417	 * if the negotiated transfer parameters still hold, so
1418	 * always renegotiate them */
1419	if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE ||
1420	   slot->flags == NCR_700_FLAG_AUTOSENSE) {
1421		NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
1422	}
1423
1424	/* REQUEST_SENSE is asking for contingent I_T_L(_Q) status.
1425	 * If a contingent allegiance condition exists, the device
1426	 * will refuse all tags, so send the request sense as untagged
1427	 * */
1428	if((hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1429	   && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE &&
1430	       slot->flags != NCR_700_FLAG_AUTOSENSE)) {
1431		count += spi_populate_tag_msg(&hostdata->msgout[count], SCp);
1432	}
1433
1434	if(hostdata->fast &&
1435	   NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) {
1436		count += spi_populate_sync_msg(&hostdata->msgout[count],
1437				spi_period(SCp->device->sdev_target),
1438				spi_offset(SCp->device->sdev_target));
1439		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1440	}
1441
1442	script_patch_16(hostdata->dev, hostdata->script, MessageCount, count);
1443
1444
1445	script_patch_ID(hostdata->dev, hostdata->script,
1446			Device_ID, 1<<scmd_id(SCp));
1447
1448	script_patch_32_abs(hostdata->dev, hostdata->script, CommandAddress,
1449			    slot->pCmd);
1450	script_patch_16(hostdata->dev, hostdata->script, CommandCount,
1451	                SCp->cmd_len);
1452	/* finally plumb the beginning of the SG list into the script
1453	 * */
1454	script_patch_32_abs(hostdata->dev, hostdata->script,
1455	                    SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
1456	NCR_700_clear_fifo(SCp->device->host);
1457
1458	if(slot->resume_offset == 0)
1459		slot->resume_offset = hostdata->pScript;
1460	/* now perform all the writebacks and invalidates */
1461	dma_cache_sync(hostdata->dev, hostdata->msgout, count, DMA_TO_DEVICE);
1462	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
1463		       DMA_FROM_DEVICE);
1464	dma_cache_sync(hostdata->dev, SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
1465	dma_cache_sync(hostdata->dev, hostdata->status, 1, DMA_FROM_DEVICE);
1466
1467	/* set the synchronous period/offset */
1468	NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
1469		       SCp->device->host, SXFER_REG);
1470	NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG);
1471	NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG);
1472
1473	return 1;
1474}
1475
1476irqreturn_t
1477NCR_700_intr(int irq, void *dev_id)
1478{
1479	struct Scsi_Host *host = (struct Scsi_Host *)dev_id;
1480	struct NCR_700_Host_Parameters *hostdata =
1481		(struct NCR_700_Host_Parameters *)host->hostdata[0];
1482	__u8 istat;
1483	__u32 resume_offset = 0;
1484	__u8 pun = 0xff, lun = 0xff;
1485	unsigned long flags;
1486	int handled = 0;
1487
1488	/* Use the host lock to serialise access to the 53c700
1489	 * hardware.  Note: In future, we may need to take the queue
1490	 * lock to enter the done routines.  When that happens, we
1491	 * need to ensure that for this driver, the host lock and the
1492	 * queue lock point to the same thing. */
1493	spin_lock_irqsave(host->host_lock, flags);
1494	if((istat = NCR_700_readb(host, ISTAT_REG))
1495	      & (SCSI_INT_PENDING | DMA_INT_PENDING)) {
1496		__u32 dsps;
1497		__u8 sstat0 = 0, dstat = 0;
1498		__u32 dsp;
1499		struct scsi_cmnd *SCp = hostdata->cmd;
1500		enum NCR_700_Host_State state;
1501
1502		handled = 1;
1503		state = hostdata->state;
1504		SCp = hostdata->cmd;
1505
1506		if(istat & SCSI_INT_PENDING) {
1507			udelay(10);
1508
1509			sstat0 = NCR_700_readb(host, SSTAT0_REG);
1510		}
1511
1512		if(istat & DMA_INT_PENDING) {
1513			udelay(10);
1514
1515			dstat = NCR_700_readb(host, DSTAT_REG);
1516		}
1517
1518		dsps = NCR_700_readl(host, DSPS_REG);
1519		dsp = NCR_700_readl(host, DSP_REG);
1520
1521		DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n",
1522		       host->host_no, istat, sstat0, dstat,
1523		       (dsp - (__u32)(hostdata->pScript))/4,
1524		       dsp, dsps));
1525
1526		if(SCp != NULL) {
1527			pun = SCp->device->id;
1528			lun = SCp->device->lun;
1529		}
1530
1531		if(sstat0 & SCSI_RESET_DETECTED) {
1532			struct scsi_device *SDp;
1533			int i;
1534
1535			hostdata->state = NCR_700_HOST_BUSY;
1536
1537			printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n",
1538			       host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript);
1539
1540			scsi_report_bus_reset(host, 0);
1541
1542			/* clear all the negotiated parameters */
1543			__shost_for_each_device(SDp, host)
1544				NCR_700_clear_flag(SDp, ~0);
1545
1546			/* clear all the slots and their pending commands */
1547			for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1548				struct scsi_cmnd *SCp;
1549				struct NCR_700_command_slot *slot =
1550					&hostdata->slots[i];
1551
1552				if(slot->state == NCR_700_SLOT_FREE)
1553					continue;
1554
1555				SCp = slot->cmnd;
1556				printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n",
1557				       slot, SCp);
1558				free_slot(slot, hostdata);
1559				SCp->host_scribble = NULL;
1560				NCR_700_set_depth(SCp->device, 0);
1561				/* NOTE: deadlock potential here: we
1562				 * rely on mid-layer guarantees that
1563				 * scsi_done won't try to issue the
1564				 * command again otherwise we'll
1565				 * deadlock on the
1566				 * hostdata->state_lock */
1567				SCp->result = DID_RESET << 16;
1568				SCp->scsi_done(SCp);
1569			}
1570			mdelay(25);
1571			NCR_700_chip_setup(host);
1572
1573			hostdata->state = NCR_700_HOST_FREE;
1574			hostdata->cmd = NULL;
1575			/* signal back if this was an eh induced reset */
1576			if(hostdata->eh_complete != NULL)
1577				complete(hostdata->eh_complete);
1578			goto out_unlock;
1579		} else if(sstat0 & SELECTION_TIMEOUT) {
1580			DEBUG(("scsi%d: (%d:%d) selection timeout\n",
1581			       host->host_no, pun, lun));
1582			NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16);
1583		} else if(sstat0 & PHASE_MISMATCH) {
1584			struct NCR_700_command_slot *slot = (SCp == NULL) ? NULL :
1585				(struct NCR_700_command_slot *)SCp->host_scribble;
1586
1587			if(dsp == Ent_SendMessage + 8 + hostdata->pScript) {
1588				/* The target wants to reply to some
1589				 * part of our message */
1590#ifdef NCR_700_DEBUG
1591				__u32 temp = NCR_700_readl(host, TEMP_REG);
1592				int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host));
1593				printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1594#endif
1595				resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
1596			} else if(dsp >= to32bit(&slot->pSG[0].ins) &&
1597				  dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
1598				int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
1599				int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
1600				int residual = NCR_700_data_residual(host);
1601				int i;
1602#ifdef NCR_700_DEBUG
1603				__u32 naddr = NCR_700_readl(host, DNAD_REG);
1604
1605				printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n",
1606				       host->host_no, pun, lun,
1607				       SGcount, data_transfer);
1608				scsi_print_command(SCp);
1609				if(residual) {
1610					printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n",
1611				       host->host_no, pun, lun,
1612				       SGcount, data_transfer, residual);
1613				}
1614#endif
1615				data_transfer += residual;
1616
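				/* The interrupted MOVE originally asked for
				 * `count' bytes; `data_transfer' is what is
				 * still outstanding (the DBC residue plus
				 * whatever is left in the chip FIFOs).  Shrink
				 * the instruction to the outstanding length
				 * and advance its address past the bytes that
				 * were actually transferred so the script can
				 * be resumed cleanly. */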
1617				if(data_transfer != 0) {
1618					int count;
1619					__u32 pAddr;
1620
1621					SGcount--;
1622
1623					count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff);
1624					DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer));
1625					slot->SG[SGcount].ins &= bS_to_host(0xff000000);
1626					slot->SG[SGcount].ins |= bS_to_host(data_transfer);
1627					pAddr = bS_to_cpu(slot->SG[SGcount].pAddr);
1628					pAddr += (count - data_transfer);
1629#ifdef NCR_700_DEBUG
1630					if(pAddr != naddr) {
1631						printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual);
1632					}
1633#endif
1634					slot->SG[SGcount].pAddr = bS_to_host(pAddr);
1635				}
1636				/* set the executed moves to nops */
1637				for(i=0; i<SGcount; i++) {
1638					slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
1639					slot->SG[i].pAddr = 0;
1640				}
1641				dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1642				/* and pretend we disconnected after
1643				 * the command phase */
1644				resume_offset = hostdata->pScript + Ent_MsgInDuringData;
1645				/* make sure all the data is flushed */
1646				NCR_700_flush_fifo(host);
1647			} else {
1648				__u8 sbcl = NCR_700_readb(host, SBCL_REG);
1649				printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n",
1650				       host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl));
1651				NCR_700_internal_bus_reset(host);
1652			}
1653
1654		} else if(sstat0 & SCSI_GROSS_ERROR) {
1655			printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n",
1656			       host->host_no, pun, lun);
1657			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1658		} else if(sstat0 & PARITY_ERROR) {
1659			printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n",
1660			       host->host_no, pun, lun);
1661			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1662		} else if(dstat & SCRIPT_INT_RECEIVED) {
1663			DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n",
1664			       host->host_no, pun, lun));
1665			resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata);
1666		} else if(dstat & (ILGL_INST_DETECTED)) {
1667			printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n"
1668			       "         Please email James.Bottomley@HansenPartnership.com with the details\n",
1669			       host->host_no, pun, lun,
1670			       dsp, dsp - hostdata->pScript);
1671			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1672		} else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) {
1673			printk(KERN_ERR "scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n",
1674			       host->host_no, pun, lun, dstat);
1675			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1676		}
1677
1678
1679		/* NOTE: selection interrupt processing MUST occur
1680		 * after script interrupt processing to correctly cope
1681		 * with the case where we get disconnected and then
1682		 * reselected before the disconnection has been
1683		 * processed */
1684		if(sstat0 & SELECTED) {
1685			/* FIXME: It currently takes at least FOUR
1686			 * interrupts to complete a command that
1687			 * disconnects: one for the disconnect, one
1688			 * for the reselection, one to get the
1689			 * reselection data and one to complete the
1690			 * command.  If we guess the reselected
1691			 * command here and prepare it, we only need
1692			 * to get a reselection data interrupt if we
1693			 * guessed wrongly.  Since the interrupt
1694			 * overhead is much greater than the command
1695			 * setup, this would be an efficient
1696			 * optimisation particularly as we probably
1697			 * only have one outstanding command on a
1698			 * target most of the time */
1699
1700			resume_offset = process_selection(host, dsp);
1701
1702		}
1703
1704	}
1705
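	/* If any of the handlers above left us a script address to
	 * continue from, clear the FIFOs and restart the script
	 * engine by writing the new DSP. */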
1706	if(resume_offset) {
1707		if(hostdata->state != NCR_700_HOST_BUSY) {
1708			printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n",
1709			       host->host_no, resume_offset, resume_offset - hostdata->pScript);
1710			hostdata->state = NCR_700_HOST_BUSY;
1711		}
1712
1713		DEBUG(("Attempting to resume at %x\n", resume_offset));
1714		NCR_700_clear_fifo(host);
1715		NCR_700_writel(resume_offset, host, DSP_REG);
1716	}
1717	/* This is probably a technical no-no: if we're sharing the
1718	 * interrupt line and this interrupt was actually raised for
1719	 * the other device, we still go on to check our queued
1720	 * commands here---of course, there shouldn't be any
1721	 * outstanding in that case. */
1722	if(hostdata->state == NCR_700_HOST_FREE) {
1723		int i;
1724
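		/* The host is free, so scan the slot table for a
		 * queued command and start it; at most one command is
		 * issued per pass, the rest stay queued until the
		 * host is next free. */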
1725		for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1726			/* fairness: always restart the scan from where
1727			 * we left off last time */
1728			int j = (i + hostdata->saved_slot_position)
1729				% NCR_700_COMMAND_SLOTS_PER_HOST;
1730
1731			if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
1732				continue;
1733			if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
1734				DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n",
1735				       host->host_no, &hostdata->slots[j],
1736				       hostdata->slots[j].cmnd));
1737				hostdata->saved_slot_position = j + 1;
1738			}
1739
1740			break;
1741		}
1742	}
1743 out_unlock:
1744	spin_unlock_irqrestore(host->host_lock, flags);
1745	return IRQ_RETVAL(handled);
1746}
1747
1748static int
1749NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
1750{
1751	struct NCR_700_Host_Parameters *hostdata =
1752		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1753	__u32 move_ins;
1754	enum dma_data_direction direction;
1755	struct NCR_700_command_slot *slot;
1756
1757	if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
1758		/* We're over our allocation; this should never happen
1759		 * since we report the max allocation to the mid layer */
1760		printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no);
1761		return 1;
1762	}
1763	/* check for untagged commands.  We can only accept an untagged
1764	 * command when no other commands are outstanding.  A command
1765	 * may be untagged because:
1766	 * - The tag negotiated bitmap for this target is clear, or
1767	 * - The blk layer sent an untagged command
1768	 */
1769	if(NCR_700_get_depth(SCp->device) != 0
1770	   && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1771	       || !(SCp->flags & SCMD_TAGGED))) {
1772		CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n",
1773		       NCR_700_get_depth(SCp->device));
1774		return SCSI_MLQUEUE_DEVICE_BUSY;
1775	}
1776	if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
1777		CDEBUG(KERN_ERR, SCp, "has max tag depth %d\n",
1778		       NCR_700_get_depth(SCp->device));
1779		return SCSI_MLQUEUE_DEVICE_BUSY;
1780	}
1781	NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);
1782
1783	/* begin the command here */
1784	/* no need to check for NULL: the command_slot_count test above
1785	 * ensures a slot is free */
1786	slot = find_empty_slot(hostdata);
1787
1788	slot->cmnd = SCp;
1789
1790	SCp->scsi_done = done;
1791	SCp->host_scribble = (unsigned char *)slot;
1792	SCp->SCp.ptr = NULL;
1793	SCp->SCp.buffer = NULL;
1794
1795#ifdef NCR_700_DEBUG
1796	printk("53c700: scsi%d, command ", SCp->device->host->host_no);
1797	scsi_print_command(SCp);
1798#endif
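	/* First tagged command to a target that has not yet negotiated
	 * TCQ: flag the target in tag_negotiated and mark the device
	 * as mid-negotiation; the tag assignment below will then
	 * attach queue tags to its commands. */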
1799	if ((SCp->flags & SCMD_TAGGED)
1800	   && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0
1801	   && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
1802		scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n");
1803		hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1804		NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION);
1805	}
1806
1807	/* here we may have to process an untagged command.  The gate
1808	 * above ensures that this will be the only one outstanding,
1809	 * so clear the tag negotiated bit.
1810	 *
1811	 * FIXME: This will royally screw up on multiple LUN devices
1812	 * */
1813	if (!(SCp->flags & SCMD_TAGGED)
1814	   && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) {
1815		scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n");
1816		hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1817	}
1818
1819	if ((hostdata->tag_negotiated & (1<<scmd_id(SCp))) &&
1820	    SCp->device->simple_tags) {
1821		slot->tag = SCp->request->tag;
1822		CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
1823		       slot->tag, slot);
1824	} else {
1825		slot->tag = SCSI_NO_TAG;
1826		/* must populate current_cmnd for scsi_find_tag to work */
1827		SCp->device->current_cmnd = SCp;
1828	}
1829	/* sanity check: some of the commands generated by the mid-layer
1830	 * have an eccentric idea of their sc_data_direction */
1831	if(!scsi_sg_count(SCp) && !scsi_bufflen(SCp) &&
1832	   SCp->sc_data_direction != DMA_NONE) {
1833#ifdef NCR_700_DEBUG
1834		printk("53c700: Command");
1835		scsi_print_command(SCp);
1836		printk("Has wrong data direction %d\n", SCp->sc_data_direction);
1837#endif
1838		SCp->sc_data_direction = DMA_NONE;
1839	}
1840
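	/* Pick the SCRIPTS data-move opcode from the command's data
	 * direction; commands that move no data get no MOVE
	 * instruction at all (move_ins == 0). */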
1841	switch (SCp->cmnd[0]) {
1842	case REQUEST_SENSE:
1843		/* clear the internal sense magic */
1844		SCp->cmnd[6] = 0;
1845		/* fall through */
1846	default:
1847		/* OK, get it from the command */
1848		switch(SCp->sc_data_direction) {
1849		case DMA_BIDIRECTIONAL:
1850		default:
1851			printk(KERN_ERR "53c700: Unknown command for data direction ");
1852			scsi_print_command(SCp);
1853
1854			move_ins = 0;
1855			break;
1856		case DMA_NONE:
1857			move_ins = 0;
1858			break;
1859		case DMA_FROM_DEVICE:
1860			move_ins = SCRIPT_MOVE_DATA_IN;
1861			break;
1862		case DMA_TO_DEVICE:
1863			move_ins = SCRIPT_MOVE_DATA_OUT;
1864			break;
1865		}
1866	}
1867
1868	/* now build the scatter gather list */
1869	direction = SCp->sc_data_direction;
1870	if(move_ins != 0) {
1871		int i;
1872		int sg_count;
1873		dma_addr_t vPtr = 0;
1874		struct scatterlist *sg;
1875		__u32 count = 0;
1876
1877		sg_count = scsi_dma_map(SCp);
1878		BUG_ON(sg_count < 0);
1879
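		/* Each scatterlist entry becomes one in-memory SCRIPTS
		 * MOVE instruction (opcode | byte count, plus the DMA
		 * address of the segment), terminated by a RETURN so
		 * the script hands control back once the data phase
		 * finishes. */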
1880		scsi_for_each_sg(SCp, sg, sg_count, i) {
1881			vPtr = sg_dma_address(sg);
1882			count = sg_dma_len(sg);
1883
1884			slot->SG[i].ins = bS_to_host(move_ins | count);
1885			DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
1886			       i, count, slot->SG[i].ins, (unsigned long)vPtr));
1887			slot->SG[i].pAddr = bS_to_host(vPtr);
1888		}
1889		slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
1890		slot->SG[i].pAddr = 0;
1891		dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1892		DEBUG((" SETTING %08lx to %x\n",
1893		       (&slot->pSG[i].ins),
1894		       slot->SG[i].ins));
1895	}
1896	slot->resume_offset = 0;
1897	slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
1898				    MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1899	NCR_700_start_command(SCp);
1900	return 0;
1901}
1902
1903STATIC DEF_SCSI_QCMD(NCR_700_queuecommand)
1904
1905STATIC int
1906NCR_700_abort(struct scsi_cmnd * SCp)
1907{
1908	struct NCR_700_command_slot *slot;
1909
1910	scmd_printk(KERN_INFO, SCp, "abort command\n");
1911
1912	slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1913
1914	if(slot == NULL)
1915		/* no outstanding command to abort */
1916		return SUCCESS;
1917	if(SCp->cmnd[0] == TEST_UNIT_READY) {
1918		/* FIXME: This is because of a problem in the new
1919		 * error handler.  When it is in error recovery, it
1920		 * will send a TUR to a device it thinks may still be
1921		 * showing a problem.  If the TUR isn't responded to,
1922		 * it will abort it and mark the device off line.
1923		 * Unfortunately, it does no other error recovery, so
1924		 * this would leave us with an outstanding command
1925		 * occupying a slot.  Rather than allow this to
1926		 * happen, we issue a bus reset to force all
1927		 * outstanding commands to terminate here. */
1928		NCR_700_internal_bus_reset(SCp->device->host);
1929		/* still drop through and return failed */
1930	}
1931	return FAILED;
1932
1933}
1934
1935STATIC int
1936NCR_700_bus_reset(struct scsi_cmnd * SCp)
1937{
1938	DECLARE_COMPLETION_ONSTACK(complete);
1939	struct NCR_700_Host_Parameters *hostdata =
1940		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1941
1942	scmd_printk(KERN_INFO, SCp,
1943		"New error handler wants BUS reset, cmd %p\n\t", SCp);
1944	scsi_print_command(SCp);
1945
1946	/* In theory, eh_complete should always be NULL because the
1947	 * eh is single threaded, but just in case we're handling a
1948	 * reset via sg or something */
1949	spin_lock_irq(SCp->device->host->host_lock);
1950	while (hostdata->eh_complete != NULL) {
1951		spin_unlock_irq(SCp->device->host->host_lock);
1952		msleep_interruptible(100);
1953		spin_lock_irq(SCp->device->host->host_lock);
1954	}
1955
1956	hostdata->eh_complete = &complete;
1957	NCR_700_internal_bus_reset(SCp->device->host);
1958
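	/* The interrupt handler completes eh_complete once it sees the
	 * reset on the bus and has failed back all outstanding
	 * commands, so drop the lock and wait for that to happen. */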
1959	spin_unlock_irq(SCp->device->host->host_lock);
1960	wait_for_completion(&complete);
1961	spin_lock_irq(SCp->device->host->host_lock);
1962
1963	hostdata->eh_complete = NULL;
1964	/* Revalidate the transport parameters of the failing device */
1965	if(hostdata->fast)
1966		spi_schedule_dv_device(SCp->device);
1967
1968	spin_unlock_irq(SCp->device->host->host_lock);
1969	return SUCCESS;
1970}
1971
1972STATIC int
1973NCR_700_host_reset(struct scsi_cmnd * SCp)
1974{
1975	scmd_printk(KERN_INFO, SCp, "New error handler wants HOST reset\n\t");
1976	scsi_print_command(SCp);
1977
1978	spin_lock_irq(SCp->device->host->host_lock);
1979
1980	NCR_700_internal_bus_reset(SCp->device->host);
1981	NCR_700_chip_reset(SCp->device->host);
1982
1983	spin_unlock_irq(SCp->device->host->host_lock);
1984
1985	return SUCCESS;
1986}
1987
1988STATIC void
1989NCR_700_set_period(struct scsi_target *STp, int period)
1990{
1991	struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
1992	struct NCR_700_Host_Parameters *hostdata =
1993		(struct NCR_700_Host_Parameters *)SHp->hostdata[0];
1994
1995	if(!hostdata->fast)
1996		return;
1997
1998	if(period < hostdata->min_period)
1999		period = hostdata->min_period;
2000
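	/* Record the new period and clear the negotiated/in-progress
	 * sync flags so that sync transfer is renegotiated with the
	 * new value. */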
2001	spi_period(STp) = period;
2002	spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2003			    NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2004	spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2005}
2006
2007STATIC void
2008NCR_700_set_offset(struct scsi_target *STp, int offset)
2009{
2010	struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
2011	struct NCR_700_Host_Parameters *hostdata =
2012		(struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2013	int max_offset = hostdata->chip710
2014		? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET;
2015
2016	if(!hostdata->fast)
2017		return;
2018
2019	if(offset > max_offset)
2020		offset = max_offset;
2021
2022	/* if we're currently async, make sure the period is reasonable */
2023	if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period ||
2024				    spi_period(STp) > 0xff))
2025		spi_period(STp) = hostdata->min_period;
2026
2027	spi_offset(STp) = offset;
2028	spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2029			    NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2030	spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2031}
2032
2033STATIC int
2034NCR_700_slave_alloc(struct scsi_device *SDp)
2035{
2036	SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters),
2037				GFP_KERNEL);
2038
2039	if (!SDp->hostdata)
2040		return -ENOMEM;
2041
2042	return 0;
2043}
2044
2045STATIC int
2046NCR_700_slave_configure(struct scsi_device *SDp)
2047{
2048	struct NCR_700_Host_Parameters *hostdata =
2049		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2050
2051	/* to do here: allocate memory; build a queue_full list */
2052	if(SDp->tagged_supported) {
2053		scsi_change_queue_depth(SDp, NCR_700_DEFAULT_TAGS);
2054		NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2055	}
2056
2057	if(hostdata->fast) {
2058		/* Find the correct offset and period via domain validation */
2059		if (!spi_initial_dv(SDp->sdev_target))
2060			spi_dv_device(SDp);
2061	} else {
2062		spi_offset(SDp->sdev_target) = 0;
2063		spi_period(SDp->sdev_target) = 0;
2064	}
2065	return 0;
2066}
2067
2068STATIC void
2069NCR_700_slave_destroy(struct scsi_device *SDp)
2070{
2071	kfree(SDp->hostdata);
2072	SDp->hostdata = NULL;
2073}
2074
2075static int
2076NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
2077{
2078	if (depth > NCR_700_MAX_TAGS)
2079		depth = NCR_700_MAX_TAGS;
2080	return scsi_change_queue_depth(SDp, depth);
2081}
2082
2083static ssize_t
2084NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
2085{
2086	struct scsi_device *SDp = to_scsi_device(dev);
2087
2088	return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp));
2089}
2090
2091static struct device_attribute NCR_700_active_tags_attr = {
2092	.attr = {
2093		.name =		"active_tags",
2094		.mode =		S_IRUGO,
2095	},
2096	.show = NCR_700_show_active_tags,
2097};
2098
2099STATIC struct device_attribute *NCR_700_dev_attrs[] = {
2100	&NCR_700_active_tags_attr,
2101	NULL,
2102};
2103
2104EXPORT_SYMBOL(NCR_700_detect);
2105EXPORT_SYMBOL(NCR_700_release);
2106EXPORT_SYMBOL(NCR_700_intr);
2107
2108static struct spi_function_template NCR_700_transport_functions =  {
2109	.set_period	= NCR_700_set_period,
2110	.show_period	= 1,
2111	.set_offset	= NCR_700_set_offset,
2112	.show_offset	= 1,
2113};
2114
2115static int __init NCR_700_init(void)
2116{
2117	NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions);
2118	if(!NCR_700_transport_template)
2119		return -ENODEV;
2120	return 0;
2121}
2122
2123static void __exit NCR_700_exit(void)
2124{
2125	spi_release_transport(NCR_700_transport_template);
2126}
2127
2128module_init(NCR_700_init);
2129module_exit(NCR_700_exit);
2130
2131