1/* virthba.c
2 *
3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
4 * All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT.  See the GNU General Public License for more
15 * details.
16 */
17
18#define EXPORT_SYMTAB
19
20/* if you want to turn on some debugging of write device data or read
21 * device data, define these two undefs.  You will probably want to
22 * customize the code which is here since it was written assuming
23 * reading and writing a specific data file df.64M.txt which is a
 * 64Megabyte file created by Art Nilson using a script I wrote called
25 * cr_test_data.pl.  The data file consists of 256 byte lines of text
26 * which start with an 8 digit sequence number, a colon, and then
27 * letters after that */
28
29#include <linux/kernel.h>
30#ifdef CONFIG_MODVERSIONS
31#include <config/modversions.h>
32#endif
33
34#include "diagnostics/appos_subsystems.h"
35#include "uisutils.h"
36#include "uisqueue.h"
37#include "uisthread.h"
38
39#include <linux/module.h>
40#include <linux/init.h>
41#include <linux/pci.h>
42#include <linux/spinlock.h>
43#include <linux/device.h>
44#include <linux/slab.h>
45#include <scsi/scsi.h>
46#include <scsi/scsi_host.h>
47#include <scsi/scsi_cmnd.h>
48#include <scsi/scsi_device.h>
49#include <asm/param.h>
50#include <linux/debugfs.h>
51#include <linux/types.h>
52
53#include "virthba.h"
54#include "virtpci.h"
55#include "visorchipset.h"
56#include "version.h"
57#include "guestlinuxdebug.h"
58/* this is shorter than using __FILE__ (full path name) in
59 * debug/info/error messages
60 */
61#define CURRENT_FILE_PC VIRT_HBA_PC_virthba_c
62#define __MYFILE__ "virthba.c"
63
64/* NOTE:  L1_CACHE_BYTES >=128 */
65#define DEVICE_ATTRIBUTE struct device_attribute
66
67 /* MAX_BUF = 6 lines x 10 MAXVHBA x 80 characters
68 *         = 4800 bytes ~ 2^13 = 8192 bytes
69 */
70#define MAX_BUF 8192
71
72/*****************************************************/
73/* Forward declarations                              */
74/*****************************************************/
75static int virthba_probe(struct virtpci_dev *dev,
76			 const struct pci_device_id *id);
77static void virthba_remove(struct virtpci_dev *dev);
78static int virthba_abort_handler(struct scsi_cmnd *scsicmd);
79static int virthba_bus_reset_handler(struct scsi_cmnd *scsicmd);
80static int virthba_device_reset_handler(struct scsi_cmnd *scsicmd);
81static int virthba_host_reset_handler(struct scsi_cmnd *scsicmd);
82static const char *virthba_get_info(struct Scsi_Host *shp);
83static int virthba_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
84static int virthba_queue_command_lck(struct scsi_cmnd *scsicmd,
85				     void (*virthba_cmnd_done)
86					   (struct scsi_cmnd *));
87
/* CPU match table: restricts module autoloading to the Intel family 6,
 * model 62 processors on which the s-Par firmware environment runs.
 */
static const struct x86_cpu_id unisys_spar_ids[] = {
	{ X86_VENDOR_INTEL, 6, 62, X86_FEATURE_ANY },
	{}
};

/* Autoload */
MODULE_DEVICE_TABLE(x86cpu, unisys_spar_ids);
95
/* On kernels that provide DEF_SCSI_QCMD, generate the standard wrapper
 * that takes the host lock around virthba_queue_command_lck; older
 * kernels call the _lck variant directly.
 */
#ifdef DEF_SCSI_QCMD
static DEF_SCSI_QCMD(virthba_queue_command)
#else
#define virthba_queue_command virthba_queue_command_lck
#endif
101
/* Remaining forward declarations: slave hooks, response thread,
 * server pause/resume callbacks, and debugfs file operations.
 */
static int virthba_slave_alloc(struct scsi_device *scsidev);
static int virthba_slave_configure(struct scsi_device *scsidev);
static void virthba_slave_destroy(struct scsi_device *scsidev);
static int process_incoming_rsps(void *);
static int virthba_serverup(struct virtpci_dev *virtpcidev);
static int virthba_serverdown(struct virtpci_dev *virtpcidev, u32 state);
static void do_disk_add_remove(struct work_struct *work);
static void virthba_serverdown_complete(struct work_struct *work);
static ssize_t info_debugfs_read(struct file *file, char __user *buf,
				 size_t len, loff_t *offset);
static ssize_t enable_ints_write(struct file *file,
				 const char __user *buffer, size_t count,
				 loff_t *ppos);
115
116/*****************************************************/
117/* Globals                                           */
118/*****************************************************/
119
static int rsltq_wait_usecs = 4000;	/* Default 4ms */
static unsigned int max_buff_len;	/* largest scsi buffer length seen */

/* Module options */
static char *virthba_options = "NONE";

/* PCI ids this driver binds to on the virtual PCI bus */
static const struct pci_device_id virthba_id_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_VIRTHBA)},
	{0},
};

/* export virthba_id_table */
MODULE_DEVICE_TABLE(pci, virthba_id_table);

/* workqueue used to run the serverdown completion out of irq context */
static struct workqueue_struct *virthba_serverdown_workqueue;
135
/* Registration record handed to the virtpci bus driver; suspend/resume
 * double as server-down/server-up notifications.
 */
static struct virtpci_driver virthba_driver = {
	.name = "uisvirthba",
	.version = VERSION,
	.vertag = NULL,
	.id_table = virthba_id_table,
	.probe = virthba_probe,
	.remove = virthba_remove,
	.resume = virthba_serverup,
	.suspend = virthba_serverdown
};
146
/* The Send and Receive Buffers of the IO Queue may both be full */
148#define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS*2)
149#define INTERRUPT_VECTOR_MASK 0x3F
150
/* One slot in the per-host table of requests forwarded to the IOVM and
 * not yet completed; the slot index is what gets sent over the channel
 * and later used to recover the original pointer.
 */
struct scsipending {
	char cmdtype;		/* Type of pointer that is being stored */
	void *sent;		/* The Data being tracked */
	/* struct scsi_cmnd *type for virthba_queue_command */
	/* struct uiscmdrsp *type for management commands */
};
157
#define VIRTHBA_ERROR_COUNT 30
#define IOS_ERROR_THRESHOLD 1000
/* Per-disk node on the singly-linked list anchored at virthba_info.head;
 * carries the error/ios counters consulted by the error handlers.
 */
struct virtdisk_info {
	u32 valid;		/* nonzero once the entry is populated */
	u32 channel, id, lun;	/* Disk Path */
	atomic_t ios_threshold;
	atomic_t error_count;
	struct virtdisk_info *next;
};
167
/* Each Scsi_Host has a host_data area that contains this struct. */
struct virthba_info {
	struct Scsi_Host *scsihost;
	struct virtpci_dev *virtpcidev;
	struct list_head dev_info_list;
	struct chaninfo chinfo;
	struct irq_info intr;		/* use recvInterrupt info to receive
					   interrupts when IOs complete */
	int interrupt_vector;		/* -1 when no irq is registered */
	struct scsipending pending[MAX_PENDING_REQUESTS]; /* Tracks the requests
							     that have been */
	/* forwarded to the IOVM and haven't returned yet */
	unsigned int nextinsert;	/* Start search for next pending
					   free slot here */
	spinlock_t privlock;		/* protects pending[] and nextinsert */
	bool serverdown;
	bool serverchangingstate;
	unsigned long long acquire_failed_cnt;
	unsigned long long interrupts_rcvd;
	unsigned long long interrupts_notme;
	unsigned long long interrupts_disabled;
	struct work_struct serverdown_completion;
	u64 __iomem *flags_addr;	/* channel features word, set in probe */
	atomic_t interrupt_rcvd;
	wait_queue_head_t rsp_queue;	/* woken by the ISR; response thread
					   sleeps here */
	struct virtdisk_info head;	/* sentinel head of the vdisk list */
};
195
/* Work Data for dar_work_queue */
struct diskaddremove {
	u8 add;			/* 0-remove, 1-add */
	struct Scsi_Host *shost; /* Scsi Host for this virthba instance */
	u32 channel, id, lun;	/* Disk Path */
	struct diskaddremove *next;
};

/* NOTE(review): virtpcidev is a pointer member of struct virthba_info,
 * so container_of() on it would be incorrect if this macro were ever
 * used; it appears to have no callers in this file - confirm.
 */
#define virtpci_dev_to_virthba_virthba_get_info(d) \
	container_of(d, struct virthba_info, virtpcidev)
206
static DEVICE_ATTRIBUTE *virthba_shost_attrs[];
/* SCSI midlayer host template: entry points the midlayer invokes for
 * command queueing, error handling, and slave lifecycle.
 */
static struct scsi_host_template virthba_driver_template = {
	.name = "Unisys Virtual HBA",
	.info = virthba_get_info,
	.ioctl = virthba_ioctl,
	.queuecommand = virthba_queue_command,
	.eh_abort_handler = virthba_abort_handler,
	.eh_device_reset_handler = virthba_device_reset_handler,
	.eh_bus_reset_handler = virthba_bus_reset_handler,
	.eh_host_reset_handler = virthba_host_reset_handler,
	.shost_attrs = virthba_shost_attrs,

#define VIRTHBA_MAX_CMNDS 128
	.can_queue = VIRTHBA_MAX_CMNDS,
	.sg_tablesize = 64,	/* largest number of address/length pairs */
	.this_id = -1,
	.slave_alloc = virthba_slave_alloc,
	.slave_configure = virthba_slave_configure,
	.slave_destroy = virthba_slave_destroy,
	.use_clustering = ENABLE_CLUSTERING,
};
228
/* Wrapper recording one open virthba instance for debugfs reporting */
struct virthba_devices_open {
	struct virthba_info *virthbainfo;
};

/* debugfs "info": read-only statistics dump */
static const struct file_operations debugfs_info_fops = {
	.read = info_debugfs_read,
};

/* debugfs "enable_ints": write-only interrupt-enable control */
static const struct file_operations debugfs_enable_ints_fops = {
	.write = enable_ints_write,
};

/*****************************************************/
/* Structs                                           */
/*****************************************************/

#define VIRTHBASOPENMAX 1
/* array of open devices maintained by open() and close(); */
static struct virthba_devices_open virthbas_open[VIRTHBASOPENMAX];
static struct dentry *virthba_debugfs_dir;
249
250/*****************************************************/
251/* Local Functions				     */
252/*****************************************************/
253static int
254add_scsipending_entry(struct virthba_info *vhbainfo, char cmdtype, void *new)
255{
256	unsigned long flags;
257	int insert_location;
258
259	spin_lock_irqsave(&vhbainfo->privlock, flags);
260	insert_location = vhbainfo->nextinsert;
261	while (vhbainfo->pending[insert_location].sent) {
262		insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
263		if (insert_location == (int)vhbainfo->nextinsert) {
264			spin_unlock_irqrestore(&vhbainfo->privlock, flags);
265			return -1;
266		}
267	}
268
269	vhbainfo->pending[insert_location].cmdtype = cmdtype;
270	vhbainfo->pending[insert_location].sent = new;
271	vhbainfo->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
272	spin_unlock_irqrestore(&vhbainfo->privlock, flags);
273
274	return insert_location;
275}
276
277static unsigned int
278add_scsipending_entry_with_wait(struct virthba_info *vhbainfo, char cmdtype,
279				void *new)
280{
281	int insert_location = add_scsipending_entry(vhbainfo, cmdtype, new);
282
283	while (insert_location == -1) {
284		set_current_state(TASK_INTERRUPTIBLE);
285		schedule_timeout(msecs_to_jiffies(10));
286		insert_location = add_scsipending_entry(vhbainfo, cmdtype, new);
287	}
288
289	return (unsigned int)insert_location;
290}
291
292static void *
293del_scsipending_entry(struct virthba_info *vhbainfo, uintptr_t del)
294{
295	unsigned long flags;
296	void *sent = NULL;
297
298	if (del < MAX_PENDING_REQUESTS) {
299		spin_lock_irqsave(&vhbainfo->privlock, flags);
300		sent = vhbainfo->pending[del].sent;
301
302		vhbainfo->pending[del].cmdtype = 0;
303		vhbainfo->pending[del].sent = NULL;
304		spin_unlock_irqrestore(&vhbainfo->privlock, flags);
305	}
306
307	return sent;
308}
309
310/* dar_work_queue (Disk Add/Remove) */
311static struct work_struct dar_work_queue;
312static struct diskaddremove *dar_work_queue_head;
313static spinlock_t dar_work_queue_lock;
314static unsigned short dar_work_queue_sched;
315#define QUEUE_DISKADDREMOVE(dar) { \
316	spin_lock_irqsave(&dar_work_queue_lock, flags); \
317	if (!dar_work_queue_head) { \
318		dar_work_queue_head = dar; \
319		dar->next = NULL; \
320	} \
321	else { \
322		dar->next = dar_work_queue_head; \
323		dar_work_queue_head = dar; \
324	} \
325	if (!dar_work_queue_sched) { \
326		schedule_work(&dar_work_queue); \
327		dar_work_queue_sched = 1; \
328	} \
329	spin_unlock_irqrestore(&dar_work_queue_lock, flags); \
330}
331
332static inline void
333send_disk_add_remove(struct diskaddremove *dar)
334{
335	struct scsi_device *sdev;
336	int error;
337
338	sdev = scsi_device_lookup(dar->shost, dar->channel, dar->id, dar->lun);
339	if (sdev) {
340		if (!(dar->add))
341			scsi_remove_device(sdev);
342	} else if (dar->add) {
343		error =
344		    scsi_add_device(dar->shost, dar->channel, dar->id,
345				    dar->lun);
346	}
347	kfree(dar);
348}
349
350/*****************************************************/
351/* dar_work_queue Handler Thread                     */
352/*****************************************************/
353static void
354do_disk_add_remove(struct work_struct *work)
355{
356	struct diskaddremove *dar;
357	struct diskaddremove *tmphead;
358	int i = 0;
359	unsigned long flags;
360
361	spin_lock_irqsave(&dar_work_queue_lock, flags);
362	tmphead = dar_work_queue_head;
363	dar_work_queue_head = NULL;
364	dar_work_queue_sched = 0;
365	spin_unlock_irqrestore(&dar_work_queue_lock, flags);
366	while (tmphead) {
367		dar = tmphead;
368		tmphead = dar->next;
369		send_disk_add_remove(dar);
370		i++;
371	}
372}
373
374/*****************************************************/
375/* Routine to add entry to dar_work_queue            */
376/*****************************************************/
377static void
378process_disk_notify(struct Scsi_Host *shost, struct uiscmdrsp *cmdrsp)
379{
380	struct diskaddremove *dar;
381	unsigned long flags;
382
383	dar = kzalloc(sizeof(*dar), GFP_ATOMIC);
384	if (dar) {
385		dar->add = cmdrsp->disknotify.add;
386		dar->shost = shost;
387		dar->channel = cmdrsp->disknotify.channel;
388		dar->id = cmdrsp->disknotify.id;
389		dar->lun = cmdrsp->disknotify.lun;
390		QUEUE_DISKADDREMOVE(dar);
391	}
392}
393
394/*****************************************************/
395/* Probe Remove Functions                            */
396/*****************************************************/
397static irqreturn_t
398virthba_isr(int irq, void *dev_id)
399{
400	struct virthba_info *virthbainfo = (struct virthba_info *)dev_id;
401	struct channel_header __iomem *channel_header;
402	struct signal_queue_header __iomem *pqhdr;
403	u64 mask;
404	unsigned long long rc1;
405
406	if (!virthbainfo)
407		return IRQ_NONE;
408	virthbainfo->interrupts_rcvd++;
409	channel_header = virthbainfo->chinfo.queueinfo->chan;
410	if (((readq(&channel_header->features)
411	      & ULTRA_IO_IOVM_IS_OK_WITH_DRIVER_DISABLING_INTS) != 0) &&
412	     ((readq(&channel_header->features) &
413		 ULTRA_IO_DRIVER_DISABLES_INTS) !=
414		0)) {
415		virthbainfo->interrupts_disabled++;
416		mask = ~ULTRA_CHANNEL_ENABLE_INTS;
417		rc1 = uisqueue_interlocked_and(virthbainfo->flags_addr, mask);
418	}
419	if (spar_signalqueue_empty(channel_header, IOCHAN_FROM_IOPART)) {
420		virthbainfo->interrupts_notme++;
421		return IRQ_NONE;
422	}
423	pqhdr = (struct signal_queue_header __iomem *)
424		((char __iomem *)channel_header +
425		 readq(&channel_header->ch_space_offset)) + IOCHAN_FROM_IOPART;
426	writeq(readq(&pqhdr->num_irq_received) + 1,
427	       &pqhdr->num_irq_received);
428	atomic_set(&virthbainfo->interrupt_rcvd, 1);
429	wake_up_interruptible(&virthbainfo->rsp_queue);
430	return IRQ_HANDLED;
431}
432
433static int
434virthba_probe(struct virtpci_dev *virtpcidev, const struct pci_device_id *id)
435{
436	int error;
437	struct Scsi_Host *scsihost;
438	struct virthba_info *virthbainfo;
439	int rsp;
440	int i;
441	irq_handler_t handler = virthba_isr;
442	struct channel_header __iomem *channel_header;
443	struct signal_queue_header __iomem *pqhdr;
444	u64 mask;
445
446	POSTCODE_LINUX_2(VHBA_PROBE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
447	/* call scsi_host_alloc to register a scsi host adapter
448	 * instance - this virthba that has just been created is an
449	 * instance of a scsi host adapter. This scsi_host_alloc
450	 * function allocates a new Scsi_Host struct & performs basic
451	 * initialization.  The host is not published to the scsi
452	 * midlayer until scsi_add_host is called.
453	 */
454
455	/* arg 2 passed in length of extra space we want allocated
456	 * with scsi_host struct for our own use scsi_host_alloc
457	 * assign host_no
458	 */
459	scsihost = scsi_host_alloc(&virthba_driver_template,
460				   sizeof(struct virthba_info));
461	if (!scsihost)
462		return -ENODEV;
463
464	scsihost->this_id = UIS_MAGIC_VHBA;
465	/* linux treats max-channel differently than max-id & max-lun.
466	 * In the latter cases, those two values result in 0 to max-1
467	 * (inclusive) being scanned. But in the case of channels, the
468	 * scan is 0 to max (inclusive); so we will subtract one from
469	 * the max-channel value.
470	 */
471	scsihost->max_channel = (unsigned)virtpcidev->scsi.max.max_channel;
472	scsihost->max_id = (unsigned)virtpcidev->scsi.max.max_id;
473	scsihost->max_lun = (unsigned)virtpcidev->scsi.max.max_lun;
474	scsihost->cmd_per_lun = (unsigned)virtpcidev->scsi.max.cmd_per_lun;
475	scsihost->max_sectors =
476	    (unsigned short)(virtpcidev->scsi.max.max_io_size >> 9);
477	scsihost->sg_tablesize =
478	    (unsigned short)(virtpcidev->scsi.max.max_io_size / PAGE_SIZE);
479	if (scsihost->sg_tablesize > MAX_PHYS_INFO)
480		scsihost->sg_tablesize = MAX_PHYS_INFO;
481
482	/* this creates "host%d" in sysfs.  If 2nd argument is NULL,
483	 * then this generic /sys/devices/platform/host?  device is
484	 * created and /sys/scsi_host/host? ->
485	 * /sys/devices/platform/host?  If 2nd argument is not NULL,
486	 * then this generic /sys/devices/<path>/host? is created and
487	 * host? points to that device instead.
488	 */
489	error = scsi_add_host(scsihost, &virtpcidev->generic_dev);
490	if (error) {
491		POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
492		/* decr refcount on scsihost which was incremented by
493		 * scsi_add_host so the scsi_host gets deleted
494		 */
495		scsi_host_put(scsihost);
496		return -ENODEV;
497	}
498
499	virthbainfo = (struct virthba_info *)scsihost->hostdata;
500	memset(virthbainfo, 0, sizeof(struct virthba_info));
501	for (i = 0; i < VIRTHBASOPENMAX; i++) {
502		if (!virthbas_open[i].virthbainfo) {
503			virthbas_open[i].virthbainfo = virthbainfo;
504			break;
505		}
506	}
507	virthbainfo->interrupt_vector = -1;
508	virthbainfo->chinfo.queueinfo = &virtpcidev->queueinfo;
509	virthbainfo->virtpcidev = virtpcidev;
510	spin_lock_init(&virthbainfo->chinfo.insertlock);
511
512	init_waitqueue_head(&virthbainfo->rsp_queue);
513	spin_lock_init(&virthbainfo->privlock);
514	memset(&virthbainfo->pending, 0, sizeof(virthbainfo->pending));
515	virthbainfo->serverdown = false;
516	virthbainfo->serverchangingstate = false;
517
518	virthbainfo->intr = virtpcidev->intr;
519	/* save of host within virthba_info */
520	virthbainfo->scsihost = scsihost;
521
522	/* save of host within virtpci_dev */
523	virtpcidev->scsi.scsihost = scsihost;
524
525	/* Setup workqueue for serverdown messages */
526	INIT_WORK(&virthbainfo->serverdown_completion,
527		  virthba_serverdown_complete);
528
529	writeq(readq(&virthbainfo->chinfo.queueinfo->chan->features) |
530	       ULTRA_IO_CHANNEL_IS_POLLING,
531	       &virthbainfo->chinfo.queueinfo->chan->features);
532	/* start thread that will receive scsicmnd responses */
533
534	channel_header = virthbainfo->chinfo.queueinfo->chan;
535	pqhdr = (struct signal_queue_header __iomem *)
536		((char __iomem *)channel_header +
537		 readq(&channel_header->ch_space_offset)) + IOCHAN_FROM_IOPART;
538	virthbainfo->flags_addr = &pqhdr->features;
539
540	if (!uisthread_start(&virthbainfo->chinfo.threadinfo,
541			     process_incoming_rsps,
542			     virthbainfo, "vhba_incoming")) {
543		/* decr refcount on scsihost which was incremented by
544		 * scsi_add_host so the scsi_host gets deleted
545		 */
546		POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
547		scsi_host_put(scsihost);
548		return -ENODEV;
549	}
550	virthbainfo->interrupt_vector =
551	    virthbainfo->intr.recv_irq_handle & INTERRUPT_VECTOR_MASK;
552	rsp = request_irq(virthbainfo->interrupt_vector, handler, IRQF_SHARED,
553			  scsihost->hostt->name, virthbainfo);
554	if (rsp != 0) {
555		virthbainfo->interrupt_vector = -1;
556		POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
557	} else {
558		u64 __iomem *features_addr =
559		    &virthbainfo->chinfo.queueinfo->chan->features;
560		mask = ~(ULTRA_IO_CHANNEL_IS_POLLING |
561			 ULTRA_IO_DRIVER_DISABLES_INTS);
562		uisqueue_interlocked_and(features_addr, mask);
563		mask = ULTRA_IO_DRIVER_ENABLES_INTS;
564		uisqueue_interlocked_or(features_addr, mask);
565		rsltq_wait_usecs = 4000000;
566	}
567
568	scsi_scan_host(scsihost);
569
570	POSTCODE_LINUX_2(VHBA_PROBE_EXIT_PC, POSTCODE_SEVERITY_INFO);
571	return 0;
572}
573
/* Remove callback: tear down in reverse order of probe - irq, midlayer
 * registration, response thread, then the last scsi_host reference.
 */
static void
virthba_remove(struct virtpci_dev *virtpcidev)
{
	struct virthba_info *virthbainfo;
	struct Scsi_Host *scsihost =
	    (struct Scsi_Host *)virtpcidev->scsi.scsihost;

	virthbainfo = (struct virthba_info *)scsihost->hostdata;
	if (virthbainfo->interrupt_vector != -1)
		free_irq(virthbainfo->interrupt_vector, virthbainfo);

	scsi_remove_host(scsihost);

	uisthread_stop(&virthbainfo->chinfo.threadinfo);

	/* drop the reference taken by scsi_host_alloc so the scsi_host
	 * gets deleted
	 */
	scsi_host_put(scsihost);
}
594
/* Build a VDISK_MGMT cmdrsp, queue it to the IO partition, and block
 * until the response thread stores a result.  Returns SUCCESS, or
 * FAILED when the server is down/transitioning or allocation fails.
 * The on-stack notifyevent/notifyresult are referenced by the response
 * path until wait_event() completes, so this must not return early.
 */
static int
forward_vdiskmgmt_command(enum vdisk_mgmt_types vdiskcmdtype,
			  struct Scsi_Host *scsihost,
			  struct uisscsi_dest *vdest)
{
	struct uiscmdrsp *cmdrsp;
	struct virthba_info *virthbainfo =
	    (struct virthba_info *)scsihost->hostdata;
	int notifyresult = 0xffff;	/* sentinel: response not yet seen */
	wait_queue_head_t notifyevent;

	if (virthbainfo->serverdown || virthbainfo->serverchangingstate)
		return FAILED;

	cmdrsp = kzalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
	if (!cmdrsp)
		return FAILED;  /* reject */

	init_waitqueue_head(&notifyevent);

	/* issue VDISK_MGMT_CMD
	 * set type to command - as opposed to task mgmt
	 */
	cmdrsp->cmdtype = CMD_VDISKMGMT_TYPE;
	/* specify the event that has to be triggered when this cmd is
	 * complete
	 */
	cmdrsp->vdiskmgmt.notify = (void *)&notifyevent;
	cmdrsp->vdiskmgmt.notifyresult = (void *)&notifyresult;

	/* save destination */
	cmdrsp->vdiskmgmt.vdisktype = vdiskcmdtype;
	cmdrsp->vdiskmgmt.vdest.channel = vdest->channel;
	cmdrsp->vdiskmgmt.vdest.id = vdest->id;
	cmdrsp->vdiskmgmt.vdest.lun = vdest->lun;
	/* scsicmd carries the pending-table slot index, not a pointer */
	cmdrsp->vdiskmgmt.scsicmd =
	    (void *)(uintptr_t)
		add_scsipending_entry_with_wait(virthbainfo, CMD_VDISKMGMT_TYPE,
						(void *)cmdrsp);

	uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo,
					     cmdrsp, IOCHAN_TO_IOPART,
					     &virthbainfo->chinfo.insertlock,
					     DONT_ISSUE_INTERRUPT, (u64)NULL,
					     OK_TO_WAIT, "vhba");
	wait_event(notifyevent, notifyresult != 0xffff);
	kfree(cmdrsp);
	return SUCCESS;
}
644
645/*****************************************************/
646/* Scsi Host support functions                       */
647/*****************************************************/
648
/* Build a SCSI task-management cmdrsp (abort / LUN reset / bus reset),
 * queue it to the IO partition, and block until the response thread
 * stores a result.  Returns SUCCESS, or FAILED when the server is
 * down/transitioning or allocation fails.  The on-stack
 * notifyevent/notifyresult are referenced by the response path until
 * wait_event() completes, so this must not return early.
 */
static int
forward_taskmgmt_command(enum task_mgmt_types tasktype,
			 struct scsi_device *scsidev)
{
	struct uiscmdrsp *cmdrsp;
	struct virthba_info *virthbainfo =
	    (struct virthba_info *)scsidev->host->hostdata;
	int notifyresult = 0xffff;	/* sentinel: response not yet seen */
	wait_queue_head_t notifyevent;

	if (virthbainfo->serverdown || virthbainfo->serverchangingstate)
		return FAILED;

	cmdrsp = kzalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
	if (!cmdrsp)
		return FAILED;	/* reject */

	init_waitqueue_head(&notifyevent);

	/* issue TASK_MGMT_ABORT_TASK */
	/* set type to command - as opposed to task mgmt */
	cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
	/* specify the event that has to be triggered when this */
	/* cmd is complete */
	cmdrsp->scsitaskmgmt.notify = (void *)&notifyevent;
	cmdrsp->scsitaskmgmt.notifyresult = (void *)&notifyresult;

	/* save destination */
	cmdrsp->scsitaskmgmt.tasktype = tasktype;
	cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
	cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
	cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
	/* scsicmd carries the pending-table slot index, not a pointer */
	cmdrsp->scsitaskmgmt.scsicmd =
	    (void *)(uintptr_t)
		add_scsipending_entry_with_wait(virthbainfo,
						CMD_SCSITASKMGMT_TYPE,
						(void *)cmdrsp);

	uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo,
					     cmdrsp, IOCHAN_TO_IOPART,
					     &virthbainfo->chinfo.insertlock,
					     DONT_ISSUE_INTERRUPT, (u64)NULL,
					     OK_TO_WAIT, "vhba");
	wait_event(notifyevent, notifyresult != 0xffff);
	kfree(cmdrsp);
	return SUCCESS;
}
696
697/* The abort handler returns SUCCESS if it has succeeded to make LLDD
698 * and all related hardware forget about the scmd.
699 */
700static int
701virthba_abort_handler(struct scsi_cmnd *scsicmd)
702{
703	/* issue TASK_MGMT_ABORT_TASK */
704	struct scsi_device *scsidev;
705	struct virtdisk_info *vdisk;
706
707	scsidev = scsicmd->device;
708	for (vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head;
709	     vdisk->next; vdisk = vdisk->next) {
710		if ((scsidev->channel == vdisk->channel) &&
711		    (scsidev->id == vdisk->id) &&
712		    (scsidev->lun == vdisk->lun)) {
713			if (atomic_read(&vdisk->error_count) <
714			    VIRTHBA_ERROR_COUNT) {
715				atomic_inc(&vdisk->error_count);
716				POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC,
717						 POSTCODE_SEVERITY_INFO);
718			} else
719				atomic_set(&vdisk->ios_threshold,
720					   IOS_ERROR_THRESHOLD);
721		}
722	}
723	return forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsicmd->device);
724}
725
726static int
727virthba_bus_reset_handler(struct scsi_cmnd *scsicmd)
728{
729	/* issue TASK_MGMT_TARGET_RESET for each target on the bus */
730	struct scsi_device *scsidev;
731	struct virtdisk_info *vdisk;
732
733	scsidev = scsicmd->device;
734	for (vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head;
735	     vdisk->next; vdisk = vdisk->next) {
736		if ((scsidev->channel == vdisk->channel) &&
737		    (scsidev->id == vdisk->id) &&
738		    (scsidev->lun == vdisk->lun)) {
739			if (atomic_read(&vdisk->error_count) <
740			    VIRTHBA_ERROR_COUNT) {
741				atomic_inc(&vdisk->error_count);
742				POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC,
743						 POSTCODE_SEVERITY_INFO);
744			} else
745				atomic_set(&vdisk->ios_threshold,
746					   IOS_ERROR_THRESHOLD);
747		}
748	}
749	return forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsicmd->device);
750}
751
752static int
753virthba_device_reset_handler(struct scsi_cmnd *scsicmd)
754{
755	/* issue TASK_MGMT_LUN_RESET */
756	struct scsi_device *scsidev;
757	struct virtdisk_info *vdisk;
758
759	scsidev = scsicmd->device;
760	for (vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head;
761	     vdisk->next; vdisk = vdisk->next) {
762		if ((scsidev->channel == vdisk->channel) &&
763		    (scsidev->id == vdisk->id) &&
764		    (scsidev->lun == vdisk->lun)) {
765			if (atomic_read(&vdisk->error_count) <
766			    VIRTHBA_ERROR_COUNT) {
767				atomic_inc(&vdisk->error_count);
768				POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC,
769						 POSTCODE_SEVERITY_INFO);
770			} else
771				atomic_set(&vdisk->ios_threshold,
772					   IOS_ERROR_THRESHOLD);
773		}
774	}
775	return forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsicmd->device);
776}
777
/* Host-reset error handler: currently a stub that reports SUCCESS
 * without doing any work.
 */
static int
virthba_host_reset_handler(struct scsi_cmnd *scsicmd)
{
	/* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
	return SUCCESS;
}
784
785static char virthba_get_info_str[256];
786
787static const char *
788virthba_get_info(struct Scsi_Host *shp)
789{
790	/* Return version string */
791	sprintf(virthba_get_info_str, "virthba, version %s\n", VIRTHBA_VERSION);
792	return virthba_get_info_str;
793}
794
/* No driver-specific ioctls are supported. */
static int
virthba_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	return -EINVAL;
}
800
801/* This returns SCSI_MLQUEUE_DEVICE_BUSY if the signal queue to IOpart
802 * is full.
803 */
804static int
805virthba_queue_command_lck(struct scsi_cmnd *scsicmd,
806			  void (*virthba_cmnd_done)(struct scsi_cmnd *))
807{
808	struct scsi_device *scsidev = scsicmd->device;
809	int insert_location;
810	unsigned char op;
811	unsigned char *cdb = scsicmd->cmnd;
812	struct Scsi_Host *scsihost = scsidev->host;
813	struct uiscmdrsp *cmdrsp;
814	unsigned int i;
815	struct virthba_info *virthbainfo =
816	    (struct virthba_info *)scsihost->hostdata;
817	struct scatterlist *sg = NULL;
818	struct scatterlist *sgl = NULL;
819	int sg_failed = 0;
820
821	if (virthbainfo->serverdown || virthbainfo->serverchangingstate)
822		return SCSI_MLQUEUE_DEVICE_BUSY;
823	cmdrsp = kzalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
824	if (!cmdrsp)
825		return 1;	/* reject the command */
826
827	/* now saving everything we need from scsi_cmd into cmdrsp
828	 * before we queue cmdrsp set type to command - as opposed to
829	 * task mgmt
830	 */
831	cmdrsp->cmdtype = CMD_SCSI_TYPE;
832	/* save the pending insertion location.  Deletion from pending
833	 * will return the scsicmd pointer for completion
834	 */
835	insert_location =
836	    add_scsipending_entry(virthbainfo, CMD_SCSI_TYPE, (void *)scsicmd);
837	if (insert_location != -1) {
838		cmdrsp->scsi.scsicmd = (void *)(uintptr_t)insert_location;
839	} else {
840		kfree(cmdrsp);
841		return SCSI_MLQUEUE_DEVICE_BUSY;
842	}
843	/* save done function that we have call when cmd is complete */
844	scsicmd->scsi_done = virthba_cmnd_done;
845	/* save destination */
846	cmdrsp->scsi.vdest.channel = scsidev->channel;
847	cmdrsp->scsi.vdest.id = scsidev->id;
848	cmdrsp->scsi.vdest.lun = scsidev->lun;
849	/* save datadir */
850	cmdrsp->scsi.data_dir = scsicmd->sc_data_direction;
851	memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);
852
853	cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);
854
855	/* keep track of the max buffer length so far. */
856	if (cmdrsp->scsi.bufflen > max_buff_len)
857		max_buff_len = cmdrsp->scsi.bufflen;
858
859	if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO) {
860		del_scsipending_entry(virthbainfo, (uintptr_t)insert_location);
861		kfree(cmdrsp);
862		return 1;	/* reject the command */
863	}
864
865	/* This is what we USED to do when we assumed we were running */
866	/* uissd & virthba on the same Linux system. */
867	/* cmdrsp->scsi.buffer = scsicmd->request_buffer; */
868	/* The following code does NOT make that assumption. */
869	/* convert buffer to phys information */
870	if (scsi_sg_count(scsicmd) == 0) {
871		if (scsi_bufflen(scsicmd) > 0) {
872			BUG_ON(scsi_sg_count(scsicmd) == 0);
873		}
874	} else {
875		/* buffer is scatterlist - copy it out */
876		sgl = scsi_sglist(scsicmd);
877
878		for_each_sg(sgl, sg, scsi_sg_count(scsicmd), i) {
879			cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
880			cmdrsp->scsi.gpi_list[i].length = sg->length;
881		}
882
883		if (sg_failed) {
884			/* BUG(); ***** For now, let it fail in uissd
885			 * if it is a problem, as it might just
886			 * work
887			 */
888		}
889
890		cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);
891	}
892
893	op = cdb[0];
894	i = uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo,
895						 cmdrsp, IOCHAN_TO_IOPART,
896						 &virthbainfo->chinfo.
897						 insertlock,
898						 DONT_ISSUE_INTERRUPT,
899						 (u64)NULL, DONT_WAIT, "vhba");
900	if (i == 0) {
901		/* queue must be full - and we said don't wait - return busy */
902		kfree(cmdrsp);
903		del_scsipending_entry(virthbainfo, (uintptr_t)insert_location);
904		return SCSI_MLQUEUE_DEVICE_BUSY;
905	}
906
907	/* we're done with cmdrsp space - data from it has been copied
908	 * into channel - free it now.
909	 */
910	kfree(cmdrsp);
911	return 0;		/* non-zero implies host/device is busy */
912}
913
914static int
915virthba_slave_alloc(struct scsi_device *scsidev)
916{
917	/* this called by the midlayer before scan for new devices -
918	 * LLD can alloc any struct & do init if needed.
919	 */
920	struct virtdisk_info *vdisk;
921	struct virtdisk_info *tmpvdisk;
922	struct virthba_info *virthbainfo;
923	struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
924
925	virthbainfo = (struct virthba_info *)scsihost->hostdata;
926	if (!virthbainfo)
927		return 0;	/* even though we errored, treat as success */
928
929	for (vdisk = &virthbainfo->head; vdisk->next; vdisk = vdisk->next) {
930		if (vdisk->next->valid &&
931		    (vdisk->next->channel == scsidev->channel) &&
932		    (vdisk->next->id == scsidev->id) &&
933		    (vdisk->next->lun == scsidev->lun))
934			return 0;
935	}
936	tmpvdisk = kzalloc(sizeof(*tmpvdisk), GFP_ATOMIC);
937	if (!tmpvdisk)
938		return 0;
939
940	tmpvdisk->channel = scsidev->channel;
941	tmpvdisk->id = scsidev->id;
942	tmpvdisk->lun = scsidev->lun;
943	tmpvdisk->valid = 1;
944	vdisk->next = tmpvdisk;
945	return 0;		/* success */
946}
947
static int
virthba_slave_configure(struct scsi_device *scsidev)
{
	/* Midlayer hook invoked after slave_alloc; no per-device tuning
	 * is needed for virtual disks, so simply report success.
	 */
	return 0;		/* success */
}
953
954static void
955virthba_slave_destroy(struct scsi_device *scsidev)
956{
957	/* midlevel calls this after device has been quiesced and
958	 * before it is to be deleted.
959	 */
960	struct virtdisk_info *vdisk, *delvdisk;
961	struct virthba_info *virthbainfo;
962	struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
963
964	virthbainfo = (struct virthba_info *)scsihost->hostdata;
965	for (vdisk = &virthbainfo->head; vdisk->next; vdisk = vdisk->next) {
966		if (vdisk->next->valid &&
967		    (vdisk->next->channel == scsidev->channel) &&
968		    (vdisk->next->id == scsidev->id) &&
969		    (vdisk->next->lun == scsidev->lun)) {
970			delvdisk = vdisk->next;
971			vdisk->next = vdisk->next->next;
972			kfree(delvdisk);
973			return;
974		}
975	}
976}
977
978/*****************************************************/
979/* Scsi Cmnd support thread                          */
980/*****************************************************/
981
982static void
983do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
984{
985	struct virtdisk_info *vdisk;
986	struct scsi_device *scsidev;
987	struct sense_data *sd;
988
989	scsidev = scsicmd->device;
990	memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
991	sd = (struct sense_data *)scsicmd->sense_buffer;
992
993	/* Do not log errors for disk-not-present inquiries */
994	if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
995	    (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
996	    (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT))
997		return;
998
999	/* Okay see what our error_count is here.... */
1000	for (vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head;
1001	     vdisk->next; vdisk = vdisk->next) {
1002		if ((scsidev->channel != vdisk->channel) ||
1003		    (scsidev->id != vdisk->id) ||
1004		    (scsidev->lun != vdisk->lun))
1005			continue;
1006
1007		if (atomic_read(&vdisk->error_count) < VIRTHBA_ERROR_COUNT) {
1008			atomic_inc(&vdisk->error_count);
1009			atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
1010		}
1011	}
1012}
1013
static void
do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
{
	/* Handle a command that completed with no Linux error status.
	 * For INQUIRY commands the IOVM flagged as "no disk present",
	 * fabricate a processor-type inquiry response into the
	 * command's data buffer; otherwise decay the per-vdisk error
	 * counters.
	 */
	struct scsi_device *scsidev;
	unsigned char buf[36];	/* fabricated standard INQUIRY data */
	struct scatterlist *sg;
	unsigned int i;
	char *thispage;
	char *thispage_orig;
	int bufind = 0;
	struct virtdisk_info *vdisk;

	scsidev = scsicmd->device;
	if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
	    (cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN)) {
		/* Real inquiry data was returned - nothing to fake. */
		if (cmdrsp->scsi.no_disk_result == 0)
			return;

		/* Linux scsi code is weird; it wants
		 * a device at Lun 0 to issue report
		 * luns, but we don't want a disk
		 * there so we'll present a processor
		 * there. */
		/* NOTE(review): buf is only 36 bytes while bufflen is
		 * passed through - assumes SET_NO_DISK_INQUIRY_RESULT
		 * never writes past 36 bytes; confirm against its
		 * definition. */
		SET_NO_DISK_INQUIRY_RESULT(buf, cmdrsp->scsi.bufflen,
					   scsidev->lun,
					   DEV_DISK_CAPABLE_NOT_PRESENT,
					   DEV_NOT_CAPABLE);

		if (scsi_sg_count(scsicmd) == 0) {
			if (scsi_bufflen(scsicmd) > 0) {
				BUG_ON(scsi_sg_count(scsicmd) ==
				       0);
			}
			/* No scatterlist: scsi_sglist() is used here as
			 * a plain destination buffer pointer. */
			memcpy(scsi_sglist(scsicmd), buf,
			       cmdrsp->scsi.bufflen);
			return;
		}

		/* Scatterlist: spread the fabricated inquiry data over
		 * the segments, atomically mapping each page in turn. */
		sg = scsi_sglist(scsicmd);
		for (i = 0; i < scsi_sg_count(scsicmd); i++) {
			thispage_orig = kmap_atomic(sg_page(sg + i));
			thispage = (void *)((unsigned long)thispage_orig |
					     sg[i].offset);
			memcpy(thispage, buf + bufind, sg[i].length);
			kunmap_atomic(thispage_orig);
			bufind += sg[i].length;
		}
	} else {
		/* Successful command: decay the vdisk's I/O threshold
		 * counter; once it reaches zero, clear the accumulated
		 * error count set by do_scsi_linuxstat(). */
		vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head;
		for ( ; vdisk->next; vdisk = vdisk->next) {
			if ((scsidev->channel != vdisk->channel) ||
			    (scsidev->id != vdisk->id) ||
			    (scsidev->lun != vdisk->lun))
				continue;

			if (atomic_read(&vdisk->ios_threshold) > 0) {
				atomic_dec(&vdisk->ios_threshold);
				if (atomic_read(&vdisk->ios_threshold) == 0) {
					atomic_set(&vdisk->error_count, 0);
				}
			}
		}
	}
}
1078
1079static void
1080complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
1081{
1082	/* take what we need out of cmdrsp and complete the scsicmd */
1083	scsicmd->result = cmdrsp->scsi.linuxstat;
1084	if (cmdrsp->scsi.linuxstat)
1085		do_scsi_linuxstat(cmdrsp, scsicmd);
1086	else
1087		do_scsi_nolinuxstat(cmdrsp, scsicmd);
1088
1089	if (scsicmd->scsi_done)
1090		scsicmd->scsi_done(scsicmd);
1091}
1092
1093static inline void
1094complete_vdiskmgmt_command(struct uiscmdrsp *cmdrsp)
1095{
1096	/* copy the result of the taskmgmt and */
1097	/* wake up the error handler that is waiting for this */
1098	*(int *)cmdrsp->vdiskmgmt.notifyresult = cmdrsp->vdiskmgmt.result;
1099	wake_up_all((wait_queue_head_t *)cmdrsp->vdiskmgmt.notify);
1100}
1101
1102static inline void
1103complete_taskmgmt_command(struct uiscmdrsp *cmdrsp)
1104{
1105	/* copy the result of the taskmgmt and */
1106	/* wake up the error handler that is waiting for this */
1107	*(int *)cmdrsp->scsitaskmgmt.notifyresult =
1108	    cmdrsp->scsitaskmgmt.result;
1109	wake_up_all((wait_queue_head_t *)cmdrsp->scsitaskmgmt.notify);
1110}
1111
static void
drain_queue(struct virthba_info *virthbainfo, struct chaninfo *dc,
	    struct uiscmdrsp *cmdrsp)
{
	/* Pull every pending response off the IOVM response queue and
	 * dispatch it by command type.  cmdrsp is caller-supplied
	 * scratch space that is reused for each dequeued entry.
	 */
	unsigned long flags;
	int qrslt = 0;
	struct scsi_cmnd *scsicmd;
	struct Scsi_Host *shost = virthbainfo->scsihost;

	while (1) {
		/* The insert lock serializes against the submit path;
		 * channel ownership must be acquired before reading. */
		spin_lock_irqsave(&virthbainfo->chinfo.insertlock, flags);
		if (!spar_channel_client_acquire_os(dc->queueinfo->chan,
						    "vhba")) {
			spin_unlock_irqrestore(&virthbainfo->chinfo.insertlock,
					       flags);
			virthbainfo->acquire_failed_cnt++;
			break;
		}
		qrslt = uisqueue_get_cmdrsp(dc->queueinfo, cmdrsp,
					    IOCHAN_FROM_IOPART);
		spar_channel_client_release_os(dc->queueinfo->chan, "vhba");
		spin_unlock_irqrestore(&virthbainfo->chinfo.insertlock, flags);
		/* qrslt == 0 means the queue is empty - all drained. */
		if (qrslt == 0)
			break;
		if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
			/* scsicmd location is returned by the
			 * deletion
			 */
			scsicmd = del_scsipending_entry(virthbainfo,
							(uintptr_t)
							 cmdrsp->scsi.scsicmd);
			if (!scsicmd)
				break;
			/* complete the orig cmd */
			complete_scsi_command(cmdrsp, scsicmd);
		} else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
			if (!del_scsipending_entry(virthbainfo,
				   (uintptr_t)cmdrsp->scsitaskmgmt.scsicmd))
				break;
			complete_taskmgmt_command(cmdrsp);
		} else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE) {
			/* The vHba pointer has no meaning in
			 * a Client/Guest Partition. Let's be
			 * safe and set it to NULL now.  Do
			 * not use it here! */
			cmdrsp->disknotify.v_hba = NULL;
			process_disk_notify(shost, cmdrsp);
		} else if (cmdrsp->cmdtype == CMD_VDISKMGMT_TYPE) {
			if (!del_scsipending_entry(virthbainfo,
						   (uintptr_t)
						    cmdrsp->vdiskmgmt.scsicmd))
				break;
			complete_vdiskmgmt_command(cmdrsp);
		}
		/* cmdrsp is now available for reuse */
	}
}
1169
1170/* main function for the thread that waits for scsi commands to arrive
1171 * in a specified queue
1172 */
1173static int
1174process_incoming_rsps(void *v)
1175{
1176	struct virthba_info *virthbainfo = v;
1177	struct chaninfo *dc = &virthbainfo->chinfo;
1178	struct uiscmdrsp *cmdrsp = NULL;
1179	const int SZ = sizeof(struct uiscmdrsp);
1180	u64 mask;
1181	unsigned long long rc1;
1182
1183	UIS_DAEMONIZE("vhba_incoming");
1184	/* alloc once and reuse */
1185	cmdrsp = kmalloc(SZ, GFP_ATOMIC);
1186	if (!cmdrsp) {
1187		complete_and_exit(&dc->threadinfo.has_stopped, 0);
1188		return 0;
1189	}
1190	mask = ULTRA_CHANNEL_ENABLE_INTS;
1191	while (1) {
1192		if (kthread_should_stop())
1193			break;
1194		wait_event_interruptible_timeout(virthbainfo->rsp_queue,
1195			 (atomic_read(&virthbainfo->interrupt_rcvd) == 1),
1196				      usecs_to_jiffies(rsltq_wait_usecs));
1197		atomic_set(&virthbainfo->interrupt_rcvd, 0);
1198		/* drain queue */
1199		drain_queue(virthbainfo, dc, cmdrsp);
1200		rc1 = uisqueue_interlocked_or(virthbainfo->flags_addr, mask);
1201	}
1202
1203	kfree(cmdrsp);
1204
1205	complete_and_exit(&dc->threadinfo.has_stopped, 0);
1206}
1207
1208/*****************************************************/
1209/* Debugfs filesystem functions                      */
1210/*****************************************************/
1211
static ssize_t info_debugfs_read(struct file *file,
				 char __user *buf, size_t len, loff_t *offset)
{
	/* debugfs "info" read handler: format per-vHBA statistics into
	 * a temporary kernel buffer and copy the requested window of it
	 * to user space via simple_read_from_buffer().
	 */
	ssize_t bytes_read = 0;
	int str_pos = 0;	/* running length of formatted output */
	u64 phys_flags_addr;
	int i;
	struct virthba_info *virthbainfo;
	char *vbuf;

	/* Cap the scratch buffer; scnprintf below never overruns it. */
	if (len > MAX_BUF)
		len = MAX_BUF;
	vbuf = kzalloc(len, GFP_KERNEL);
	if (!vbuf)
		return -ENOMEM;

	/* Emit one stanza per open vHBA. */
	for (i = 0; i < VIRTHBASOPENMAX; i++) {
		if (!virthbas_open[i].virthbainfo)
			continue;

		virthbainfo = virthbas_open[i].virthbainfo;

		str_pos += scnprintf(vbuf + str_pos,
				len - str_pos, "max_buff_len:%u\n",
				max_buff_len);

		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				"\nvirthba result queue poll wait:%d usecs.\n",
				rsltq_wait_usecs);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				"\ninterrupts_rcvd = %llu, interrupts_disabled = %llu\n",
				virthbainfo->interrupts_rcvd,
				virthbainfo->interrupts_disabled);
		str_pos += scnprintf(vbuf + str_pos,
				len - str_pos, "\ninterrupts_notme = %llu,\n",
				virthbainfo->interrupts_notme);
		phys_flags_addr = virt_to_phys((__force  void *)
					       virthbainfo->flags_addr);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				"flags_addr = %p, phys_flags_addr=0x%016llx, FeatureFlags=%llu\n",
				virthbainfo->flags_addr, phys_flags_addr,
				(__le64)readq(virthbainfo->flags_addr));
		str_pos += scnprintf(vbuf + str_pos,
			len - str_pos, "acquire_failed_cnt:%llu\n",
			virthbainfo->acquire_failed_cnt);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos, "\n");
	}

	bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
	kfree(vbuf);
	return bytes_read;
}
1264
1265static ssize_t enable_ints_write(struct file *file, const char __user *buffer,
1266				 size_t count, loff_t *ppos)
1267{
1268	char buf[4];
1269	int i, new_value;
1270	struct virthba_info *virthbainfo;
1271
1272	u64 __iomem *features_addr;
1273	u64 mask;
1274
1275	if (count >= ARRAY_SIZE(buf))
1276		return -EINVAL;
1277
1278	buf[count] = '\0';
1279	if (copy_from_user(buf, buffer, count))
1280		return -EFAULT;
1281
1282	i = kstrtoint(buf, 10, &new_value);
1283
1284	if (i != 0)
1285		return -EFAULT;
1286
1287	/* set all counts to new_value usually 0 */
1288	for (i = 0; i < VIRTHBASOPENMAX; i++) {
1289		if (virthbas_open[i].virthbainfo) {
1290			virthbainfo = virthbas_open[i].virthbainfo;
1291			features_addr =
1292				&virthbainfo->chinfo.queueinfo->chan->features;
1293			if (new_value == 1) {
1294				mask = ~(ULTRA_IO_CHANNEL_IS_POLLING |
1295					 ULTRA_IO_DRIVER_DISABLES_INTS);
1296				uisqueue_interlocked_and(features_addr, mask);
1297				mask = ULTRA_IO_DRIVER_ENABLES_INTS;
1298				uisqueue_interlocked_or(features_addr, mask);
1299				rsltq_wait_usecs = 4000000;
1300			} else {
1301				mask = ~(ULTRA_IO_DRIVER_ENABLES_INTS |
1302					 ULTRA_IO_DRIVER_DISABLES_INTS);
1303				uisqueue_interlocked_and(features_addr, mask);
1304				mask = ULTRA_IO_CHANNEL_IS_POLLING;
1305				uisqueue_interlocked_or(features_addr, mask);
1306				rsltq_wait_usecs = 4000;
1307			}
1308		}
1309	}
1310	return count;
1311}
1312
1313/* As per VirtpciFunc returns 1 for success and 0 for failure */
1314static int
1315virthba_serverup(struct virtpci_dev *virtpcidev)
1316{
1317	struct virthba_info *virthbainfo =
1318	    (struct virthba_info *)((struct Scsi_Host *)virtpcidev->scsi.
1319				     scsihost)->hostdata;
1320
1321	if (!virthbainfo->serverdown)
1322		return 1;
1323
1324	if (virthbainfo->serverchangingstate)
1325		return 0;
1326
1327	virthbainfo->serverchangingstate = true;
1328	/* Must transition channel to ATTACHED state BEFORE we
1329	 * can start using the device again
1330	 */
1331	SPAR_CHANNEL_CLIENT_TRANSITION(virthbainfo->chinfo.queueinfo->chan,
1332				       dev_name(&virtpcidev->generic_dev),
1333				       CHANNELCLI_ATTACHED, NULL);
1334
1335	/* Start Processing the IOVM Response Queue Again */
1336	if (!uisthread_start(&virthbainfo->chinfo.threadinfo,
1337			     process_incoming_rsps,
1338			     virthbainfo, "vhba_incoming")) {
1339		return 0;
1340	}
1341	virthbainfo->serverdown = false;
1342	virthbainfo->serverchangingstate = false;
1343
1344	return 1;
1345}
1346
1347static void
1348virthba_serverdown_complete(struct work_struct *work)
1349{
1350	struct virthba_info *virthbainfo;
1351	struct virtpci_dev *virtpcidev;
1352	int i;
1353	struct scsipending *pendingdel = NULL;
1354	struct scsi_cmnd *scsicmd = NULL;
1355	struct uiscmdrsp *cmdrsp;
1356	unsigned long flags;
1357
1358	virthbainfo = container_of(work, struct virthba_info,
1359				   serverdown_completion);
1360
1361	/* Stop Using the IOVM Response Queue (queue should be drained
1362	 * by the end)
1363	 */
1364	uisthread_stop(&virthbainfo->chinfo.threadinfo);
1365
1366	/* Fail Commands that weren't completed */
1367	spin_lock_irqsave(&virthbainfo->privlock, flags);
1368	for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
1369		pendingdel = &virthbainfo->pending[i];
1370		switch (pendingdel->cmdtype) {
1371		case CMD_SCSI_TYPE:
1372			scsicmd = (struct scsi_cmnd *)pendingdel->sent;
1373			scsicmd->result = DID_RESET << 16;
1374			if (scsicmd->scsi_done)
1375				scsicmd->scsi_done(scsicmd);
1376			break;
1377		case CMD_SCSITASKMGMT_TYPE:
1378			cmdrsp = (struct uiscmdrsp *)pendingdel->sent;
1379			wake_up_all((wait_queue_head_t *)
1380				    cmdrsp->scsitaskmgmt.notify);
1381			*(int *)cmdrsp->scsitaskmgmt.notifyresult =
1382				TASK_MGMT_FAILED;
1383			break;
1384		case CMD_VDISKMGMT_TYPE:
1385			cmdrsp = (struct uiscmdrsp *)pendingdel->sent;
1386			*(int *)cmdrsp->vdiskmgmt.notifyresult =
1387			    VDISK_MGMT_FAILED;
1388			wake_up_all((wait_queue_head_t *)
1389				    cmdrsp->vdiskmgmt.notify);
1390			break;
1391		default:
1392			break;
1393		}
1394		pendingdel->cmdtype = 0;
1395		pendingdel->sent = NULL;
1396	}
1397	spin_unlock_irqrestore(&virthbainfo->privlock, flags);
1398
1399	virtpcidev = virthbainfo->virtpcidev;
1400
1401	virthbainfo->serverdown = true;
1402	virthbainfo->serverchangingstate = false;
1403	/* Return the ServerDown response to Command */
1404	visorchipset_device_pause_response(virtpcidev->bus_no,
1405					   virtpcidev->device_no, 0);
1406}
1407
1408/* As per VirtpciFunc returns 1 for success and 0 for failure */
1409static int
1410virthba_serverdown(struct virtpci_dev *virtpcidev, u32 state)
1411{
1412	int stat = 1;
1413
1414	struct virthba_info *virthbainfo =
1415	    (struct virthba_info *)((struct Scsi_Host *)virtpcidev->scsi.
1416				     scsihost)->hostdata;
1417
1418	if (!virthbainfo->serverdown && !virthbainfo->serverchangingstate) {
1419		virthbainfo->serverchangingstate = true;
1420		queue_work(virthba_serverdown_workqueue,
1421			   &virthbainfo->serverdown_completion);
1422	} else if (virthbainfo->serverchangingstate) {
1423		stat = 0;
1424	}
1425
1426	return stat;
1427}
1428
1429/*****************************************************/
1430/* Module Init & Exit functions                      */
1431/*****************************************************/
1432
static int __init
virthba_parse_line(char *str)
{
	/* Stub: individual option tokens are currently ignored; always
	 * report success so virthba_parse_options() keeps iterating.
	 */
	return 1;
}
1438
1439static void __init
1440virthba_parse_options(char *line)
1441{
1442	char *next = line;
1443
1444	POSTCODE_LINUX_2(VHBA_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1445	if (!line || !*line)
1446		return;
1447	while ((line = next)) {
1448		next = strchr(line, ' ');
1449		if (next)
1450			*next++ = 0;
1451		virthba_parse_line(line);
1452	}
1453
1454	POSTCODE_LINUX_2(VHBA_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO);
1455}
1456
1457static int __init
1458virthba_mod_init(void)
1459{
1460	int error;
1461	int i;
1462
1463	if (!unisys_spar_platform)
1464		return -ENODEV;
1465
1466	POSTCODE_LINUX_2(VHBA_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1467	virthba_parse_options(virthba_options);
1468
1469	error = virtpci_register_driver(&virthba_driver);
1470	if (error < 0) {
1471		POSTCODE_LINUX_3(VHBA_CREATE_FAILURE_PC, error,
1472				 POSTCODE_SEVERITY_ERR);
1473	} else {
1474		/* create the debugfs directories and entries */
1475		virthba_debugfs_dir = debugfs_create_dir("virthba", NULL);
1476		debugfs_create_file("info", S_IRUSR, virthba_debugfs_dir,
1477				    NULL, &debugfs_info_fops);
1478		debugfs_create_u32("rqwait_usecs", S_IRUSR | S_IWUSR,
1479				   virthba_debugfs_dir, &rsltq_wait_usecs);
1480		debugfs_create_file("enable_ints", S_IWUSR,
1481				    virthba_debugfs_dir, NULL,
1482				    &debugfs_enable_ints_fops);
1483		/* Initialize dar_work_queue */
1484		INIT_WORK(&dar_work_queue, do_disk_add_remove);
1485		spin_lock_init(&dar_work_queue_lock);
1486
1487		/* clear out array */
1488		for (i = 0; i < VIRTHBASOPENMAX; i++)
1489			virthbas_open[i].virthbainfo = NULL;
1490		/* Initialize the serverdown workqueue */
1491		virthba_serverdown_workqueue =
1492		    create_singlethread_workqueue("virthba_serverdown");
1493		if (!virthba_serverdown_workqueue) {
1494			POSTCODE_LINUX_2(VHBA_CREATE_FAILURE_PC,
1495					 POSTCODE_SEVERITY_ERR);
1496			error = -1;
1497		}
1498	}
1499
1500	POSTCODE_LINUX_2(VHBA_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO);
1501	return error;
1502}
1503
1504static ssize_t
1505virthba_acquire_lun(struct device *cdev, struct device_attribute *attr,
1506		    const char *buf, size_t count)
1507{
1508	struct uisscsi_dest vdest;
1509	struct Scsi_Host *shost = class_to_shost(cdev);
1510	int i;
1511
1512	i = sscanf(buf, "%d-%d-%d", &vdest.channel, &vdest.id, &vdest.lun);
1513	if (i != 3)
1514		return i;
1515
1516	return forward_vdiskmgmt_command(VDISK_MGMT_ACQUIRE, shost, &vdest);
1517}
1518
1519static ssize_t
1520virthba_release_lun(struct device *cdev, struct device_attribute *attr,
1521		    const char *buf, size_t count)
1522{
1523	struct uisscsi_dest vdest;
1524	struct Scsi_Host *shost = class_to_shost(cdev);
1525	int i;
1526
1527	i = sscanf(buf, "%d-%d-%d", &vdest.channel, &vdest.id, &vdest.lun);
1528	if (i != 3)
1529		return i;
1530
1531	return forward_vdiskmgmt_command(VDISK_MGMT_RELEASE, shost, &vdest);
1532}
1533
/* Shorthand for declaring a writable device attribute with the legacy
 * class_device_attr_ naming used by this driver. */
#define CLASS_DEVICE_ATTR(_name, _mode, _show, _store)      \
	struct device_attribute class_device_attr_##_name =   \
		__ATTR(_name, _mode, _show, _store)

/* Write-only sysfs attributes for acquiring/releasing LUNs. */
static CLASS_DEVICE_ATTR(acquire_lun, S_IWUSR, NULL, virthba_acquire_lun);
static CLASS_DEVICE_ATTR(release_lun, S_IWUSR, NULL, virthba_release_lun);

/* NULL-terminated attribute list hooked into the SCSI host template. */
static DEVICE_ATTRIBUTE *virthba_shost_attrs[] = {
	&class_device_attr_acquire_lun,
	&class_device_attr_release_lun,
	NULL
};
1546
1547static void __exit
1548virthba_mod_exit(void)
1549{
1550	virtpci_unregister_driver(&virthba_driver);
1551	/* unregister is going to call virthba_remove */
1552	/* destroy serverdown completion workqueue */
1553	if (virthba_serverdown_workqueue) {
1554		destroy_workqueue(virthba_serverdown_workqueue);
1555		virthba_serverdown_workqueue = NULL;
1556	}
1557
1558	debugfs_remove_recursive(virthba_debugfs_dir);
1559}
1560
/* specify function to be run at module insertion time */
module_init(virthba_mod_init);

/* specify function to be run when module is removed */
module_exit(virthba_mod_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Usha Srinivasan");
MODULE_ALIAS("uisvirthba");
	/* this is extracted during depmod and kept in modules.dep */
/* module parameter: space-separated option string handed to
 * virthba_parse_options() at init time */
module_param(virthba_options, charp, S_IRUGO);
1573