1#ifndef _SCSI_SCSI_HOST_H
2#define _SCSI_SCSI_HOST_H
3
4#include <linux/device.h>
5#include <linux/list.h>
6#include <linux/types.h>
7#include <linux/workqueue.h>
8#include <linux/mutex.h>
9#include <linux/seq_file.h>
10#include <linux/blk-mq.h>
11#include <scsi/scsi.h>
12
13struct request_queue;
14struct block_device;
15struct completion;
16struct module;
17struct scsi_cmnd;
18struct scsi_device;
19struct scsi_host_cmd_pool;
20struct scsi_target;
21struct Scsi_Host;
23struct scsi_transport_template;
24struct blk_queue_tags;
25
26
27/*
28 * The various choices mean:
29 * NONE: Self evident.	Host adapter is not capable of scatter-gather.
30 * ALL:	 Means that the host adapter module can do scatter-gather,
31 *	 and that there is no limit to the size of the table to which
32 *	 we scatter/gather data.  The value we set here is the maximum
33 *	 single element sglist.  To use chained sglists, the adapter
 *	 has to set a value beyond ALL (and correctly use the chain
 *	 handling API).
36 * Anything else:  Indicates the maximum number of chains that can be
37 *	 used in one scatter-gather request.
38 */
39#define SG_NONE 0
40#define SG_ALL	SCSI_MAX_SG_SEGMENTS
41
42#define MODE_UNKNOWN 0x00
43#define MODE_INITIATOR 0x01
44#define MODE_TARGET 0x02
45
46#define DISABLE_CLUSTERING 0
47#define ENABLE_CLUSTERING 1
48
49struct scsi_host_template {
50	struct module *module;
51	const char *name;
52
53	/*
54	 * Used to initialize old-style drivers.  For new-style drivers
55	 * just perform all work in your module initialization function.
56	 *
57	 * Status:  OBSOLETE
58	 */
59	int (* detect)(struct scsi_host_template *);
60
61	/*
62	 * Used as unload callback for hosts with old-style drivers.
63	 *
64	 * Status: OBSOLETE
65	 */
66	int (* release)(struct Scsi_Host *);
67
68	/*
69	 * The info function will return whatever useful information the
70	 * developer sees fit.  If not provided, then the name field will
71	 * be used instead.
72	 *
73	 * Status: OPTIONAL
74	 */
75	const char *(* info)(struct Scsi_Host *);
76
77	/*
78	 * Ioctl interface
79	 *
80	 * Status: OPTIONAL
81	 */
82	int (* ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
83
84
85#ifdef CONFIG_COMPAT
86	/*
87	 * Compat handler. Handle 32bit ABI.
88	 * When unknown ioctl is passed return -ENOIOCTLCMD.
89	 *
90	 * Status: OPTIONAL
91	 */
92	int (* compat_ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
93#endif
94
95	/*
96	 * The queuecommand function is used to queue up a scsi
	 * command block to the LLDD.  When the driver has finished
	 * processing the command, the done callback is invoked.
99	 *
100	 * If queuecommand returns 0, then the HBA has accepted the
101	 * command.  The done() function must be called on the command
102	 * when the driver has finished with it. (you may call done on the
103	 * command before queuecommand returns, but in this case you
104	 * *must* return 0 from queuecommand).
105	 *
106	 * Queuecommand may also reject the command, in which case it may
107	 * not touch the command and must not call done() for it.
108	 *
109	 * There are two possible rejection returns:
110	 *
111	 *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
112	 *   allow commands to other devices serviced by this host.
113	 *
114	 *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
115	 *   host temporarily.
116	 *
	 * For compatibility, any other non-zero return is treated the
	 * same as SCSI_MLQUEUE_HOST_BUSY.
	 *
	 * NOTE: "temporarily" means either until the next command for
	 * this device/host completes, or a period of time determined by
	 * I/O pressure in the system if there are no other outstanding
	 * commands.
124	 *
125	 * STATUS: REQUIRED
126	 */
127	int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
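	/*
	 * Illustrative sketch only (not from an in-tree driver); the foo_*
	 * names are hypothetical.  A minimal queuecommand honouring the
	 * rules above might look like:
	 *
	 *	static int foo_queuecommand(struct Scsi_Host *shost,
	 *				    struct scsi_cmnd *cmd)
	 *	{
	 *		struct foo_hba *hba = shost_priv(shost);
	 *
	 *		if (foo_hw_ring_full(hba))
	 *			return SCSI_MLQUEUE_HOST_BUSY;
	 *
	 *		if (foo_device_gone(hba, cmd->device)) {
	 *			cmd->result = DID_NO_CONNECT << 16;
	 *			cmd->scsi_done(cmd);
	 *			return 0;
	 *		}
	 *
	 *		foo_hw_submit(hba, cmd);
	 *		return 0;
	 *	}
	 *
	 * foo_hw_submit() would arrange for cmd->scsi_done(cmd) to be
	 * called (typically from the interrupt handler) once the command
	 * completes.
	 */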
128
129	/*
130	 * This is an error handling strategy routine.  You don't need to
131	 * define one of these if you don't want to - there is a default
132	 * routine that is present that should work in most cases.  For those
133	 * driver authors that have the inclination and ability to write their
134	 * own strategy routine, this is where it is specified.  Note - the
135	 * strategy routine is *ALWAYS* run in the context of the kernel eh
136	 * thread.  Thus you are guaranteed to *NOT* be in an interrupt
137	 * handler when you execute this, and you are also guaranteed to
138	 * *NOT* have any other commands being queued while you are in the
139	 * strategy routine. When you return from this function, operations
140	 * return to normal.
141	 *
142	 * See scsi_error.c scsi_unjam_host for additional comments about
143	 * what this function should and should not be attempting to do.
144	 *
145	 * Status: REQUIRED	(at least one of them)
146	 */
147	int (* eh_abort_handler)(struct scsi_cmnd *);
148	int (* eh_device_reset_handler)(struct scsi_cmnd *);
149	int (* eh_target_reset_handler)(struct scsi_cmnd *);
150	int (* eh_bus_reset_handler)(struct scsi_cmnd *);
151	int (* eh_host_reset_handler)(struct scsi_cmnd *);
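	/*
	 * Illustrative sketch only; foo_* names are hypothetical.  An abort
	 * handler returns SUCCESS if the command was taken back from the
	 * hardware (the midlayer then owns it again) and FAILED otherwise:
	 *
	 *	static int foo_eh_abort_handler(struct scsi_cmnd *cmd)
	 *	{
	 *		struct foo_hba *hba = shost_priv(cmd->device->host);
	 *
	 *		if (foo_hw_abort(hba, cmd))
	 *			return FAILED;
	 *		return SUCCESS;
	 *	}
	 */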
152
153	/*
154	 * Before the mid layer attempts to scan for a new device where none
155	 * currently exists, it will call this entry in your driver.  Should
156	 * your driver need to allocate any structs or perform any other init
157	 * items in order to send commands to a currently unused target/lun
158	 * combo, then this is where you can perform those allocations.  This
159	 * is specifically so that drivers won't have to perform any kind of
160	 * "is this a new device" checks in their queuecommand routine,
161	 * thereby making the hot path a bit quicker.
162	 *
163	 * Return values: 0 on success, non-0 on failure
164	 *
165	 * Deallocation:  If we didn't find any devices at this ID, you will
166	 * get an immediate call to slave_destroy().  If we find something
167	 * here then you will get a call to slave_configure(), then the
	 * device will be used for however long it is kept around, then when
	 * the device is removed from the system (or possibly at reboot
	 * time), you will then get a call to slave_destroy().  This is
	 * assuming you implement slave_configure and slave_destroy.
	 * However, if you allocate memory and hang it off the device struct,
	 * then you must implement the slave_destroy() routine at a minimum
	 * in order to avoid leaking memory each time a device is torn down.
176	 *
177	 * Status: OPTIONAL
178	 */
179	int (* slave_alloc)(struct scsi_device *);
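	/*
	 * Illustrative sketch only; foo_* names are hypothetical.  A typical
	 * slave_alloc hangs per-device state off sdev->hostdata, which a
	 * matching slave_destroy must free again:
	 *
	 *	static int foo_slave_alloc(struct scsi_device *sdev)
	 *	{
	 *		struct foo_lun *lun;
	 *
	 *		lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	 *		if (!lun)
	 *			return -ENOMEM;
	 *		sdev->hostdata = lun;
	 *		return 0;
	 *	}
	 *
	 *	static void foo_slave_destroy(struct scsi_device *sdev)
	 *	{
	 *		kfree(sdev->hostdata);
	 *		sdev->hostdata = NULL;
	 *	}
	 */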
180
181	/*
182	 * Once the device has responded to an INQUIRY and we know the
183	 * device is online, we call into the low level driver with the
184	 * struct scsi_device *.  If the low level device driver implements
185	 * this function, it *must* perform the task of setting the queue
186	 * depth on the device.  All other tasks are optional and depend
187	 * on what the driver supports and various implementation details.
188	 *
189	 * Things currently recommended to be handled at this time include:
190	 *
191	 * 1.  Setting the device queue depth.  Proper setting of this is
192	 *     described in the comments for scsi_change_queue_depth.
193	 * 2.  Determining if the device supports the various synchronous
194	 *     negotiation protocols.  The device struct will already have
195	 *     responded to INQUIRY and the results of the standard items
196	 *     will have been shoved into the various device flag bits, eg.
197	 *     device->sdtr will be true if the device supports SDTR messages.
198	 * 3.  Allocating command structs that the device will need.
199	 * 4.  Setting the default timeout on this device (if needed).
200	 * 5.  Anything else the low level driver might want to do on a device
201	 *     specific setup basis...
202	 * 6.  Return 0 on success, non-0 on error.  The device will be marked
203	 *     as offline on error so that no access will occur.  If you return
204	 *     non-0, your slave_destroy routine will never get called for this
	 *     device, so don't leave any loose memory hanging around; clean
	 *     up after yourself before returning non-0.
207	 *
208	 * Status: OPTIONAL
209	 */
210	int (* slave_configure)(struct scsi_device *);
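	/*
	 * Illustrative sketch only; foo_* names are hypothetical.  A common
	 * slave_configure simply picks a queue depth via
	 * scsi_change_queue_depth():
	 *
	 *	static int foo_slave_configure(struct scsi_device *sdev)
	 *	{
	 *		struct foo_hba *hba = shost_priv(sdev->host);
	 *
	 *		scsi_change_queue_depth(sdev, hba->preferred_depth);
	 *		return 0;
	 *	}
	 */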
211
212	/*
213	 * Immediately prior to deallocating the device and after all activity
214	 * has ceased the mid layer calls this point so that the low level
215	 * driver may completely detach itself from the scsi device and vice
216	 * versa.  The low level driver is responsible for freeing any memory
217	 * it allocated in the slave_alloc or slave_configure calls.
218	 *
219	 * Status: OPTIONAL
220	 */
221	void (* slave_destroy)(struct scsi_device *);
222
223	/*
224	 * Before the mid layer attempts to scan for a new device attached
225	 * to a target where no target currently exists, it will call this
226	 * entry in your driver.  Should your driver need to allocate any
227	 * structs or perform any other init items in order to send commands
228	 * to a currently unused target, then this is where you can perform
229	 * those allocations.
230	 *
231	 * Return values: 0 on success, non-0 on failure
232	 *
233	 * Status: OPTIONAL
234	 */
235	int (* target_alloc)(struct scsi_target *);
236
237	/*
238	 * Immediately prior to deallocating the target structure, and
239	 * after all activity to attached scsi devices has ceased, the
240	 * midlayer calls this point so that the driver may deallocate
241	 * and terminate any references to the target.
242	 *
243	 * Status: OPTIONAL
244	 */
245	void (* target_destroy)(struct scsi_target *);
246
247	/*
248	 * If a host has the ability to discover targets on its own instead
249	 * of scanning the entire bus, it can fill in this function and
	 * call scsi_scan_host().  This function will then be called
	 * periodically, with the scsi_host and the elapsed scan time in
	 * jiffies, until it returns 1 to indicate that the scan is complete.
253	 *
254	 * Status: OPTIONAL
255	 */
256	int (* scan_finished)(struct Scsi_Host *, unsigned long);
257
258	/*
259	 * If the host wants to be called before the scan starts, but
260	 * after the midlayer has set up ready for the scan, it can fill
261	 * in this function.
262	 *
263	 * Status: OPTIONAL
264	 */
265	void (* scan_start)(struct Scsi_Host *);
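	/*
	 * Illustrative sketch only; foo_* names are hypothetical.  A driver
	 * doing its own discovery typically kicks the hardware in
	 * scan_start() and reports completion (with a timeout guard) from
	 * scan_finished():
	 *
	 *	static void foo_scan_start(struct Scsi_Host *shost)
	 *	{
	 *		foo_hw_start_discovery(shost_priv(shost));
	 *	}
	 *
	 *	static int foo_scan_finished(struct Scsi_Host *shost,
	 *				     unsigned long elapsed)
	 *	{
	 *		return foo_hw_discovery_done(shost_priv(shost)) ||
	 *		       elapsed > 10 * HZ;
	 *	}
	 */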
266
267	/*
268	 * Fill in this function to allow the queue depth of this host
269	 * to be changeable (on a per device basis).  Returns either
270	 * the current queue depth setting (may be different from what
271	 * was passed in) or an error.  An error should only be
272	 * returned if the requested depth is legal but the driver was
273	 * unable to set it.  If the requested depth is illegal, the
274	 * driver should set and return the closest legal queue depth.
275	 *
276	 * Status: OPTIONAL
277	 */
278	int (* change_queue_depth)(struct scsi_device *, int);
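	/*
	 * Illustrative sketch only; foo_* names are hypothetical.  A driver
	 * would clamp the requested depth to what its hardware supports and
	 * let scsi_change_queue_depth() do the rest (drivers with no
	 * restrictions can point this at scsi_change_queue_depth directly):
	 *
	 *	static int foo_change_queue_depth(struct scsi_device *sdev,
	 *					  int depth)
	 *	{
	 *		struct foo_hba *hba = shost_priv(sdev->host);
	 *
	 *		if (depth > hba->max_hw_depth)
	 *			depth = hba->max_hw_depth;
	 *		return scsi_change_queue_depth(sdev, depth);
	 *	}
	 */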
279
280	/*
281	 * This function determines the BIOS parameters for a given
	 * hard disk.  These tend to be numbers that are made up by
283	 * the host adapter.  Parameters:
284	 * size, device, list (heads, sectors, cylinders)
285	 *
286	 * Status: OPTIONAL
287	 */
288	int (* bios_param)(struct scsi_device *, struct block_device *,
289			sector_t, int []);
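	/*
	 * Illustrative sketch only.  geom[] is filled with heads, sectors
	 * and cylinders; drivers usually make up a 64/32 geometry and
	 * derive the cylinder count from the capacity:
	 *
	 *	static int foo_bios_param(struct scsi_device *sdev,
	 *				  struct block_device *bdev,
	 *				  sector_t capacity, int geom[])
	 *	{
	 *		geom[0] = 64;
	 *		geom[1] = 32;
	 *		geom[2] = (unsigned long)capacity >> 11;
	 *		return 0;
	 *	}
	 */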
290
291	/*
292	 * This function is called when one or more partitions on the
293	 * device reach beyond the end of the device.
294	 *
295	 * Status: OPTIONAL
296	 */
297	void (*unlock_native_capacity)(struct scsi_device *);
298
299	/*
	 * Can be used to export driver statistics and other information to
	 * the world outside the kernel (i.e. userspace), and also provides
	 * an interface for feeding information back to the driver.
303	 *
304	 * Status: OBSOLETE
305	 */
306	int (*show_info)(struct seq_file *, struct Scsi_Host *);
307	int (*write_info)(struct Scsi_Host *, char *, int);
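	/*
	 * Illustrative sketch only; foo_* names are hypothetical.  show_info
	 * writes into the seq_file, which ends up under
	 * /proc/scsi/<proc_name>/<host_no>:
	 *
	 *	static int foo_show_info(struct seq_file *m,
	 *				 struct Scsi_Host *shost)
	 *	{
	 *		struct foo_hba *hba = shost_priv(shost);
	 *
	 *		seq_printf(m, "firmware %u.%u, %u commands active\n",
	 *			   hba->fw_major, hba->fw_minor,
	 *			   hba->active_cmds);
	 *		return 0;
	 *	}
	 */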
308
309	/*
310	 * This is an optional routine that allows the transport to become
311	 * involved when a scsi io timer fires. The return value tells the
312	 * timer routine how to finish the io timeout handling:
	 * EH_HANDLED:		I fixed the error, please complete the command
	 * EH_RESET_TIMER:	I need more time, reset the timer and
	 *			begin counting again
	 * EH_NOT_HANDLED:	Begin normal error recovery
317	 *
318	 * Status: OPTIONAL
319	 */
320	enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
321
322	/* This is an optional routine that allows transport to initiate
323	 * LLD adapter or firmware reset using sysfs attribute.
324	 *
325	 * Return values: 0 on success, -ve value on failure.
326	 *
327	 * Status: OPTIONAL
328	 */
330	int (*host_reset)(struct Scsi_Host *shost, int reset_type);
331#define SCSI_ADAPTER_RESET	1
332#define SCSI_FIRMWARE_RESET	2
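	/*
	 * Illustrative sketch only; foo_* names are hypothetical:
	 *
	 *	static int foo_host_reset(struct Scsi_Host *shost, int reset_type)
	 *	{
	 *		struct foo_hba *hba = shost_priv(shost);
	 *
	 *		switch (reset_type) {
	 *		case SCSI_ADAPTER_RESET:
	 *			return foo_hw_reset(hba);
	 *		case SCSI_FIRMWARE_RESET:
	 *			return foo_fw_reload(hba);
	 *		default:
	 *			return -EINVAL;
	 *		}
	 *	}
	 */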
333
334
335	/*
336	 * Name of proc directory
337	 */
338	const char *proc_name;
339
340	/*
341	 * Used to store the procfs directory if a driver implements the
342	 * show_info method.
343	 */
344	struct proc_dir_entry *proc_dir;
345
346	/*
347	 * This determines if we will use a non-interrupt driven
348	 * or an interrupt driven scheme.  It is set to the maximum number
349	 * of simultaneous commands a given host adapter will accept.
350	 */
351	int can_queue;
352
353	/*
354	 * In many instances, especially where disconnect / reconnect are
355	 * supported, our host also has an ID on the SCSI bus.  If this is
356	 * the case, then it must be reserved.  Please set this_id to -1 if
357	 * your setup is in single initiator mode, and the host lacks an
358	 * ID.
359	 */
360	int this_id;
361
362	/*
363	 * This determines the degree to which the host adapter is capable
364	 * of scatter-gather.
365	 */
366	unsigned short sg_tablesize;
367	unsigned short sg_prot_tablesize;
368
369	/*
370	 * Set this if the host adapter has limitations beside segment count.
371	 */
372	unsigned int max_sectors;
373
374	/*
375	 * DMA scatter gather segment boundary limit. A segment crossing this
376	 * boundary will be split in two.
377	 */
378	unsigned long dma_boundary;
379
380	/*
381	 * This specifies "machine infinity" for host templates which don't
382	 * limit the transfer size.  Note this limit represents an absolute
383	 * maximum, and may be over the transfer limits allowed for
384	 * individual devices (e.g. 256 for SCSI-1).
385	 */
386#define SCSI_DEFAULT_MAX_SECTORS	1024
387
388	/*
389	 * True if this host adapter can make good use of linked commands.
390	 * This will allow more than one command to be queued to a given
391	 * unit on a given host.  Set this to the maximum number of command
392	 * blocks to be provided for each device.  Set this to 1 for one
393	 * command block per lun, 2 for two, etc.  Do not set this to 0.
394	 * You should make sure that the host adapter will do the right thing
395	 * before you try setting this above 1.
396	 */
397	short cmd_per_lun;
398
399	/*
	 * present contains a counter indicating how many boards of this
	 * type were found when we did the scan.
402	 */
403	unsigned char present;
404
	/* Tag allocation policy when the block layer manages tags */
406	int tag_alloc_policy;
407
408	/*
	 * Let the block layer assign tags to all commands.
410	 */
411	unsigned use_blk_tags:1;
412
413	/*
414	 * Track QUEUE_FULL events and reduce queue depth on demand.
415	 */
416	unsigned track_queue_depth:1;
417
418	/*
419	 * This specifies the mode that a LLD supports.
420	 */
421	unsigned supported_mode:2;
422
423	/*
424	 * True if this host adapter uses unchecked DMA onto an ISA bus.
425	 */
426	unsigned unchecked_isa_dma:1;
427
428	/*
429	 * True if this host adapter can make good use of clustering.
430	 * I originally thought that if the tablesize was large that it
431	 * was a waste of CPU cycles to prepare a cluster list, but
432	 * it works out that the Buslogic is faster if you use a smaller
433	 * number of segments (i.e. use clustering).  I guess it is
434	 * inefficient.
435	 */
436	unsigned use_clustering:1;
437
438	/*
439	 * True for emulated SCSI host adapters (e.g. ATAPI).
440	 */
441	unsigned emulated:1;
442
443	/*
444	 * True if the low-level driver performs its own reset-settle delays.
445	 */
446	unsigned skip_settle_delay:1;
447
448	/* True if the controller does not support WRITE SAME */
449	unsigned no_write_same:1;
450
451	/*
452	 * True if asynchronous aborts are not supported
453	 */
454	unsigned no_async_abort:1;
455
456	/*
457	 * Countdown for host blocking with no commands outstanding.
458	 */
459	unsigned int max_host_blocked;
460
461	/*
	 * Default value for the blocking.  If the queue is empty,
	 * host_blocked counts down in the request_fn until it reaches zero,
	 * at which point host operations are restarted.
465	 *
466	 * FIXME: This should probably be a value in the template
467	 */
468#define SCSI_DEFAULT_HOST_BLOCKED	7
469
470	/*
471	 * Pointer to the sysfs class properties for this host, NULL terminated.
472	 */
473	struct device_attribute **shost_attrs;
474
475	/*
476	 * Pointer to the SCSI device properties for this host, NULL terminated.
477	 */
478	struct device_attribute **sdev_attrs;
479
480	/*
481	 * List of hosts per template.
482	 *
483	 * This is only for use by scsi_module.c for legacy templates.
484	 * For these access to it is synchronized implicitly by
485	 * module_init/module_exit.
486	 */
487	struct list_head legacy_hosts;
488
489	/*
490	 * Vendor Identifier associated with the host
491	 *
492	 * Note: When specifying vendor_id, be sure to read the
493	 *   Vendor Type and ID formatting requirements specified in
494	 *   scsi_netlink.h
495	 */
496	u64 vendor_id;
497
498	/*
499	 * Additional per-command data allocated for the driver.
500	 */
501	unsigned int cmd_size;
502	struct scsi_host_cmd_pool *cmd_pool;
503
504	/* temporary flag to disable blk-mq I/O path */
505	bool disable_blk_mq;
506};
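/*
 * Illustrative sketch only (not an in-tree driver): a typical template for
 * a hypothetical "foo" HBA would be declared along these lines and then
 * passed to scsi_host_alloc():
 *
 *	static struct scsi_host_template foo_template = {
 *		.module			= THIS_MODULE,
 *		.name			= "foo HBA",
 *		.proc_name		= "foo",
 *		.queuecommand		= foo_queuecommand,
 *		.eh_abort_handler	= foo_eh_abort_handler,
 *		.eh_host_reset_handler	= foo_eh_host_reset_handler,
 *		.slave_alloc		= foo_slave_alloc,
 *		.slave_configure	= foo_slave_configure,
 *		.slave_destroy		= foo_slave_destroy,
 *		.change_queue_depth	= scsi_change_queue_depth,
 *		.this_id		= -1,
 *		.can_queue		= 32,
 *		.cmd_per_lun		= 2,
 *		.sg_tablesize		= SG_ALL,
 *		.use_clustering		= ENABLE_CLUSTERING,
 *	};
 */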
507
508/*
509 * Temporary #define for host lock push down. Can be removed when all
510 * drivers have been updated to take advantage of unlocked
511 * queuecommand.
512 *
513 */
514#define DEF_SCSI_QCMD(func_name) \
515	int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)	\
516	{								\
517		unsigned long irq_flags;				\
518		int rc;							\
519		spin_lock_irqsave(shost->host_lock, irq_flags);		\
520		scsi_cmd_get_serial(shost, cmd);			\
521		rc = func_name##_lck (cmd, cmd->scsi_done);			\
522		spin_unlock_irqrestore(shost->host_lock, irq_flags);	\
523		return rc;						\
524	}
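/*
 * Illustrative usage (hypothetical foo_* names): a legacy driver keeps its
 * old locked entry point with an _lck suffix and lets the macro generate
 * the unlocked wrapper that goes into the host template:
 *
 *	static int foo_queuecommand_lck(struct scsi_cmnd *cmd,
 *					void (*done)(struct scsi_cmnd *))
 *	{
 *		struct foo_hba *hba = shost_priv(cmd->device->host);
 *
 *		return foo_hw_submit_locked(hba, cmd, done);
 *	}
 *
 *	static DEF_SCSI_QCMD(foo_queuecommand)
 *
 * The template then sets .queuecommand = foo_queuecommand.
 */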
525
526
527/*
528 * shost state: If you alter this, you also need to alter scsi_sysfs.c
529 * (for the ascii descriptions) and the state model enforcer:
530 * scsi_host_set_state()
531 */
532enum scsi_host_state {
533	SHOST_CREATED = 1,
534	SHOST_RUNNING,
535	SHOST_CANCEL,
536	SHOST_DEL,
537	SHOST_RECOVERY,
538	SHOST_CANCEL_RECOVERY,
539	SHOST_DEL_RECOVERY,
540};
541
542struct Scsi_Host {
543	/*
544	 * __devices is protected by the host_lock, but you should
545	 * usually use scsi_device_lookup / shost_for_each_device
546	 * to access it and don't care about locking yourself.
547	 * In the rare case of being in irq context you can use
548	 * their __ prefixed variants with the lock held. NEVER
549	 * access this list directly from a driver.
550	 */
551	struct list_head	__devices;
552	struct list_head	__targets;
553
554	struct scsi_host_cmd_pool *cmd_pool;
555	spinlock_t		free_list_lock;
556	struct list_head	free_list; /* backup store of cmd structs */
557	struct list_head	starved_list;
558
559	spinlock_t		default_lock;
560	spinlock_t		*host_lock;
561
562	struct mutex		scan_mutex;/* serialize scanning activity */
563
564	struct list_head	eh_cmd_q;
565	struct task_struct    * ehandler;  /* Error recovery thread. */
566	struct completion     * eh_action; /* Wait for specific actions on the
567					      host. */
568	wait_queue_head_t       host_wait;
569	struct scsi_host_template *hostt;
570	struct scsi_transport_template *transportt;
571
572	/*
573	 * Area to keep a shared tag map (if needed, will be
574	 * NULL if not).
575	 */
576	union {
577		struct blk_queue_tag	*bqt;
578		struct blk_mq_tag_set	tag_set;
579	};
580
581	atomic_t host_busy;		   /* commands actually active on low-level */
582	atomic_t host_blocked;
583
584	unsigned int host_failed;	   /* commands that failed.
585					      protected by host_lock */
586	unsigned int host_eh_scheduled;    /* EH scheduled without command */
587
588	unsigned int host_no;  /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
589
590	/* next two fields are used to bound the time spent in error handling */
591	int eh_deadline;
592	unsigned long last_reset;
593
594
595	/*
596	 * These three parameters can be used to allow for wide scsi,
	 * and for host adapters that support multiple busses.
598	 * The last two should be set to 1 more than the actual max id
599	 * or lun (e.g. 8 for SCSI parallel systems).
600	 */
601	unsigned int max_channel;
602	unsigned int max_id;
603	u64 max_lun;
604
605	/*
606	 * This is a unique identifier that must be assigned so that we
607	 * have some way of identifying each detected host adapter properly
608	 * and uniquely.  For hosts that do not support more than one card
609	 * in the system at one time, this does not need to be set.  It is
610	 * initialized to 0 in scsi_register.
611	 */
612	unsigned int unique_id;
613
614	/*
615	 * The maximum length of SCSI commands that this host can accept.
	 * Probably 12 for most host adapters, but could be 16 for others,
	 * or 260 if the driver supports variable length CDBs.
618	 * For drivers that don't set this field, a value of 12 is
619	 * assumed.
620	 */
621	unsigned short max_cmd_len;
622
623	int this_id;
624	int can_queue;
625	short cmd_per_lun;
626	short unsigned int sg_tablesize;
627	short unsigned int sg_prot_tablesize;
628	unsigned int max_sectors;
629	unsigned long dma_boundary;
630	/*
631	 * In scsi-mq mode, the number of hardware queues supported by the LLD.
632	 *
633	 * Note: it is assumed that each hardware queue has a queue depth of
634	 * can_queue. In other words, the total queue depth per host
635	 * is nr_hw_queues * can_queue.
636	 */
637	unsigned nr_hw_queues;
638	/*
639	 * Used to assign serial numbers to the cmds.
640	 * Protected by the host lock.
641	 */
642	unsigned long cmd_serial_number;
643
644	unsigned active_mode:2;
645	unsigned unchecked_isa_dma:1;
646	unsigned use_clustering:1;
647
648	/*
649	 * Host has requested that no further requests come through for the
650	 * time being.
651	 */
652	unsigned host_self_blocked:1;
653
654	/*
655	 * Host uses correct SCSI ordering not PC ordering. The bit is
656	 * set for the minority of drivers whose authors actually read
657	 * the spec ;).
658	 */
659	unsigned reverse_ordering:1;
660
661	/* Task mgmt function in progress */
662	unsigned tmf_in_progress:1;
663
664	/* Asynchronous scan in progress */
665	unsigned async_scan:1;
666
667	/* Don't resume host in EH */
668	unsigned eh_noresume:1;
669
670	/* The controller does not support WRITE SAME */
671	unsigned no_write_same:1;
672
673	unsigned use_blk_mq:1;
674	unsigned use_cmd_list:1;
675
676	/*
677	 * Optional work queue to be utilized by the transport
678	 */
679	char work_q_name[20];
680	struct workqueue_struct *work_q;
681
682	/*
683	 * Task management function work queue
684	 */
685	struct workqueue_struct *tmf_work_q;
686
687	/* The transport requires the LUN bits NOT to be stored in CDB[1] */
688	unsigned no_scsi2_lun_in_cdb:1;
689
690	/*
691	 * Value host_blocked counts down from
692	 */
693	unsigned int max_host_blocked;
694
695	/* Protection Information */
696	unsigned int prot_capabilities;
697	unsigned char prot_guard_type;
698
699	/*
	 * Queue used for scsi_tgt messages, async events or any other
	 * requests that need to be processed in userspace
702	 */
703	struct request_queue *uspace_req_q;
704
705	/* legacy crap */
706	unsigned long base;
707	unsigned long io_port;
708	unsigned char n_io_port;
709	unsigned char dma_channel;
710	unsigned int  irq;
711
712
713	enum scsi_host_state shost_state;
714
715	/* ldm bits */
716	struct device		shost_gendev, shost_dev;
717
718	/*
719	 * List of hosts per template.
720	 *
721	 * This is only for use by scsi_module.c for legacy templates.
722	 * For these access to it is synchronized implicitly by
723	 * module_init/module_exit.
724	 */
725	struct list_head sht_legacy_list;
726
727	/*
728	 * Points to the transport data (if any) which is allocated
729	 * separately
730	 */
731	void *shost_data;
732
733	/*
	 * Points to the physical bus device we'd use to do DMA.
735	 * Needed just in case we have virtual hosts.
736	 */
737	struct device *dma_dev;
738
739	/*
740	 * We should ensure that this is aligned, both for better performance
741	 * and also because some compilers (m68k) don't automatically force
742	 * alignment to a long boundary.
743	 */
744	unsigned long hostdata[0]  /* Used for storage of host specific stuff */
745		__attribute__ ((aligned (sizeof(unsigned long))));
746};
747
748#define		class_to_shost(d)	\
749	container_of(d, struct Scsi_Host, shost_dev)
750
751#define shost_printk(prefix, shost, fmt, a...)	\
752	dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)
753
754static inline void *shost_priv(struct Scsi_Host *shost)
755{
756	return (void *)shost->hostdata;
757}
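/*
 * The hostdata[] area is sized by the second argument to scsi_host_alloc()
 * and is intended to hold the driver's per-host structure; shost_priv()
 * returns a pointer to it.  Illustrative sketch (hypothetical foo_hba):
 *
 *	struct foo_hba {
 *		void __iomem *regs;
 *		int preferred_depth;
 *	};
 *
 *	shost = scsi_host_alloc(&foo_template, sizeof(struct foo_hba));
 *	hba = shost_priv(shost);
 */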
758
759int scsi_is_host_device(const struct device *);
760
761static inline struct Scsi_Host *dev_to_shost(struct device *dev)
762{
763	while (!scsi_is_host_device(dev)) {
764		if (!dev->parent)
765			return NULL;
766		dev = dev->parent;
767	}
768	return container_of(dev, struct Scsi_Host, shost_gendev);
769}
770
771static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
772{
773	return shost->shost_state == SHOST_RECOVERY ||
774		shost->shost_state == SHOST_CANCEL_RECOVERY ||
775		shost->shost_state == SHOST_DEL_RECOVERY ||
776		shost->tmf_in_progress;
777}
778
779extern bool scsi_use_blk_mq;
780
781static inline bool shost_use_blk_mq(struct Scsi_Host *shost)
782{
783	return shost->use_blk_mq;
784}
785
786extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
787extern void scsi_flush_work(struct Scsi_Host *);
788
789extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
790extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
791					       struct device *,
792					       struct device *);
793extern void scsi_scan_host(struct Scsi_Host *);
794extern void scsi_rescan_device(struct device *);
795extern void scsi_remove_host(struct Scsi_Host *);
796extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
797extern void scsi_host_put(struct Scsi_Host *t);
798extern struct Scsi_Host *scsi_host_lookup(unsigned short);
799extern const char *scsi_host_state_name(enum scsi_host_state);
800extern void scsi_cmd_get_serial(struct Scsi_Host *, struct scsi_cmnd *);
801
802static inline int __must_check scsi_add_host(struct Scsi_Host *host,
803					     struct device *dev)
804{
805	return scsi_add_host_with_dma(host, dev, dev);
806}
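/*
 * Typical host registration sequence from a probe routine, as an
 * illustrative sketch (hypothetical foo_* names; error handling trimmed):
 *
 *	shost = scsi_host_alloc(&foo_template, sizeof(struct foo_hba));
 *	if (!shost)
 *		return -ENOMEM;
 *	shost->max_id = 16;
 *	shost->max_lun = 8;
 *	err = scsi_add_host(shost, &pdev->dev);
 *	if (err)
 *		goto put_host;
 *	scsi_scan_host(shost);
 *
 * Teardown is the mirror image: scsi_remove_host(shost) followed by
 * scsi_host_put(shost).
 */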
807
808static inline struct device *scsi_get_device(struct Scsi_Host *shost)
809{
810        return shost->shost_gendev.parent;
811}
812
813/**
814 * scsi_host_scan_allowed - Is scanning of this host allowed
815 * @shost:	Pointer to Scsi_Host.
816 **/
817static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
818{
819	return shost->shost_state == SHOST_RUNNING ||
820	       shost->shost_state == SHOST_RECOVERY;
821}
822
823extern void scsi_unblock_requests(struct Scsi_Host *);
824extern void scsi_block_requests(struct Scsi_Host *);
825
826struct class_container;
827
828extern struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
829						void (*) (struct request_queue *));
830/*
831 * These two functions are used to allocate and free a pseudo device
832 * which will connect to the host adapter itself rather than any
833 * physical device.  You must deallocate when you are done with the
 * thing.  This pseudo-device isn't a real device and won't be available
 * to any high-level drivers.
836 */
837extern void scsi_free_host_dev(struct scsi_device *);
838extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);
839
840/*
841 * DIF defines the exchange of protection information between
842 * initiator and SBC block device.
843 *
844 * DIX defines the exchange of protection information between OS and
845 * initiator.
846 */
847enum scsi_host_prot_capabilities {
848	SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */
849	SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */
850	SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */
851
852	SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */
853	SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */
854	SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */
855	SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */
856};
857
858/*
859 * SCSI hosts which support the Data Integrity Extensions must
860 * indicate their capabilities by setting the prot_capabilities using
861 * this call.
862 */
863static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask)
864{
865	shost->prot_capabilities = mask;
866}
867
868static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
869{
870	return shost->prot_capabilities;
871}
872
873static inline int scsi_host_prot_dma(struct Scsi_Host *shost)
874{
875	return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION;
876}
877
878static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
879{
880	static unsigned char cap[] = { 0,
881				       SHOST_DIF_TYPE1_PROTECTION,
882				       SHOST_DIF_TYPE2_PROTECTION,
883				       SHOST_DIF_TYPE3_PROTECTION };
884
885	if (target_type >= ARRAY_SIZE(cap))
886		return 0;
887
888	return shost->prot_capabilities & cap[target_type] ? target_type : 0;
889}
890
891static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
892{
893#if defined(CONFIG_BLK_DEV_INTEGRITY)
894	static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION,
895				       SHOST_DIX_TYPE1_PROTECTION,
896				       SHOST_DIX_TYPE2_PROTECTION,
897				       SHOST_DIX_TYPE3_PROTECTION };
898
899	if (target_type >= ARRAY_SIZE(cap))
900		return 0;
901
902	return shost->prot_capabilities & cap[target_type];
903#endif
904	return 0;
905}
906
907/*
908 * All DIX-capable initiators must support the T10-mandated CRC
909 * checksum.  Controllers can optionally implement the IP checksum
910 * scheme which has much lower impact on system performance.  Note
911 * that the main rationale for the checksum is to match integrity
 * metadata with data.  Detecting bit errors is a job for ECC memory
913 * and buses.
914 */
915
916enum scsi_host_guard_type {
917	SHOST_DIX_GUARD_CRC = 1 << 0,
918	SHOST_DIX_GUARD_IP  = 1 << 1,
919};
920
921static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type)
922{
923	shost->prot_guard_type = type;
924}
925
926static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
927{
928	return shost->prot_guard_type;
929}
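/*
 * Illustrative sketch: a DIF/DIX capable driver would advertise its
 * capabilities at host setup time (typically before scsi_add_host()), e.g.
 *
 *	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
 *				  SHOST_DIX_TYPE0_PROTECTION |
 *				  SHOST_DIX_TYPE1_PROTECTION);
 *	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
 */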
930
931/* legacy interfaces */
932extern struct Scsi_Host *scsi_register(struct scsi_host_template *, int);
933extern void scsi_unregister(struct Scsi_Host *);
934extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);
935
936#endif /* _SCSI_SCSI_HOST_H */
937