/*
 * finite state machine for device handling
 *
 *    Copyright IBM Corp. 2002, 2008
 *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/string.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/chpid.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "chsc.h"
#include "ioasm.h"
#include "chp.h"

static int timeout_log_enabled;

static int __init ccw_timeout_log_setup(char *unused)
{
	timeout_log_enabled = 1;
	return 1;
}

__setup("ccw_timeout_log", ccw_timeout_log_setup);
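
/* Enabled by specifying "ccw_timeout_log" on the kernel command line. */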

static void ccw_timeout_log(struct ccw_device *cdev)
{
	struct schib schib;
	struct subchannel *sch;
	struct io_subchannel_private *private;
	union orb *orb;
	int cc;

	sch = to_subchannel(cdev->dev.parent);
	private = to_io_private(sch);
	orb = &private->orb;
	cc = stsch_err(sch->schid, &schib);

	printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
	       "device information:\n", get_tod_clock());
	printk(KERN_WARNING "cio: orb:\n");
	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       orb, sizeof(*orb), 0);
	printk(KERN_WARNING "cio: ccw device bus id: %s\n",
	       dev_name(&cdev->dev));
	printk(KERN_WARNING "cio: subchannel bus id: %s\n",
	       dev_name(&sch->dev));
	printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
	       "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);

	if (orb->tm.b) {
		printk(KERN_WARNING "cio: orb indicates transport mode\n");
		printk(KERN_WARNING "cio: last tcw:\n");
		print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
			       (void *)(addr_t)orb->tm.tcw,
			       sizeof(struct tcw), 0);
	} else {
		printk(KERN_WARNING "cio: orb indicates command mode\n");
		if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw ||
		    (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws)
			printk(KERN_WARNING "cio: last channel program "
			       "(intern):\n");
		else
			printk(KERN_WARNING "cio: last channel program:\n");

		print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
			       (void *)(addr_t)orb->cmd.cpa,
			       sizeof(struct ccw1), 0);
	}
	printk(KERN_WARNING "cio: ccw device state: %d\n",
	       cdev->private->state);
	printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
	printk(KERN_WARNING "cio: schib:\n");
	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       &schib, sizeof(schib), 0);
	printk(KERN_WARNING "cio: ccw device flags:\n");
	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       &cdev->private->flags, sizeof(cdev->private->flags), 0);
}

/*
 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
 */
static void
ccw_device_timeout(unsigned long data)
{
	struct ccw_device *cdev;

	cdev = (struct ccw_device *) data;
	spin_lock_irq(cdev->ccwlock);
	if (timeout_log_enabled)
		ccw_timeout_log(cdev);
	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
	spin_unlock_irq(cdev->ccwlock);
}

/*
 * Set (or cancel) the device timeout. @expires is in jiffies; a value
 * of 0 cancels a pending timer.
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
	if (expires == 0) {
		del_timer(&cdev->private->timer);
		return;
	}
	if (timer_pending(&cdev->private->timer)) {
		if (mod_timer(&cdev->private->timer, jiffies + expires))
			return;
	}
	cdev->private->timer.function = ccw_device_timeout;
	cdev->private->timer.data = (unsigned long) cdev;
	cdev->private->timer.expires = jiffies + expires;
	add_timer(&cdev->private->timer);
}
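
/*
 * Typical usage elsewhere in this file, e.g. in ccw_device_kill_io():
 *
 *	ccw_device_set_timeout(cdev, 3 * HZ);	- (re-)arm the timer
 *	ccw_device_set_timeout(cdev, 0);	- cancel it again
 */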

/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, three tries
 * with cio_halt and 255 tries with cio_clear. If everything fails we
 * give up.
 * Returns 0 if the device is now idle, -ENODEV if the device is not
 * operational and -EBUSY if an interrupt is expected (either from
 * halt/clear or from a status pending).
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	if (cio_update_schib(sch))
		return -ENODEV;
	if (!sch->schib.pmcw.ena)
		/* Not operational -> done. */
		return 0;
	/* Stage 1: cancel io. */
	if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) &&
	    !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
		if (!scsw_is_tm(&sch->schib.scsw)) {
			ret = cio_cancel(sch);
			if (ret != -EINVAL)
				return ret;
		}
		/* cancel io unsuccessful or not applicable (transport mode).
		 * Continue with asynchronous instructions. */
		cdev->private->iretry = 3;	/* 3 halt retries. */
	}
	if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
		/* Stage 2: halt io. */
		if (cdev->private->iretry) {
			cdev->private->iretry--;
			ret = cio_halt(sch);
			if (ret != -EBUSY)
				return (ret == 0) ? -EBUSY : ret;
		}
		/* halt io unsuccessful. */
		cdev->private->iretry = 255;	/* 255 clear retries. */
	}
	/* Stage 3: clear io. */
	if (cdev->private->iretry) {
		cdev->private->iretry--;
		ret = cio_clear(sch);
		return (ret == 0) ? -EBUSY : ret;
	}
	/* Function was unsuccessful */
	CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno);
	return -EIO;
}
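
/*
 * Callers drive the stages above by calling this function repeatedly,
 * typically re-armed from a timeout handler; the pattern used by
 * ccw_device_kill_io() below is:
 *
 *	cdev->private->iretry = 255;
 *	ret = ccw_device_cancel_halt_clear(cdev);
 *	if (ret == -EBUSY)
 *		re-arm a 3*HZ timer and retry on DEV_EVENT_TIMEOUT
 */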
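
/* Copy the data from the most recent SENSE ID to the device's id. */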
void ccw_device_update_sense_data(struct ccw_device *cdev)
{
	memset(&cdev->id, 0, sizeof(cdev->id));
	cdev->id.cu_type   = cdev->private->senseid.cu_type;
	cdev->id.cu_model  = cdev->private->senseid.cu_model;
	cdev->id.dev_type  = cdev->private->senseid.dev_type;
	cdev->id.dev_model = cdev->private->senseid.dev_model;
}
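
/* Check whether the device id still matches the last SENSE ID data. */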
int ccw_device_test_sense_data(struct ccw_device *cdev)
{
	return cdev->id.cu_type == cdev->private->senseid.cu_type &&
		cdev->id.cu_model == cdev->private->senseid.cu_model &&
		cdev->id.dev_type == cdev->private->senseid.dev_type &&
		cdev->id.dev_model == cdev->private->senseid.dev_model;
}

/*
 * The machine won't give us any notification by machine check if a chpid
 * has been varied online on the SE, so we have to find out by magic (i.e.,
 * by driving the channel subsystem to device selection and updating our
 * path masks).
 */
static void
__recover_lost_chpids(struct subchannel *sch, int old_lpm)
{
	int mask, i;
	struct chp_id chpid;

	chp_id_init(&chpid);
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (!(sch->lpm & mask))
			continue;
		if (old_lpm & mask)
			continue;
		chpid.id = sch->schib.pmcw.chpid[i];
		if (!chp_is_registered(chpid))
			css_schedule_eval_all();
	}
}

/*
 * Stop device recognition.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int old_lpm;

	sch = to_subchannel(cdev->dev.parent);

	if (cio_disable_subchannel(sch))
		state = DEV_STATE_NOT_OPER;
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;

	/* Check since device may again have become not operational. */
	if (cio_update_schib(sch))
		state = DEV_STATE_NOT_OPER;
	else
		sch->lpm = sch->schib.pmcw.pam & sch->opm;

	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID &&
	    (state == DEV_STATE_NOT_OPER || state == DEV_STATE_BOXED)) {
		cdev->private->flags.recog_done = 1;
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
		return;
	}
	if (cdev->private->flags.resuming) {
		cdev->private->state = state;
		cdev->private->flags.recog_done = 1;
		wake_up(&cdev->private->wait_q);
		return;
	}
	switch (state) {
	case DEV_STATE_NOT_OPER:
		break;
	case DEV_STATE_OFFLINE:
		if (!cdev->online) {
			ccw_device_update_sense_data(cdev);
			break;
		}
		cdev->private->state = DEV_STATE_OFFLINE;
		cdev->private->flags.recog_done = 1;
		if (ccw_device_test_sense_data(cdev)) {
			cdev->private->flags.donotify = 1;
			ccw_device_online(cdev);
			wake_up(&cdev->private->wait_q);
		} else {
			ccw_device_update_sense_data(cdev);
			ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
		}
		return;
	case DEV_STATE_BOXED:
		if (cdev->id.cu_type != 0) { /* device was recognized before */
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_BOXED;
			wake_up(&cdev->private->wait_q);
			return;
		}
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	wake_up(&cdev->private->wait_q);
}

/*
 * Function called from device_id.c after sense id has completed.
 */
void
ccw_device_sense_id_done(struct ccw_device *cdev, int err)
{
	switch (err) {
	case 0:
		ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
		break;
	case -ETIME:		/* Sense id stopped by timeout. */
		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}

/**
 * ccw_device_notify() - inform the device's driver about an event
 * @cdev: device for which an event occurred
 * @event: event that occurred
 *
 * Returns:
 *   -%EINVAL if the device is offline or has no driver.
 *   -%EOPNOTSUPP if the device's driver has no notifier registered.
 *   %NOTIFY_OK if the driver wants to keep the device.
 *   %NOTIFY_BAD if the driver doesn't want to keep the device.
 */
int ccw_device_notify(struct ccw_device *cdev, int event)
{
	int ret = -EINVAL;

	if (!cdev->drv)
		goto out;
	if (!cdev->online)
		goto out;
	CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
		      event);
	if (!cdev->drv->notify) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	if (cdev->drv->notify(cdev, event))
		ret = NOTIFY_OK;
	else
		ret = NOTIFY_BAD;
out:
	return ret;
}

static void ccw_device_oper_notify(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
		/* Reenable channel measurements, if needed. */
		ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
		/* Save indication for new paths. */
		cdev->private->path_new_mask = sch->vpm;
		return;
	}
	/* Driver doesn't want device back. */
	ccw_device_set_notoper(cdev);
	ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
}

/*
 * Finished with online/offline processing.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);

	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	cdev->private->state = state;

	switch (state) {
	case DEV_STATE_BOXED:
		CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);
		if (cdev->online &&
		    ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_NOT_OPER:
		CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		else
			ccw_device_set_disconnected(cdev);
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
			      "%04x\n", cdev->private->dev_id.devno,
			      sch->schid.sch_no);
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) {
			cdev->private->state = DEV_STATE_NOT_OPER;
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		} else
			ccw_device_set_disconnected(cdev);
		cdev->private->flags.donotify = 0;
		break;
	default:
		break;
	}

	if (cdev->private->flags.donotify) {
		cdev->private->flags.donotify = 0;
		ccw_device_oper_notify(cdev);
	}
	wake_up(&cdev->private->wait_q);
}

/*
 * Start device recognition.
 */
void ccw_device_recognition(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	/*
	 * We used to start here with a sense pgid to find out whether a device
	 * is locked by someone else. Unfortunately, the sense pgid command
	 * code has other meanings on devices predating the path grouping
	 * algorithm, so we start with sense id and box the device after a
	 * timeout (or if sense pgid during path verification detects that the
	 * device is locked, as may happen on newer devices).
	 */
	cdev->private->flags.recog_done = 0;
	cdev->private->state = DEV_STATE_SENSE_ID;
	if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) {
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		return;
	}
	ccw_device_sense_id_start(cdev);
}

/*
 * Handle events for states that use the ccw request infrastructure.
 */
static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
{
	switch (e) {
	case DEV_EVENT_NOTOPER:
		ccw_request_notoper(cdev);
		break;
	case DEV_EVENT_INTERRUPT:
		ccw_request_handler(cdev);
		break;
	case DEV_EVENT_TIMEOUT:
		ccw_request_timeout(cdev);
		break;
	default:
		break;
	}
}
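
/*
 * Report path events (paths gone, paths newly available, path groups
 * established) to the device driver via its path_event callback.
 */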
static void ccw_device_report_path_events(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int path_event[8];
	int chp, mask;

	for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
		path_event[chp] = PE_NONE;
		if (mask & cdev->private->path_gone_mask & ~(sch->vpm))
			path_event[chp] |= PE_PATH_GONE;
		if (mask & cdev->private->path_new_mask & sch->vpm)
			path_event[chp] |= PE_PATH_AVAILABLE;
		if (mask & cdev->private->pgid_reset_mask & sch->vpm)
			path_event[chp] |= PE_PATHGROUP_ESTABLISHED;
	}
	if (cdev->online && cdev->drv->path_event)
		cdev->drv->path_event(cdev, path_event);
}

static void ccw_device_reset_path_events(struct ccw_device *cdev)
{
	cdev->private->path_gone_mask = 0;
	cdev->private->path_new_mask = 0;
	cdev->private->pgid_reset_mask = 0;
}
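
/*
 * Build a fake irb that looks like a pending start function (deferred
 * condition code 1 style). This is used to synthesize an interrupt for
 * i/o that was accepted while path verification was still in progress;
 * see ccw_device_verify_done() below.
 */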
static void create_fake_irb(struct irb *irb, int type)
{
	memset(irb, 0, sizeof(*irb));
	if (type == FAKE_CMD_IRB) {
		struct cmd_scsw *scsw = &irb->scsw.cmd;

		scsw->cc = 1;
		scsw->fctl = SCSW_FCTL_START_FUNC;
		scsw->actl = SCSW_ACTL_START_PEND;
		scsw->stctl = SCSW_STCTL_STATUS_PEND;
	} else if (type == FAKE_TM_IRB) {
		struct tm_scsw *scsw = &irb->scsw.tm;

		scsw->x = 1;
		scsw->cc = 1;
		scsw->fctl = SCSW_FCTL_START_FUNC;
		scsw->actl = SCSW_ACTL_START_PEND;
		scsw->stctl = SCSW_STCTL_STATUS_PEND;
	}
}

void ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/* Update schib - pom may have changed. */
	if (cio_update_schib(sch)) {
		err = -ENODEV;
		goto callback;
	}
	/* Update lpm with verified path mask. */
	sch->lpm = sch->vpm;
	/* Repeat path verification? */
	if (cdev->private->flags.doverify) {
		ccw_device_verify_start(cdev);
		return;
	}
callback:
	switch (err) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			create_fake_irb(&cdev->private->irb,
					cdev->private->flags.fake_irb);
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->irb);
			memset(&cdev->private->irb, 0, sizeof(struct irb));
		}
		ccw_device_report_path_events(cdev);
		break;
	case -ETIME:
	case -EUSERS:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -EACCES:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
		break;
	default:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
	ccw_device_reset_path_events(cdev);
}

/*
 * Get device online.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Start initial path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
	return 0;
}

void
ccw_device_disband_done(struct ccw_device *cdev, int err)
{
	switch (err) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		break;
	case -ETIME:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}

/*
 * Shutdown device.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Allow ccw_device_offline while disconnected. */
	if (cdev->private->state == DEV_STATE_DISCONNECTED ||
	    cdev->private->state == DEV_STATE_NOT_OPER) {
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		return 0;
	}
	if (cdev->private->state == DEV_STATE_BOXED) {
		ccw_device_done(cdev, DEV_STATE_BOXED);
		return 0;
	}
	if (ccw_device_is_orphan(cdev)) {
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	sch = to_subchannel(cdev->dev.parent);
	if (cio_update_schib(sch))
		return -ENODEV;
	if (scsw_actl(&sch->schib.scsw) != 0)
		return -EBUSY;
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;
	/* Are we doing path grouping? */
	if (!cdev->private->flags.pgroup) {
		/* No, set state offline immediately. */
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	/* Start Set Path Group commands. */
	cdev->private->state = DEV_STATE_DISBAND_PGID;
	ccw_device_disband_start(cdev);
	return 0;
}

/*
 * Handle not operational event in non-special state.
 */
static void ccw_device_generic_notoper(struct ccw_device *cdev,
				       enum dev_event dev_event)
{
	if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	else
		ccw_device_set_disconnected(cdev);
}

/*
 * Handle path verification event in offline state.
 */
static void ccw_device_offline_verify(struct ccw_device *cdev,
				      enum dev_event dev_event)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	css_schedule_eval(sch->schid);
}

/*
 * Handle path verification event.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	if (cdev->private->state == DEV_STATE_W4SENSE) {
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Since we might not be coming from an interrupt for this
	 * subchannel, we have to update the schib explicitly.
	 */
	if (cio_update_schib(sch)) {
		ccw_device_verify_done(cdev, -ENODEV);
		return;
	}

	if (scsw_actl(&sch->schib.scsw) != 0 ||
	    (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
	    (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verification now,
		 * delay until final status was delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
}

/*
 * Handle path verification event in boxed state.
 */
static void ccw_device_boxed_verify(struct ccw_device *cdev,
				    enum dev_event dev_event)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	if (cdev->online) {
		if (cio_enable_subchannel(sch, (u32) (addr_t) sch))
			ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		else
			ccw_device_online_verify(cdev, dev_event);
	} else
		css_schedule_eval(sch->schid);
}

/*
 * Got an interrupt for a normal io (state online).
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;
	int is_cmd;

	irb = this_cpu_ptr(&cio_irb);
	is_cmd = !scsw_is_tm(&irb->scsw);
	/* Check for unsolicited interrupt. */
	if (!scsw_is_solicited(&irb->scsw)) {
		if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				goto call_handler_unsol;
			memcpy(&cdev->private->irb, irb, sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		if (cdev->handler)
			cdev->handler(cdev, 0, irb);
		if (cdev->private->flags.doverify)
			ccw_device_online_verify(cdev, 0);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (is_cmd && cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0)
			cdev->private->state = DEV_STATE_W4SENSE;
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}

/*
 * Got a timeout in online state.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	cdev->private->iretry = 255;
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret)
		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	else if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}

/*
 * Got an interrupt for a basic sense.
 */
static void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = this_cpu_ptr(&cio_irb);
	/* Check for unsolicited interrupt. */
	if (scsw_stctl(&irb->scsw) ==
	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (scsw_cc(&irb->scsw) == 1)
			/* Basic sense hasn't started. Try again. */
			ccw_device_do_sense(cdev, irb);
		else {
			CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
				      "interrupt during w4sense...\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
			if (cdev->handler)
				cdev->handler(cdev, 0, irb);
		}
		return;
	}
	/*
	 * Check if a halt or clear has been issued in the meantime. If yes,
	 * only deliver the halt/clear interrupt to the device driver as if it
	 * had killed the original request.
	 */
	if (scsw_fctl(&irb->scsw) &
	    (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
		cdev->private->flags.dosense = 0;
		memset(&cdev->private->irb, 0, sizeof(struct irb));
		ccw_device_accumulate_irb(cdev, irb);
		goto call_handler;
	}
	/* Add basic sense info to irb. */
	ccw_device_accumulate_basic_sense(cdev, irb);
	if (cdev->private->flags.dosense) {
		/* Another basic sense is needed. */
		ccw_device_do_sense(cdev, irb);
		return;
	}
call_handler:
	cdev->private->state = DEV_STATE_ONLINE;
	/* In case sensing interfered with setting the device online */
	wake_up(&cdev->private->wait_q);
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}

static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_set_timeout(cdev, 0);
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	/* OK, i/o is dead now. Call interrupt handler. */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}

static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		return;
	}
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}
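
/*
 * Terminate running i/o on behalf of the device driver, using the
 * cancel/halt/clear sequence above. Once the i/o is dead, the driver's
 * interrupt handler is called with ERR_PTR(-EIO).
 */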
void ccw_device_kill_io(struct ccw_device *cdev)
{
	int ret;

	cdev->private->iretry = 255;
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}

static void
ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Start verification after current task finished. */
	cdev->private->flags.doverify = 1;
}

static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return;
	cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
	ccw_device_sense_id_start(cdev);
}

void ccw_device_trigger_reprobe(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;

	sch = to_subchannel(cdev->dev.parent);
	/* Update some values. */
	if (cio_update_schib(sch))
		return;
	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection.
	 */
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/*
	 * Use the initial configuration since we can't be sure that the old
	 * paths are valid.
	 */
	io_subchannel_init_config(sch);
	if (cio_commit_config(sch))
		return;

	/* We should also update ssd info, but this has to wait. */
	/* Check if this is another device which appeared on the same sch. */
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
		css_schedule_eval(sch->schid);
	else
		ccw_device_start_id(cdev, 0);
}

static void ccw_device_disabled_irq(struct ccw_device *cdev,
				    enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * An interrupt in a disabled state means a previous disable was not
	 * successful - should not happen, but we try to disable again.
	 */
	cio_disable_subchannel(sch);
}

static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
	retry_set_schib(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}

static void ccw_device_update_cmfblock(struct ccw_device *cdev,
				       enum dev_event dev_event)
{
	cmf_retry_copy_block(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}

static void
ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_set_timeout(cdev, 0);
	cdev->private->state = DEV_STATE_NOT_OPER;
	wake_up(&cdev->private->wait_q);
}

static void
ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, HZ/10);
	} else {
		cdev->private->state = DEV_STATE_NOT_OPER;
		wake_up(&cdev->private->wait_q);
	}
}

/*
 * No operation action. This is used e.g. to ignore a timeout event in
 * state offline.
 */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
}

/*
 * The device state machine.
 */
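/*
 * Events are dispatched through this table by dev_fsm_event() (a small
 * inline in device.h); in essence the lookup is:
 *
 *	dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
 */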
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
	[DEV_STATE_NOT_OPER] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disabled_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_OFFLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disabled_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_offline_verify,
	},
	[DEV_STATE_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_delay_verify,
	},
	[DEV_STATE_ONLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_W4SENSE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_DISBAND_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_BOXED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_nop,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_boxed_verify,
	},
	/* states to wait for i/o completion before doing something */
	[DEV_STATE_TIMEOUT_KILL] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop, /* FIXME */
	},
	[DEV_STATE_QUIESCE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* special states for devices gone not operational */
	[DEV_STATE_DISCONNECTED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_start_id,
	},
	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_CMFCHANGE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
	},
	[DEV_STATE_CMFUPDATE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_update_cmfblock,
		[DEV_EVENT_INTERRUPT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_TIMEOUT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_VERIFY]	= ccw_device_update_cmfblock,
	},
	[DEV_STATE_STEAL_LOCK] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
};

EXPORT_SYMBOL_GPL(ccw_device_set_timeout);