/*
 * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
 * Copyright IBM Corp. 2004 2005
 * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
 */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/rtas.h>

/**
 * eeh_pcid_name - Retrieve name of PCI device driver
 * @pdev: PCI device
 *
 * This routine is used to retrieve the name of the PCI device
 * driver, if one is bound.
 */
static inline const char *eeh_pcid_name(struct pci_dev *pdev)
{
	if (pdev && pdev->dev.driver)
		return pdev->dev.driver->name;
	return "";
}

/**
 * eeh_pcid_get - Get the PCI device driver
 * @pdev: PCI device
 *
 * The function is used to retrieve the PCI device driver for
 * the indicated PCI device. In addition, it takes a reference
 * on the driver's module to prevent the driver from being
 * unloaded while in use; otherwise the kernel could crash.
 */
static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
{
	if (!pdev || !pdev->driver)
		return NULL;

	if (!try_module_get(pdev->driver->driver.owner))
		return NULL;

	return pdev->driver;
}

/**
 * eeh_pcid_put - Drop the reference on the PCI device driver
 * @pdev: PCI device
 *
 * The function is called to drop the reference on the PCI device
 * driver taken by eeh_pcid_get().
 */
static inline void eeh_pcid_put(struct pci_dev *pdev)
{
	if (!pdev || !pdev->driver)
		return;

	module_put(pdev->driver->driver.owner);
}
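
/*
 * The report functions below all follow the same get/use/put pattern
 * around the driver's error handlers. A minimal sketch of that
 * pattern (error paths elided; see eeh_report_error() for the full
 * version):
 *
 *	struct pci_driver *driver = eeh_pcid_get(dev);
 *	if (!driver)
 *		return NULL;
 *	if (driver->err_handler && driver->err_handler->error_detected)
 *		driver->err_handler->error_detected(dev, pci_channel_io_frozen);
 *	eeh_pcid_put(dev);
 */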

/**
 * eeh_disable_irq - Disable interrupt for the recovering device
 * @dev: PCI device
 *
 * This routine must be called when reporting a temporary or permanent
 * error to a PCI device, to disable that device's interrupt. If the
 * device uses MSI or MSI-X, no real work is needed: EEH freezes DMA
 * for devices that hit an EEH error, which effectively disables MSI
 * and MSI-X as well.
 */
static void eeh_disable_irq(struct pci_dev *dev)
{
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);

	/* Don't disable MSI and MSI-X interrupts. They are
	 * effectively disabled by the DMA Stopped state
	 * when an EEH error occurs.
	 */
	if (dev->msi_enabled || dev->msix_enabled)
		return;

	if (!irq_has_action(dev->irq))
		return;

	edev->mode |= EEH_DEV_IRQ_DISABLED;
	disable_irq_nosync(dev->irq);
}

/**
 * eeh_enable_irq - Enable interrupt for the recovering device
 * @dev: PCI device
 *
 * This routine must be called to re-enable the interrupt once the
 * failed device can be resumed.
 */
static void eeh_enable_irq(struct pci_dev *dev)
{
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);

	if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
		edev->mode &= ~EEH_DEV_IRQ_DISABLED;
		/*
		 * FIXME !!!!!
		 *
		 * This is just backwards. This maze has unbalanced
		 * irq_enable/disable calls. So instead of finding
		 * the root cause it works around the warning in the
		 * irq_enable code by conditionally calling into it.
		 *
		 * That's just wrong. The warning in the core code is
		 * there to tell people to fix the asymmetries in
		 * their own code, not to abuse the core information
		 * to avoid it.
		 *
		 * I wish the asymmetry were the other way round, so
		 * that a few more irq_disable calls would render this
		 * pattern unusable forever.
		 *
		 *	tglx
		 */
		if (irqd_irq_disabled(irq_get_irq_data(dev->irq)))
			enable_irq(dev->irq);
	}
}
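
/*
 * The two IRQ helpers above are used as a pair across the recovery
 * window, tracked by the EEH_DEV_IRQ_DISABLED flag. Roughly (a sketch
 * of the call sites below, not a new API):
 *
 *	eeh_disable_irq(dev);	// from eeh_report_error()/_failure()
 *	... PE reset and driver callbacks ...
 *	eeh_enable_irq(dev);	// from eeh_report_reset()/_resume()
 */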

static bool eeh_dev_removed(struct eeh_dev *edev)
{
	/* EEH device removed? */
	if (!edev || (edev->mode & EEH_DEV_REMOVED))
		return true;

	return false;
}

static void *eeh_dev_save_state(void *data, void *userdata)
{
	struct eeh_dev *edev = data;
	struct pci_dev *pdev;

	if (!edev)
		return NULL;

	/*
	 * We cannot access the config space of some adapters:
	 * doing so would fence the PHB. For those we don't save
	 * the current config space contents here; instead we
	 * restore from the initial config space saved when the
	 * EEH device was created.
	 */
	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED))
		return NULL;

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev)
		return NULL;

	pci_save_state(pdev);
	return NULL;
}

/**
 * eeh_report_error - Report pci error to each device driver
 * @data: eeh device
 * @userdata: return value
 *
 * Report an EEH error to each device driver, collect up and
 * merge the device driver responses. Cumulative response
 * passed back in "userdata".
 */
static void *eeh_report_error(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev))
		return NULL;
	dev->error_state = pci_channel_io_frozen;

	driver = eeh_pcid_get(dev);
	if (!driver)
		return NULL;

	eeh_disable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->error_detected) {
		eeh_pcid_put(dev);
		return NULL;
	}

	rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);

	/* A driver that needs a reset trumps all others */
	if (rc == PCI_ERS_RESULT_NEED_RESET)
		*res = rc;
	if (*res == PCI_ERS_RESULT_NONE)
		*res = rc;

	eeh_pcid_put(dev);
	return NULL;
}
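
/*
 * Note on the result merging above (also used by
 * eeh_report_mmio_enabled() below): a PCI_ERS_RESULT_NEED_RESET
 * answer from any driver overrides the cumulative result, and the
 * first answer replaces an initial PCI_ERS_RESULT_NONE. So if one
 * driver returns CAN_RECOVER and another NEED_RESET, the merged
 * result is NEED_RESET.
 */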

/**
 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
 * @data: eeh device
 * @userdata: return value
 *
 * Tells each device driver that IO ports, MMIO and config space I/O
 * are now enabled. Collects up and merges the device driver responses.
 * Cumulative response passed back in "userdata".
 */
static void *eeh_report_mmio_enabled(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev))
		return NULL;

	driver = eeh_pcid_get(dev);
	if (!driver)
		return NULL;

	if (!driver->err_handler ||
	    !driver->err_handler->mmio_enabled ||
	    (edev->mode & EEH_DEV_NO_HANDLER)) {
		eeh_pcid_put(dev);
		return NULL;
	}

	rc = driver->err_handler->mmio_enabled(dev);

	/* A driver that needs a reset trumps all others */
	if (rc == PCI_ERS_RESULT_NEED_RESET)
		*res = rc;
	if (*res == PCI_ERS_RESULT_NONE)
		*res = rc;

	eeh_pcid_put(dev);
	return NULL;
}

/**
 * eeh_report_reset - Tell device that slot has been reset
 * @data: eeh device
 * @userdata: return value
 *
 * This routine must be called while EEH resets a particular PCI
 * device, so that the associated device driver can take whatever
 * actions are needed to bring the device back into service after
 * the reset.
 */
static void *eeh_report_reset(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev))
		return NULL;
	dev->error_state = pci_channel_io_normal;

	driver = eeh_pcid_get(dev);
	if (!driver)
		return NULL;

	eeh_enable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->slot_reset ||
	    (edev->mode & EEH_DEV_NO_HANDLER)) {
		eeh_pcid_put(dev);
		return NULL;
	}

	rc = driver->err_handler->slot_reset(dev);
	if ((*res == PCI_ERS_RESULT_NONE) ||
	    (*res == PCI_ERS_RESULT_RECOVERED))
		*res = rc;
	if (*res == PCI_ERS_RESULT_DISCONNECT &&
	    rc == PCI_ERS_RESULT_NEED_RESET)
		*res = rc;

	eeh_pcid_put(dev);
	return NULL;
}

static void *eeh_dev_restore_state(void *data, void *userdata)
{
	struct eeh_dev *edev = data;
	struct pci_dev *pdev;

	if (!edev)
		return NULL;

	/*
	 * The config space contents weren't saved for devices
	 * whose config space is blocked (see eeh_dev_save_state()).
	 * For those we restore the initial config space saved
	 * when the EEH device was created.
	 */
	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) {
		if (list_is_last(&edev->list, &edev->pe->edevs))
			eeh_pe_restore_bars(edev->pe);

		return NULL;
	}

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev)
		return NULL;

	pci_restore_state(pdev);
	return NULL;
}

/**
 * eeh_report_resume - Tell device to resume normal operations
 * @data: eeh device
 * @userdata: return value
 *
 * This routine must be called to notify the device driver that it
 * can resume, so that the driver can do whatever re-initialization
 * is needed to make the recovered device work again.
 */
static void *eeh_report_resume(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev))
		return NULL;
	dev->error_state = pci_channel_io_normal;

	driver = eeh_pcid_get(dev);
	if (!driver)
		return NULL;

	eeh_enable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->resume ||
	    (edev->mode & EEH_DEV_NO_HANDLER)) {
		edev->mode &= ~EEH_DEV_NO_HANDLER;
		eeh_pcid_put(dev);
		return NULL;
	}

	driver->err_handler->resume(dev);

	eeh_pcid_put(dev);
	return NULL;
}

/**
 * eeh_report_failure - Tell device driver that device is dead.
 * @data: eeh device
 * @userdata: return value
 *
 * This informs the device driver that the device is permanently
 * dead, and that no further recovery attempts will be made on it.
 */
static void *eeh_report_failure(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev))
		return NULL;
	dev->error_state = pci_channel_io_perm_failure;

	driver = eeh_pcid_get(dev);
	if (!driver)
		return NULL;

	eeh_disable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->error_detected) {
		eeh_pcid_put(dev);
		return NULL;
	}

	driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);

	eeh_pcid_put(dev);
	return NULL;
}

static void *eeh_rmv_device(void *data, void *userdata)
{
	struct pci_driver *driver;
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	int *removed = (int *)userdata;

	/*
	 * Strictly speaking, we should remove the PCI bridges
	 * as well. However, that adds a lot of complexity, in
	 * particular because some of the devices under a bridge
	 * might support EEH. So for simplicity we only deal with
	 * non-bridge PCI devices here.
	 */
	if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
		return NULL;

	/*
	 * We rely on the refcount-based pcibios_release_device()
	 * to detach permanently offlined PEs. Unfortunately, that's
	 * not reliable enough: permanently offlined PEs may remain
	 * attached, but neither they nor their child devices need
	 * any handling here.
	 */
	if (eeh_dev_removed(edev))
		return NULL;

	driver = eeh_pcid_get(dev);
	if (driver) {
		eeh_pcid_put(dev);
		if (driver->err_handler)
			return NULL;
	}

	/* Remove it from the PCI subsystem */
	pr_debug("EEH: Removing %s without EEH sensitive driver\n",
		 pci_name(dev));
	edev->bus = dev->bus;
	edev->mode |= EEH_DEV_DISCONNECTED;
	(*removed)++;

	pci_lock_rescan_remove();
	pci_stop_and_remove_bus_device(dev);
	pci_unlock_rescan_remove();

	return NULL;
}

static void *eeh_pe_detach_dev(void *data, void *userdata)
{
	struct eeh_pe *pe = (struct eeh_pe *)data;
	struct eeh_dev *edev, *tmp;

	eeh_pe_for_each_dev(pe, edev, tmp) {
		if (!(edev->mode & EEH_DEV_DISCONNECTED))
			continue;

		edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
		eeh_rmv_from_parent_pe(edev);
	}

	return NULL;
}

/*
 * Explicitly clear the PE's frozen state. This is needed on PowerNV,
 * where a PE stays frozen until BAR restore completes; it is harmless
 * on pSeries. To be consistent with PE reset (which is retried 3
 * times), we try to clear the frozen state up to 3 times as well.
 */
static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
{
	struct eeh_pe *pe = (struct eeh_pe *)data;
	bool *clear_sw_state = flag;
	int i, rc = 1;

	for (i = 0; rc && i < 3; i++)
		rc = eeh_unfreeze_pe(pe, clear_sw_state);

	/* Stop immediately on any errors */
	if (rc) {
		pr_warn("%s: Failure %d unfreezing PHB#%x-PE#%x\n",
			__func__, rc, pe->phb->global_number, pe->addr);
		return (void *)pe;
	}

	return NULL;
}

static int eeh_clear_pe_frozen_state(struct eeh_pe *pe,
				     bool clear_sw_state)
{
	void *rc;

	rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, &clear_sw_state);
	if (!rc)
		eeh_pe_state_clear(pe, EEH_PE_ISOLATED);

	return rc ? -EIO : 0;
}

int eeh_pe_reset_and_recover(struct eeh_pe *pe)
{
	/* 'result' must start from a defined value: eeh_report_reset()
	 * reads and merges into it.
	 */
	int result = PCI_ERS_RESULT_NONE, ret;

	/* Bail if the PE is being recovered */
	if (pe->state & EEH_PE_RECOVERING)
		return 0;

	/* Put the PE into recovery mode */
	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);

	/* Save states */
	eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);

	/* Issue reset */
	ret = eeh_reset_pe(pe);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
		return ret;
	}

	/* Unfreeze the PE */
	ret = eeh_clear_pe_frozen_state(pe, true);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
		return ret;
	}

	/* Notify completion of reset */
	eeh_pe_dev_traverse(pe, eeh_report_reset, &result);

	/* Restore device state */
	eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL);

	/* Resume */
	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);

	/* Clear recovery mode */
	eeh_pe_state_clear(pe, EEH_PE_RECOVERING);

	return 0;
}
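
/*
 * A hypothetical caller of eeh_pe_reset_and_recover(), for
 * illustration only (the real callers live outside this file):
 *
 *	struct eeh_pe *pe = ...;	// PE known to need a reset
 *	int ret = eeh_pe_reset_and_recover(pe);
 *	if (ret)
 *		pr_err("EEH: recovery failed, err=%d\n", ret);
 */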

/**
 * eeh_reset_device - Perform actual reset of a pci slot
 * @pe: EEH PE
 * @bus: PCI bus corresponding to the isolated slot
 *
 * This routine must be called to do a reset on the indicated PE.
 * During the reset, udev might be invoked because the affected
 * PCI devices will be removed and then re-added.
 */
static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
{
	struct pci_bus *frozen_bus = eeh_pe_bus_get(pe);
	struct timeval tstamp;
	int cnt, rc, removed = 0;

	/* pcibios will clear the counter; save the value */
	cnt = pe->freeze_count;
	tstamp = pe->tstamp;

	/*
	 * We don't remove the corresponding PE instances because
	 * we need the information afterwards. The attached EEH
	 * devices are expected to be re-attached soon when calling
	 * into pcibios_add_pci_devices().
	 */
	eeh_pe_state_mark(pe, EEH_PE_KEEP);
	if (bus) {
		eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
		pci_lock_rescan_remove();
		pcibios_remove_pci_devices(bus);
		pci_unlock_rescan_remove();
	} else if (frozen_bus) {
		eeh_pe_dev_traverse(pe, eeh_rmv_device, &removed);
	}

	/*
	 * Reset the pci controller. (Asserts RST#; resets config space).
	 * Reconfigure bridges and devices. Don't try to bring the system
	 * up if the reset failed for some reason.
	 *
	 * During the reset, it's very dangerous to have uncontrolled PCI
	 * config accesses, so we prefer to block them. However, controlled
	 * PCI config accesses initiated from EEH itself are allowed.
	 */
	rc = eeh_reset_pe(pe);
	if (rc)
		return rc;

	pci_lock_rescan_remove();

	/* Restore PE */
	eeh_ops->configure_bridge(pe);
	eeh_pe_restore_bars(pe);

	/* Clear frozen state; don't leak the rescan/remove lock on error */
	rc = eeh_clear_pe_frozen_state(pe, false);
	if (rc) {
		pci_unlock_rescan_remove();
		return rc;
	}

	/* Give the system 5 seconds to finish running the user-space
	 * hotplug shutdown scripts, e.g. ifdown for ethernet.  Yes,
	 * this is a hack, but if we don't do this, and try to bring
	 * the device up before the scripts have taken it down,
	 * potentially weird things happen.
	 */
	if (bus) {
		pr_info("EEH: Sleep 5s ahead of complete hotplug\n");
		ssleep(5);

		/*
		 * The EEH device is still connected with its parent
		 * PE. We should disconnect it so the binding can be
		 * rebuilt when adding PCI devices.
		 */
		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
		pcibios_add_pci_devices(bus);
	} else if (frozen_bus && removed) {
		pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
		ssleep(5);

		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
		pcibios_add_pci_devices(frozen_bus);
	}
	eeh_pe_state_clear(pe, EEH_PE_KEEP);

	pe->tstamp = tstamp;
	pe->freeze_count = cnt;

	pci_unlock_rescan_remove();
	return 0;
}
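
/*
 * Note on the two modes of eeh_reset_device() above: with a non-NULL
 * @bus, the whole bus is unplugged before the reset and re-added after
 * it (full hotplug); with a NULL @bus, eeh_rmv_device() removes only
 * the devices whose drivers have no EEH error handlers (partial
 * hotplug), and everything else is recovered in place.
 */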

/* The longest amount of time to wait for a pci device
 * to come back on line, in seconds.
 */
#define MAX_WAIT_FOR_RECOVERY 300

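/*
 * eeh_handle_normal_event() below drives recovery for a single frozen
 * PE. A summary of how the merged driver responses map to actions
 * (derived from the code that follows):
 *
 *	PCI_ERS_RESULT_NONE        -> reset with hotplug activity
 *	PCI_ERS_RESULT_CAN_RECOVER -> thaw MMIO, then DMA; any failure
 *	                              escalates to NEED_RESET
 *	PCI_ERS_RESULT_NEED_RESET  -> reset without hotplug activity
 *	PCI_ERS_RESULT_DISCONNECT  -> permanent failure path
 */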
static void eeh_handle_normal_event(struct eeh_pe *pe)
{
	struct pci_bus *frozen_bus;
	int rc = 0;
	enum pci_ers_result result = PCI_ERS_RESULT_NONE;

	frozen_bus = eeh_pe_bus_get(pe);
	if (!frozen_bus) {
		pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
			__func__, pe->phb->global_number, pe->addr);
		return;
	}

	eeh_pe_update_time_stamp(pe);
	pe->freeze_count++;
	if (pe->freeze_count > eeh_max_freezes)
		goto excess_failures;
	pr_warn("EEH: This PCI device has failed %d times in the last hour\n",
		pe->freeze_count);

	/* Walk the various device drivers attached to this slot through
	 * a reset sequence, giving each an opportunity to do what it needs
	 * to accomplish the reset.  Each child gets a report of the
	 * status ... if any child can't handle the reset, then the entire
	 * slot is dlpar removed and added.
	 */
	pr_info("EEH: Notify device drivers to shutdown\n");
	eeh_pe_dev_traverse(pe, eeh_report_error, &result);

	/* Get the current PCI slot state. This can take a long time,
	 * sometimes over 3 seconds for certain systems.
	 */
	rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
	if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
		pr_warn("EEH: Permanent failure\n");
		goto hard_fail;
	}

	/* Since rtas may enable MMIO when posting the error log,
	 * don't post the error log until after all dev drivers
	 * have been informed.
	 */
	pr_info("EEH: Collect temporary log\n");
	eeh_slot_error_detail(pe, EEH_LOG_TEMP);

	/* If all device drivers were EEH-unaware, then shut
	 * down all of the device drivers, and hope they
	 * go down willingly, without panicking the system.
	 */
	if (result == PCI_ERS_RESULT_NONE) {
		pr_info("EEH: Reset with hotplug activity\n");
		rc = eeh_reset_device(pe, frozen_bus);
		if (rc) {
			pr_warn("%s: Unable to reset, err=%d\n",
				__func__, rc);
			goto hard_fail;
		}
	}

	/* If all devices reported they can proceed, then re-enable MMIO */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enable I/O for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);

		if (rc < 0)
			goto hard_fail;
		if (rc) {
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			pr_info("EEH: Notify device drivers to resume I/O\n");
			eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result);
		}
	}

	/* If all devices reported they can proceed, then re-enable DMA */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enable DMA for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);

		if (rc < 0)
			goto hard_fail;
		if (rc) {
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			/*
			 * We didn't do a PE reset in this case, so the
			 * PE is still in frozen state. Clear that
			 * before resuming the PE.
			 */
			eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
			result = PCI_ERS_RESULT_RECOVERED;
		}
	}

	/* If any device has a hard failure, then shut off everything. */
	if (result == PCI_ERS_RESULT_DISCONNECT) {
		pr_warn("EEH: Device driver gave up\n");
		goto hard_fail;
	}

	/* If any device called out for a reset, then reset the slot */
	if (result == PCI_ERS_RESULT_NEED_RESET) {
		pr_info("EEH: Reset without hotplug activity\n");
		rc = eeh_reset_device(pe, NULL);
		if (rc) {
			pr_warn("%s: Cannot reset, err=%d\n",
				__func__, rc);
			goto hard_fail;
		}

		pr_info("EEH: Notify device drivers of completion of reset\n");
		result = PCI_ERS_RESULT_NONE;
		eeh_pe_dev_traverse(pe, eeh_report_reset, &result);
	}

	/* All devices should claim they have recovered by now. */
	if ((result != PCI_ERS_RESULT_RECOVERED) &&
	    (result != PCI_ERS_RESULT_NONE)) {
		pr_warn("EEH: Not recovered\n");
		goto hard_fail;
	}

	/* Tell all device drivers that they can resume operations */
	pr_info("EEH: Notify device drivers to resume\n");
	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);

	return;

excess_failures:
	/*
	 * About 90% of all real-life EEH failures in the field
	 * are due to poorly seated PCI cards. Only 10% or so are
	 * due to actual, failed cards.
	 */
	pr_err("EEH: PHB#%d-PE#%x has failed %d times in the\n"
	       "last hour and has been permanently disabled.\n"
	       "Please try reseating or replacing it.\n",
		pe->phb->global_number, pe->addr,
		pe->freeze_count);
	goto perm_error;

hard_fail:
	pr_err("EEH: Unable to recover from failure from PHB#%d-PE#%x.\n"
	       "Please try reseating or replacing it\n",
		pe->phb->global_number, pe->addr);

perm_error:
	eeh_slot_error_detail(pe, EEH_LOG_PERM);

	/* Notify all devices that they're about to go down. */
	eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);

	/* Mark the PE to be removed permanently */
	eeh_pe_state_mark(pe, EEH_PE_REMOVED);

	/*
	 * Shut down the device drivers for good. We mark
	 * all removed devices correctly to avoid accessing
	 * their PCI config space any more.
	 */
	if (frozen_bus) {
		eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
		eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);

		pci_lock_rescan_remove();
		pcibios_remove_pci_devices(frozen_bus);
		pci_unlock_rescan_remove();
	}
}

static void eeh_handle_special_event(void)
{
	struct eeh_pe *pe, *phb_pe;
	struct pci_bus *bus;
	struct pci_controller *hose;
	unsigned long flags;
	int rc;

	do {
		rc = eeh_ops->next_error(&pe);

		switch (rc) {
		case EEH_NEXT_ERR_DEAD_IOC:
			/* Mark all PHBs in dead state */
			eeh_serialize_lock(&flags);

			/* Purge all events */
			eeh_remove_event(NULL, true);

			list_for_each_entry(hose, &hose_list, list_node) {
				phb_pe = eeh_phb_pe_get(hose);
				if (!phb_pe)
					continue;

				eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED);
			}

			eeh_serialize_unlock(flags);

			break;
		case EEH_NEXT_ERR_FROZEN_PE:
		case EEH_NEXT_ERR_FENCED_PHB:
		case EEH_NEXT_ERR_DEAD_PHB:
			/* Mark the PE in fenced state */
			eeh_serialize_lock(&flags);

			/* Purge all events of the PHB */
			eeh_remove_event(pe, true);

			if (rc == EEH_NEXT_ERR_DEAD_PHB)
				eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
			else
				eeh_pe_state_mark(pe,
					EEH_PE_ISOLATED | EEH_PE_RECOVERING);

			eeh_serialize_unlock(flags);

			break;
		case EEH_NEXT_ERR_NONE:
			return;
		default:
			pr_warn("%s: Invalid value %d from next_error()\n",
				__func__, rc);
			return;
		}

		/*
		 * A fenced PHB or frozen PE is handled as a normal
		 * event. For a dead PHB or IOC, we have to remove
		 * the affected PHBs instead.
		 */
		if (rc == EEH_NEXT_ERR_FROZEN_PE ||
		    rc == EEH_NEXT_ERR_FENCED_PHB) {
			eeh_handle_normal_event(pe);
			eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
		} else {
			pci_lock_rescan_remove();
			list_for_each_entry(hose, &hose_list, list_node) {
				phb_pe = eeh_phb_pe_get(hose);
				if (!phb_pe ||
				    !(phb_pe->state & EEH_PE_ISOLATED) ||
				    (phb_pe->state & EEH_PE_RECOVERING))
					continue;

				/* Notify all devices to be down */
				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
				bus = eeh_pe_bus_get(phb_pe);
				eeh_pe_dev_traverse(pe,
					eeh_report_failure, NULL);
				pcibios_remove_pci_devices(bus);
			}
			pci_unlock_rescan_remove();
		}

		/*
		 * If we have detected a dead IOC, we needn't proceed
		 * any further since all PHBs will have been removed.
		 */
		if (rc == EEH_NEXT_ERR_DEAD_IOC)
			break;
	} while (rc != EEH_NEXT_ERR_NONE);
}

/**
 * eeh_handle_event - Reset a PCI device after hard lockup.
 * @pe: EEH PE
 *
 * When a PHB detects address or data parity errors on a particular
 * PCI slot, the associated PE is frozen. DMAs to wild addresses
 * (which usually happen due to bugs in device drivers or in PCI
 * adapter firmware) can also cause EEH errors, as can #SERR, #PERR
 * and other miscellaneous PCI-related errors.
 *
 * The recovery process consists of unplugging the device driver
 * (which generates hotplug events to userspace), then issuing a PCI
 * #RST to the device, then reconfiguring the PCI config space for all
 * bridges & devices under this slot, and then finally restarting the
 * device drivers (which causes a second set of hotplug events to go
 * out to userspace).
 */
void eeh_handle_event(struct eeh_pe *pe)
{
	if (pe)
		eeh_handle_normal_event(pe);
	else
		eeh_handle_special_event();
}