#ifndef _ASM_IA64_PCI_H
#define _ASM_IA64_PCI_H

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/io.h>
#include <asm/scatterlist.h>
#include <asm/hw_irq.h>

struct pci_vector_struct {
	__u16 segment;	/* PCI Segment number */
	__u16 bus;	/* PCI Bus number */
	__u32 pci_id;	/* ACPI-style ID: device number in the upper 16 bits, function number in the lower 16 (see ACPI spec section 6.1.1) */
	__u8 pin;	/* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */
	__u32 irq;	/* IRQ assigned */
};

/*
 * Can be used to override the logic in pci_scan_bus for skipping already-configured bus
 * numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the
 * loader.
 */
#define pcibios_assign_all_busses()     0

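/*
 * Lowest I/O port and MMIO addresses the kernel will hand out when it
 * assigns PCI resources itself.
 */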
#define PCIBIOS_MIN_IO		0x1000
#define PCIBIOS_MIN_MEM		0x10000000

void pcibios_config_init(void);

struct pci_dev;

/*
 * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct
 * correspondence between device bus addresses and CPU physical addresses.
 * Platforms with a hardware I/O MMU _must_ turn this off to suppress the
 * bounce buffer handling code in the block and network device layers.
 * Platforms with separate bus address spaces _must_ turn this off and provide
 * a device DMA mapping implementation that takes care of the necessary
 * address translation.
 *
 * For now, the ia64 platforms which may have separate/multiple bus address
 * spaces all have I/O MMUs which support the merging of physically
 * discontiguous buffers, so we can use that as the sole factor to determine
 * the setting of PCI_DMA_BUS_IS_PHYS.
 */
extern unsigned long ia64_max_iommu_merge_mask;
#define PCI_DMA_BUS_IS_PHYS	(ia64_max_iommu_merge_mask == ~0UL)
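/*
 * PCI_DMA_BUS_IS_PHYS therefore evaluates to 1 unless a platform's I/O MMU
 * setup code lowers ia64_max_iommu_merge_mask at boot.
 */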

#include <asm-generic/pci-dma-compat.h>

#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	unsigned long cacheline_size;
	u8 byte;

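	/*
	 * PCI_CACHE_LINE_SIZE is expressed in 32-bit words; 0 means the
	 * device does not report a cache line size, so fall back to 1024
	 * bytes.
	 */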
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	*strat = PCI_DMA_BURST_MULTIPLE;
	*strategy_parameter = cacheline_size;
}
#endif

#define HAVE_PCI_MMAP
extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
				enum pci_mmap_state mmap_state, int write_combine);
#define HAVE_PCI_LEGACY
extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
				      struct vm_area_struct *vma,
				      enum pci_mmap_state mmap_state);

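/*
 * Accesses to legacy I/O and memory space go through the ia64 machine
 * vector, so each platform can supply its own implementation.
 */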
#define pci_get_legacy_mem platform_pci_get_legacy_mem
#define pci_legacy_read platform_pci_legacy_read
#define pci_legacy_write platform_pci_legacy_write

struct iospace_resource {
	struct list_head list;
	struct resource res;
};

struct pci_controller {
	struct acpi_device *companion;
	void *iommu;
	int segment;
	int node;		/* nearest node with memory or NUMA_NO_NODE for global allocation */

	void *platform_data;
};

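/*
 * PCI_CONTROLLER() assumes bus->sysdata was set to the pci_controller when
 * the root bus was created; the ACPI segment number doubles as the Linux
 * PCI domain.
 */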
#define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata)
#define pci_domain_nr(busdev)    (PCI_CONTROLLER(busdev)->segment)

extern struct pci_ops pci_root_ops;

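/* Include the domain in /proc/bus/pci names only when it is nonzero. */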
static inline int pci_proc_domain(struct pci_bus *bus)
{
	return (pci_domain_nr(bus) != 0);
}

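/*
 * Pick the root resource tree (I/O port space vs. memory space) that a
 * device resource should be claimed against.
 */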
static inline struct resource *
pcibios_select_root(struct pci_dev *pdev, struct resource *res)
{
	struct resource *root = NULL;

	if (res->flags & IORESOURCE_IO)
		root = &ioport_resource;
	if (res->flags & IORESOURCE_MEM)
		root = &iomem_resource;

	return root;
}

#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
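	/* Legacy IDE: the primary channel uses ISA IRQ 14, the secondary IRQ 15. */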
	return channel ? isa_irq_to_vector(15) : isa_irq_to_vector(14);
}

#ifdef CONFIG_INTEL_IOMMU
extern void pci_iommu_alloc(void);
#endif
#endif /* _ASM_IA64_PCI_H */