/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved.
 */

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h>
#include <asm/sn/io.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/tioca_provider.h>

u32 tioca_gart_found;
EXPORT_SYMBOL(tioca_gart_found);	/* used by agp-sgi */

LIST_HEAD(tioca_list);
EXPORT_SYMBOL(tioca_list);	/* used by agp-sgi */

static int tioca_gart_init(struct tioca_kernel *);

/**
 * tioca_gart_init - Initialize SGI TIOCA GART
 * @tioca_kern: ptr to the tioca_kernel struct identifying the CA
 *
 * If the indicated tioca has devices present, initialize its associated
 * GART MMR's and kernel memory.
 */
static int
tioca_gart_init(struct tioca_kernel *tioca_kern)
{
	u64 ap_reg;
	u64 offset;
	struct page *tmp;
	struct tioca_common *tioca_common;
	struct tioca __iomem *ca_base;

	tioca_common = tioca_kern->ca_common;
	ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base;

	if (list_empty(tioca_kern->ca_devices))
		return 0;

	ap_reg = 0;

	/*
	 * Validate aperture size
	 */

	switch (CA_APERATURE_SIZE >> 20) {
	case 4:
		ap_reg |= (0x3ff << CA_GART_AP_SIZE_SHFT);	/* 4MB */
		break;
	case 8:
		ap_reg |= (0x3fe << CA_GART_AP_SIZE_SHFT);	/* 8MB */
		break;
	case 16:
		ap_reg |= (0x3fc << CA_GART_AP_SIZE_SHFT);	/* 16MB */
		break;
	case 32:
		ap_reg |= (0x3f8 << CA_GART_AP_SIZE_SHFT);	/* 32MB */
		break;
	case 64:
		ap_reg |= (0x3f0 << CA_GART_AP_SIZE_SHFT);	/* 64MB */
		break;
	case 128:
		ap_reg |= (0x3e0 << CA_GART_AP_SIZE_SHFT);	/* 128MB */
		break;
	case 256:
		ap_reg |= (0x3c0 << CA_GART_AP_SIZE_SHFT);	/* 256MB */
		break;
	case 512:
		ap_reg |= (0x380 << CA_GART_AP_SIZE_SHFT);	/* 512MB */
		break;
	case 1024:
		ap_reg |= (0x300 << CA_GART_AP_SIZE_SHFT);	/* 1GB */
		break;
	case 2048:
		ap_reg |= (0x200 << CA_GART_AP_SIZE_SHFT);	/* 2GB */
		break;
	case 4096:
		ap_reg |= (0x000 << CA_GART_AP_SIZE_SHFT);	/* 4GB */
		break;
	default:
		printk(KERN_ERR "%s: Invalid CA_APERATURE_SIZE "
		       "0x%lx\n", __func__, (ulong) CA_APERATURE_SIZE);
		return -1;
	}

	/*
	 * Set up other aperture parameters
	 */

	if (PAGE_SIZE >= 16384) {
		tioca_kern->ca_ap_pagesize = 16384;
		ap_reg |= CA_GART_PAGE_SIZE;
	} else {
		tioca_kern->ca_ap_pagesize = 4096;
	}

	tioca_kern->ca_ap_size = CA_APERATURE_SIZE;
	tioca_kern->ca_ap_bus_base = CA_APERATURE_BASE;
	tioca_kern->ca_gart_entries =
	    tioca_kern->ca_ap_size / tioca_kern->ca_ap_pagesize;

	ap_reg |= (CA_GART_AP_ENB_AGP | CA_GART_AP_ENB_PCI);
	ap_reg |= tioca_kern->ca_ap_bus_base;

	/*
	 * Allocate and set up the GART
	 */

	tioca_kern->ca_gart_size = tioca_kern->ca_gart_entries * sizeof(u64);
	tmp =
	    alloc_pages_node(tioca_kern->ca_closest_node,
			     GFP_KERNEL | __GFP_ZERO,
			     get_order(tioca_kern->ca_gart_size));

	if (!tmp) {
		printk(KERN_ERR "%s: Could not allocate "
		       "%llu bytes (order %d) for GART\n",
		       __func__,
		       tioca_kern->ca_gart_size,
		       get_order(tioca_kern->ca_gart_size));
		return -ENOMEM;
	}

	tioca_kern->ca_gart = page_address(tmp);
	tioca_kern->ca_gart_coretalk_addr =
	    PHYS_TO_TIODMA(virt_to_phys(tioca_kern->ca_gart));

	/*
	 * Compute PCI/AGP convenience fields
	 */

	offset = CA_PCI32_MAPPED_BASE - CA_APERATURE_BASE;
	tioca_kern->ca_pciap_base = CA_PCI32_MAPPED_BASE;
	tioca_kern->ca_pciap_size = CA_PCI32_MAPPED_SIZE;
	tioca_kern->ca_pcigart_start = offset / tioca_kern->ca_ap_pagesize;
	tioca_kern->ca_pcigart_base =
	    tioca_kern->ca_gart_coretalk_addr + offset;
	tioca_kern->ca_pcigart =
	    &tioca_kern->ca_gart[tioca_kern->ca_pcigart_start];
	tioca_kern->ca_pcigart_entries =
	    tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize;
	tioca_kern->ca_pcigart_pagemap =
	    kzalloc(tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL);
	if (!tioca_kern->ca_pcigart_pagemap) {
		free_pages((unsigned long)tioca_kern->ca_gart,
			   get_order(tioca_kern->ca_gart_size));
		return -1;
	}

	offset = CA_AGP_MAPPED_BASE - CA_APERATURE_BASE;
	tioca_kern->ca_gfxap_base = CA_AGP_MAPPED_BASE;
	tioca_kern->ca_gfxap_size = CA_AGP_MAPPED_SIZE;
	tioca_kern->ca_gfxgart_start = offset / tioca_kern->ca_ap_pagesize;
	tioca_kern->ca_gfxgart_base =
	    tioca_kern->ca_gart_coretalk_addr + offset;
	tioca_kern->ca_gfxgart =
	    &tioca_kern->ca_gart[tioca_kern->ca_gfxgart_start];
	tioca_kern->ca_gfxgart_entries =
	    tioca_kern->ca_gfxap_size / tioca_kern->ca_ap_pagesize;

	/*
	 * Various control settings:
	 *	use agp op-combining
	 *	use GET semantics to fetch memory
	 *	participate in coherency domain
	 *	DISABLE GART PREFETCHING due to hw bug tracked in SGI PV930029
	 */

	__sn_setq_relaxed(&ca_base->ca_control1,
			CA_AGPDMA_OP_ENB_COMBDELAY);	/* PV895469 ? */
	__sn_clrq_relaxed(&ca_base->ca_control2, CA_GART_MEM_PARAM);
	__sn_setq_relaxed(&ca_base->ca_control2,
			(0x2ull << CA_GART_MEM_PARAM_SHFT));
	tioca_kern->ca_gart_iscoherent = 1;
	__sn_clrq_relaxed(&ca_base->ca_control2,
		    (CA_GART_WR_PREFETCH_ENB | CA_GART_RD_PREFETCH_ENB));

	/*
	 * Unmask GART fetch error interrupts.  Clear residual errors first.
	 */

	writeq(CA_GART_FETCH_ERR, &ca_base->ca_int_status_alias);
	writeq(CA_GART_FETCH_ERR, &ca_base->ca_mult_error_alias);
	__sn_clrq_relaxed(&ca_base->ca_int_mask, CA_GART_FETCH_ERR);

	/*
	 * Program the aperture and GART registers in TIOCA
	 */

	writeq(ap_reg, &ca_base->ca_gart_aperature);
	writeq(tioca_kern->ca_gart_coretalk_addr|1, &ca_base->ca_gart_ptr_table);

	return 0;
}
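
/*
 * Illustrative sketch (not part of the driver): the size field programmed
 * by the switch above follows a closed form -- the 10-bit value is 0x400
 * minus the aperture size in 4MB units, i.e. a run of high-order ones.
 * A hedged equivalent, assuming CA_APERATURE_SIZE is a power of two
 * between 4MB and 4GB (e.g. 4MB -> 0x3ff, 1GB -> 0x300, 4GB -> 0x000):
 */
#if 0
static u64 example_ca_ap_size_field(u64 ap_size)
{
	/* ap_size >> 22 == size in 4MB units; 0x400 - that == field value */
	return ((0x400 - (ap_size >> 22)) & 0x3ff) << CA_GART_AP_SIZE_SHFT;
}
#endif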

/**
 * tioca_fastwrite_enable - enable AGP FW for a tioca and its functions
 * @tioca_kern: structure representing the CA
 *
 * Given a CA, scan all attached functions making sure they all support
 * FastWrite.  If so, enable FastWrite for all functions and the CA itself.
 */
void
tioca_fastwrite_enable(struct tioca_kernel *tioca_kern)
{
	int cap_ptr;
	u32 reg;
	struct tioca __iomem *tioca_base;
	struct pci_dev *pdev;
	struct tioca_common *common;

	common = tioca_kern->ca_common;

	/*
	 * Scan all vga controllers on this bus making sure they all
	 * support FW.  If not, return.
	 */

	list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
		if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
			continue;

		cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
		if (!cap_ptr)
			return;	/* no AGP CAP means no FW */

		pci_read_config_dword(pdev, cap_ptr + PCI_AGP_STATUS, &reg);
		if (!(reg & PCI_AGP_STATUS_FW))
			return;	/* function doesn't support FW */
	}

	/*
	 * Set fw for all vga fn's
	 */

	list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
		if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
			continue;

		cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
		pci_read_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, &reg);
		reg |= PCI_AGP_COMMAND_FW;
		pci_write_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, reg);
	}

	/*
	 * Set ca's fw to match
	 */

	tioca_base = (struct tioca __iomem *)common->ca_common.bs_base;
	__sn_setq_relaxed(&tioca_base->ca_control1, CA_AGP_FW_ENABLE);
}

EXPORT_SYMBOL(tioca_fastwrite_enable);	/* used by agp-sgi */
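
/*
 * Illustrative sketch (not part of the driver): how a consumer such as
 * agp-sgi might use the exported tioca_list together with
 * tioca_fastwrite_enable().  The iteration below is an assumption about
 * the caller, not code from this file.
 */
#if 0
static void example_enable_fw_everywhere(void)
{
	struct tioca_kernel *ca;

	list_for_each_entry(ca, &tioca_list, ca_list)
		tioca_fastwrite_enable(ca);
}
#endif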

/**
 * tioca_dma_d64 - create a DMA mapping using 64-bit direct mode
 * @paddr: system physical address
 *
 * Map @paddr into 64-bit CA bus space.  No device context is necessary.
 * Bits 53:0 come from the coretalk address.  We just need to mask in the
 * following optional bits of the 64-bit pci address:
 *
 * 63:60 - Coretalk Packet Type -  0x1 for Mem Get/Put (coherent)
 *                                 0x2 for PIO (non-coherent)
 *                                 We will always use 0x1
 * 55:55 - Swap bytes		   Currently unused
 */
static u64
tioca_dma_d64(unsigned long paddr)
{
	dma_addr_t bus_addr;

	bus_addr = PHYS_TO_TIODMA(paddr);

	BUG_ON(!bus_addr);
	BUG_ON(bus_addr >> 54);

	/* Set upper nibble to Cache Coherent Memory op */
	bus_addr |= (1UL << 60);

	return bus_addr;
}

/**
 * tioca_dma_d48 - create a DMA mapping using 48-bit direct mode
 * @pdev: linux pci_dev representing the function
 * @paddr: system physical address
 *
 * Map @paddr into 64-bit bus space of the CA associated with @pcidev_info.
 *
 * The CA agp 48 bit direct address falls out as follows:
 *
 * When direct mapping AGP addresses, the 48 bit AGP address is
 * constructed as follows:
 *
 * [47:40] - Low 8 bits of the page Node ID extracted from coretalk
 *              address [47:40].  The upper 8 node bits are fixed
 *              and come from the xxx register bits [5:0]
 * [39:38] - Chiplet ID extracted from coretalk address [39:38]
 * [37:00] - node offset extracted from coretalk address [37:00]
 *
 * Since the node id in general will be non-zero, and the chiplet id
 * will always be non-zero, it follows that the device must support
 * a dma mask of at least 0xffffffffff (40 bits) to target node 0
 * and in general should be 0xffffffffffff (48 bits) to target nodes
 * up to 255.  Nodes above 255 need the support of the xxx register,
 * and so a given CA can only directly target nodes in the range
 * xxx - xxx+255.
 */
static u64
tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
{
	struct tioca_common *tioca_common;
	struct tioca __iomem *ca_base;
	u64 ct_addr;
	dma_addr_t bus_addr;
	u32 node_upper;
	u64 agp_dma_extn;
	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);

	tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
	ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base;

	ct_addr = PHYS_TO_TIODMA(paddr);
	if (!ct_addr)
		return 0;

	bus_addr = (dma_addr_t) (ct_addr & 0xffffffffffffUL);
	node_upper = ct_addr >> 48;

	if (node_upper > 64) {
		printk(KERN_ERR "%s: coretalk addr 0x%p node id out "
		       "of range\n", __func__, (void *)ct_addr);
		return 0;
	}

	agp_dma_extn = __sn_readq_relaxed(&ca_base->ca_agp_dma_addr_extn);
	if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) {
		printk(KERN_ERR "%s: coretalk upper node (%u) "
		       "mismatch with ca_agp_dma_addr_extn (%llu)\n",
		       __func__,
		       node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT));
		return 0;
	}

	return bus_addr;
}
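
/*
 * Illustrative sketch (not part of the driver): splitting a coretalk
 * address into the 48-bit direct-mode fields described in the comment
 * above.  The variable names are hypothetical; only the bit positions
 * come from the documentation.
 */
#if 0
static void example_d48_fields(u64 ct_addr)
{
	u64 node_low    = (ct_addr >> 40) & 0xff;	/* [47:40] */
	u64 chiplet_id  = (ct_addr >> 38) & 0x3;	/* [39:38] */
	u64 node_offset = ct_addr & ((1UL << 38) - 1);	/* [37:00] */
	u64 node_upper  = ct_addr >> 48;   /* must match dma addr extn reg */

	printk(KERN_DEBUG "node_low %llu chiplet %llu offset 0x%llx upper %llu\n",
	       node_low, chiplet_id, node_offset, node_upper);
}
#endif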

/**
 * tioca_dma_mapped - create a DMA mapping using a CA GART
 * @pdev: linux pci_dev representing the function
 * @paddr: host physical address to map
 * @req_size: len (bytes) to map
 *
 * Map @paddr into CA address space using the GART mechanism.  The mapped
 * dma_addr_t is guaranteed to be contiguous in CA bus space.
 */
static dma_addr_t
tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
{
	int ps, ps_shift, entry, entries, mapsize;
	u64 xio_addr, end_xio_addr;
	struct tioca_common *tioca_common;
	struct tioca_kernel *tioca_kern;
	dma_addr_t bus_addr = 0;
	struct tioca_dmamap *ca_dmamap;
	void *map;
	unsigned long flags;
	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);

	tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
	tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;

	xio_addr = PHYS_TO_TIODMA(paddr);
	if (!xio_addr)
		return 0;

	spin_lock_irqsave(&tioca_kern->ca_lock, flags);

	/*
	 * allocate a map struct
	 */

	ca_dmamap = kzalloc(sizeof(struct tioca_dmamap), GFP_ATOMIC);
	if (!ca_dmamap)
		goto map_return;

	/*
	 * Locate free entries that can hold req_size.  Account for
	 * unaligned start/length when allocating.
	 */

	ps = tioca_kern->ca_ap_pagesize;	/* will be power of 2 */
	ps_shift = ffs(ps) - 1;
	end_xio_addr = xio_addr + req_size - 1;

	entries = (end_xio_addr >> ps_shift) - (xio_addr >> ps_shift) + 1;

	map = tioca_kern->ca_pcigart_pagemap;
	mapsize = tioca_kern->ca_pcigart_entries;

	entry = bitmap_find_next_zero_area(map, mapsize, 0, entries, 0);
	if (entry >= mapsize) {
		kfree(ca_dmamap);
		goto map_return;
	}

	bitmap_set(map, entry, entries);

	bus_addr = tioca_kern->ca_pciap_base + (entry * ps);

	ca_dmamap->cad_dma_addr = bus_addr;
	ca_dmamap->cad_gart_size = entries;
	ca_dmamap->cad_gart_entry = entry;
	list_add(&ca_dmamap->cad_list, &tioca_kern->ca_dmamaps);

	if (xio_addr % ps) {
		tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
		bus_addr += xio_addr & (ps - 1);
		xio_addr &= ~(ps - 1);
		xio_addr += ps;
		entry++;
	}

	while (xio_addr < end_xio_addr) {
		tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
		xio_addr += ps;
		entry++;
	}

	tioca_tlbflush(tioca_kern);

map_return:
	spin_unlock_irqrestore(&tioca_kern->ca_lock, flags);
	return bus_addr;
}
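
/*
 * Illustrative sketch (not part of the driver): the entries computation
 * above with concrete numbers.  With a 4KB GART page (ps_shift == 12), a
 * request starting at xio_addr 0x1f00 for 0x300 bytes ends at 0x21ff and
 * needs two GART entries even though 0x300 < 4KB, because it straddles a
 * page boundary -- this is the "unaligned start/length" accounting.
 */
#if 0
static int example_gart_entries(u64 xio_addr, size_t req_size, int ps_shift)
{
	u64 end_xio_addr = xio_addr + req_size - 1;

	/* e.g. (0x21ff >> 12) - (0x1f00 >> 12) + 1 == 2 - 1 + 1 == 2 */
	return (end_xio_addr >> ps_shift) - (xio_addr >> ps_shift) + 1;
}
#endif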

/**
 * tioca_dma_unmap - release CA mapping resources
 * @pdev: linux pci_dev representing the function
 * @bus_addr: bus address returned by an earlier tioca_dma_map
 * @dir: mapping direction (unused)
 *
 * Locate mapping resources associated with @bus_addr and release them.
 * For mappings created using the direct modes (64 or 48) there are no
 * resources to release.
 */
static void
tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
{
	int i, entry;
	struct tioca_common *tioca_common;
	struct tioca_kernel *tioca_kern;
	struct tioca_dmamap *map;
	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
	unsigned long flags;

	tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
	tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;

	/* return straight away if this isn't a mapped address */

	if (bus_addr < tioca_kern->ca_pciap_base ||
	    bus_addr >= (tioca_kern->ca_pciap_base + tioca_kern->ca_pciap_size))
		return;

	spin_lock_irqsave(&tioca_kern->ca_lock, flags);

	list_for_each_entry(map, &tioca_kern->ca_dmamaps, cad_list)
		if (map->cad_dma_addr == bus_addr)
			break;

	BUG_ON(map == NULL);

	entry = map->cad_gart_entry;

	for (i = 0; i < map->cad_gart_size; i++, entry++) {
		clear_bit(entry, tioca_kern->ca_pcigart_pagemap);
		tioca_kern->ca_pcigart[entry] = 0;
	}
	tioca_tlbflush(tioca_kern);

	list_del(&map->cad_list);
	spin_unlock_irqrestore(&tioca_kern->ca_lock, flags);
	kfree(map);
}

/**
 * tioca_dma_map - map pages for PCI DMA
 * @pdev: linux pci_dev representing the function
 * @paddr: host physical address to map
 * @byte_count: bytes to map
 *
 * This is the main wrapper for mapping host physical pages to CA PCI space.
 * The mapping mode used is based on the device's dma_mask.  As a last resort
 * use the GART mapped mode.
 */
static u64
tioca_dma_map(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags)
{
	u64 mapaddr;

	/*
	 * Not supported for now ...
	 */
	if (dma_flags & SN_DMA_MSI)
		return 0;

	/*
	 * If card is 64 or 48 bit addressable, use a direct mapping.  32
	 * bit direct is so restrictive w.r.t. where the memory resides that
	 * we don't use it even though CA has some support.
	 */

	if (pdev->dma_mask == ~0UL)
		mapaddr = tioca_dma_d64(paddr);
	else if (pdev->dma_mask == 0xffffffffffffUL)
		mapaddr = tioca_dma_d48(pdev, paddr);
	else
		mapaddr = 0;

	/* Last resort ... use PCI portion of CA GART */

	if (mapaddr == 0)
		mapaddr = tioca_dma_mapped(pdev, paddr, byte_count);

	return mapaddr;
}
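
/*
 * Illustrative sketch (not part of the driver): which mask a device would
 * have to advertise to land on each path above.  DMA_BIT_MASK() is the
 * standard <linux/dma-mapping.h> helper; the driver itself compares raw
 * constants (~0UL and 0xffffffffffffUL are the same values).
 */
#if 0
static void example_mask_dispatch(struct pci_dev *pdev)
{
	if (pdev->dma_mask == DMA_BIT_MASK(64))
		;	/* 64-bit direct: tioca_dma_d64() */
	else if (pdev->dma_mask == DMA_BIT_MASK(48))
		;	/* 48-bit direct: tioca_dma_d48() */
	else
		;	/* fall back to the GART via tioca_dma_mapped() */
}
#endif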

/**
 * tioca_error_intr_handler - SGI TIO CA error interrupt handler
 * @irq: unused
 * @arg: pointer to tioca_common struct for the given CA
 *
 * Handle a CA error interrupt.  Simply a wrapper around a SAL call which
 * defers processing to the SGI prom.
 */
static irqreturn_t
tioca_error_intr_handler(int irq, void *arg)
{
	struct tioca_common *soft = arg;
	struct ia64_sal_retval ret_stuff;
	u64 segment;
	u64 busnum;
	ret_stuff.status = 0;
	ret_stuff.v0 = 0;

	segment = soft->ca_common.bs_persist_segment;
	busnum = soft->ca_common.bs_persist_busnum;

	SAL_CALL_NOLOCK(ret_stuff,
			(u64) SN_SAL_IOIF_ERROR_INTERRUPT,
			segment, busnum, 0, 0, 0, 0, 0);

	return IRQ_HANDLED;
}

/**
 * tioca_bus_fixup - perform final PCI fixup for a TIO CA bus
 * @prom_bussoft: Common prom/kernel struct representing the bus
 *
 * Replicates the tioca_common pointed to by @prom_bussoft in kernel
 * space.  Allocates and initializes a kernel-only area for a given CA,
 * and sets up an irq for handling CA error interrupts.
 *
 * On successful setup, returns the kernel version of tioca_common back to
 * the caller.
 */
static void *
tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
{
	struct tioca_common *tioca_common;
	struct tioca_kernel *tioca_kern;
	struct pci_bus *bus;

	/* sanity check prom rev */

	if (is_shub1() && sn_sal_rev() < 0x0406) {
		printk
		    (KERN_ERR "%s: SGI prom rev 4.06 or greater required "
		     "for tioca support\n", __func__);
		return NULL;
	}

	/*
	 * Allocate kernel bus soft and copy from prom.
	 */

	tioca_common = kmemdup(prom_bussoft, sizeof(struct tioca_common),
			       GFP_KERNEL);
	if (!tioca_common)
		return NULL;

	tioca_common->ca_common.bs_base = (unsigned long)
		ioremap(REGION_OFFSET(tioca_common->ca_common.bs_base),
			sizeof(struct tioca_common));

	/* init kernel-private area */

	tioca_kern = kzalloc(sizeof(struct tioca_kernel), GFP_KERNEL);
	if (!tioca_kern) {
		kfree(tioca_common);
		return NULL;
	}

	tioca_kern->ca_common = tioca_common;
	spin_lock_init(&tioca_kern->ca_lock);
	INIT_LIST_HEAD(&tioca_kern->ca_dmamaps);
	tioca_kern->ca_closest_node =
	    nasid_to_cnodeid(tioca_common->ca_closest_nasid);
	tioca_common->ca_kernel_private = (u64) tioca_kern;

	bus = pci_find_bus(tioca_common->ca_common.bs_persist_segment,
		tioca_common->ca_common.bs_persist_busnum);
	BUG_ON(!bus);
	tioca_kern->ca_devices = &bus->devices;

	/* init GART */

	if (tioca_gart_init(tioca_kern) < 0) {
		kfree(tioca_kern);
		kfree(tioca_common);
		return NULL;
	}

	tioca_gart_found++;
	list_add(&tioca_kern->ca_list, &tioca_list);

	if (request_irq(SGI_TIOCA_ERROR,
			tioca_error_intr_handler,
			IRQF_SHARED, "TIOCA error", (void *)tioca_common))
		printk(KERN_WARNING
		       "%s:  Unable to get irq %d.  "
		       "Error interrupts won't be routed for TIOCA bus %d\n",
		       __func__, SGI_TIOCA_ERROR,
		       (int)tioca_common->ca_common.bs_persist_busnum);

	irq_set_handler(SGI_TIOCA_ERROR, handle_level_irq);
	sn_set_err_irq_affinity(SGI_TIOCA_ERROR);

	/* Setup locality information */
	controller->node = tioca_kern->ca_closest_node;
	return tioca_common;
}
" 648 "Error interrupts won't be routed for TIOCA bus %d\n", 649 __func__, SGI_TIOCA_ERROR, 650 (int)tioca_common->ca_common.bs_persist_busnum); 651 652 irq_set_handler(SGI_TIOCA_ERROR, handle_level_irq); 653 sn_set_err_irq_affinity(SGI_TIOCA_ERROR); 654 655 /* Setup locality information */ 656 controller->node = tioca_kern->ca_closest_node; 657 return tioca_common; 658} 659 660static struct sn_pcibus_provider tioca_pci_interfaces = { 661 .dma_map = tioca_dma_map, 662 .dma_map_consistent = tioca_dma_map, 663 .dma_unmap = tioca_dma_unmap, 664 .bus_fixup = tioca_bus_fixup, 665 .force_interrupt = NULL, 666 .target_interrupt = NULL 667}; 668 669/** 670 * tioca_init_provider - init SN PCI provider ops for TIO CA 671 */ 672int 673tioca_init_provider(void) 674{ 675 sn_pci_provider[PCIIO_ASIC_TYPE_TIOCA] = &tioca_pci_interfaces; 676 return 0; 677} 678