/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/async.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_legacy.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/console.h>
#include <linux/vt.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/oom.h>


static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pdev->device;
		break;
	case I915_PARAM_REVISION:
		value = dev->pdev->revision;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_ring_initialized(&dev_priv->ring[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_ring_initialized(&dev_priv->ring[BCS]);
		break;
	case I915_PARAM_HAS_VEBOX:
		value = intel_ring_initialized(&dev_priv->ring[VECS]);
		break;
	case I915_PARAM_HAS_BSD2:
		value = intel_ring_initialized(&dev_priv->ring[VCS2]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = USES_PPGTT(dev);
		break;
	case I915_PARAM_HAS_WAIT_TIMEOUT:
		value = 1;
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915_semaphore_is_enabled(dev);
		break;
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
		value = 1;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_HAS_PINNED_BATCHES:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_NO_RELOC:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
		value = 1;
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version();
		break;
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
		value = 1;
		break;
	case I915_PARAM_MMAP_VERSION:
		value = 1;
		break;
	case I915_PARAM_SUBSLICE_TOTAL:
		value = INTEL_INFO(dev)->subslice_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_EU_TOTAL:
		value = INTEL_INFO(dev)->eu_total;
		if (!value)
			return -ENODEV;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (copy_to_user(param->value, &value, sizeof(int))) {
		DRM_ERROR("copy_to_user failed\n");
		return -EFAULT;
	}

	return 0;
}
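
/*
 * For illustration only (not part of the driver): a sketch of how userspace
 * would typically query one of these parameters through libdrm, assuming an
 * already-open drm fd. The kernel side above fills in *gp.value via the
 * copy_to_user() call.
 *
 *	int devid = 0;
 *	drm_i915_getparam_t gp = {
 *		.param = I915_PARAM_CHIPSET_ID,
 *		.value = &devid,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *		printf("chipset id: 0x%04x\n", devid);
 */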

static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		/* Reject all old ums/dri params. */
		return -ENODEV;

	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	return 0;
}

static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}
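
/*
 * Note on the register layout used above: MCHBAR is an MCHBAR_SIZE (16KB)
 * MMIO window for the memory controller. On gen4+ the base address is a
 * 64-bit value split across a register pair (MCHBAR_I965 at 0x48 holds the
 * low dword, 0x4C the high one), while older chipsets use the single 32-bit
 * MCHBAR_I915 register at 0x44; hence the extra high-dword read/write done
 * only for gen >= 4 in intel_alloc_mchbar_resource().
 */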

/*
 * Set up MCHBAR if possible; remember in mchbar_need_disable whether we
 * have to disable it again on teardown.
 */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume_legacy(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_err("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_legacy(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};
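
/*
 * These callbacks are handed to vga_switcheroo in i915_load_modeset_init()
 * below; on muxed dual-GPU systems the switcheroo core invokes them to
 * suspend or resume this GPU when the user switches between GPUs.
 */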

static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have more than one VGA card, then we need to arbitrate
	 * access to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	intel_power_domains_init_hw(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_gem_stolen;

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_irq;

	intel_modeset_gem_init(dev);

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = true;
	if (INTEL_INFO(dev)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (otherwise we get a ghost "connected" connector
	 * status), e.g. VGA on gm45. Hence we can only set up the initial
	 * fbdev config after hpd irqs are fully enabled. We should scan for
	 * the initial config only once hotplug handling is enabled, but due
	 * to screwed-up locking around kms/fbdev init we can't protect the
	 * fbdev initial config scanning against hotplug events. Hence do this
	 * first and ignore the tiny window where we will lose hotplug
	 * notifications.
	 */
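	/*
	 * Scheduling the initial config scan on the kernel's shared async
	 * thread pool means the (potentially slow) output probing runs in
	 * parallel with the remainder of driver load.
	 */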
	async_schedule(intel_fbdev_initial_config, dev_priv);

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
cleanup_irq:
	drm_irq_uninstall(dev);
cleanup_gem_stolen:
	i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	return ret;
}

#if IS_ENABLED(CONFIG_FB)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->dev->pdev;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = dev_priv->gtt.mappable_base;
	ap->ranges[0].size = dev_priv->gtt.mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}
#else
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	return 0;
}
#endif

#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	DRM_INFO("Replacing VGA console driver\n");

	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif

static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = &dev_priv->info;

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
			 info->gen,
			 dev_priv->dev->pdev->device,
			 dev_priv->dev->pdev->revision,
			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}
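
/*
 * A sketch of how the helper macros above expand, assuming
 * DEV_INFO_FOR_EACH_FLAG(func, sep) applies func to every device flag with
 * sep between applications (see the definition in i915_drv.h for the real
 * flag list):
 *
 *	DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY)
 *		-> "%s" "%s" ...	(one "%s" per flag, string-pasted
 *					 onto the format string)
 *	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA)
 *		-> info->is_mobile ? "is_mobile," : "",
 *		   info->has_llc ? "has_llc," : "", ...
 *
 * so only the flags that are actually set end up in the debug output.
 */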

/*
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 * - it's judged too laborious to fill n static structures with the limit
 *   when a simple if statement does the job,
 * - run-time checks (e.g. read fuse/strap registers) are needed.
 *
 * This function needs to be called:
 * - after the MMIO has been setup as we are reading registers,
 * - after the PCH has been detected,
 * - before the first usage of the fields it can tweak.
 */
static void intel_device_info_runtime_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	enum pipe pipe;

	info = (struct intel_device_info *)&dev_priv->info;

	if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 2;
	else
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 1;

	if (i915.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (info->num_pipes > 0 &&
		   (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
		   !IS_VALLEYVIEW(dev)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (dev_priv->pch_type == PCH_CPT &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		}
	}

	/* Initialize slice/subslice/EU info */
	if (IS_CHERRYVIEW(dev)) {
		u32 fuse, eu_dis;

		fuse = I915_READ(CHV_FUSE_GT);

		info->slice_total = 1;

		if (!(fuse & CHV_FGT_DISABLE_SS0)) {
			info->subslice_per_slice++;
			eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
					 CHV_FGT_EU_DIS_SS0_R1_MASK);
			info->eu_total += 8 - hweight32(eu_dis);
		}

		if (!(fuse & CHV_FGT_DISABLE_SS1)) {
			info->subslice_per_slice++;
			eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
					 CHV_FGT_EU_DIS_SS1_R1_MASK);
			info->eu_total += 8 - hweight32(eu_dis);
		}

		info->subslice_total = info->subslice_per_slice;
		/*
		 * CHV is expected to always have a uniform distribution of EU
		 * across subslices.
		 */
		info->eu_per_subslice = info->subslice_total ?
					info->eu_total / info->subslice_total :
					0;
		/*
		 * CHV supports subslice power gating on devices with more than
		 * one subslice, and supports EU power gating on devices with
		 * more than one EU pair per subslice.
		 */
		info->has_slice_pg = 0;
		info->has_subslice_pg = (info->subslice_total > 1);
		info->has_eu_pg = (info->eu_per_subslice > 2);
	} else if (IS_SKYLAKE(dev)) {
		const int s_max = 3, ss_max = 4, eu_max = 8;
		int s, ss;
		u32 fuse2, eu_disable[s_max], s_enable, ss_disable;

		fuse2 = I915_READ(GEN8_FUSE2);
		s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
			   GEN8_F2_S_ENA_SHIFT;
		ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
			     GEN9_F2_SS_DIS_SHIFT;

		eu_disable[0] = I915_READ(GEN8_EU_DISABLE0);
		eu_disable[1] = I915_READ(GEN8_EU_DISABLE1);
		eu_disable[2] = I915_READ(GEN8_EU_DISABLE2);

		info->slice_total = hweight32(s_enable);
		/*
		 * The subslice disable field is global, i.e. it applies
		 * to each of the enabled slices.
		 */
		info->subslice_per_slice = ss_max - hweight32(ss_disable);
		info->subslice_total = info->slice_total *
				       info->subslice_per_slice;

		/*
		 * Iterate through enabled slices and subslices to
		 * count the total enabled EU.
		 */
		for (s = 0; s < s_max; s++) {
			if (!(s_enable & (0x1 << s)))
				/* skip disabled slice */
				continue;

			for (ss = 0; ss < ss_max; ss++) {
				u32 n_disabled;

				if (ss_disable & (0x1 << ss))
					/* skip disabled subslice */
					continue;

				n_disabled = hweight8(eu_disable[s] >>
						      (ss * eu_max));

				/*
				 * Record which subslice(s) have 7 EUs. We
				 * can tune the hash used to spread work among
				 * subslices if they are unbalanced.
				 */
				if (eu_max - n_disabled == 7)
					info->subslice_7eu[s] |= 1 << ss;

				info->eu_total += eu_max - n_disabled;
			}
		}

		/*
		 * SKL is expected to always have a uniform distribution
		 * of EU across subslices with the exception that any one
		 * EU in any one subslice may be fused off for die
		 * recovery.
		 */
		info->eu_per_subslice = info->subslice_total ?
					DIV_ROUND_UP(info->eu_total,
						     info->subslice_total) : 0;
		/*
		 * SKL supports slice power gating on devices with more than
		 * one slice, and supports EU power gating on devices with
		 * more than one EU pair per subslice.
		 */
		info->has_slice_pg = (info->slice_total > 1) ? 1 : 0;
		info->has_subslice_pg = 0;
		info->has_eu_pg = (info->eu_per_subslice > 2) ? 1 : 0;
	}
	DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
	DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
	DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
	DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
	DRM_DEBUG_DRIVER("has slice power gating: %s\n",
			 info->has_slice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
			 info->has_subslice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has EU power gating: %s\n",
			 info->has_eu_pg ? "y" : "n");
}
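
/*
 * A worked example of the SKL fuse parsing above, with made-up fuse values:
 * s_enable == 0b011 (slices 0 and 1 up) and ss_disable == 0b0001 (subslice 0
 * fused off) give slice_total = hweight32(0b011) = 2, subslice_per_slice =
 * 4 - 1 = 3 and subslice_total = 2 * 3 = 6. Each eu_disable register packs
 * one 8-bit disable mask per subslice, so hweight8(eu_disable[s] >> (ss * 8))
 * is the number of EUs fused off in subslice ss, and the remaining
 * eu_max - n_disabled of its 8 EUs are added to eu_total.
 */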

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 * - drive output discovery via intel_modeset_init()
 * - initialize the memory manager
 * - allocate initial config memory
 * - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	struct intel_device_info *info, *device_info;
	int ret = 0, mmio_bar, mmio_size;
	uint32_t aperture_size;

	info = (struct intel_device_info *) flags;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = dev_priv;
	dev_priv->dev = dev;

	/* Setup the write-once "constant" device info */
	device_info = (struct intel_device_info *)&dev_priv->info;
	memcpy(device_info, info, sizeof(dev_priv->info));
	device_info->device_id = dev->pdev->device;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->dpio_lock);
	mutex_init(&dev_priv->modeset_restore_lock);

	intel_pm_setup(dev);

	intel_display_crc_init(dev);

	i915_dump_device_info(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT, which we want to map with ioremap_wc instead.
	 * Fortunately, the register BAR remains the same size for all the
	 * earlier generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_uncore_init(dev);

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto out_regs;

	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over. */
	ret = i915_kick_out_firmware_fb(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
		goto out_gtt;
	}

	ret = i915_kick_out_vgacon(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto out_gtt;
	}

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

	aperture_size = dev_priv->gtt.mappable_end;

	dev_priv->gtt.mappable =
		io_mapping_create_wc(dev_priv->gtt.mappable_base,
				     aperture_size);
	if (dev_priv->gtt.mappable == NULL) {
		ret = -EIO;
		goto out_gtt;
	}

	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
					      aperture_size);

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time. Use an ordered one.
	 */
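	/*
	 * An ordered workqueue (alloc_ordered_workqueue) executes at most one
	 * work item at any given time, in queueing order, which gives exactly
	 * the single-instance behaviour described above.
	 */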
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}

	dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->dp_wq == NULL) {
		DRM_ERROR("Failed to create our dp workqueue.\n");
		ret = -ENOMEM;
		goto out_freewq;
	}

	dev_priv->gpu_error.hangcheck_wq =
		alloc_ordered_workqueue("i915-hangcheck", 0);
	if (dev_priv->gpu_error.hangcheck_wq == NULL) {
		DRM_ERROR("Failed to create our hangcheck workqueue.\n");
		ret = -ENOMEM;
		goto out_freedpwq;
	}

	intel_irq_init(dev_priv);
	intel_uncore_sanitize(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs. It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyway to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	intel_device_info_runtime_init(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
			goto out_gem_unload;
	}

	intel_power_domains_init(dev_priv);

	ret = i915_load_modeset_init(dev);
	if (ret < 0) {
		DRM_ERROR("failed to init modeset\n");
		goto out_power_well;
	}

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
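	/*
	 * vgtif_reg() addresses a field in the paravirtual info page shared
	 * with the host's GVT-g device model; writing display_ready signals
	 * that the guest display is fully set up.
	 */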
	if (intel_vgpu_active(dev))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	i915_setup_sysfs(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
		acpi_video_register();
	}

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	i915_audio_component_init(dev_priv);

	return 0;

out_power_well:
	intel_power_domains_fini(dev_priv);
	drm_vblank_cleanup(dev);
out_gem_unload:
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);
	pm_qos_remove_request(&dev_priv->pm_qos);
	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
out_freedpwq:
	destroy_workqueue(dev_priv->dp_wq);
out_freewq:
	destroy_workqueue(dev_priv->wq);
out_mtrrfree:
	arch_phys_wc_del(dev_priv->gtt.mtrr);
	io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
	i915_global_gtt_cleanup(dev);
out_regs:
	intel_uncore_fini(dev);
	pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);
	kfree(dev_priv);
	return ret;
}

int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	i915_audio_component_cleanup(dev_priv);

	ret = i915_gem_suspend(dev);
	if (ret) {
		DRM_ERROR("failed to idle hardware: %d\n", ret);
		return ret;
	}

	intel_power_domains_fini(dev_priv);

	intel_gpu_ips_teardown();

	i915_teardown_sysfs(dev);

	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);

	io_mapping_free(dev_priv->gtt.mappable);
	arch_phys_wc_del(dev_priv->gtt.mtrr);

	acpi_video_unregister();

	intel_fbdev_fini(dev);

	drm_vblank_cleanup(dev);

	intel_modeset_cleanup(dev);

	/*
	 * Free the memory space allocated for the child device
	 * config parsed from VBT.
	 */
	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
		kfree(dev_priv->vbt.child_dev);
		dev_priv->vbt.child_dev = NULL;
		dev_priv->vbt.child_dev_num = 0;
	}

	vga_switcheroo_unregister_client(dev->pdev);
	vga_client_register(dev->pdev, NULL, NULL, NULL);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	i915_destroy_error_state(dev);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	/* Flush any outstanding unpin_work. */
	flush_workqueue(dev_priv->wq);

	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
	i915_gem_cleanup_stolen(dev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

	destroy_workqueue(dev_priv->dp_wq);
	destroy_workqueue(dev_priv->wq);
	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
	pm_qos_remove_request(&dev_priv->pm_qos);

	i915_global_gtt_cleanup(dev);

	intel_uncore_fini(dev);
	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);

	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);

	pci_dev_put(dev_priv->bridge_dev);
	kfree(dev_priv);

	return 0;
}

int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	int ret;

	ret = i915_gem_open(dev, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited. In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}

void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(dev, file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);

	intel_modeset_preclose(dev, file);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	if (file_priv && file_priv->bsd_ring)
		file_priv->bsd_ring = NULL;
	kfree(file_priv);
}

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}

const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};

int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);

/*
 * This is really ugly: Because old userspace abused the linux agp interface to
 * manage the gtt, we need to claim that all intel devices are agp. Otherwise
 * the drm core refuses to initialize the agp support code.
 */
int i915_driver_device_is_agp(struct drm_device *dev)
{
	return 1;
}