/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/device.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>

static struct drm_driver driver;

#define GEN_DEFAULT_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }

#define GEN_CHV_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  CHV_PIPE_C_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   CHV_TRANSCODER_C_OFFSET, }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
			     CHV_PALETTE_C_OFFSET }

#define CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }

#define IVB_CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }

static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

#define GEN7_FEATURES \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_fbc = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.has_llc = 1

static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.is_mobile = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_valleyview_m_info = {
	GEN7_FEATURES,
	.is_mobile = 1,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_valleyview_d_info = {
	GEN7_FEATURES,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_haswell_d_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_haswell_m_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.is_mobile = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_cherryview_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	GEN_CHV_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_skylake_info = {
	.is_preliminary = 1,
	.is_skylake = 1,
	.gen = 9, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_skylake_gt3_info = {
	.is_preliminary = 1,
	.is_skylake = 1,
	.gen = 9, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

/*
 * Make sure any device matches here are from most specific to most
 * general. For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
#define INTEL_PCI_IDS \
	INTEL_I830_IDS(&intel_i830_info), \
	INTEL_I845G_IDS(&intel_845g_info), \
	INTEL_I85X_IDS(&intel_i85x_info), \
	INTEL_I865G_IDS(&intel_i865g_info), \
	INTEL_I915G_IDS(&intel_i915g_info), \
	INTEL_I915GM_IDS(&intel_i915gm_info), \
	INTEL_I945G_IDS(&intel_i945g_info), \
	INTEL_I945GM_IDS(&intel_i945gm_info), \
	INTEL_I965G_IDS(&intel_i965g_info), \
	INTEL_G33_IDS(&intel_g33_info), \
	INTEL_I965GM_IDS(&intel_i965gm_info), \
	INTEL_GM45_IDS(&intel_gm45_info), \
	INTEL_G45_IDS(&intel_g45_info), \
	INTEL_PINEVIEW_IDS(&intel_pineview_info), \
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \
	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
	INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
	INTEL_VLV_D_IDS(&intel_valleyview_d_info), \
	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), \
	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
	INTEL_CHV_IDS(&intel_cherryview_info), \
	INTEL_SKL_GT1_IDS(&intel_skylake_info), \
	INTEL_SKL_GT2_IDS(&intel_skylake_info), \
	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info)

static const struct pci_device_id pciidlist[] = {	/* aka */
	INTEL_PCI_IDS,
	{0, 0, 0}
};

#if defined(CONFIG_DRM_I915_KMS)
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif

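/*
 * intel_detect_pch - detect the PCH (south display) variant
 *
 * Scan the ISA bridge devices on the bus and, for the first Intel bridge
 * found, derive dev_priv->pch_type and dev_priv->pch_id from its device ID.
 * Devices with num_pipes == 0 are treated as PCH_NOP.
 */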
void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes == 0 is equivalent to the PCH_NOP
	 * setting (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough easy for a VMM, which then only
	 * needs to expose the ISA bridge to let the driver know the real
	 * hardware underneath. This is a requirement from the virtualization
	 * team.
	 *
	 * In some virtualized environments (e.g. XEN) there is an irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev));
			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
				WARN_ON(!IS_SKYLAKE(dev));
			} else
				continue;

			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}

bool i915_semaphore_is_enabled(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		return false;

	if (i915.semaphores >= 0)
		return i915.semaphores;

	/* TODO: make semaphores and Execlists play nicely together */
	if (i915.enable_execlists)
		return false;

	/* Until we get further testing... */
	if (IS_GEN8(dev))
		return false;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);

	dev_priv->long_hpd_port_mask = 0;
	dev_priv->short_hpd_port_mask = 0;
	dev_priv->hpd_event_bits = 0;

	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->dig_port_work);
	cancel_work_sync(&dev_priv->hotplug_work);
	cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
}

static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_encoder *encoder;

	drm_modeset_lock_all(dev);
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		if (intel_encoder->suspend)
			intel_encoder->suspend(intel_encoder);
	}
	drm_modeset_unlock_all(dev);
}

static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);

static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	pci_power_t opregion_target_state;
	int error;

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_display_set_init_power(dev_priv, true);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	error = i915_gem_suspend(dev);
	if (error) {
		dev_err(&dev->pdev->dev,
			"GEM idle failed, resume might fail\n");
		return error;
	}

	intel_suspend_gt_powersave(dev);

	/*
	 * Disable CRTCs directly since we want to preserve sw state
	 * for _thaw. Also, power gate the CRTC power wells.
	 */
	drm_modeset_lock_all(dev);
	for_each_crtc(dev, crtc)
		intel_crtc_control(crtc, false);
	drm_modeset_unlock_all(dev);

	intel_dp_mst_suspend(dev);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev);

	i915_gem_suspend_gtt_mappings(dev);

	i915_save_state(dev);

	opregion_target_state = PCI_D3cold;
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		opregion_target_state = PCI_D1;
#endif
	intel_opregion_notify_adapter(dev, opregion_target_state);

	intel_uncore_forcewake_reset(dev, false);
	intel_opregion_fini(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_display_set_init_power(dev_priv, false);

	return 0;
}

static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	int ret;

	ret = intel_suspend_complete(dev_priv);

	if (ret) {
		DRM_ERROR("Suspend complete failed: %d\n", ret);

		return ret;
	}

	pci_disable_device(drm_dev->pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
	 */
	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
		pci_set_power_state(drm_dev->pdev, PCI_D3hot);

	return 0;
}

int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
			 state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(dev);
	if (error)
		return error;

	return i915_drm_suspend_late(dev, false);
}

static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	i915_gem_restore_gtt_mappings(dev);
	mutex_unlock(&dev->struct_mutex);

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	intel_init_pch_refclk(dev);
	drm_mode_config_reset(dev);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	mutex_lock(&dev->struct_mutex);
	if (i915_gem_init_hw(dev)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
		atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
	}
	mutex_unlock(&dev->struct_mutex);

	intel_modeset_init_hw(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, true);
	drm_modeset_unlock_all(dev);

	intel_dp_mst_resume(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);
	/* Config may have changed between suspend and resume */
	drm_helper_hpd_irq_event(dev);

	intel_opregion_init(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_opregion_notify_adapter(dev, PCI_D0);

	drm_kms_helper_poll_enable(dev);

	return 0;
}

static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);

	if (IS_VALLEYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing resume\n", ret);

	intel_uncore_early_sanitize(dev, true);

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);

	intel_uncore_sanitize(dev);
	intel_power_domains_init_hw(dev_priv);

	return ret;
}

int i915_resume_legacy(struct drm_device *dev)
{
	int ret;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(dev);
	if (ret)
		return ret;

	return i915_drm_resume(dev);
}

/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip. Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool simulated;
	int ret;

	if (!i915.reset)
		return 0;

	intel_reset_gt_powersave(dev);

	mutex_lock(&dev->struct_mutex);

	i915_gem_reset(dev);

	simulated = dev_priv->gpu_error.stop_rings != 0;

	ret = intel_gpu_reset(dev);

	/* Also reset the gpu hangman. */
	if (simulated) {
		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
		dev_priv->gpu_error.stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_INFO("Reset not implemented, but ignoring "
				 "error for simulated gpu hangs\n");
			ret = 0;
		}
	}

	if (i915_stop_ring_allow_warn(dev_priv))
		pr_notice("drm/i915: Resetting chip after gpu hang\n");

	if (ret) {
		DRM_ERROR("Failed to reset chip: %i\n", ret);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	intel_overlay_reset(dev_priv);

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there. Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */

	/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
	dev_priv->gpu_error.reload_in_reset = true;

	ret = i915_gem_init_hw(dev);

	dev_priv->gpu_error.reload_in_reset = false;

	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("Failed hw init on reset %d\n", ret);
		return ret;
	}

	/*
	 * rps/rc6 re-init is necessary to restore state lost after the
	 * reset and the re-install of gt irqs. Skip for ironlake per
	 * previous concerns that it doesn't respond well to some forms
	 * of re-init after reset.
	 */
	if (INTEL_INFO(dev)->gen > 5)
		intel_enable_gt_powersave(dev);

	return 0;
}

static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct intel_device_info *intel_info =
		(struct intel_device_info *) ent->driver_data;

	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
		DRM_INFO("This hardware requires preliminary hardware support.\n"
			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
		return -ENODEV;
	}

	/* Only bind to function 0 of the device. Early generations
	 * used function 1 as a placeholder for multi-head. This causes
	 * us confusion instead, especially on the systems where both
	 * functions have the same PCI-ID!
	 */
	if (PCI_FUNC(pdev->devfn))
		return -ENODEV;

	driver.driver_features &= ~(DRIVER_USE_AGP);

	return drm_get_pci_dev(pdev, ent, &driver);
}

static void
i915_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static int i915_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(drm_dev);
}

static int i915_pm_suspend_late(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(drm_dev, false);
}

static int i915_pm_poweroff_late(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(drm_dev, true);
}

static int i915_pm_resume_early(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(drm_dev);
}

static int i915_pm_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(drm_dev);
}

static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
{
	hsw_enable_pc8(dev_priv);

	return 0;
}

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that based on the above 3 criteria can be safely
 * ignored, we save/restore all others, practically treating the HW context as
 * a black-box for the driver. Further investigation is needed to reduce the
 * saved/restored registers even further, by following the same 3 criteria.
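 *
 * The restore path, vlv_restore_gunit_s0ix_state(), mirrors this function
 * and writes the same register groups back in the same order.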
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	int i;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode = I915_READ(ARB_MODE);
	s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);

	s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);

	s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk = I915_READ(GAM_ECOCHK);
	s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl = I915_READ(VLV_G3DCTL);
	s->gsckgctl = I915_READ(VLV_GSCKGCTL);
	s->mbctl = I915_READ(GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
	s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
	s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
	s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
	s->rstctl = I915_READ(GEN6_RSTCTL);
	s->misccpctl = I915_READ(GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause = I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc = I915_READ(GEN6_RPDEUC);
	s->ecobus = I915_READ(ECOBUS);
	s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata = I915_READ(VLV_RCEDATA);
	s->spare2gh = I915_READ(VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr = I915_READ(GTIMR);
	s->gt_ier = I915_READ(GTIER);
	s->pm_imr = I915_READ(GEN6_PMIMR);
	s->pm_ier = I915_READ(GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4);

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl = I915_READ(TILECTL);
	s->gt_fifoctl = I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz = I915_READ(VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
	s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
	s->pcbr = I915_READ(VLV_PCBR);
	s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT, 0x9800-0x9EC0
	 * SARB, 0xB000-0xB1FC
	 * GAC, 0x5208-0x524C, 0x14000-0x14C000
	 * PCI CFG
	 */
}

static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	u32 val;
	int i;

	/* GAM 0x4000-0x4770 */
	I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
	I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
	I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
	I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
	I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);

	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

	I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
	I915_WRITE(GAM_ECOCHK, s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);

	I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL, s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
	I915_WRITE(GEN6_MBCTL, s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL, s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
	I915_WRITE(ECOBUS, s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA, s->rcedata);
	I915_WRITE(VLV_SPAREG2H, s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR, s->gt_imr);
	I915_WRITE(GTIER, s->gt_ier);
	I915_WRITE(GEN6_PMIMR, s->pm_imr);
	I915_WRITE(GEN6_PMIER, s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL, s->tilectl);
	I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
	/*
	 * Preserve the GT allow wake and GFX force clock bit, they are not
	 * to be restored, as they are used to control the s0ix suspend/resume
	 * sequence by the caller.
	 */
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	I915_WRITE(VLV_PMWGICZ, s->pmwgicz);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
	I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
	I915_WRITE(VLV_PCBR, s->pcbr);
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}

int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;

#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	if (!force_on)
		return 0;

	err = wait_for(COND, 20);
	if (err)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return err;
#undef COND
}

static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 val;
	int err = 0;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
	      allow)
	err = wait_for(COND, 1);
	if (err)
		DRM_ERROR("timeout disabling GT waking\n");
	return err;
#undef COND
}

static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				 bool wait_for_on)
{
	u32 mask;
	u32 val;
	int err;

	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;
#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
	if (COND)
		return 0;

	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
		      wait_for_on ? "on" : "off",
		      I915_READ(VLV_GTLC_PW_STATUS));

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
	 * valleyview_enable_rps), use 3 msec for safety.
	 */
	err = wait_for(COND, 3);
	if (err)
		DRM_ERROR("timeout waiting for GT wells to go %s\n",
			  wait_for_on ? "on" : "off");

	return err;
#undef COND
}

static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;

	DRM_ERROR("GT register access while GT waking disabled\n");
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}

static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT well on flags as debug only, so
	 * don't treat them as hard failures.
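	 * The wait below is therefore informational only and its return
	 * value is deliberately ignored (hence the (void) cast).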
	 */
	(void)vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;

	if (!IS_CHERRYVIEW(dev_priv->dev))
		vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume)
{
	struct drm_device *dev = dev_priv->dev;
	int err;
	int ret;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	if (!IS_CHERRYVIEW(dev_priv->dev))
		vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	if (rpm_resume) {
		intel_init_clock_gating(dev);
		i915_gem_restore_fences(dev);
	}

	return ret;
}

static int intel_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
		return -ENODEV;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Suspending device\n");

	/*
	 * We could deadlock here in case another thread holding struct_mutex
	 * calls RPM suspend concurrently, since the RPM suspend will wait
	 * first for this RPM suspend to finish. In this case the concurrent
	 * RPM resume will be followed by its RPM suspend counterpart. Still
	 * for consistency return -EAGAIN, which will reschedule this suspend.
	 */
	if (!mutex_trylock(&dev->struct_mutex)) {
		DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
		/*
		 * Bump the expiration timestamp, otherwise the suspend won't
		 * be rescheduled.
		 */
		pm_runtime_mark_last_busy(device);

		return -EAGAIN;
	}
	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_release_all_mmaps(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	intel_suspend_gt_powersave(dev);
	intel_runtime_pm_disable_interrupts(dev_priv);

	ret = intel_suspend_complete(dev_priv);
	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_runtime_pm_enable_interrupts(dev_priv);

		return ret;
	}

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	intel_uncore_forcewake_reset(dev, false);
	dev_priv->pm.suspended = true;

	/*
	 * FIXME: We really should find a document that references the
	 * arguments used below!
	 */
	if (IS_HASWELL(dev)) {
		/*
		 * current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev, PCI_D1);
	} else {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 * Let's assume the other non-Haswell platforms will stay the
		 * same as Broadwell.
		 */
		intel_opregion_notify_adapter(dev, PCI_D3hot);
	}

	assert_forcewakes_inactive(dev_priv);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}

static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;

	if (IS_GEN6(dev_priv))
		intel_init_pch_refclk(dev);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, true);

	/*
	 * There's no point in rolling back things in case of an error, as
	 * the best we can do is to hope that things will still work (and
	 * disable RPM).
	 */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_enable_interrupts(dev_priv);
	intel_enable_gt_powersave(dev);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}

/*
 * This function implements common functionality of runtime and system
 * suspend sequence.
 */
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		ret = hsw_suspend_complete(dev_priv);
	else if (IS_VALLEYVIEW(dev))
		ret = vlv_suspend_complete(dev_priv);
	else
		ret = 0;

	return ret;
}

static const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_suspend,
	.freeze_late = i915_pm_suspend_late,
	.thaw_early = i915_pm_resume_early,
	.thaw = i915_pm_resume,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_resume_early,
	.restore = i915_pm_resume,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};

static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

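/*
 * Note that the feature flags set below are only the compile-time defaults;
 * i915_init() and i915_pci_probe() adjust driver_features at load time
 * (KMS vs. UMS selection, atomic, AGP).
 */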
static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_USE_AGP |
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,
	.set_busid = drm_pci_set_busid,

	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
	.suspend = i915_suspend_legacy,
	.resume = i915_resume_legacy,

	.device_is_agp = i915_driver_device_is_agp,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_free_object = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = drm_gem_dumb_destroy,
	.ioctls = i915_ioctls,
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,
};

static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;

	/*
	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
	 * explicitly disabled with the module parameter.
	 *
	 * Otherwise, just follow the parameter (defaulting to off).
	 *
	 * Allow optional vga_text_mode_force boot option to override
	 * the default behavior.
	 */
#if defined(CONFIG_DRM_I915_KMS)
	if (i915.modeset != 0)
		driver.driver_features |= DRIVER_MODESET;
#endif
	if (i915.modeset == 1)
		driver.driver_features |= DRIVER_MODESET;

#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force() && i915.modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;
#endif

	if (!(driver.driver_features & DRIVER_MODESET)) {
		driver.get_vblank_timestamp = NULL;
		/* Silently fail loading to not upset userspace. */
		DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
		return 0;
	}

	/*
	 * FIXME: Note that we're lying to the DRM core here so that we can
	 * get access to the atomic ioctl and the atomic properties. Only
	 * plane operations on a single CRTC will actually work.
	 */
	if (i915.nuclear_pageflip)
		driver.driver_features |= DRIVER_ATOMIC;

	return drm_pci_init(&driver, &i915_pci_driver);
}

static void __exit i915_exit(void)
{
	if (!(driver.driver_features & DRIVER_MODESET))
		return; /* Never loaded a driver. */

	drm_pci_exit(&driver, &i915_pci_driver);
}

module_init(i915_init);
module_exit(i915_exit);

MODULE_AUTHOR("Tungsten Graphics, Inc.");
MODULE_AUTHOR("Intel Corporation");

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");