root/drivers/gpu/drm/i915/gt/intel_gt_pm.c

DEFINITIONS

This source file includes the following definitions.
  1. pm_notify
  2. __gt_unpark
  3. __gt_park
  4. intel_gt_pm_init_early
  5. reset_engines
  6. intel_gt_sanitize
  7. intel_gt_resume
  8. intel_gt_runtime_suspend
  9. intel_gt_runtime_resume

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_params.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_pm.h"
#include "intel_wakeref.h"

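/* Broadcast a park/unpark event to listeners on the GT pm notifier chain. */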
static void pm_notify(struct drm_i915_private *i915, int state)
{
        blocking_notifier_call_chain(&i915->gt.pm_notifications, state, i915);
}

static int __gt_unpark(struct intel_wakeref *wf)
{
        struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
        struct drm_i915_private *i915 = gt->i915;

        GEM_TRACE("\n");

        /*
         * It seems that the DMC likes to transition between the DC states a lot
         * when there are no connected displays (no active power domains) during
         * command submission.
         *
         * This activity has negative impact on the performance of the chip with
         * huge latencies observed in the interrupt handler and elsewhere.
         *
         * Work around it by grabbing a GT IRQ power domain whilst there is any
         * GT activity, preventing any DC state transitions.
         */
        gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
        GEM_BUG_ON(!gt->awake);

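        /*
         * Part of the RC6 context corruption workaround: hold forcewake for
         * as long as the GT is awake so that the GPU stays out of RC6 while
         * busy; the corresponding check and release happen in __gt_park().
         */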
        if (NEEDS_RC6_CTX_CORRUPTION_WA(i915))
                intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);

        intel_enable_gt_powersave(i915);

        i915_update_gfx_val(i915);
        if (INTEL_GEN(i915) >= 6)
                gen6_rps_busy(i915);

        i915_pmu_gt_unparked(i915);

        intel_gt_queue_hangcheck(gt);

        pm_notify(i915, INTEL_GT_UNPARK);

        return 0;
}

static int __gt_park(struct intel_wakeref *wf)
{
        struct drm_i915_private *i915 =
                container_of(wf, typeof(*i915), gt.wakeref);
        intel_wakeref_t wakeref = fetch_and_zero(&i915->gt.awake);

        GEM_TRACE("\n");

        pm_notify(i915, INTEL_GT_PARK);

        i915_pmu_gt_parked(i915);
        if (INTEL_GEN(i915) >= 6)
                gen6_rps_idle(i915);

        if (NEEDS_RC6_CTX_CORRUPTION_WA(i915)) {
                i915_rc6_ctx_wa_check(i915);
                intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
        }

        /* Everything switched off, flush any residual interrupt just in case */
        intel_synchronize_irq(i915);

        GEM_BUG_ON(!wakeref);
        intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);

        return 0;
}

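/*
 * The GT wakeref: the first get unparks the GT, the final put parks it.
 * Callers typically bracket GT activity with intel_gt_pm_get()/
 * intel_gt_pm_put() (see intel_gt_resume() below), which resolve to
 * intel_wakeref_get()/intel_wakeref_put() on this wakeref.
 * INTEL_WAKEREF_PUT_ASYNC allows the final put, and hence the park, to be
 * deferred to a worker rather than run in the caller's context.
 */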
static const struct intel_wakeref_ops wf_ops = {
        .get = __gt_unpark,
        .put = __gt_park,
        .flags = INTEL_WAKEREF_PUT_ASYNC,
};

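/*
 * Set up the GT wakeref and the park/unpark notifier chain during early
 * driver load, before the GT is first woken.
 */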
void intel_gt_pm_init_early(struct intel_gt *gt)
{
        intel_wakeref_init(&gt->wakeref, &gt->i915->runtime_pm, &wf_ops);

        BLOCKING_INIT_NOTIFIER_HEAD(&gt->pm_notifications);
}

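/*
 * Try to reset all engines, but report failure instead if a full GPU reset
 * would also clobber the display.
 */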
static bool reset_engines(struct intel_gt *gt)
{
        if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
                return false;

        return __intel_gt_reset(gt, ALL_ENGINES) == 0;
}

/**
 * intel_gt_sanitize: called after the GPU has lost power
 * @gt: the i915 GT container
 * @force: ignore a failed reset and sanitize engine state anyway
 *
 * Anytime we reset the GPU, either with an explicit GPU reset or through a
 * PCI power cycle, the GPU loses state and we must reset our state tracking
 * to match. Note that calling intel_gt_sanitize() if the GPU has not
 * been reset results in much confusion!
 */
void intel_gt_sanitize(struct intel_gt *gt, bool force)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        GEM_TRACE("\n");

        intel_uc_sanitize(&gt->uc);

        if (!reset_engines(gt) && !force)
                return;

        for_each_engine(engine, gt->i915, id)
                __intel_engine_reset(engine, false);
}

int intel_gt_resume(struct intel_gt *gt)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int err = 0;

        /*
         * After resume, we may need to poke into the pinned kernel
         * contexts to paper over any damage caused by the sudden suspend.
         * Only the kernel contexts should remain pinned over suspend,
         * allowing us to fixup the user contexts on their first pin.
         */
        intel_gt_pm_get(gt);
        for_each_engine(engine, gt->i915, id) {
                struct intel_context *ce;

                intel_engine_pm_get(engine);

                ce = engine->kernel_context;
                if (ce)
                        ce->ops->reset(ce);

                engine->serial++; /* kernel context lost */
                err = engine->resume(engine);

                intel_engine_pm_put(engine);
                if (err) {
                        dev_err(gt->i915->drm.dev,
                                "Failed to restart %s (%d)\n",
                                engine->name, err);
                        break;
                }
        }
        intel_gt_pm_put(gt);

        return err;
}

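/*
 * Runtime suspend of the GT currently only involves the uC: hand the
 * transition off to intel_uc_runtime_suspend().
 */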
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
        intel_uc_runtime_suspend(&gt->uc);
}

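/*
 * On runtime resume, reapply the swizzling configuration (the register
 * state is not retained over a runtime suspend) before resuming the uC.
 */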
int intel_gt_runtime_resume(struct intel_gt *gt)
{
        intel_gt_init_swizzling(gt);

        return intel_uc_runtime_resume(&gt->uc);
}
