root/drivers/gpu/drm/i915/gt/intel_engine_pm.c

DEFINITIONS

This source file includes the following definitions:
  1. __engine_unpark
  2. __timeline_mark_lock
  3. __timeline_mark_unlock
  4. __timeline_mark_lock
  5. __timeline_mark_unlock
  6. switch_to_kernel_context
  7. __engine_park
  8. intel_engine_init__pm

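Taken together these form a reference-counted wakeref per engine: the first
reference wakes the engine via __engine_unpark() and dropping the last one
lets it park again via __engine_park(), after switching back to the kernel
context. A minimal sketch of the intended calling pattern, assuming the
intel_engine_pm_get()/intel_engine_pm_put() wrappers from intel_engine_pm.h;
submit_work() is a hypothetical caller, not part of this file:

	#include "intel_engine_pm.h"

	static void submit_work(struct intel_engine_cs *engine)
	{
		/* First reference powers the engine up (__engine_unpark). */
		intel_engine_pm_get(engine);

		/* ... build and queue requests against the engine ... */

		/*
		 * Dropping the last reference lets the engine switch back
		 * to the kernel context and park (__engine_park).
		 */
		intel_engine_pm_put(engine);
	}
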
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"

#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_engine_pool.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"

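/*
 * Wakeref 'get' callback: hold a GT power reference for as long as the
 * engine is awake, pin the default context state so it can be copied
 * from atomic (reset) context, run the backend's unpark hook and re-arm
 * hangcheck.
 */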
static int __engine_unpark(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);
	void *map;

	GEM_TRACE("%s\n", engine->name);

	intel_gt_pm_get(engine->gt);

	/* Pin the default state for fast resets from atomic context. */
	map = NULL;
	if (engine->default_state)
		map = i915_gem_object_pin_map(engine->default_state,
					      I915_MAP_WB);
	if (!IS_ERR_OR_NULL(map))
		engine->pinned_default_state = map;

	if (engine->unpark)
		engine->unpark(engine);

	intel_engine_init_hangcheck(engine);
	return 0;
}

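/*
 * switch_to_kernel_context() builds its barrier request without taking
 * the kernel_context timeline->mutex (see the comment there), so under
 * lockdep we only annotate the mutex as acquired/released; without
 * lockdep these helpers compile away to no-ops.
 */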
#if IS_ENABLED(CONFIG_LOCKDEP)

static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
{
	unsigned long flags;

	local_irq_save(flags);
	mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);

	return flags;
}

static inline void __timeline_mark_unlock(struct intel_context *ce,
					  unsigned long flags)
{
	mutex_release(&ce->timeline->mutex.dep_map, 0, _THIS_IP_);
	local_irq_restore(flags);
}

#else

static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
{
	return 0;
}

static inline void __timeline_mark_unlock(struct intel_context *ce,
					  unsigned long flags)
{
}

#endif /* !IS_ENABLED(CONFIG_LOCKDEP) */

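/*
 * Before the engine may power down, make sure it idles in the kernel
 * context: if work has been submitted since we last idled there, queue
 * one final unpreemptable request on the kernel context and defer the
 * park behind it. Returns true when it is already safe to power down,
 * false when parking has been deferred behind the new request.
 */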
static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	unsigned long flags;
	bool result = true;

	/* Already inside the kernel context, safe to power down. */
	if (engine->wakeref_serial == engine->serial)
		return true;

	/* GPU is pointing to the void, as good as in the kernel context. */
	if (intel_gt_is_wedged(engine->gt))
		return true;

	/*
	 * Note, we do this without taking the timeline->mutex. We cannot
	 * as we may be called while retiring the kernel context and so
	 * already underneath the timeline->mutex. Instead we rely on the
	 * exclusive property of the __engine_park that prevents anyone
	 * else from creating a request on this engine. This also requires
	 * that the ring is empty and we avoid any waits while constructing
	 * the context, as they assume protection by the timeline->mutex.
	 * This should hold true as we can only park the engine after
	 * retiring the last request, thus all rings should be empty and
	 * all timelines idle.
	 */
	flags = __timeline_mark_lock(engine->kernel_context);

	rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
	if (IS_ERR(rq))
		/* Context switch failed, hope for the best! Maybe reset? */
		goto out_unlock;

	intel_timeline_enter(rq->timeline);

	/* Check again on the next retirement. */
	engine->wakeref_serial = engine->serial + 1;
	i915_request_add_active_barriers(rq);

	/* Install ourselves as a preemption barrier */
	rq->sched.attr.priority = I915_PRIORITY_UNPREEMPTABLE;
	__i915_request_commit(rq);

	/* Release our exclusive hold on the engine */
	__intel_wakeref_defer_park(&engine->wakeref);
	__i915_request_queue(rq, NULL);

	result = false;
out_unlock:
	__timeline_mark_unlock(engine->kernel_context, flags);
	return result;
}

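/*
 * Wakeref 'put' callback, called when the last reference is dropped: if
 * the engine is not yet idle in the kernel context, defer the park
 * behind one final kernel-context request and report -EBUSY; otherwise
 * undo __engine_unpark(): quiesce breadcrumbs and the buffer pool, run
 * the backend's park hook, unpin the default state and drop the GT
 * power reference.
 */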
static int __engine_park(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);

	engine->saturated = 0;

	/*
	 * If one and only one request is completed between pm events,
	 * we know that we are inside the kernel context and it is
	 * safe to power down. (We are paranoid in case that runtime
	 * suspend causes corruption to the active context image, and
	 * want to avoid that impacting userspace.)
	 */
	if (!switch_to_kernel_context(engine))
		return -EBUSY;

	GEM_TRACE("%s\n", engine->name);

	intel_engine_disarm_breadcrumbs(engine);
	intel_engine_pool_park(&engine->pool);

	/* Must be reset upon idling, or we may miss the busy wakeup. */
	GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);

	if (engine->park)
		engine->park(engine);

	if (engine->pinned_default_state) {
		i915_gem_object_unpin_map(engine->default_state);
		engine->pinned_default_state = NULL;
	}

	engine->execlists.no_priolist = false;

	intel_gt_pm_put(engine->gt);
	return 0;
}

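/*
 * Glue for the generic intel_wakeref: taking the first reference on
 * engine->wakeref calls __engine_unpark(), dropping the last reference
 * attempts __engine_park().
 */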
static const struct intel_wakeref_ops wf_ops = {
	.get = __engine_unpark,
	.put = __engine_park,
};

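/*
 * Set up the engine's wakeref at init time, tying it to the device's
 * runtime pm so that an awake engine also keeps the device awake.
 */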
void intel_engine_init__pm(struct intel_engine_cs *engine)
{
	struct intel_runtime_pm *rpm = &engine->i915->runtime_pm;

	intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_engine_pm.c"
#endif
