include/drm/gpu_scheduler.h

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

struct drm_gpu_scheduler;
struct drm_sched_rq;

enum drm_sched_priority {
        DRM_SCHED_PRIORITY_MIN,
        DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
        DRM_SCHED_PRIORITY_NORMAL,
        DRM_SCHED_PRIORITY_HIGH_SW,
        DRM_SCHED_PRIORITY_HIGH_HW,
        DRM_SCHED_PRIORITY_KERNEL,
        DRM_SCHED_PRIORITY_MAX,
        DRM_SCHED_PRIORITY_INVALID = -1,
        DRM_SCHED_PRIORITY_UNSET = -2
};

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * @list: used to append this struct to the list of entities in the
 *        runqueue.
 * @rq: runqueue on which this entity is currently scheduled.
 * @rq_list: a list of run queues on which jobs from this entity can
 *           be scheduled.
 * @num_rq_list: number of run queues in the rq_list.
 * @rq_lock: lock to modify the runqueue to which this entity belongs.
 * @job_queue: the list of jobs of this entity.
 * @fence_seq: a linearly increasing seqno incremented with each
 *             new &drm_sched_fence which is part of the entity.
 * @fence_context: a unique context for all the fences which belong
 *                 to this entity.
 *                 The &drm_sched_fence.scheduled fence uses
 *                 fence_context while &drm_sched_fence.finished uses
 *                 fence_context + 1.
 * @dependency: the dependency fence of the job which is on the top
 *              of the job queue.
 * @cb: callback for the dependency fence above.
 * @guilty: points to the context's guilty flag, which is set when a job
 *          from this entity is found to have caused a GPU hang.
 * @last_scheduled: points to the finished fence of the last scheduled job.
 * @last_user: last group leader pushing a job into the entity.
 * @stopped: marks the entity as removed from the rq and destined for
 *           termination.
 *
 * Entities emit jobs, in order, to their corresponding hardware ring,
 * and the scheduler alternates between entities based on the
 * scheduling policy.
 */
struct drm_sched_entity {
        struct list_head                list;
        struct drm_sched_rq             *rq;
        struct drm_sched_rq             **rq_list;
        unsigned int                    num_rq_list;
        spinlock_t                      rq_lock;

        struct spsc_queue               job_queue;

        atomic_t                        fence_seq;
        uint64_t                        fence_context;

        struct dma_fence                *dependency;
        struct dma_fence_cb             cb;
        atomic_t                        *guilty;
        struct dma_fence                *last_scheduled;
        struct task_struct              *last_user;
        bool                            stopped;
};

/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 *
 * A run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
        spinlock_t                      lock;
        struct drm_gpu_scheduler        *sched;
        struct list_head                entities;
        struct drm_sched_entity         *current_entity;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
        /**
         * @scheduled: this fence is what will be signaled by the scheduler
         * when the job is scheduled.
         */
        struct dma_fence                scheduled;

        /**
         * @finished: this fence is what will be signaled by the scheduler
         * when the job is completed.
         *
         * When setting up an out fence for the job, you should use
         * this, since it's available immediately upon
         * drm_sched_job_init(), and the fence returned by the driver
         * from run_job() won't be created until the dependencies have
         * resolved.
         */
        struct dma_fence                finished;

        /**
         * @parent: the fence returned by &drm_sched_backend_ops.run_job
         * when scheduling the job on hardware. We signal the
         * &drm_sched_fence.finished fence once parent is signalled.
         */
        struct dma_fence                *parent;
        /**
         * @sched: the scheduler instance to which the job carrying this
         * fence belongs.
         */
        struct drm_gpu_scheduler        *sched;
        /**
         * @lock: the lock used by the scheduled and the finished fences.
         */
        spinlock_t                      lock;
        /**
         * @owner: job owner for debugging.
         */
        void                            *owner;
};
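
/*
 * Example: exposing &drm_sched_fence.finished as the job's out fence, as
 * recommended above. Illustrative sketch only; foo_submit_out_fence() is
 * hypothetical, not part of this API.
 *
 *      int foo_submit_out_fence(struct drm_sched_job *job)
 *      {
 *              struct dma_fence *out;
 *
 *              // Valid right after drm_sched_job_init(), long before
 *              // run_job() produces the hardware fence.
 *              out = dma_fence_get(&job->s_fence->finished);
 *
 *              // ... install "out" in a sync file / syncobj for userspace,
 *              // then drop the local reference with dma_fence_put(out).
 *              return 0;
 *      }
 */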

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of the job.
 * @finish_cb: the callback for the finished fence.
 * @node: used to append this struct to the
 *        &drm_gpu_scheduler.ring_mirror_list.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: incremented on every hang caused by this job. If this exceeds the
 *         hang limit of the scheduler then the job is marked guilty and will
 *         not be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(); the driver
 * should then call drm_sched_entity_push_job() once it wants the scheduler
 * to schedule the job.
 */
struct drm_sched_job {
        struct spsc_node                queue_node;
        struct drm_gpu_scheduler        *sched;
        struct drm_sched_fence          *s_fence;
        struct dma_fence_cb             finish_cb;
        struct list_head                node;
        uint64_t                        id;
        atomic_t                        karma;
        enum drm_sched_priority         s_priority;
        struct drm_sched_entity         *entity;
        struct dma_fence_cb             cb;
};
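
/*
 * Example: the submission flow described above. Illustrative sketch only;
 * struct foo_job and foo_submit() are hypothetical driver code.
 *
 *      int foo_submit(struct drm_sched_entity *entity, struct foo_job *fjob,
 *                     void *owner)
 *      {
 *              int ret;
 *
 *              // Binds the job to the entity's scheduler and allocates
 *              // the scheduled/finished fences in fjob->base.s_fence.
 *              ret = drm_sched_job_init(&fjob->base, entity, owner);
 *              if (ret)
 *                      return ret;
 *
 *              // Hands the job over to the scheduler; from here on the
 *              // scheduler owns it and will eventually call run_job().
 *              drm_sched_entity_push_job(&fjob->base, entity);
 *              return 0;
 *      }
 */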

/**
 * drm_sched_invalidate_job - bump a job's karma and check it against a limit
 * @s_job: the job to check, may be NULL
 * @threshold: karma threshold above which the job is considered guilty
 *
 * Increments @s_job's karma and returns true if the new value exceeds
 * @threshold. Returns false if @s_job is NULL.
 */
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
                                            int threshold)
{
        return s_job && atomic_inc_return(&s_job->karma) > threshold;
}
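
/*
 * Example: using drm_sched_invalidate_job() from a driver's timeout handler
 * to decide whether a repeatedly hanging job should be skipped on resubmit.
 * Illustrative sketch only; foo_skip_job() is hypothetical.
 *
 *      static void foo_timedout_job(struct drm_sched_job *sched_job)
 *      {
 *              // hang_limit is whatever the driver passed to
 *              // drm_sched_init().
 *              if (drm_sched_invalidate_job(sched_job,
 *                                           sched_job->sched->hang_limit))
 *                      foo_skip_job(sched_job);
 *
 *              // ... trigger the driver's reset/recovery path ...
 *      }
 */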

/**
 * struct drm_sched_backend_ops
 *
 * Defines the backend operations called by the scheduler;
 * these functions should be implemented on the driver side.
 */
struct drm_sched_backend_ops {
        /**
         * @dependency: Called when the scheduler is considering scheduling
         * this job next, to get another struct dma_fence for this job to
         * block on.  Once it returns NULL, run_job() may be called.
         */
        struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
                                        struct drm_sched_entity *s_entity);

        /**
         * @run_job: Called to execute the job once all of the dependencies
         * have been resolved.  This may be called multiple times, if
         * timedout_job() has happened and drm_sched_resubmit_jobs()
         * decides to try it again.
         */
        struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

        /**
         * @timedout_job: Called when a job has taken too long to execute,
         * to trigger GPU recovery.
         */
        void (*timedout_job)(struct drm_sched_job *sched_job);

        /**
         * @free_job: Called once the job's finished fence has been signaled
         * and it's time to clean it up.
         */
        void (*free_job)(struct drm_sched_job *sched_job);
};
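
/*
 * Example: a minimal backend implementation. Illustrative sketch only; the
 * foo_*() helpers and to_foo_job() are hypothetical driver code, and real
 * drivers will do considerably more in each hook.
 *
 *      static struct dma_fence *
 *      foo_dependency(struct drm_sched_job *sched_job,
 *                     struct drm_sched_entity *s_entity)
 *      {
 *              // Return the next fence this job must wait on, or NULL
 *              // once all dependencies have been handed out.
 *              return foo_next_dependency(to_foo_job(sched_job));
 *      }
 *
 *      static struct dma_fence *foo_run_job(struct drm_sched_job *sched_job)
 *      {
 *              // Write the job to the hardware ring and return a fence
 *              // that signals when the hardware has finished it; the
 *              // scheduler stores it as s_fence->parent.
 *              return foo_ring_submit(to_foo_job(sched_job));
 *      }
 *
 *      static void foo_timedout_job(struct drm_sched_job *sched_job)
 *      {
 *              foo_gpu_recover(sched_job->sched);
 *      }
 *
 *      static void foo_free_job(struct drm_sched_job *sched_job)
 *      {
 *              drm_sched_job_cleanup(sched_job);
 *              kfree(to_foo_job(sched_job));
 *      }
 *
 *      static const struct drm_sched_backend_ops foo_sched_ops = {
 *              .dependency = foo_dependency,
 *              .run_job = foo_run_job,
 *              .timedout_job = foo_timedout_job,
 *              .free_job = foo_free_job,
 *      };
 */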

/**
 * struct drm_gpu_scheduler
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once drm_sched_entity_flush() is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @work_tdr: schedules a delayed call to drm_sched_job_timedout() after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @ring_mirror_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the ring_mirror_list.
 * @hang_limit: once the hangs by a job cross this limit then it is marked
 *              guilty and will not be considered for scheduling further.
 * @num_jobs: the number of jobs currently queued in the scheduler.
 * @ready: marks whether the underlying HW is ready to work.
 * @free_guilty: a hint to the timeout handler to free the guilty job.
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
        const struct drm_sched_backend_ops      *ops;
        uint32_t                        hw_submission_limit;
        long                            timeout;
        const char                      *name;
        struct drm_sched_rq             sched_rq[DRM_SCHED_PRIORITY_MAX];
        wait_queue_head_t               wake_up_worker;
        wait_queue_head_t               job_scheduled;
        atomic_t                        hw_rq_count;
        atomic64_t                      job_id_count;
        struct delayed_work             work_tdr;
        struct task_struct              *thread;
        struct list_head                ring_mirror_list;
        spinlock_t                      job_list_lock;
        int                             hang_limit;
        atomic_t                        num_jobs;
        bool                            ready;
        bool                            free_guilty;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
                   const struct drm_sched_backend_ops *ops,
                   uint32_t hw_submission, unsigned hang_limit, long timeout,
                   const char *name);
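
/*
 * Example: creating one scheduler per hardware ring. Illustrative sketch
 * only; foo_sched_ops, struct foo_ring, the queue depth and the limits are
 * made up.
 *
 *      int foo_init_ring_scheduler(struct foo_ring *ring)
 *      {
 *              // 32 jobs in flight on the hardware queue, 3 hangs allowed
 *              // before a job is marked guilty, 500 ms job timeout.
 *              return drm_sched_init(&ring->sched, &foo_sched_ops,
 *                                    32, 3, msecs_to_jiffies(500),
 *                                    ring->name);
 *      }
 */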

void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
                       struct drm_sched_entity *entity,
                       void *owner);
void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
                                    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);
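
/*
 * Example: a device-reset sequence built from the functions above, loosely
 * following what existing drivers do during recovery. Illustrative sketch
 * only; foo_hw_reset() and the single-ring layout are hypothetical.
 *
 *      static void foo_gpu_recover(struct foo_device *fdev,
 *                                  struct drm_sched_job *bad)
 *      {
 *              // Stop the scheduler and park its kthread; "bad" is the
 *              // job that triggered the timeout.
 *              drm_sched_stop(&fdev->ring.sched, bad);
 *
 *              // Account the hang against the offending job/entity.
 *              drm_sched_increase_karma(bad);
 *
 *              foo_hw_reset(fdev);
 *
 *              // Re-run the jobs from the mirror list, then restart the
 *              // scheduler with full recovery.
 *              drm_sched_resubmit_jobs(&fdev->ring.sched);
 *              drm_sched_start(&fdev->ring.sched, true);
 *      }
 */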

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
                             struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
                                struct drm_sched_entity *entity);

int drm_sched_entity_init(struct drm_sched_entity *entity,
                          struct drm_sched_rq **rq_list,
                          unsigned int num_rq_list,
                          atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
                               struct drm_sched_entity *entity);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
                                   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
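
/*
 * Example: per-context entity setup and teardown. Illustrative sketch only;
 * struct foo_ctx and the choice of a single NORMAL-priority run queue are
 * hypothetical.
 *
 *      int foo_ctx_init(struct foo_ctx *ctx, struct drm_gpu_scheduler *sched)
 *      {
 *              struct drm_sched_rq *rq =
 *                      &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 *
 *              // One candidate run queue; pass several to let the entity
 *              // be placed across schedulers via
 *              // drm_sched_entity_select_rq().
 *              return drm_sched_entity_init(&ctx->entity, &rq, 1,
 *                                           &ctx->guilty);
 *      }
 *
 *      void foo_ctx_fini(struct foo_ctx *ctx)
 *      {
 *              // Flushes remaining jobs and tears the entity down.
 *              drm_sched_entity_destroy(&ctx->entity);
 *      }
 */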

struct drm_sched_fence *drm_sched_fence_create(
        struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
                              unsigned long remaining);

#endif /* _DRM_GPU_SCHEDULER_H_ */
