root/drivers/gpu/drm/etnaviv/etnaviv_sched.c

DEFINITIONS

This source file includes the following definitions:
  1. etnaviv_sched_dependency
  2. etnaviv_sched_run_job
  3. etnaviv_sched_timedout_job
  4. etnaviv_sched_free_job
  5. etnaviv_sched_push_job
  6. etnaviv_sched_init
  7. etnaviv_sched_fini

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Etnaviv Project
 */

#include <linux/moduleparam.h>

#include "etnaviv_drv.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"

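/*
 * Both knobs below are read-only (0444) module parameters: job_hang_limit
 * is the number of times a job may trigger a timeout before it is marked
 * guilty, hw_job_limit caps the number of jobs in flight on the hardware.
 * They can only be set at load time, e.g. "modprobe etnaviv hw_job_limit=8".
 */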
static int etnaviv_job_hang_limit;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int, 0444);
static int etnaviv_hw_jobs_limit = 4;
module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int, 0444);

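/*
 * Return the next unsignaled fence this job still depends on: first the
 * explicit in-fence, then the exclusive and shared reservation fences of
 * every BO in the submit. A reference to the returned fence is handed to
 * the scheduler; already signaled fences are dropped on the spot.
 */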
static struct dma_fence *
etnaviv_sched_dependency(struct drm_sched_job *sched_job,
                         struct drm_sched_entity *entity)
{
        struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
        struct dma_fence *fence;
        int i;

        if (unlikely(submit->in_fence)) {
                fence = submit->in_fence;
                submit->in_fence = NULL;

                if (!dma_fence_is_signaled(fence))
                        return fence;

                dma_fence_put(fence);
        }

        for (i = 0; i < submit->nr_bos; i++) {
                struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
                int j;

                if (bo->excl) {
                        fence = bo->excl;
                        bo->excl = NULL;

                        if (!dma_fence_is_signaled(fence))
                                return fence;

                        dma_fence_put(fence);
                }

                for (j = 0; j < bo->nr_shared; j++) {
                        if (!bo->shared[j])
                                continue;

                        fence = bo->shared[j];
                        bo->shared[j] = NULL;

                        if (!dma_fence_is_signaled(fence))
                                return fence;

                        dma_fence_put(fence);
                }
                kfree(bo->shared);
                bo->nr_shared = 0;
                bo->shared = NULL;
        }

        return NULL;
}

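/*
 * Hand the job to the hardware, unless its scheduler fence already carries
 * an error (set when the job was marked guilty during hang recovery), in
 * which case it is skipped.
 */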
static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
        struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
        struct dma_fence *fence = NULL;

        if (likely(!sched_job->s_fence->finished.error))
                fence = etnaviv_gpu_submit(submit);
        else
                dev_dbg(submit->gpu->dev, "skipping bad job\n");

        return fence;
}

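/*
 * Decide whether a job timeout is real: bail out on spurious timeouts,
 * extend the deadline while the front-end still makes progress, and only
 * then dump the core, reset the GPU and resubmit the remaining jobs.
 */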
static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
{
        struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
        struct etnaviv_gpu *gpu = submit->gpu;
        u32 dma_addr;
        int change;

        /*
         * If the GPU managed to complete this job's fence, the timeout is
         * spurious. Bail out.
         */
        if (dma_fence_is_signaled(submit->out_fence))
                return;

        /*
         * If the GPU is still making forward progress on the front-end (which
         * should never loop) we shift out the timeout to give it a chance to
         * finish the job.
         */
        dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
        change = dma_addr - gpu->hangcheck_dma_addr;
        if (change < 0 || change > 16) {
                gpu->hangcheck_dma_addr = dma_addr;
                return;
        }

        /* block scheduler */
        drm_sched_stop(&gpu->sched, sched_job);

        if (sched_job)
                drm_sched_increase_karma(sched_job);

        /* get the GPU back into the init state */
        etnaviv_core_dump(submit);
        etnaviv_gpu_recover_hang(gpu);

        drm_sched_resubmit_jobs(&gpu->sched);

        /* restart scheduler after GPU is usable again */
        drm_sched_start(&gpu->sched, true);
}

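/*
 * The scheduler is done with the job; drop the reference taken in
 * etnaviv_sched_push_job().
 */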
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
        struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);

        drm_sched_job_cleanup(sched_job);

        etnaviv_submit_put(submit);
}

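/* Back-end hooks the DRM GPU scheduler invokes for etnaviv jobs. */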
static const struct drm_sched_backend_ops etnaviv_sched_ops = {
        .dependency = etnaviv_sched_dependency,
        .run_job = etnaviv_sched_run_job,
        .timedout_job = etnaviv_sched_timedout_job,
        .free_job = etnaviv_sched_free_job,
};

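/*
 * Queue a submit on the given scheduler entity. Grabs an extra submit
 * reference for the scheduler and publishes the finished fence in the
 * GPU's fence IDR, where userspace can look it up by out_fence_id.
 */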
int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
                           struct etnaviv_gem_submit *submit)
{
        int ret;

        /*
         * Hold the fence lock across the whole operation to avoid jobs being
         * pushed out of order with regard to their sched fence seqnos as
         * allocated in drm_sched_job_init.
         */
        mutex_lock(&submit->gpu->fence_lock);

        ret = drm_sched_job_init(&submit->sched_job, sched_entity,
                                 submit->ctx);
        if (ret)
                goto out_unlock;

        submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
        submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
                                                submit->out_fence, 0,
                                                INT_MAX, GFP_KERNEL);
        if (submit->out_fence_id < 0) {
                drm_sched_job_cleanup(&submit->sched_job);
                ret = -ENOMEM;
                goto out_unlock;
        }

        /* the scheduler holds on to the job now */
        kref_get(&submit->refcount);

        drm_sched_entity_push_job(&submit->sched_job, sched_entity);

out_unlock:
        mutex_unlock(&submit->gpu->fence_lock);

        return ret;
}

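/*
 * Set up the scheduler for one GPU core with a 500 ms job timeout and the
 * limits configured through the module parameters above.
 */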
int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
        return drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
                              etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
                              msecs_to_jiffies(500), dev_name(gpu->dev));
}

void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
{
        drm_sched_fini(&gpu->sched);
}
