root/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c


DEFINITIONS

This source file includes the following definitions:
  1. amdgpu_job_timedout
  2. amdgpu_job_alloc
  3. amdgpu_job_alloc_with_ib
  4. amdgpu_job_free_resources
  5. amdgpu_job_free_cb
  6. amdgpu_job_free
  7. amdgpu_job_submit
  8. amdgpu_job_submit_direct
  9. amdgpu_job_dependency
  10. amdgpu_job_run

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Scheduler timeout handler: try to soft-recover the ring first (this kills
 * only the hanging job), otherwise fall back to a full GPU recovery, or
 * suspend the timeout handling when recovery is not advisable.
 */
static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
        struct amdgpu_job *job = to_amdgpu_job(s_job);
        struct amdgpu_task_info ti;

        memset(&ti, 0, sizeof(struct amdgpu_task_info));

        if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
                DRM_ERROR("ring %s timeout, but soft recovered\n",
                          s_job->sched->name);
                return;
        }

        amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
        DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
                  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
                  ring->fence_drv.sync_seq);
        DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
                  ti.process_name, ti.tgid, ti.task_name, ti.pid);

        if (amdgpu_device_should_recover_gpu(ring->adev))
                amdgpu_device_gpu_recover(ring->adev, job);
        else
                drm_sched_suspend_timeout(&ring->sched);
}

int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
                     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
        size_t size = sizeof(struct amdgpu_job);

        if (num_ibs == 0)
                return -EINVAL;

        size += sizeof(struct amdgpu_ib) * num_ibs;

        *job = kzalloc(size, GFP_KERNEL);
        if (!*job)
                return -ENOMEM;

        /*
         * Initialize the scheduler to at least some ring so that we always
         * have a pointer to adev.
         */
        (*job)->base.sched = &adev->rings[0]->sched;
        (*job)->vm = vm;
        /* the IB array lives directly behind the job structure in the same
         * allocation */
        (*job)->ibs = (void *)&(*job)[1];
        (*job)->num_ibs = num_ibs;

        amdgpu_sync_create(&(*job)->sync);
        amdgpu_sync_create(&(*job)->sched_sync);
        (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
        (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

        return 0;
}

int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
                             struct amdgpu_job **job)
{
        int r;

        r = amdgpu_job_alloc(adev, 1, job, NULL);
        if (r)
                return r;

        r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
        if (r)
                kfree(*job);

        return r;
}

/*
 * Release the job's IBs; the backing memory is fenced, so it is only reused
 * once the job has finished.
 */
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
        struct dma_fence *f;
        unsigned i;

        /* use sched fence if available */
        f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;

        for (i = 0; i < job->num_ibs; ++i)
                amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}

/* Scheduler free_job callback, called once the scheduler is done with the job */
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
        struct amdgpu_job *job = to_amdgpu_job(s_job);

        drm_sched_job_cleanup(s_job);

        amdgpu_ring_priority_put(ring, s_job->s_priority);
        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);
        kfree(job);
}

/* Free a job that is not (or no longer) owned by the scheduler */
void amdgpu_job_free(struct amdgpu_job *job)
{
        amdgpu_job_free_resources(job);

        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);
        kfree(job);
}

int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
                      void *owner, struct dma_fence **f)
{
        enum drm_sched_priority priority;
        struct amdgpu_ring *ring;
        int r;

        if (!f)
                return -EINVAL;

        r = drm_sched_job_init(&job->base, entity, owner);
        if (r)
                return r;

        job->owner = owner;
        *f = dma_fence_get(&job->base.s_fence->finished);
        amdgpu_job_free_resources(job);
        priority = job->base.s_priority;
        drm_sched_entity_push_job(&job->base, entity);

        ring = to_amdgpu_ring(entity->rq->sched);
        amdgpu_ring_priority_get(ring, priority);

        return 0;
}
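
/*
 * Illustrative sketch, NOT part of the original file: a typical caller pattern
 * for submitting a job through a scheduler entity.  The helper name
 * example_entity_submit(), the 256-byte IB size and the optional dependency
 * fence are made up for illustration; real callers fill job->ibs[0] with
 * engine-specific packets before submitting.
 */
static int example_entity_submit(struct amdgpu_device *adev,
                                 struct drm_sched_entity *entity,
                                 struct dma_fence *dependency)
{
        struct dma_fence *finished = NULL;
        struct amdgpu_job *job;
        int r;

        r = amdgpu_job_alloc_with_ib(adev, 256, &job);
        if (r)
                return r;

        /* Optional: make the job wait for an explicit dependency fence. */
        if (dependency) {
                r = amdgpu_sync_fence(adev, &job->sync, dependency, true);
                if (r)
                        goto err_free;
        }

        /* ... fill job->ibs[0] with packets here ... */

        /* Hand the job over to the GPU scheduler; it now owns the job. */
        r = amdgpu_job_submit(job, entity, AMDGPU_FENCE_OWNER_UNDEFINED,
                              &finished);
        if (r)
                goto err_free;

        /* The caller only keeps a reference to the "finished" fence. */
        dma_fence_put(finished);
        return 0;

err_free:
        amdgpu_job_free(job);
        return r;
}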

int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
                             struct dma_fence **fence)
{
        int r;

        job->base.sched = &ring->sched;
        r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
        job->fence = dma_fence_get(*fence);
        if (r)
                return r;

        amdgpu_job_free(job);
        return 0;
}
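
/*
 * Illustrative sketch, NOT part of the original file: the direct path as used
 * by ring tests and other kernel-internal submissions that bypass the
 * scheduler.  example_direct_submit() and the single-NOP IB contents are made
 * up; real callers emit engine-specific packets into ib->ptr.
 */
static int example_direct_submit(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        struct dma_fence *fence = NULL;
        struct amdgpu_job *job;
        int r;

        /* One job with a single small (64-byte) IB. */
        r = amdgpu_job_alloc_with_ib(adev, 64, &job);
        if (r)
                return r;

        /* Placeholder packet; real users write engine-specific packets. */
        job->ibs[0].ptr[0] = ring->funcs->nop;
        job->ibs[0].length_dw = 1;

        /* Push the IB straight to the ring, bypassing the scheduler. */
        r = amdgpu_job_submit_direct(job, ring, &fence);
        if (r) {
                amdgpu_job_free(job);
                return r;
        }

        /* Wait for the hardware fence, then drop our reference. */
        dma_fence_wait(fence, false);
        dma_fence_put(fence);
        return 0;
}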

/*
 * Scheduler dependency callback: return the next fence the job still has to
 * wait for before it can run.  This covers the explicit and implicit fences
 * collected in job->sync as well as the fences needed to grab a VMID.
 */
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
                                               struct drm_sched_entity *s_entity)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
        struct amdgpu_job *job = to_amdgpu_job(sched_job);
        struct amdgpu_vm *vm = job->vm;
        struct dma_fence *fence;
        bool explicit = false;
        int r;

        fence = amdgpu_sync_get_fence(&job->sync, &explicit);
        if (fence && explicit) {
                if (drm_sched_dependency_optimized(fence, s_entity)) {
                        r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
                                              fence, false);
                        if (r)
                                DRM_ERROR("Error adding fence (%d)\n", r);
                }
        }

        while (fence == NULL && vm && !job->vmid) {
                r = amdgpu_vmid_grab(vm, ring, &job->sync,
                                     &job->base.s_fence->finished,
                                     job);
                if (r)
                        DRM_ERROR("Error getting VM ID (%d)\n", r);

                fence = amdgpu_sync_get_fence(&job->sync, NULL);
        }

        return fence;
}

/*
 * Scheduler run callback: push the job's IBs to the hardware ring and return
 * the hardware fence.  The IBs are skipped if VRAM was lost since the job was
 * created or if an error is already set on the finished fence.
 */
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
        struct dma_fence *fence = NULL, *finished;
        struct amdgpu_job *job;
        int r = 0;

        job = to_amdgpu_job(sched_job);
        finished = &job->base.s_fence->finished;

        BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

        trace_amdgpu_sched_run_job(job);

        if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
                dma_fence_set_error(finished, -ECANCELED); /* skip IB as well if VRAM lost */

        if (finished->error < 0) {
                DRM_INFO("Skip scheduling IBs!\n");
        } else {
                r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
                                       &fence);
                if (r)
                        DRM_ERROR("Error scheduling IBs (%d)\n", r);
        }
        /* if gpu reset, hw fence will be replaced here */
        dma_fence_put(job->fence);
        job->fence = dma_fence_get(fence);

        amdgpu_job_free_resources(job);

        fence = r ? ERR_PTR(r) : fence;
        return fence;
}

const struct drm_sched_backend_ops amdgpu_sched_ops = {
        .dependency = amdgpu_job_dependency,
        .run_job = amdgpu_job_run,
        .timedout_job = amdgpu_job_timedout,
        .free_job = amdgpu_job_free_cb
};
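
/*
 * Illustrative sketch, NOT part of the original file: how a ring's scheduler
 * instance is typically wired to amdgpu_sched_ops (in the driver this happens
 * during fence driver / ring init).  The 10 second timeout and the exact
 * drm_sched_init() signature shown here are assumptions and differ between
 * kernel versions.
 */
static int example_init_ring_scheduler(struct amdgpu_ring *ring,
                                       unsigned int num_hw_submission)
{
        long timeout = msecs_to_jiffies(10000);

        /* Register amdgpu_sched_ops as the backend for this ring's scheduler. */
        return drm_sched_init(&ring->sched, &amdgpu_sched_ops,
                              num_hw_submission, amdgpu_job_hang_limit,
                              timeout, ring->name);
}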
