drivers/gpu/drm/i915/selftests/igt_spinner.c

DEFINITIONS

This source file includes the following definitions.
  1. igt_spinner_init
  2. seqno_offset
  3. hws_address
  4. move_to_active
  5. igt_spinner_create_request
  6. hws_seqno
  7. igt_spinner_end
  8. igt_spinner_fini
  9. igt_wait_for_spinner

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */
#include "gt/intel_gt.h"

#include "gem/selftests/igt_gem_utils.h"

#include "igt_spinner.h"

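/*
 * igt_spinner_init - allocate the objects backing a spinner
 *
 * Create two single-page internal objects and keep them mapped in the
 * kernel: spin->hws acts as a private status page that the spinning
 * batch writes its seqno into, and spin->obj holds the batch program
 * itself. Only gen8+ is supported (see the GEM_BUG_ON below).
 */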
int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
{
        unsigned int mode;
        void *vaddr;
        int err;

        GEM_BUG_ON(INTEL_GEN(gt->i915) < 8);

        memset(spin, 0, sizeof(*spin));
        spin->gt = gt;

        spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
        if (IS_ERR(spin->hws)) {
                err = PTR_ERR(spin->hws);
                goto err;
        }

        spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
        if (IS_ERR(spin->obj)) {
                err = PTR_ERR(spin->obj);
                goto err_hws;
        }

        i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
        vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto err_obj;
        }
        spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

        mode = i915_coherent_map_type(gt->i915);
        vaddr = i915_gem_object_pin_map(spin->obj, mode);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto err_unpin_hws;
        }
        spin->batch = vaddr;

        return 0;

err_unpin_hws:
        i915_gem_object_unpin_map(spin->hws);
err_obj:
        i915_gem_object_put(spin->obj);
err_hws:
        i915_gem_object_put(spin->hws);
err:
        return err;
}

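/*
 * Each fence context gets its own u32 slot in the status page; the
 * offset simply wraps modulo PAGE_SIZE, and hws_address() translates
 * it into a GPU address within the bound hws vma.
 */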
static unsigned int seqno_offset(u64 fence)
{
        return offset_in_page(sizeof(u32) * fence);
}

static u64 hws_address(const struct i915_vma *hws,
                       const struct i915_request *rq)
{
        return hws->node.start + seqno_offset(rq->fence.context);
}

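/*
 * Mark the vma as busy on behalf of the request: first serialise
 * against any prior work on the backing object, then track the vma as
 * active so it is kept alive while the request executes.
 */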
static int move_to_active(struct i915_vma *vma,
                          struct i915_request *rq,
                          unsigned int flags)
{
        int err;

        i915_vma_lock(vma);
        err = i915_request_await_object(rq, vma->obj,
                                        flags & EXEC_OBJECT_WRITE);
        if (err == 0)
                err = i915_vma_move_to_active(vma, rq, flags);
        i915_vma_unlock(vma);

        return err;
}

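/*
 * igt_spinner_create_request - build a request that spins on the GPU
 *
 * Pin both objects into the context's address space, then emit a batch
 * that loops forever (see the batch comment below) until stopped by
 * igt_spinner_end(). @arbitration_command selects whether the loop has
 * an arbitration point: callers typically pass MI_ARB_CHECK for a
 * preemptible spin, or MI_NOOP for one that cannot be preempted
 * mid-loop.
 */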
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
                           struct intel_context *ce,
                           u32 arbitration_command)
{
        struct intel_engine_cs *engine = ce->engine;
        struct i915_request *rq = NULL;
        struct i915_vma *hws, *vma;
        u32 *batch;
        int err;

        GEM_BUG_ON(spin->gt != ce->vm->gt);

        vma = i915_vma_instance(spin->obj, ce->vm, NULL);
        if (IS_ERR(vma))
                return ERR_CAST(vma);

        hws = i915_vma_instance(spin->hws, ce->vm, NULL);
        if (IS_ERR(hws))
                return ERR_CAST(hws);

        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
                return ERR_PTR(err);

        err = i915_vma_pin(hws, 0, 0, PIN_USER);
        if (err)
                goto unpin_vma;

        rq = intel_context_create_request(ce);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto unpin_hws;
        }

        err = move_to_active(vma, rq, 0);
        if (err)
                goto cancel_rq;

        err = move_to_active(hws, rq, 0);
        if (err)
                goto cancel_rq;

        batch = spin->batch;

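        /*
         * The batch: report the breadcrumb (store the request's seqno
         * into this context's slot in the status page), optionally
         * yield to arbitration, then branch back to the start of the
         * batch. The GPU loops here until igt_spinner_end() overwrites
         * the first dword with MI_BATCH_BUFFER_END.
         */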
        *batch++ = MI_STORE_DWORD_IMM_GEN4;
        *batch++ = lower_32_bits(hws_address(hws, rq));
        *batch++ = upper_32_bits(hws_address(hws, rq));
        *batch++ = rq->fence.seqno;

        *batch++ = arbitration_command;

        *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
        *batch++ = lower_32_bits(vma->node.start);
        *batch++ = upper_32_bits(vma->node.start);
        *batch++ = MI_BATCH_BUFFER_END; /* not reached */

        intel_gt_chipset_flush(engine->gt);

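        /*
         * With the batch flushed to memory, emit the initial breadcrumb
         * (where the engine supports one) and point the ring at our
         * spinning batch.
         */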
        if (engine->emit_init_breadcrumb &&
            rq->timeline->has_initial_breadcrumb) {
                err = engine->emit_init_breadcrumb(rq);
                if (err)
                        goto cancel_rq;
        }

        err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);

cancel_rq:
        if (err) {
                i915_request_skip(rq, err);
                i915_request_add(rq);
        }
unpin_hws:
        i915_vma_unpin(hws);
unpin_vma:
        i915_vma_unpin(vma);
        return err ? ERR_PTR(err) : rq;
}

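/* Sample the seqno the spinner has written to its status-page slot. */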
static u32
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
{
        u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

        return READ_ONCE(*seqno);
}

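/*
 * igt_spinner_end - ask the GPU to stop spinning
 *
 * Overwrite the first dword of the batch with MI_BATCH_BUFFER_END so
 * the next pass of the loop terminates, then flush so the hardware
 * sees the write.
 */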
void igt_spinner_end(struct igt_spinner *spin)
{
        *spin->batch = MI_BATCH_BUFFER_END;
        intel_gt_chipset_flush(spin->gt);
}

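/* Tear down: end the spin, then unmap and release both objects. */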
void igt_spinner_fini(struct igt_spinner *spin)
{
        igt_spinner_end(spin);

        i915_gem_object_unpin_map(spin->obj);
        i915_gem_object_put(spin->obj);

        i915_gem_object_unpin_map(spin->hws);
        i915_gem_object_put(spin->hws);
}

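/*
 * igt_wait_for_spinner - wait for the spinner to start on the GPU
 *
 * Busy-wait up to 10us, then sleep-wait up to 1s, for the batch to
 * write a seqno at or beyond the request's own into the status page.
 * Returns true if the spinner is confirmed to be running.
 */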
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
        return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
                                               rq->fence.seqno),
                             10) &&
                 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
                                            rq->fence.seqno),
                          1000));
}
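
For context, a typical selftest drives these helpers roughly as in the sketch below. This is illustrative only and not part of igt_spinner.c: the function name example_spin_on_engine is hypothetical, the intel_context is assumed to be created, pinned, and released by the caller, and the shape mirrors how the live selftests keep an engine busy with a spinner.

/* Illustrative usage sketch -- not part of igt_spinner.c. */
static int example_spin_on_engine(struct intel_gt *gt, struct intel_context *ce)
{
        struct igt_spinner spin;
        struct i915_request *rq;
        int err;

        err = igt_spinner_init(&spin, gt);
        if (err)
                return err;

        /* MI_ARB_CHECK keeps the loop preemptible. */
        rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto out;
        }

        i915_request_add(rq);
        if (!igt_wait_for_spinner(&spin, rq)) {
                err = -ETIME; /* the spinner never started on the GPU */
                goto out;
        }

        /* ... exercise the busy engine: preemption, resets, etc. ... */

out:
        igt_spinner_end(&spin);
        igt_spinner_fini(&spin);
        return err;
}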
