root/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c


DEFINITIONS

This source file includes the following definitions:
  1. igt_request_alloc
  2. igt_emit_store_dw
  3. igt_gpu_fill_dw
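
Together these helpers drive a simple GPU write test: igt_emit_store_dw() builds a batch of store-dword commands, igt_request_alloc() allocates a request on a chosen engine, and igt_gpu_fill_dw() submits the batch to fill a vma with a known dword value, one page at a time.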

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "igt_gem_utils.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
#include "i915_vma.h"
#include "i915_drv.h"

#include "i915_request.h"

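/*
 * igt_request_alloc() - allocate a request for a selftest.
 * @ctx: the GEM context to submit under
 * @engine: the engine to submit to
 *
 * Looks up the intel_context backing @engine within @ctx and allocates a
 * request on it. Returns the new request, or an ERR_PTR on failure.
 */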
struct i915_request *
igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
        struct intel_context *ce;
        struct i915_request *rq;

        /*
         * Pinning the contexts may generate requests in order to acquire
         * GGTT space, so do this first before we reserve a seqno for
         * ourselves.
         */
        ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
        if (IS_ERR(ce))
                return ERR_CAST(ce);

        rq = intel_context_create_request(ce);
        intel_context_put(ce);

        return rq;
}

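/*
 * igt_emit_store_dw() - build a batch of GPU dword writes.
 * @vma: the target vma whose range the writes land in
 * @offset: byte offset of the first write within @vma
 * @count: number of dwords to write, one per page
 * @val: the dword value to store
 *
 * Creates an internal object and fills it with one MI_STORE_DWORD_IMM per
 * page of @vma (the command encoding varies with the hardware generation),
 * terminated by MI_BATCH_BUFFER_END. Returns the pinned batch vma, or an
 * ERR_PTR on failure.
 */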
struct i915_vma *
igt_emit_store_dw(struct i915_vma *vma,
                  u64 offset,
                  unsigned long count,
                  u32 val)
{
        struct drm_i915_gem_object *obj;
        const int gen = INTEL_GEN(vma->vm->i915);
        unsigned long n, size;
        u32 *cmd;
        int err;

        /* At most 4 dwords per store, plus one for MI_BATCH_BUFFER_END. */
        size = (4 * count + 1) * sizeof(u32);
        size = round_up(size, PAGE_SIZE);
        obj = i915_gem_object_create_internal(vma->vm->i915, size);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
        if (IS_ERR(cmd)) {
                err = PTR_ERR(cmd);
                goto err;
        }

        GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
        offset += vma->node.start;

        for (n = 0; n < count; n++) {
                if (gen >= 8) {
                        /* gen8+ takes a 64b address in two dwords */
                        *cmd++ = MI_STORE_DWORD_IMM_GEN4;
                        *cmd++ = lower_32_bits(offset);
                        *cmd++ = upper_32_bits(offset);
                        *cmd++ = val;
                } else if (gen >= 4) {
                        /* gen4-7 take a 32b address; gen4/5 address the GGTT */
                        *cmd++ = MI_STORE_DWORD_IMM_GEN4 |
                                (gen < 6 ? MI_USE_GGTT : 0);
                        *cmd++ = 0;
                        *cmd++ = offset;
                        *cmd++ = val;
                } else {
                        /* gen2/3 use virtual addressing */
                        *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
                        *cmd++ = offset;
                        *cmd++ = val;
                }
                offset += PAGE_SIZE;
        }
        *cmd = MI_BATCH_BUFFER_END;
        i915_gem_object_unpin_map(obj);

        vma = i915_vma_instance(obj, vma->vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
                goto err;

        return vma;

err:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
}

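/*
 * igt_gpu_fill_dw() - fill a vma with a known dword pattern from the GPU.
 * @vma: the pinned target vma
 * @ctx: the GEM context to submit under
 * @engine: the engine to submit to
 * @offset: byte offset of the first write within @vma
 * @count: number of dwords to write, one per page
 * @val: the dword value to store
 *
 * Emits a store-dword batch via igt_emit_store_dw() and submits it on
 * @engine, marking @vma as written by the request. Returns 0 on success
 * or a negative error code.
 */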
int igt_gpu_fill_dw(struct i915_vma *vma,
                    struct i915_gem_context *ctx,
                    struct intel_engine_cs *engine,
                    u64 offset,
                    unsigned long count,
                    u32 val)
{
        struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
        struct i915_request *rq;
        struct i915_vma *batch;
        unsigned int flags;
        int err;

        GEM_BUG_ON(vma->size > vm->total);
        GEM_BUG_ON(!intel_engine_can_store_dword(engine));
        GEM_BUG_ON(!i915_vma_is_pinned(vma));

        batch = igt_emit_store_dw(vma, offset, count, val);
        if (IS_ERR(batch))
                return PTR_ERR(batch);

        rq = igt_request_alloc(ctx, engine);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_batch;
        }

        /* Older gens need a privileged batch to execute MI_STORE_DWORD_IMM. */
        flags = 0;
        if (INTEL_GEN(vm->i915) <= 5)
                flags |= I915_DISPATCH_SECURE;

        err = engine->emit_bb_start(rq,
                                    batch->node.start, batch->node.size,
                                    flags);
        if (err)
                goto err_request;

        /* The request reads the batch and writes the target vma. */
        i915_vma_lock(batch);
        err = i915_request_await_object(rq, batch->obj, false);
        if (err == 0)
                err = i915_vma_move_to_active(batch, rq, 0);
        i915_vma_unlock(batch);
        if (err)
                goto skip_request;

        i915_vma_lock(vma);
        err = i915_request_await_object(rq, vma->obj, true);
        if (err == 0)
                err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        i915_vma_unlock(vma);
        if (err)
                goto skip_request;

        i915_request_add(rq);

        i915_vma_unpin(batch);
        i915_vma_close(batch);
        i915_vma_put(batch);

        return 0;

skip_request:
        i915_request_skip(rq, err);
err_request:
        i915_request_add(rq);
err_batch:
        i915_vma_unpin(batch);
        i915_vma_put(batch);
        return err;
}
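
/*
 * Example: filling an object from the GPU in a selftest. An illustrative
 * sketch only, not a call site from this file; obj, vm, ctx and engine are
 * assumed to have been set up by the caller. It writes 0xdeadbeef to the
 * first dword of each of 16 pages:
 *
 *	vma = i915_vma_instance(obj, vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_USER);
 *	if (err)
 *		return err;
 *
 *	err = igt_gpu_fill_dw(vma, ctx, engine, 0, 16, 0xdeadbeef);
 *	i915_vma_unpin(vma);
 *	if (err)
 *		return err;
 */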
