root/drivers/gpu/drm/i915/gem/i915_gem_object_types.h


DEFINITIONS

This source file includes the following definitions:
  1. to_intel_bo

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_TYPES_H__
#define __I915_GEM_OBJECT_TYPES_H__

#include <drm/drm_gem.h>

#include "i915_active.h"
#include "i915_selftest.h"

struct drm_i915_gem_object;
struct intel_frontbuffer;

/*
 * struct i915_lut_handle tracks the fast lookups from handle to vma used
 * for execbuf. Although we use a radixtree for that mapping, the entries
 * must also be removed when the object or context is closed, so we need a
 * secondary list and a translation entry (i915_lut_handle).
 */
struct i915_lut_handle {
        struct list_head obj_link;
        struct i915_gem_context *ctx;
        u32 handle;
};

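/*
 * An illustrative sketch, not part of this header: on object close, each
 * i915_lut_handle on the object's lut_list names the slot to erase from
 * the owning context's handles_vma radixtree. Locking, vma teardown and
 * the driver's slab cache for lut entries are omitted; kfree() stands in.
 *
 *	static void example_revoke_luts(struct drm_i915_gem_object *obj)
 *	{
 *		struct i915_lut_handle *lut, *ln;
 *
 *		list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
 *			radix_tree_delete(&lut->ctx->handles_vma, lut->handle);
 *			list_del(&lut->obj_link);
 *			kfree(lut);
 *		}
 *	}
 */
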
struct drm_i915_gem_object_ops {
        unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_IS_SHRINKABLE   BIT(1)
#define I915_GEM_OBJECT_IS_PROXY        BIT(2)
#define I915_GEM_OBJECT_NO_GGTT         BIT(3)
#define I915_GEM_OBJECT_ASYNC_CANCEL    BIT(4)

        /* Interface between the GEM object and its backing storage.
         * get_pages() is called once prior to the use of the associated set
         * of pages before they are bound into the GTT, and put_pages() is
         * called after we no longer need them. As we expect there to be an
         * associated cost with migrating pages between the backing storage
         * and making them available for the GPU (e.g. clflush), we may hold
         * onto the pages after they are no longer referenced by the GPU
         * in case they may be used again shortly (for example migrating the
         * pages to a different memory domain within the GTT). put_pages()
         * will therefore most likely be called when the object itself is
         * being released or under memory pressure (where we attempt to
         * reap pages for the shrinker).
         */
        int (*get_pages)(struct drm_i915_gem_object *obj);
        void (*put_pages)(struct drm_i915_gem_object *obj,
                          struct sg_table *pages);
        void (*truncate)(struct drm_i915_gem_object *obj);
        void (*writeback)(struct drm_i915_gem_object *obj);

        int (*pwrite)(struct drm_i915_gem_object *obj,
                      const struct drm_i915_gem_pwrite *arg);

        int (*dmabuf_export)(struct drm_i915_gem_object *obj);
        void (*release)(struct drm_i915_gem_object *obj);
};

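/*
 * An illustrative sketch of consuming the ops flags (the helper name is
 * an example, not an i915 API): backends whose pages come from shmem set
 * I915_GEM_OBJECT_HAS_STRUCT_PAGE, so callers can tell whether the
 * backing sg_table maps real struct pages.
 *
 *	static bool example_has_struct_page(struct drm_i915_gem_object *obj)
 *	{
 *		return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
 *	}
 */
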
struct drm_i915_gem_object {
        struct drm_gem_object base;

        const struct drm_i915_gem_object_ops *ops;

        struct {
                /**
                 * @vma.lock: protect the list/tree of vmas
                 */
                spinlock_t lock;

                /**
                 * @vma.list: List of VMAs backed by this object
                 *
                 * The VMAs on this list are ordered by type: all GGTT VMAs
                 * are placed at the head and all ppGTT VMAs at the tail.
                 * The different types of GGTT VMA are unordered between
                 * themselves, so use @vma.tree (which has a defined order
                 * between all VMAs) to quickly find an exact match.
                 */
                struct list_head list;

                /**
                 * @vma.tree: Ordered tree of VMAs backed by this object
                 *
                 * All VMAs created for this object are placed in @vma.tree
                 * for fast retrieval via a binary search in
                 * i915_vma_instance(). They are also added to @vma.list for
                 * easy iteration.
                 */
                struct rb_root tree;
        } vma;

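        /*
         * An illustrative sketch (the helper is hypothetical; obj_link and
         * i915_vma_is_ggtt() come from i915_vma.h): since @vma.list is
         * ordered with GGTT VMAs first, a walk under @vma.lock can stop at
         * the first ppGTT entry.
         *
         *	static unsigned int count_ggtt_vma(struct drm_i915_gem_object *obj)
         *	{
         *		struct i915_vma *vma;
         *		unsigned int count = 0;
         *
         *		spin_lock(&obj->vma.lock);
         *		list_for_each_entry(vma, &obj->vma.list, obj_link) {
         *			if (!i915_vma_is_ggtt(vma))
         *				break;
         *			count++;
         *		}
         *		spin_unlock(&obj->vma.lock);
         *		return count;
         *	}
         */
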
        /**
         * @lut_list: List of vma lookup entries in use for this object.
         *
         * If this object is closed, we need to remove all of its VMAs from
         * the fast lookup index in the associated contexts; @lut_list
         * provides this translation from object to context->handles_vma.
         */
        struct list_head lut_list;

        /** Stolen memory for this object, instead of being backed by shmem. */
        struct drm_mm_node *stolen;
        union {
                struct rcu_head rcu;
                struct llist_node freed;
        };

        /**
         * Whether the object is currently in the GGTT mmap.
         */
        unsigned int userfault_count;
        struct list_head userfault_link;

        I915_SELFTEST_DECLARE(struct list_head st_link);

        /*
         * The caching mode of the object (see enum i915_cache_level),
         * and whether reads and writes through the CPU cache are
         * coherent with GPU access to the backing pages.
         */
        unsigned int cache_level:3;
        unsigned int cache_coherent:2;
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
        unsigned int cache_dirty:1;

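        /*
         * An illustrative sketch of how the coherency bits are meant to be
         * consulted (hypothetical helper): if the CPU cache is not coherent
         * for writes, freshly written data must be clflushed before the GPU
         * samples the pages.
         *
         *	static bool cpu_write_needs_clflush(const struct drm_i915_gem_object *obj)
         *	{
         *		return !(obj->cache_coherent &
         *			 I915_BO_CACHE_COHERENT_FOR_WRITE);
         *	}
         */
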
        /**
         * @read_domains: Read memory domains.
         *
         * These monitor which caches contain read/write data related to the
         * object. When transitioning from one set of domains to another,
         * the driver is called to ensure that caches are suitably flushed and
         * invalidated.
         */
        u16 read_domains;

        /**
         * @write_domain: Corresponding unique write memory domain.
         */
        u16 write_domain;

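        /*
         * An illustrative sketch using the I915_GEM_DOMAIN_* flags from
         * uapi/drm/i915_drm.h (flush_write_domain() is a hypothetical
         * stand-in): before the CPU reads the pages, a write tracked in
         * any other domain must first be flushed.
         *
         *	if (obj->write_domain &&
         *	    obj->write_domain != I915_GEM_DOMAIN_CPU)
         *		flush_write_domain(obj);
         */
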
        struct intel_frontbuffer *frontbuffer;

        /** Current tiling mode and stride for the object, packed into one field. */
        unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)

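        /*
         * An illustrative sketch of how the packed value decomposes; this
         * mirrors i915_gem_object_get_tiling()/get_stride(). The
         * I915_TILING_* mode fits in the low bits because a fenceable
         * stride is always a multiple of FENCE_MINIMUM_STRIDE.
         *
         *	tiling = obj->tiling_and_stride & TILING_MASK;
         *	stride = obj->tiling_and_stride & STRIDE_MASK;
         */
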
        /** Count of VMA actually bound by this object */
        atomic_t bind_count;
        /** Count of how many global VMA are currently pinned for use by HW */
        unsigned int pin_global;

        struct {
                struct mutex lock; /* protects the pages and their use */
                atomic_t pages_pin_count;

                struct sg_table *pages;
                void *mapping;

                /* TODO: whack some of this into the error state */
                struct i915_page_sizes {
                        /**
                         * The sg mask of the pages sg_table, i.e. the mask
                         * of the lengths for each sg entry.
                         */
                        unsigned int phys;

                        /**
                         * The gtt page sizes we are allowed to use given the
                         * sg mask and the supported page sizes. This will
                         * express the smallest unit we can use for the whole
                         * object, as well as the larger sizes we may be able
                         * to use opportunistically.
                         */
                        unsigned int sg;

                        /**
                         * The actual gtt page size usage. Since we can have
                         * multiple vma associated with this object we need to
                         * prevent any trampling of state, hence a copy of this
                         * struct also lives in each vma. The gtt value here
                         * should therefore only be read/written through the
                         * vma.
                         */
                        unsigned int gtt;
                } page_sizes;

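                /*
                 * An illustrative sketch of deriving the phys mask above,
                 * mirroring the i915_sg_page_sizes() helper: OR-ing the sg
                 * lengths records each distinct chunk size, and the lowest
                 * set bit bounds the smallest usable GTT page size.
                 *
                 *	for (; sg; sg = sg_next(sg))
                 *		page_sizes |= sg->length;
                 */
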
                I915_SELFTEST_DECLARE(unsigned int page_mask);

                struct i915_gem_object_page_iter {
                        struct scatterlist *sg_pos;
                        unsigned int sg_idx; /* in pages, but 32bit eek! */

                        struct radix_tree_root radix;
                        struct mutex lock; /* protects this cache */
                } get_page;

                /**
                 * Element within i915->mm.unbound_list or i915->mm.bound_list,
                 * locked by i915->mm.obj_lock.
                 */
                struct list_head link;

                /**
                 * Advice: are the backing pages purgeable?
                 */
                unsigned int madv:2;

                /**
                 * This is set if the object has been written to since the
                 * pages were last acquired.
                 */
                bool dirty:1;

                /**
                 * This is set if the object has been pinned due to unknown
                 * swizzling.
                 */
                bool quirked:1;
        } mm;

        /** Record of address bit 17 of each page at last unbind. */
        unsigned long *bit_17;

        union {
                struct i915_gem_userptr {
                        uintptr_t ptr;

                        struct i915_mm_struct *mm;
                        struct i915_mmu_object *mmu_object;
                        struct work_struct *work;
                } userptr;

                unsigned long scratch;

                void *gvt_info;
        };
};

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
        /* Assert that to_intel_bo(NULL) == NULL */
        BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

        return container_of(gem, struct drm_i915_gem_object, base);
}

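/*
 * An illustrative sketch of to_intel_bo() in use, e.g. from a DRM callback
 * handed the embedded &drm_gem_object (the helper name is an example).
 * Because @base sits at offset 0, enforced by the BUILD_BUG_ON() above,
 * the container_of() degenerates to a plain cast and to_intel_bo(NULL)
 * really is NULL.
 */
static inline bool example_is_tiled(struct drm_gem_object *gem)
{
        struct drm_i915_gem_object *obj = to_intel_bo(gem);

        return obj->tiling_and_stride & TILING_MASK;
}
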
#endif
