root/drivers/gpu/drm/xen/xen_drm_front_gem.c

DEFINITIONS

This source file includes the following definitions:
  1. to_xen_gem_obj
  2. gem_alloc_pages_array
  3. gem_free_pages_array
  4. gem_create_obj
  5. gem_create
  6. xen_drm_front_gem_create
  7. xen_drm_front_gem_free_object_unlocked
  8. xen_drm_front_gem_get_pages
  9. xen_drm_front_gem_get_sg_table
  10. xen_drm_front_gem_import_sg_table
  11. gem_mmap_obj
  12. xen_drm_front_gem_mmap
  13. xen_drm_front_gem_prime_vmap
  14. xen_drm_front_gem_prime_vunmap
  15. xen_drm_front_gem_prime_mmap

// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>

#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>

#include <xen/balloon.h>

#include "xen_drm_front.h"
#include "xen_drm_front_gem.h"

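/*
 * Driver-private GEM object: wraps the base GEM object with the array of
 * backing pages and bookkeeping for how those pages were obtained
 * (locally allocated, ballooned for backend-allocated buffers, or taken
 * from an imported PRIME scatter-gather table).
 */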
struct xen_gem_object {
        struct drm_gem_object base;

        size_t num_pages;
        struct page **pages;

        /* set for buffers allocated by the backend */
        bool be_alloc;

        /* scatter-gather table of an imported PRIME buffer */
        struct sg_table *sgt_imported;
};

static inline struct xen_gem_object *
to_xen_gem_obj(struct drm_gem_object *gem_obj)
{
        return container_of(gem_obj, struct xen_gem_object, base);
}

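/* Size and allocate the pages[] array; the pages themselves come later. */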
static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
                                 size_t buf_size)
{
        xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
        xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
                                        sizeof(struct page *), GFP_KERNEL);
        return !xen_obj->pages ? -ENOMEM : 0;
}

static void gem_free_pages_array(struct xen_gem_object *xen_obj)
{
        kvfree(xen_obj->pages);
        xen_obj->pages = NULL;
}

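/* Allocate a bare GEM object of the given size, with no backing pages yet. */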
static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
                                             size_t size)
{
        struct xen_gem_object *xen_obj;
        int ret;

        xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
        if (!xen_obj)
                return ERR_PTR(-ENOMEM);

        ret = drm_gem_object_init(dev, &xen_obj->base, size);
        if (ret < 0) {
                kfree(xen_obj);
                return ERR_PTR(ret);
        }

        return xen_obj;
}

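/*
 * Create a GEM object and its backing storage. Two strategies exist,
 * selected by the "be_alloc" configuration option: either the backend
 * allocates the buffer and we only reserve ballooned pages to map its
 * grant references into, or we allocate ordinary shmem-backed pages
 * locally and share those with the backend.
 */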
static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
{
        struct xen_drm_front_drm_info *drm_info = dev->dev_private;
        struct xen_gem_object *xen_obj;
        int ret;

        size = round_up(size, PAGE_SIZE);
        xen_obj = gem_create_obj(dev, size);
        if (IS_ERR_OR_NULL(xen_obj))
                return xen_obj;

        if (drm_info->front_info->cfg.be_alloc) {
                /*
                 * The backend will allocate space for this buffer, so
                 * only allocate the array of pointers to pages here.
                 */
                ret = gem_alloc_pages_array(xen_obj, size);
                if (ret < 0)
                        goto fail;

                /*
                 * Allocate ballooned pages which will be used to map
                 * grant references provided by the backend.
                 */
                ret = alloc_xenballooned_pages(xen_obj->num_pages,
                                               xen_obj->pages);
                if (ret < 0) {
                        DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
                                  xen_obj->num_pages, ret);
                        gem_free_pages_array(xen_obj);
                        goto fail;
                }

                xen_obj->be_alloc = true;
                return xen_obj;
        }
        /*
         * We need to allocate the backing pages now, so that we can share
         * them with the backend.
         */
        xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
        xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
        if (IS_ERR_OR_NULL(xen_obj->pages)) {
                ret = PTR_ERR(xen_obj->pages);
                xen_obj->pages = NULL;
                goto fail;
        }

        return xen_obj;

fail:
        DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
        return ERR_PTR(ret);
}

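/* Entry point for GEM object creation, exposed to the rest of the driver. */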
struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
                                                size_t size)
{
        struct xen_gem_object *xen_obj;

        xen_obj = gem_create(dev, size);
        if (IS_ERR_OR_NULL(xen_obj))
                return ERR_CAST(xen_obj);

        return &xen_obj->base;
}

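/*
 * Tear down a GEM object, undoing whichever of the allocation paths was
 * taken at creation time: imported PRIME buffer, ballooned pages for a
 * backend-allocated buffer, or locally allocated shmem pages.
 */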
void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
{
        struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

        if (xen_obj->base.import_attach) {
                drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
                gem_free_pages_array(xen_obj);
        } else {
                if (xen_obj->pages) {
                        if (xen_obj->be_alloc) {
                                free_xenballooned_pages(xen_obj->num_pages,
                                                        xen_obj->pages);
                                gem_free_pages_array(xen_obj);
                        } else {
                                drm_gem_put_pages(&xen_obj->base,
                                                  xen_obj->pages, true, false);
                        }
                }
        }
        drm_gem_object_release(gem_obj);
        kfree(xen_obj);
}

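/* Expose the backing pages to the rest of the frontend driver. */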
struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
{
        struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

        return xen_obj->pages;
}

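/* Build a scatter-gather table from the backing pages for PRIME export. */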
struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
{
        struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

        if (!xen_obj->pages)
                return ERR_PTR(-ENOMEM);

        return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
}

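/*
 * Import a PRIME buffer: wrap the pages described by the scatter-gather
 * table in a new GEM object and notify the backend about the new display
 * buffer (xen_drm_front_dbuf_create()).
 */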
struct drm_gem_object *
xen_drm_front_gem_import_sg_table(struct drm_device *dev,
                                  struct dma_buf_attachment *attach,
                                  struct sg_table *sgt)
{
        struct xen_drm_front_drm_info *drm_info = dev->dev_private;
        struct xen_gem_object *xen_obj;
        size_t size;
        int ret;

        size = attach->dmabuf->size;
        xen_obj = gem_create_obj(dev, size);
        if (IS_ERR_OR_NULL(xen_obj))
                return ERR_CAST(xen_obj);

        ret = gem_alloc_pages_array(xen_obj, size);
        if (ret < 0)
                return ERR_PTR(ret);

        xen_obj->sgt_imported = sgt;

        ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages,
                                               NULL, xen_obj->num_pages);
        if (ret < 0)
                return ERR_PTR(ret);

        ret = xen_drm_front_dbuf_create(drm_info->front_info,
                                        xen_drm_front_dbuf_to_cookie(&xen_obj->base),
                                        0, 0, 0, size, xen_obj->pages);
        if (ret < 0)
                return ERR_PTR(ret);

        DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
                  size, sgt->nents);

        return &xen_obj->base;
}

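/*
 * Map all backing pages of an object into a userspace VMA, setting up the
 * VM flags and page protection required for memory shared with the Xen
 * backend.
 */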
static int gem_mmap_obj(struct xen_gem_object *xen_obj,
                        struct vm_area_struct *vma)
{
        int ret;

        /*
         * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set
         * the vm_pgoff (used as a fake buffer offset by DRM) to 0, as we
         * want to map the whole buffer.
         */
        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_pgoff = 0;
        /*
         * According to the Xen on ARM ABI (xen/include/public/arch-arm.h):
         * all memory which is shared with other entities in the system
         * (including the hypervisor and other guests) must reside in memory
         * which is mapped as Normal Inner Write-Back Outer Write-Back
         * Inner-Shareable.
         */
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

        /*
         * The vm_operations_struct.fault handler would normally be called
         * on first CPU access to the VMA. For GPUs this is not the case,
         * as the CPU does not touch the memory. Insert all pages now, so
         * both CPU and GPU accesses work.
         * FIXME: as all pages are inserted here, no .fault handler should
         * ever be called, so none is provided.
         */
        ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
        if (ret < 0)
                DRM_ERROR("Failed to map pages into vma: %d\n", ret);

        return ret;
}

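/* mmap() callback for buffers mapped through the DRM file handle. */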
int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct xen_gem_object *xen_obj;
        struct drm_gem_object *gem_obj;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret < 0)
                return ret;

        gem_obj = vma->vm_private_data;
        xen_obj = to_xen_gem_obj(gem_obj);
        return gem_mmap_obj(xen_obj, vma);
}

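/* Create a kernel virtual mapping of the backing pages for PRIME users. */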
void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj)
{
        struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

        if (!xen_obj->pages)
                return NULL;

        /* Please see comment in gem_mmap_obj on mapping and attributes. */
        return vmap(xen_obj->pages, xen_obj->num_pages,
                    VM_MAP, PAGE_KERNEL);
}

void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
                                    void *vaddr)
{
        vunmap(vaddr);
}

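/* mmap() path for PRIME buffers, mapped via their dma-buf file descriptor. */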
int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
                                 struct vm_area_struct *vma)
{
        struct xen_gem_object *xen_obj;
        int ret;

        ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
        if (ret < 0)
                return ret;

        xen_obj = to_xen_gem_obj(gem_obj);
        return gem_mmap_obj(xen_obj, vma);
}
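
/*
 * Usage sketch (illustrative only, not a copy of the actual driver code):
 * the helpers above are meant to be wired into the driver's struct
 * drm_driver and file_operations roughly as follows; the variable name
 * below is made up for the example:
 *
 *	static struct drm_driver drv_sketch = {
 *		.gem_free_object_unlocked = xen_drm_front_gem_free_object_unlocked,
 *		.gem_prime_get_sg_table = xen_drm_front_gem_get_sg_table,
 *		.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
 *		.gem_prime_vmap = xen_drm_front_gem_prime_vmap,
 *		.gem_prime_vunmap = xen_drm_front_gem_prime_vunmap,
 *		.gem_prime_mmap = xen_drm_front_gem_prime_mmap,
 *		...
 *	};
 *
 * while xen_drm_front_gem_mmap() goes into the .mmap hook of the
 * file_operations used by the DRM device node.
 */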
