root/drivers/gpu/drm/vkms/vkms_gem.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. __vkms_gem_create
  2. vkms_gem_free_object
  3. vkms_gem_fault
  4. vkms_gem_create
  5. vkms_dumb_create
  6. _get_pages
  7. vkms_gem_vunmap
  8. vkms_gem_vmap

   1 // SPDX-License-Identifier: GPL-2.0+
   2 
   3 #include <linux/shmem_fs.h>
   4 #include <linux/vmalloc.h>
   5 
   6 #include "vkms_drv.h"
   7 
   8 static struct vkms_gem_object *__vkms_gem_create(struct drm_device *dev,
   9                                                  u64 size)
  10 {
  11         struct vkms_gem_object *obj;
  12         int ret;
  13 
  14         obj = kzalloc(sizeof(*obj), GFP_KERNEL);
  15         if (!obj)
  16                 return ERR_PTR(-ENOMEM);
  17 
  18         size = roundup(size, PAGE_SIZE);
  19         ret = drm_gem_object_init(dev, &obj->gem, size);
  20         if (ret) {
  21                 kfree(obj);
  22                 return ERR_PTR(ret);
  23         }
  24 
  25         mutex_init(&obj->pages_lock);
  26 
  27         return obj;
  28 }
  29 
  30 void vkms_gem_free_object(struct drm_gem_object *obj)
  31 {
  32         struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
  33                                                    gem);
  34 
  35         WARN_ON(gem->pages);
  36         WARN_ON(gem->vaddr);
  37 
  38         mutex_destroy(&gem->pages_lock);
  39         drm_gem_object_release(obj);
  40         kfree(gem);
  41 }
  42 
  43 vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
  44 {
  45         struct vm_area_struct *vma = vmf->vma;
  46         struct vkms_gem_object *obj = vma->vm_private_data;
  47         unsigned long vaddr = vmf->address;
  48         pgoff_t page_offset;
  49         loff_t num_pages;
  50         vm_fault_t ret = VM_FAULT_SIGBUS;
  51 
  52         page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
  53         num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE);
  54 
  55         if (page_offset > num_pages)
  56                 return VM_FAULT_SIGBUS;
  57 
  58         mutex_lock(&obj->pages_lock);
  59         if (obj->pages) {
  60                 get_page(obj->pages[page_offset]);
  61                 vmf->page = obj->pages[page_offset];
  62                 ret = 0;
  63         }
  64         mutex_unlock(&obj->pages_lock);
  65         if (ret) {
  66                 struct page *page;
  67                 struct address_space *mapping;
  68 
  69                 mapping = file_inode(obj->gem.filp)->i_mapping;
  70                 page = shmem_read_mapping_page(mapping, page_offset);
  71 
  72                 if (!IS_ERR(page)) {
  73                         vmf->page = page;
  74                         ret = 0;
  75                 } else {
  76                         switch (PTR_ERR(page)) {
  77                         case -ENOSPC:
  78                         case -ENOMEM:
  79                                 ret = VM_FAULT_OOM;
  80                                 break;
  81                         case -EBUSY:
  82                                 ret = VM_FAULT_RETRY;
  83                                 break;
  84                         case -EFAULT:
  85                         case -EINVAL:
  86                                 ret = VM_FAULT_SIGBUS;
  87                                 break;
  88                         default:
  89                                 WARN_ON(PTR_ERR(page));
  90                                 ret = VM_FAULT_SIGBUS;
  91                                 break;
  92                         }
  93                 }
  94         }
  95         return ret;
  96 }
  97 
  98 static struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
  99                                               struct drm_file *file,
 100                                               u32 *handle,
 101                                               u64 size)
 102 {
 103         struct vkms_gem_object *obj;
 104         int ret;
 105 
 106         if (!file || !dev || !handle)
 107                 return ERR_PTR(-EINVAL);
 108 
 109         obj = __vkms_gem_create(dev, size);
 110         if (IS_ERR(obj))
 111                 return ERR_CAST(obj);
 112 
 113         ret = drm_gem_handle_create(file, &obj->gem, handle);
 114         if (ret)
 115                 return ERR_PTR(ret);
 116 
 117         return &obj->gem;
 118 }
 119 
 120 int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
 121                      struct drm_mode_create_dumb *args)
 122 {
 123         struct drm_gem_object *gem_obj;
 124         u64 pitch, size;
 125 
 126         if (!args || !dev || !file)
 127                 return -EINVAL;
 128 
 129         pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
 130         size = pitch * args->height;
 131 
 132         if (!size)
 133                 return -EINVAL;
 134 
 135         gem_obj = vkms_gem_create(dev, file, &args->handle, size);
 136         if (IS_ERR(gem_obj))
 137                 return PTR_ERR(gem_obj);
 138 
 139         args->size = gem_obj->size;
 140         args->pitch = pitch;
 141 
 142         drm_gem_object_put_unlocked(gem_obj);
 143 
 144         DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
 145 
 146         return 0;
 147 }
 148 
/*
 * Lazily acquire and publish the backing page array for @vkms_obj.
 *
 * Called under pages_lock from vkms_gem_vmap(); publication still goes
 * through cmpxchg because obj->pages is also read outside this helper
 * (e.g. by the fault handler), and another path may have installed the
 * array first — presumably why the lockless publish is kept even with
 * the lock held (NOTE(review): confirm all writers of obj->pages).
 *
 * Returns the (possibly pre-existing) page array, or an ERR_PTR from
 * drm_gem_get_pages() on failure.
 */
static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
{
	struct drm_gem_object *gem_obj = &vkms_obj->gem;

	if (!vkms_obj->pages) {
		struct page **pages = drm_gem_get_pages(gem_obj);

		if (IS_ERR(pages))
			return pages;

		/* Lost the publish race: drop our copy, use the winner's. */
		if (cmpxchg(&vkms_obj->pages, NULL, pages))
			drm_gem_put_pages(gem_obj, pages, false, true);
	}

	return vkms_obj->pages;
}
 165 
 166 void vkms_gem_vunmap(struct drm_gem_object *obj)
 167 {
 168         struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
 169 
 170         mutex_lock(&vkms_obj->pages_lock);
 171         if (vkms_obj->vmap_count < 1) {
 172                 WARN_ON(vkms_obj->vaddr);
 173                 WARN_ON(vkms_obj->pages);
 174                 mutex_unlock(&vkms_obj->pages_lock);
 175                 return;
 176         }
 177 
 178         vkms_obj->vmap_count--;
 179 
 180         if (vkms_obj->vmap_count == 0) {
 181                 vunmap(vkms_obj->vaddr);
 182                 vkms_obj->vaddr = NULL;
 183                 drm_gem_put_pages(obj, vkms_obj->pages, false, true);
 184                 vkms_obj->pages = NULL;
 185         }
 186 
 187         mutex_unlock(&vkms_obj->pages_lock);
 188 }
 189 
 190 int vkms_gem_vmap(struct drm_gem_object *obj)
 191 {
 192         struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
 193         int ret = 0;
 194 
 195         mutex_lock(&vkms_obj->pages_lock);
 196 
 197         if (!vkms_obj->vaddr) {
 198                 unsigned int n_pages = obj->size >> PAGE_SHIFT;
 199                 struct page **pages = _get_pages(vkms_obj);
 200 
 201                 if (IS_ERR(pages)) {
 202                         ret = PTR_ERR(pages);
 203                         goto out;
 204                 }
 205 
 206                 vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
 207                 if (!vkms_obj->vaddr)
 208                         goto err_vmap;
 209         }
 210 
 211         vkms_obj->vmap_count++;
 212         goto out;
 213 
 214 err_vmap:
 215         ret = -ENOMEM;
 216         drm_gem_put_pages(obj, vkms_obj->pages, false, true);
 217         vkms_obj->pages = NULL;
 218 out:
 219         mutex_unlock(&vkms_obj->pages_lock);
 220         return ret;
 221 }

/* [<][>][^][v][top][bottom][index][help] */