/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include "i915_drv.h"
#include <linux/dma-buf.h>

static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		goto err;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err_unlock;

	i915_gem_object_pin_pages(obj);

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	mutex_unlock(&obj->base.dev->struct_mutex);
err:
	return ERR_PTR(ret);
}

static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	mutex_lock(&obj->base.dev->struct_mutex);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);

	mutex_unlock(&obj->base.dev->struct_mutex);
}

static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	struct sg_page_iter sg_iter;
	struct page **pages;
	int ret, i;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

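	/* A kernel mapping already exists; reuse it and bump the refcount. */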
	if (obj->dma_buf_vmapping) {
		obj->vmapping_count++;
		goto out_unlock;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err;

	i915_gem_object_pin_pages(obj);

	ret = -ENOMEM;

	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
	if (pages == NULL)
		goto err_unpin;

	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
		pages[i++] = sg_page_iter_page(&sg_iter);

	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
	drm_free_large(pages);

	if (!obj->dma_buf_vmapping)
		goto err_unpin;

	obj->vmapping_count = 1;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return obj->dma_buf_vmapping;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err:
	mutex_unlock(&dev->struct_mutex);
	return ERR_PTR(ret);
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	if (--obj->vmapping_count == 0) {
		vunmap(obj->dma_buf_vmapping);
		obj->dma_buf_vmapping = NULL;

		i915_gem_object_unpin_pages(obj);
	}
	mutex_unlock(&dev->struct_mutex);
}

static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
};

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return dma_buf_export(&exp_info);
}

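/*
 * For dma-bufs imported from another driver the backing pages are owned by
 * the exporter, so get_pages/put_pages simply map and unmap the attachment.
 */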
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *sg;

	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	obj->pages = sg;
	obj->has_dma_mapping = true;
	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	dma_buf_unmap_attachment(obj->base.import_attach,
				 obj->pages, DMA_BIDIRECTIONAL);
	obj->has_dma_mapping = false;
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem increases
			 * the refcount on the gem itself instead of the f_count
			 * of the dmabuf.
			 */
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}