root/drivers/gpu/drm/etnaviv/etnaviv_mmu.c

DEFINITIONS

This source file includes the following definitions:
  1. etnaviv_context_unmap
  2. etnaviv_context_map
  3. etnaviv_iommu_map
  4. etnaviv_iommu_unmap
  5. etnaviv_iommu_remove_mapping
  6. etnaviv_iommu_find_iova
  7. etnaviv_iommu_insert_exact
  8. etnaviv_iommu_map_gem
  9. etnaviv_iommu_unmap_gem
  10. etnaviv_iommu_context_free
  11. etnaviv_iommu_context_put
  12. etnaviv_iommu_context_init
  13. etnaviv_iommu_restore
  14. etnaviv_iommu_get_suballoc_va
  15. etnaviv_iommu_put_suballoc_va
  16. etnaviv_iommu_dump_size
  17. etnaviv_iommu_dump
  18. etnaviv_iommu_global_init
  19. etnaviv_iommu_global_fini

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

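/*
 * Unmap a range of GPU virtual address space, one SZ_4K page at a
 * time, through the version specific pagetable ops. Stops early if
 * the backend fails to unmap a page.
 */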
static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
                                  unsigned long iova, size_t size)
{
        size_t unmapped_page, unmapped = 0;
        size_t pgsize = SZ_4K;

        if (!IS_ALIGNED(iova | size, pgsize)) {
                pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
                       iova, size, pgsize);
                return;
        }

        while (unmapped < size) {
                unmapped_page = context->global->ops->unmap(context, iova,
                                                            pgsize);
                if (!unmapped_page)
                        break;

                iova += unmapped_page;
                unmapped += unmapped_page;
        }
}

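/*
 * Map a physically contiguous range into GPU virtual address space in
 * SZ_4K pages. If any page fails to map, the pages mapped so far are
 * unmapped again, so the pagetables are either fully updated or left
 * untouched.
 */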
static int etnaviv_context_map(struct etnaviv_iommu_context *context,
                               unsigned long iova, phys_addr_t paddr,
                               size_t size, int prot)
{
        unsigned long orig_iova = iova;
        size_t pgsize = SZ_4K;
        size_t orig_size = size;
        int ret = 0;

        if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
                pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
                       iova, &paddr, size, pgsize);
                return -EINVAL;
        }

        while (size) {
                ret = context->global->ops->map(context, iova, paddr, pgsize,
                                                prot);
                if (ret)
                        break;

                iova += pgsize;
                paddr += pgsize;
                size -= pgsize;
        }

        /* unroll mapping in case something went wrong */
        if (ret)
                etnaviv_context_unmap(context, orig_iova, orig_size - size);

        return ret;
}

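/*
 * Map a scatter/gather list at the given GPU virtual address, one sg
 * entry at a time. On failure, all entries mapped so far are rolled
 * back.
 */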
static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
                             struct sg_table *sgt, unsigned len, int prot)
{
        struct scatterlist *sg;
        unsigned int da = iova;
        unsigned int i, j;
        int ret;

        if (!context || !sgt)
                return -EINVAL;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                u32 pa = sg_dma_address(sg) - sg->offset;
                size_t bytes = sg_dma_len(sg) + sg->offset;

                VERB("map[%d]: %08x %08x(%zx)", i, da, pa, bytes);

                ret = etnaviv_context_map(context, da, pa, bytes, prot);
                if (ret)
                        goto fail;

                da += bytes;
        }

        return 0;

fail:
        da = iova;

        for_each_sg(sgt->sgl, sg, i, j) {
                size_t bytes = sg_dma_len(sg) + sg->offset;

                etnaviv_context_unmap(context, da, bytes);
                da += bytes;
        }
        return ret;
}

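/* Tear down the GPU mapping of a scatter/gather list, entry by entry. */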
static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
                                struct sg_table *sgt, unsigned len)
{
        struct scatterlist *sg;
        unsigned int da = iova;
        int i;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes = sg_dma_len(sg) + sg->offset;

                etnaviv_context_unmap(context, da, bytes);

                VERB("unmap[%d]: %08x(%zx)", i, da, bytes);

                BUG_ON(!PAGE_ALIGNED(bytes));

                da += bytes;
        }
}

static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
        struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_gem_object *etnaviv_obj = mapping->object;

        etnaviv_iommu_unmap(context, mapping->vram_node.start,
                            etnaviv_obj->sgt, etnaviv_obj->base.size);
        drm_mm_remove_node(&mapping->vram_node);
}

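/*
 * Allocate a hole in the context address space for a mapping of the
 * given size. If no hole is free, unpinned mappings are evicted with
 * the drm_mm scan API and the allocation is retried in eviction mode.
 * Must be called with the context lock held.
 */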
static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
                                   struct drm_mm_node *node, size_t size)
{
        struct etnaviv_vram_mapping *free = NULL;
        enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
        int ret;

        lockdep_assert_held(&context->lock);

        while (1) {
                struct etnaviv_vram_mapping *m, *n;
                struct drm_mm_scan scan;
                struct list_head list;
                bool found;

                ret = drm_mm_insert_node_in_range(&context->mm, node,
                                                  size, 0, 0, 0, U64_MAX, mode);
                if (ret != -ENOSPC)
                        break;

                /* Try to retire some entries */
                drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);

                found = false;
                INIT_LIST_HEAD(&list);
                list_for_each_entry(free, &context->mappings, mmu_node) {
                        /* If this vram node has not been used, skip this. */
                        if (!free->vram_node.mm)
                                continue;

                        /*
                         * If the iova is pinned, then it's in-use,
                         * so we must keep its mapping.
                         */
                        if (free->use)
                                continue;

                        list_add(&free->scan_node, &list);
                        if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        /* Nothing found, clean up and fail */
                        list_for_each_entry_safe(m, n, &list, scan_node)
                                BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
                        break;
                }

                /*
                 * drm_mm does not allow any other operations while
                 * scanning, so we have to remove all blocks first.
                 * If drm_mm_scan_remove_block() returns false, we
                 * can leave the block pinned.
                 */
                list_for_each_entry_safe(m, n, &list, scan_node)
                        if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
                                list_del_init(&m->scan_node);

                /*
                 * Unmap the blocks which need to be reaped from the MMU.
                 * Clear the mmu pointer to prevent the mapping_get finding
                 * this mapping.
                 */
                list_for_each_entry_safe(m, n, &list, scan_node) {
                        etnaviv_iommu_remove_mapping(context, m);
                        m->context = NULL;
                        list_del_init(&m->mmu_node);
                        list_del_init(&m->scan_node);
                }

                mode = DRM_MM_INSERT_EVICT;

                /*
                 * We removed enough mappings so that the new allocation will
                 * succeed, retry the allocation one more time.
                 */
        }

        return ret;
}

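/* Try to place a mapping at an exact, caller supplied virtual address. */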
static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
                                      struct drm_mm_node *node, size_t size, u64 va)
{
        return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
                                           va + size, DRM_MM_INSERT_LOWEST);
}

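/*
 * Create a GPU mapping for a GEM object. On MMUv1 a contiguous buffer
 * that fits into the linear window below 0x80000000 is used through
 * the linear window directly, without touching the pagetables, unless
 * ETNA_BO_FORCE_MMU requests otherwise.
 */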
int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
        struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
        struct etnaviv_vram_mapping *mapping, u64 va)
{
        struct sg_table *sgt = etnaviv_obj->sgt;
        struct drm_mm_node *node;
        int ret;

        lockdep_assert_held(&etnaviv_obj->lock);

        mutex_lock(&context->lock);

        /* v1 MMU can optimize single entry (contiguous) scatterlists */
        if (context->global->version == ETNAVIV_IOMMU_V1 &&
            sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
                u32 iova;

                iova = sg_dma_address(sgt->sgl) - memory_base;
                if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
                        mapping->iova = iova;
                        list_add_tail(&mapping->mmu_node, &context->mappings);
                        ret = 0;
                        goto unlock;
                }
        }

        node = &mapping->vram_node;

        if (va)
                ret = etnaviv_iommu_insert_exact(context, node,
                                                 etnaviv_obj->base.size, va);
        else
                ret = etnaviv_iommu_find_iova(context, node,
                                              etnaviv_obj->base.size);
        if (ret < 0)
                goto unlock;

        mapping->iova = node->start;
        ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
                                ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

        if (ret < 0) {
                drm_mm_remove_node(node);
                goto unlock;
        }

        list_add_tail(&mapping->mmu_node, &context->mappings);
        context->flush_seq++;
unlock:
        mutex_unlock(&context->lock);

        return ret;
}

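/*
 * Drop the GPU mapping of a GEM object. Mappings that went through the
 * MMUv1 linear window have no vram node in this context and only need
 * to be unlinked from the mapping list.
 */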
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
        struct etnaviv_vram_mapping *mapping)
{
        WARN_ON(mapping->use);

        mutex_lock(&context->lock);

        /* If the vram node is on the mm, unmap and remove the node */
        if (mapping->vram_node.mm == &context->mm)
                etnaviv_iommu_remove_mapping(context, mapping);

        list_del(&mapping->mmu_node);
        context->flush_seq++;
        mutex_unlock(&context->lock);
}

static void etnaviv_iommu_context_free(struct kref *kref)
{
        struct etnaviv_iommu_context *context =
                container_of(kref, struct etnaviv_iommu_context, refcount);

        etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);

        context->global->ops->free(context);
}

void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
{
        kref_put(&context->refcount, etnaviv_iommu_context_free);
}

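/*
 * Allocate a new address space context and map the cmdbuf suballocator
 * into it. MMUv1 command buffers must be reachable through the linear
 * window, so a cmdbuf mapping above 0x80000000 is rejected.
 */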
struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
                           struct etnaviv_cmdbuf_suballoc *suballoc)
{
        struct etnaviv_iommu_context *ctx;
        int ret;

        if (global->version == ETNAVIV_IOMMU_V1)
                ctx = etnaviv_iommuv1_context_alloc(global);
        else
                ctx = etnaviv_iommuv2_context_alloc(global);

        if (!ctx)
                return NULL;

        ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
                                          global->memory_base);
        if (ret)
                goto out_free;

        if (global->version == ETNAVIV_IOMMU_V1 &&
            ctx->cmdbuf_mapping.iova > 0x80000000) {
                dev_err(global->dev,
                        "command buffer outside valid memory window\n");
                goto out_unmap;
        }

        return ctx;

out_unmap:
        etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
out_free:
        global->ops->free(ctx);
        return NULL;
}

void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
                           struct etnaviv_iommu_context *context)
{
        context->global->ops->restore(gpu, context);
}

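/*
 * Map the cmdbuf suballocator region into a context, or just take
 * another reference if it is mapped already. See the comment below for
 * the MMUv1 special case.
 */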
int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
                                  struct etnaviv_vram_mapping *mapping,
                                  u32 memory_base, dma_addr_t paddr,
                                  size_t size)
{
        mutex_lock(&context->lock);

        if (mapping->use > 0) {
                mapping->use++;
                mutex_unlock(&context->lock);
                return 0;
        }

        /*
         * For MMUv1 we don't add the suballoc region to the pagetables, as
         * those GPUs can only work with cmdbufs accessed through the linear
         * window. Instead we manufacture a mapping to make it look uniform
         * to the upper layers.
         */
        if (context->global->version == ETNAVIV_IOMMU_V1) {
                mapping->iova = paddr - memory_base;
        } else {
                struct drm_mm_node *node = &mapping->vram_node;
                int ret;

                ret = etnaviv_iommu_find_iova(context, node, size);
                if (ret < 0) {
                        mutex_unlock(&context->lock);
                        return ret;
                }

                mapping->iova = node->start;
                ret = etnaviv_context_map(context, node->start, paddr, size,
                                          ETNAVIV_PROT_READ);
                if (ret < 0) {
                        drm_mm_remove_node(node);
                        mutex_unlock(&context->lock);
                        return ret;
                }

                context->flush_seq++;
        }

        list_add_tail(&mapping->mmu_node, &context->mappings);
        mapping->use = 1;

        mutex_unlock(&context->lock);

        return 0;
}

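/*
 * Drop a reference to the suballoc mapping and tear it down once the
 * last user is gone. MMUv1 mappings are manufactured from the linear
 * window, so there is nothing to unmap for them.
 */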
void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
                                   struct etnaviv_vram_mapping *mapping)
{
        struct drm_mm_node *node = &mapping->vram_node;

        mutex_lock(&context->lock);
        mapping->use--;

        if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
                mutex_unlock(&context->lock);
                return;
        }

        etnaviv_context_unmap(context, node->start, node->size);
        drm_mm_remove_node(node);
        mutex_unlock(&context->lock);
}

size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
{
        return context->global->ops->dump_size(context);
}

void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
{
        context->global->ops->dump(context, buf);
}

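/*
 * Set up the MMU state shared by all contexts on a device: the MMU
 * version, a poison-filled bad page for the backends to point unused
 * entries at, and the pagetable array on MMUv2. The state is kept in
 * the drm device private and refcounted, so GPUs with a matching MMU
 * version share a single instance.
 */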
int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
{
        enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
        struct etnaviv_drm_private *priv = gpu->drm->dev_private;
        struct etnaviv_iommu_global *global;
        struct device *dev = gpu->drm->dev;

        if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
                version = ETNAVIV_IOMMU_V2;

        if (priv->mmu_global) {
                if (priv->mmu_global->version != version) {
                        dev_err(gpu->dev,
                                "MMU version doesn't match global version\n");
                        return -ENXIO;
                }

                priv->mmu_global->use++;
                return 0;
        }

        global = kzalloc(sizeof(*global), GFP_KERNEL);
        if (!global)
                return -ENOMEM;

        global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
                                            GFP_KERNEL);
        if (!global->bad_page_cpu)
                goto free_global;

        memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));

        if (version == ETNAVIV_IOMMU_V2) {
                global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
                                                  &global->v2.pta_dma, GFP_KERNEL);
                if (!global->v2.pta_cpu)
                        goto free_bad_page;
        }

        global->dev = dev;
        global->version = version;
        global->use = 1;
        mutex_init(&global->lock);

        if (version == ETNAVIV_IOMMU_V1)
                global->ops = &etnaviv_iommuv1_ops;
        else
                global->ops = &etnaviv_iommuv2_ops;

        priv->mmu_global = global;

        return 0;

free_bad_page:
        dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
free_global:
        kfree(global);

        return -ENOMEM;
}

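/* Drop a reference to the global MMU state and free it on the last put. */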
void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
{
        struct etnaviv_drm_private *priv = gpu->drm->dev_private;
        struct etnaviv_iommu_global *global = priv->mmu_global;

        if (--global->use > 0)
                return;

        if (global->v2.pta_cpu)
                dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
                            global->v2.pta_cpu, global->v2.pta_dma);

        if (global->bad_page_cpu)
                dma_free_wc(global->dev, SZ_4K,
                            global->bad_page_cpu, global->bad_page_dma);

        mutex_destroy(&global->lock);
        kfree(global);

        priv->mmu_global = NULL;
}
