root/drivers/gpu/drm/etnaviv/etnaviv_iommu.c


DEFINITIONS

This source file includes the following definitions.
  1. to_v1_context
  2. etnaviv_iommuv1_free
  3. etnaviv_iommuv1_map
  4. etnaviv_iommuv1_unmap
  5. etnaviv_iommuv1_dump_size
  6. etnaviv_iommuv1_dump
  7. etnaviv_iommuv1_restore
  8. etnaviv_iommuv1_context_alloc

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014-2018 Etnaviv Project
 */

#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "state_hi.xml.h"

#define PT_SIZE         SZ_2M
#define PT_ENTRIES      (PT_SIZE / sizeof(u32))

#define GPU_MEM_START   0x80000000

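/*
 * MMUv1 uses a single flat page table: PT_ENTRIES (SZ_2M / sizeof(u32)
 * = 512K) 32-bit entries, one per 4K page, covering a 2 GiB GPU address
 * window that starts at GPU_MEM_START.
 */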
struct etnaviv_iommuv1_context {
        struct etnaviv_iommu_context base;
        u32 *pgtable_cpu;
        dma_addr_t pgtable_dma;
};

static struct etnaviv_iommuv1_context *
to_v1_context(struct etnaviv_iommu_context *context)
{
        return container_of(context, struct etnaviv_iommuv1_context, base);
}

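/*
 * Tear down the shared context once its last reference is dropped:
 * release the address space manager, free the page table and clear the
 * global shared-context pointer so a fresh context can be created.
 */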
static void etnaviv_iommuv1_free(struct etnaviv_iommu_context *context)
{
        struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);

        drm_mm_takedown(&context->mm);

        dma_free_wc(context->global->dev, PT_SIZE, v1_context->pgtable_cpu,
                    v1_context->pgtable_dma);

        context->global->v1.shared_context = NULL;

        kfree(v1_context);
}

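/*
 * Map a single 4K page by writing its physical address straight into
 * the flat table; MMUv1 has no per-entry permission bits, so prot is
 * ignored and only SZ_4K-sized mappings are accepted.
 */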
static int etnaviv_iommuv1_map(struct etnaviv_iommu_context *context,
                               unsigned long iova, phys_addr_t paddr,
                               size_t size, int prot)
{
        struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
        unsigned int index = (iova - GPU_MEM_START) / SZ_4K;

        if (size != SZ_4K)
                return -EINVAL;

        v1_context->pgtable_cpu[index] = paddr;

        return 0;
}

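/*
 * Unmapping redirects the entry to the global scratch ("bad") page
 * rather than clearing it, so a stray GPU access through a stale
 * mapping hits a known safe page instead of arbitrary memory.
 */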
static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_context *context,
        unsigned long iova, size_t size)
{
        struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
        unsigned int index = (iova - GPU_MEM_START) / SZ_4K;

        if (size != SZ_4K)
                return -EINVAL;

        v1_context->pgtable_cpu[index] = context->global->bad_page_dma;

        return SZ_4K;
}

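/*
 * Dump hooks for GPU state snapshots: the whole 2 MiB page table is
 * copied out verbatim.
 */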
static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_context *context)
{
        return PT_SIZE;
}

static void etnaviv_iommuv1_dump(struct etnaviv_iommu_context *context,
                                 void *buf)
{
        struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);

        memcpy(buf, v1_context->pgtable_cpu, PT_SIZE);
}

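/*
 * Reprogram the MMU state (e.g. after a GPU reset): write the memory
 * window base and the page table address into the MC registers of each
 * client engine (FE, TX, PE, PEZ, RA).
 */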
static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
                                    struct etnaviv_iommu_context *context)
{
        struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
        u32 pgtable;

        /* set base addresses */
        gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
        gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
        gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, context->global->memory_base);
        gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, context->global->memory_base);
        gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, context->global->memory_base);

        /* set page table address in MC */
        pgtable = (u32)v1_context->pgtable_dma;

        gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
        gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
        gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable);
        gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable);
        gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}

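/* v1 MMU callbacks, invoked through the generic etnaviv MMU layer. */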
const struct etnaviv_iommu_ops etnaviv_iommuv1_ops = {
        .free = etnaviv_iommuv1_free,
        .map = etnaviv_iommuv1_map,
        .unmap = etnaviv_iommuv1_unmap,
        .dump_size = etnaviv_iommuv1_dump_size,
        .dump = etnaviv_iommuv1_dump,
        .restore = etnaviv_iommuv1_restore,
};

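/*
 * Allocate the MMUv1 context, or take a reference on the existing one:
 * the 2 MiB page table comes from write-combined DMA memory, and every
 * entry initially points at the scratch page.
 */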
struct etnaviv_iommu_context *
etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global *global)
{
        struct etnaviv_iommuv1_context *v1_context;
        struct etnaviv_iommu_context *context;

        mutex_lock(&global->lock);

        /*
         * MMUv1 does not support switching between different contexts without
         * a stop the world operation, so we only support a single shared
         * context with this version.
         */
        if (global->v1.shared_context) {
                context = global->v1.shared_context;
                etnaviv_iommu_context_get(context);
                mutex_unlock(&global->lock);
                return context;
        }

        v1_context = kzalloc(sizeof(*v1_context), GFP_KERNEL);
        if (!v1_context) {
                mutex_unlock(&global->lock);
                return NULL;
        }

        v1_context->pgtable_cpu = dma_alloc_wc(global->dev, PT_SIZE,
                                               &v1_context->pgtable_dma,
                                               GFP_KERNEL);
        if (!v1_context->pgtable_cpu)
                goto out_free;

        memset32(v1_context->pgtable_cpu, global->bad_page_dma, PT_ENTRIES);

        context = &v1_context->base;
        context->global = global;
        kref_init(&context->refcount);
        mutex_init(&context->lock);
        INIT_LIST_HEAD(&context->mappings);
        drm_mm_init(&context->mm, GPU_MEM_START, PT_ENTRIES * SZ_4K);
        context->global->v1.shared_context = context;

        mutex_unlock(&global->lock);

        return context;

out_free:
        mutex_unlock(&global->lock);
        kfree(v1_context);
        return NULL;
}
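
/*
 * Illustrative sketch, not part of the original file: how a caller
 * might obtain the shared MMUv1 context and map a single 4K page
 * through the ops table above. The helper name is hypothetical, the
 * etnaviv_iommu_context_put() cleanup is an assumption about the
 * surrounding etnaviv MMU layer (etnaviv_mmu.c), which normally drives
 * these ops itself, and a real caller would reserve the iova from
 * context->mm first rather than picking it by hand.
 */
static int __maybe_unused example_map_one_page(struct etnaviv_iommu_global *global,
                                               phys_addr_t paddr)
{
        struct etnaviv_iommu_context *ctx;
        unsigned long iova = GPU_MEM_START;     /* first page of the window */
        int ret;

        ctx = etnaviv_iommuv1_context_alloc(global);
        if (!ctx)
                return -ENOMEM;

        /* MMUv1 only accepts 4K mappings; prot has no effect. */
        ret = etnaviv_iommuv1_ops.map(ctx, iova, paddr, SZ_4K, 0);
        if (!ret)
                etnaviv_iommuv1_ops.unmap(ctx, iova, SZ_4K);

        etnaviv_iommu_context_put(ctx);         /* assumed reference drop */
        return ret;
}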
