root/drivers/gpu/drm/msm/msm_gpummu.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. msm_gpummu_attach
  2. msm_gpummu_detach
  3. msm_gpummu_map
  4. msm_gpummu_unmap
  5. msm_gpummu_destroy
  6. msm_gpummu_new
  7. msm_gpummu_params

   1 // SPDX-License-Identifier: GPL-2.0
   2 /* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
   3 
   4 #include <linux/dma-mapping.h>
   5 
   6 #include "msm_drv.h"
   7 #include "msm_mmu.h"
   8 #include "adreno/adreno_gpu.h"
   9 #include "adreno/a2xx.xml.h"
  10 
struct msm_gpummu {
	struct msm_mmu base;	/* generic MMU interface; embedded so container_of works */
	struct msm_gpu *gpu;	/* owning GPU, used for the invalidate register writes */
	dma_addr_t pt_base;	/* DMA (bus) address of the flat pagetable */
	uint32_t *table;	/* CPU mapping of the pagetable: one 32-bit PTE per page */
};
#define to_msm_gpummu(x) container_of(x, struct msm_gpummu, base)

/* Mappable VA window starts at 16M; one PTE covers one 4k page. */
#define GPUMMU_VA_START SZ_16M
#define GPUMMU_VA_RANGE (0xfff * SZ_64K)
#define GPUMMU_PAGE_SIZE SZ_4K
/* Size in bytes of the flat pagetable covering the whole VA range. */
#define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)
  23 
/* No per-context/name setup is needed for the GPUMMU; attach is a no-op. */
static int msm_gpummu_attach(struct msm_mmu *mmu, const char * const *names,
		int cnt)
{
	return 0;
}
  29 
/* Counterpart of the no-op attach: nothing to tear down. */
static void msm_gpummu_detach(struct msm_mmu *mmu, const char * const *names,
		int cnt)
{
}
  34 
  35 static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
  36                 struct sg_table *sgt, unsigned len, int prot)
  37 {
  38         struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
  39         unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
  40         struct scatterlist *sg;
  41         unsigned prot_bits = 0;
  42         unsigned i, j;
  43 
  44         if (prot & IOMMU_WRITE)
  45                 prot_bits |= 1;
  46         if (prot & IOMMU_READ)
  47                 prot_bits |= 2;
  48 
  49         for_each_sg(sgt->sgl, sg, sgt->nents, i) {
  50                 dma_addr_t addr = sg->dma_address;
  51                 for (j = 0; j < sg->length / GPUMMU_PAGE_SIZE; j++, idx++) {
  52                         gpummu->table[idx] = addr | prot_bits;
  53                         addr += GPUMMU_PAGE_SIZE;
  54                 }
  55         }
  56 
  57         /* we can improve by deferring flush for multiple map() */
  58         gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
  59                 A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
  60                 A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
  61         return 0;
  62 }
  63 
  64 static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
  65 {
  66         struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
  67         unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
  68         unsigned i;
  69 
  70         for (i = 0; i < len / GPUMMU_PAGE_SIZE; i++, idx++)
  71                 gpummu->table[idx] = 0;
  72 
  73         gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
  74                 A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
  75                 A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
  76         return 0;
  77 }
  78 
  79 static void msm_gpummu_destroy(struct msm_mmu *mmu)
  80 {
  81         struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
  82 
  83         dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base,
  84                 DMA_ATTR_FORCE_CONTIGUOUS);
  85 
  86         kfree(gpummu);
  87 }
  88 
  89 static const struct msm_mmu_funcs funcs = {
  90                 .attach = msm_gpummu_attach,
  91                 .detach = msm_gpummu_detach,
  92                 .map = msm_gpummu_map,
  93                 .unmap = msm_gpummu_unmap,
  94                 .destroy = msm_gpummu_destroy,
  95 };
  96 
  97 struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu)
  98 {
  99         struct msm_gpummu *gpummu;
 100 
 101         gpummu = kzalloc(sizeof(*gpummu), GFP_KERNEL);
 102         if (!gpummu)
 103                 return ERR_PTR(-ENOMEM);
 104 
 105         gpummu->table = dma_alloc_attrs(dev, TABLE_SIZE + 32, &gpummu->pt_base,
 106                 GFP_KERNEL | __GFP_ZERO, DMA_ATTR_FORCE_CONTIGUOUS);
 107         if (!gpummu->table) {
 108                 kfree(gpummu);
 109                 return ERR_PTR(-ENOMEM);
 110         }
 111 
 112         gpummu->gpu = gpu;
 113         msm_mmu_init(&gpummu->base, dev, &funcs);
 114 
 115         return &gpummu->base;
 116 }
 117 
 118 void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
 119                 dma_addr_t *tran_error)
 120 {
 121         dma_addr_t base = to_msm_gpummu(mmu)->pt_base;
 122 
 123         *pt_base = base;
 124         *tran_error = base + TABLE_SIZE; /* 32-byte aligned */
 125 }

/* [<][>][^][v][top][bottom][index][help] */