drivers/tee/tee_shm_pool.c


DEFINITIONS

This source file includes the following definitions.
  1. pool_op_gen_alloc
  2. pool_op_gen_free
  3. pool_op_gen_destroy_poolmgr
  4. tee_shm_pool_alloc_res_mem
  5. tee_shm_pool_mgr_alloc_res_mem
  6. check_mgr_ops
  7. tee_shm_pool_alloc
  8. tee_shm_pool_free

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Linaro Limited
 */
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include "tee_private.h"

/* Allocate zeroed shared memory for @shm from the manager's gen_pool */
static int pool_op_gen_alloc(struct tee_shm_pool_mgr *poolm,
                             struct tee_shm *shm, size_t size)
{
        unsigned long va;
        struct gen_pool *genpool = poolm->private_data;
        size_t s = roundup(size, 1 << genpool->min_alloc_order);

        va = gen_pool_alloc(genpool, s);
        if (!va)
                return -ENOMEM;

        memset((void *)va, 0, s);
        shm->kaddr = (void *)va;
        shm->paddr = gen_pool_virt_to_phys(genpool, va);
        shm->size = s;
        return 0;
}

/* Return the memory backing @shm to the manager's gen_pool */
static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm,
                             struct tee_shm *shm)
{
        gen_pool_free(poolm->private_data, (unsigned long)shm->kaddr,
                      shm->size);
        shm->kaddr = NULL;
}

/* Tear down the gen_pool and the manager itself */
static void pool_op_gen_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
{
        gen_pool_destroy(poolm->private_data);
        kfree(poolm);
}

static const struct tee_shm_pool_mgr_ops pool_ops_generic = {
        .alloc = pool_op_gen_alloc,
        .free = pool_op_gen_free,
        .destroy_poolmgr = pool_op_gen_destroy_poolmgr,
};
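
/*
 * Illustrative sketch, not part of this file: the ops table above is the
 * only contract a pool manager has to satisfy, so a manager does not have
 * to be backed by a gen_pool.  The example below hands out pages straight
 * from the page allocator instead.  All pool_op_example_* names are made
 * up for illustration.
 */
#if 0	/* example only, not compiled */
static int pool_op_example_alloc(struct tee_shm_pool_mgr *poolm,
                                 struct tee_shm *shm, size_t size)
{
        size_t s = roundup(size, PAGE_SIZE);
        /* Zeroed, physically contiguous memory from the page allocator */
        void *va = alloc_pages_exact(s, GFP_KERNEL | __GFP_ZERO);

        if (!va)
                return -ENOMEM;

        shm->kaddr = va;
        shm->paddr = virt_to_phys(va);
        shm->size = s;
        return 0;
}

static void pool_op_example_free(struct tee_shm_pool_mgr *poolm,
                                 struct tee_shm *shm)
{
        free_pages_exact(shm->kaddr, shm->size);
        shm->kaddr = NULL;
}

static void pool_op_example_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
{
        kfree(poolm);
}

static const struct tee_shm_pool_mgr_ops pool_ops_example = {
        .alloc = pool_op_example_alloc,
        .free = pool_op_example_free,
        .destroy_poolmgr = pool_op_example_destroy_poolmgr,
};
#endif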

/**
 * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
 * memory range
 * @priv_info:   Information for driver private shared memory pool
 * @dmabuf_info: Information for dma-buf shared memory pool
 *
 * Start and end of both pools must be page aligned.
 *
 * Allocations with the TEE_SHM_DMA_BUF flag set will use the range supplied
 * in @dmabuf_info, others will use the range provided by @priv_info.
 *
 * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
 */
struct tee_shm_pool *
tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
                           struct tee_shm_pool_mem_info *dmabuf_info)
{
        struct tee_shm_pool_mgr *priv_mgr;
        struct tee_shm_pool_mgr *dmabuf_mgr;
        void *rc;

        /*
         * Create the pool for driver private shared memory
         */
        rc = tee_shm_pool_mgr_alloc_res_mem(priv_info->vaddr, priv_info->paddr,
                                            priv_info->size,
                                            3 /* 8 byte aligned */);
        if (IS_ERR(rc))
                return rc;
        priv_mgr = rc;

        /*
         * Create the pool for dma_buf shared memory
         */
        rc = tee_shm_pool_mgr_alloc_res_mem(dmabuf_info->vaddr,
                                            dmabuf_info->paddr,
                                            dmabuf_info->size, PAGE_SHIFT);
        if (IS_ERR(rc))
                goto err_free_priv_mgr;
        dmabuf_mgr = rc;

        rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
        if (IS_ERR(rc))
                goto err_free_dmabuf_mgr;

        return rc;

err_free_dmabuf_mgr:
        tee_shm_pool_mgr_destroy(dmabuf_mgr);
err_free_priv_mgr:
        tee_shm_pool_mgr_destroy(priv_mgr);

        return rc;
}
EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
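
/*
 * Illustrative sketch, not part of this file: a typical caller (e.g. a TEE
 * driver with a memremap()ed reserved-memory carveout) splits the carveout
 * into a driver-private range and a dma-buf range and passes both to
 * tee_shm_pool_alloc_res_mem().  The function name, the 1 MiB split and the
 * variable names are made up for illustration.
 */
#if 0	/* example only, not compiled */
static struct tee_shm_pool *example_config_shm(void *va, phys_addr_t pa,
                                               size_t size)
{
        const size_t priv_size = SZ_1M;	/* arbitrary split for the example */
        struct tee_shm_pool_mem_info priv_info = {
                .vaddr = (unsigned long)va,
                .paddr = pa,
                .size = priv_size,
        };
        struct tee_shm_pool_mem_info dmabuf_info = {
                .vaddr = (unsigned long)va + priv_size,
                .paddr = pa + priv_size,
                .size = size - priv_size,
        };

        /* Both ranges must be page aligned, see the kernel-doc above */
        return tee_shm_pool_alloc_res_mem(&priv_info, &dmabuf_info);
}
#endif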

struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr,
                                                        phys_addr_t paddr,
                                                        size_t size,
                                                        int min_alloc_order)
{
        const size_t page_mask = PAGE_SIZE - 1;
        struct tee_shm_pool_mgr *mgr;
        int rc;

        /* Start and end must be page aligned */
        if (vaddr & page_mask || paddr & page_mask || size & page_mask)
                return ERR_PTR(-EINVAL);

        mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
        if (!mgr)
                return ERR_PTR(-ENOMEM);

        mgr->private_data = gen_pool_create(min_alloc_order, -1);
        if (!mgr->private_data) {
                rc = -ENOMEM;
                goto err;
        }

        gen_pool_set_algo(mgr->private_data, gen_pool_best_fit, NULL);
        rc = gen_pool_add_virt(mgr->private_data, vaddr, paddr, size, -1);
        if (rc) {
                gen_pool_destroy(mgr->private_data);
                goto err;
        }

        mgr->ops = &pool_ops_generic;

        return mgr;
err:
        kfree(mgr);

        return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(tee_shm_pool_mgr_alloc_res_mem);

static bool check_mgr_ops(struct tee_shm_pool_mgr *mgr)
{
        return mgr && mgr->ops && mgr->ops->alloc && mgr->ops->free &&
                mgr->ops->destroy_poolmgr;
}

struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr,
                                        struct tee_shm_pool_mgr *dmabuf_mgr)
{
        struct tee_shm_pool *pool;

        if (!check_mgr_ops(priv_mgr) || !check_mgr_ops(dmabuf_mgr))
                return ERR_PTR(-EINVAL);

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return ERR_PTR(-ENOMEM);

        pool->private_mgr = priv_mgr;
        pool->dma_buf_mgr = dmabuf_mgr;

        return pool;
}
EXPORT_SYMBOL_GPL(tee_shm_pool_alloc);
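
/*
 * Illustrative sketch, not part of this file: tee_shm_pool_alloc() accepts
 * any pair of managers whose ops are complete, so the two halves of a pool
 * can come from different backends.  The pool_ops_example table from the
 * sketch further up and the example_* names are made up for illustration.
 */
#if 0	/* example only, not compiled */
static struct tee_shm_pool *example_mixed_pool(struct tee_shm_pool_mgr *priv_mgr)
{
        struct tee_shm_pool_mgr *dmabuf_mgr;
        struct tee_shm_pool *pool;

        /* Hypothetical manager backed by the page allocator, no private data */
        dmabuf_mgr = kzalloc(sizeof(*dmabuf_mgr), GFP_KERNEL);
        if (!dmabuf_mgr)
                return ERR_PTR(-ENOMEM);
        dmabuf_mgr->ops = &pool_ops_example;

        pool = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
        if (IS_ERR(pool))
                tee_shm_pool_mgr_destroy(dmabuf_mgr);

        return pool;
}
#endif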

/**
 * tee_shm_pool_free() - Free a shared memory pool
 * @pool:       The shared memory pool to free
 *
 * There must be no remaining shared memory allocated from this pool when
 * this function is called.
 */
void tee_shm_pool_free(struct tee_shm_pool *pool)
{
        if (pool->private_mgr)
                tee_shm_pool_mgr_destroy(pool->private_mgr);
        if (pool->dma_buf_mgr)
                tee_shm_pool_mgr_destroy(pool->dma_buf_mgr);
        kfree(pool);
}
EXPORT_SYMBOL_GPL(tee_shm_pool_free);
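
/*
 * Illustrative sketch, not part of this file: the pool is typically freed
 * from the driver's remove/shutdown path, and only after every tee_shm
 * allocated from it has been released.  The example_* name is made up for
 * illustration.
 */
#if 0	/* example only, not compiled */
static void example_teardown(struct tee_shm_pool *pool)
{
        /* Legal only once no shared memory from @pool remains allocated */
        tee_shm_pool_free(pool);
}
#endif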
