root/arch/sparc/kernel/iommu-common.c


DEFINITIONS

This source file includes the following definitions:
  1. need_flush
  2. set_flush
  3. clear_flush
  4. setup_iommu_pool_hash
  5. iommu_tbl_pool_init
  6. iommu_tbl_range_alloc
  7. get_pool
  8. iommu_tbl_range_free

// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU mmap management and range allocation functions.
 * Based almost entirely upon the powerpc iommu allocator.
 */

#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/hash.h>
#include <asm/iommu-common.h>

static unsigned long iommu_large_alloc = 15;

static DEFINE_PER_CPU(unsigned int, iommu_hash_common);

static inline bool need_flush(struct iommu_map_table *iommu)
{
        return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
}

static inline void set_flush(struct iommu_map_table *iommu)
{
        iommu->flags |= IOMMU_NEED_FLUSH;
}

static inline void clear_flush(struct iommu_map_table *iommu)
{
        iommu->flags &= ~IOMMU_NEED_FLUSH;
}

static void setup_iommu_pool_hash(void)
{
        unsigned int i;
        static bool do_once;

        if (do_once)
                return;
        do_once = true;
        for_each_possible_cpu(i)
                per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS);
}
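
/*
 * Illustrative sketch (not part of the original file): the per-CPU hash
 * computed above is consumed only in iommu_tbl_range_alloc(), where it
 * is masked down to a pool index.  Assuming nr_pools is a power of two
 * (the mask-based selection relies on that), pool choice reduces to:
 *
 *      unsigned int pool_hash = __this_cpu_read(iommu_hash_common);
 *      unsigned int pool_nr = pool_hash & (iommu->nr_pools - 1);
 *      struct iommu_pool *pool = &iommu->pools[pool_nr];
 *
 * so different CPUs tend to start in different pools and contend on
 * different pool locks.
 */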

/*
 * Initialize iommu_pool entries for the iommu_map_table. `num_entries'
 * is the number of table entries. If `large_pool' is set to true,
 * the top 1/4 of the table will be set aside for pool allocations
 * of more than iommu_large_alloc pages.
 */
void iommu_tbl_pool_init(struct iommu_map_table *iommu,
                         unsigned long num_entries,
                         u32 table_shift,
                         void (*lazy_flush)(struct iommu_map_table *),
                         bool large_pool, u32 npools,
                         bool skip_span_boundary_check)
{
        unsigned int start, i;
        struct iommu_pool *p = &(iommu->large_pool);

        setup_iommu_pool_hash();
        if (npools == 0)
                iommu->nr_pools = IOMMU_NR_POOLS;
        else
                iommu->nr_pools = npools;
        BUG_ON(npools > IOMMU_NR_POOLS);

        iommu->table_shift = table_shift;
        iommu->lazy_flush = lazy_flush;
        start = 0;
        if (skip_span_boundary_check)
                iommu->flags |= IOMMU_NO_SPAN_BOUND;
        if (large_pool)
                iommu->flags |= IOMMU_HAS_LARGE_POOL;

        if (!large_pool)
                iommu->poolsize = num_entries/iommu->nr_pools;
        else
                iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools;
        for (i = 0; i < iommu->nr_pools; i++) {
                spin_lock_init(&(iommu->pools[i].lock));
                iommu->pools[i].start = start;
                iommu->pools[i].hint = start;
                start += iommu->poolsize; /* start for next pool */
                iommu->pools[i].end = start - 1;
        }
        if (!large_pool)
                return;
        /* initialize large_pool */
        spin_lock_init(&(p->lock));
        p->start = start;
        p->hint = p->start;
        p->end = num_entries;
}
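
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * is expected to allocate the bitmap and fill in table_map_base itself
 * before handing the table to iommu_tbl_pool_init().  Here "tbl" is an
 * assumed struct iommu_map_table pointer, and EXAMPLE_ENTRIES,
 * example_base and example_flush are assumed names for this sketch,
 * not real kernel symbols:
 *
 *      unsigned long *map = kcalloc(BITS_TO_LONGS(EXAMPLE_ENTRIES),
 *                                   sizeof(unsigned long), GFP_KERNEL);
 *
 *      tbl->table_map_base = example_base;
 *      tbl->map = map;
 *      iommu_tbl_pool_init(tbl, EXAMPLE_ENTRIES, IO_PAGE_SHIFT,
 *                          example_flush, false, 0, false);
 *
 * With large_pool == false and npools == 0, all EXAMPLE_ENTRIES entries
 * are split evenly across IOMMU_NR_POOLS pools and no large pool is
 * reserved at the top of the table.
 */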

unsigned long iommu_tbl_range_alloc(struct device *dev,
                                struct iommu_map_table *iommu,
                                unsigned long npages,
                                unsigned long *handle,
                                unsigned long mask,
                                unsigned int align_order)
{
        unsigned int pool_hash = __this_cpu_read(iommu_hash_common);
        unsigned long n, end, start, limit, boundary_size;
        struct iommu_pool *pool;
        int pass = 0;
        unsigned int pool_nr;
        unsigned int npools = iommu->nr_pools;
        unsigned long flags;
        bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0);
        bool largealloc = (large_pool && npages > iommu_large_alloc);
        unsigned long shift;
        unsigned long align_mask = 0;

        if (align_order > 0)
                align_mask = ~0ul >> (BITS_PER_LONG - align_order);

        /* Sanity check */
        if (unlikely(npages == 0)) {
                WARN_ON_ONCE(1);
                return IOMMU_ERROR_CODE;
        }

        if (largealloc) {
                pool = &(iommu->large_pool);
                pool_nr = 0; /* to keep compiler happy */
        } else {
                /* pick out pool_nr */
                pool_nr = pool_hash & (npools - 1);
                pool = &(iommu->pools[pool_nr]);
        }
        spin_lock_irqsave(&pool->lock, flags);

 again:
        if (pass == 0 && handle && *handle &&
            (*handle >= pool->start) && (*handle < pool->end))
                start = *handle;
        else
                start = pool->hint;

        limit = pool->end;

        /* The case below can happen if we have a small segment appended
         * to a large one, or when the previous alloc was at the very end
         * of the available space. If so, go back to the beginning. If a
         * flush is needed, it will get done based on the return value
         * from iommu_area_alloc() below.
         */
        if (start >= limit)
                start = pool->start;
        shift = iommu->table_map_base >> iommu->table_shift;
        if (limit + shift > mask) {
                limit = mask - shift + 1;
                /* If we're constrained on address range, first try
                 * at the masked hint to avoid O(n) search complexity,
                 * but on second pass, start at 0 in pool 0.
                 */
                if ((start & mask) >= limit || pass > 0) {
                        spin_unlock(&(pool->lock));
                        pool = &(iommu->pools[0]);
                        spin_lock(&(pool->lock));
                        start = pool->start;
                } else {
                        start &= mask;
                }
        }

        if (dev)
                boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                      1 << iommu->table_shift);
        else
                boundary_size = ALIGN(1ULL << 32, 1 << iommu->table_shift);

        boundary_size = boundary_size >> iommu->table_shift;
        /*
         * If skip_span_boundary_check was set during init, we set
         * things up so that iommu_is_span_boundary() merely checks that
         * (index + npages) < num_tsb_entries.
         */
        if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) {
                shift = 0;
                boundary_size = iommu->poolsize * iommu->nr_pools;
        }
        n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
                             boundary_size, align_mask);
        if (n == -1) {
                if (likely(pass == 0)) {
                        /* First failure, rescan from the beginning.  */
                        pool->hint = pool->start;
                        set_flush(iommu);
                        pass++;
                        goto again;
                } else if (!largealloc && pass <= iommu->nr_pools) {
                        spin_unlock(&(pool->lock));
                        pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
                        pool = &(iommu->pools[pool_nr]);
                        spin_lock(&(pool->lock));
                        pool->hint = pool->start;
                        set_flush(iommu);
                        pass++;
                        goto again;
                } else {
                        /* give up */
                        n = IOMMU_ERROR_CODE;
                        goto bail;
                }
        }
        if (iommu->lazy_flush &&
            (n < pool->hint || need_flush(iommu))) {
                clear_flush(iommu);
                iommu->lazy_flush(iommu);
        }

        end = n + npages;
        pool->hint = end;

        /* Update handle for SG allocations */
        if (handle)
                *handle = end;
bail:
        spin_unlock_irqrestore(&(pool->lock), flags);

        return n;
}
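
/*
 * Usage sketch (illustrative, not part of the original file): on success
 * the return value is an entry index into the map, which the caller
 * converts to a DMA address with the table base and shift; failure is
 * signalled by IOMMU_ERROR_CODE.  "dev", "tbl" and "npages" are assumed
 * to come from the caller's mapping path:
 *
 *      unsigned long entry;
 *      dma_addr_t dma_addr;
 *
 *      entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
 *                                    (unsigned long)(-1), 0);
 *      if (entry == IOMMU_ERROR_CODE)
 *              return DMA_MAPPING_ERROR;
 *      dma_addr = tbl->table_map_base + (entry << tbl->table_shift);
 *
 * A mask of (unsigned long)(-1) means no addressing restriction, and
 * passing a non-NULL "handle" lets scatterlist mapping resume the
 * search where the previous allocation ended rather than at the pool
 * hint.
 */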

static struct iommu_pool *get_pool(struct iommu_map_table *tbl,
                                   unsigned long entry)
{
        struct iommu_pool *p;
        unsigned long largepool_start = tbl->large_pool.start;
        bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0);

        /* The large pool is the last pool at the top of the table */
        if (large_pool && entry >= largepool_start) {
                p = &tbl->large_pool;
        } else {
                unsigned int pool_nr = entry / tbl->poolsize;

                BUG_ON(pool_nr >= tbl->nr_pools);
                p = &tbl->pools[pool_nr];
        }
        return p;
}

/* Caller supplies the index of the entry into the iommu map table
 * itself when the mapping from dma_addr to the entry is not the
 * default addr->entry mapping below.
 */
void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr,
                          unsigned long npages, unsigned long entry)
{
        struct iommu_pool *pool;
        unsigned long flags;
        unsigned long shift = iommu->table_shift;

        if (entry == IOMMU_ERROR_CODE) /* use default addr->entry mapping */
                entry = (dma_addr - iommu->table_map_base) >> shift;
        pool = get_pool(iommu, entry);

        spin_lock_irqsave(&(pool->lock), flags);
        bitmap_clear(iommu->map, entry, npages);
        spin_unlock_irqrestore(&(pool->lock), flags);
}
