root/arch/csky/mm/asid.c

DEFINITIONS

This source file includes the following definitions:
  1. flush_context
  2. check_update_reserved_asid
  3. new_context
  4. asid_new_context
  5. asid_allocator_init

// SPDX-License-Identifier: GPL-2.0
/*
 * Generic ASID allocator.
 *
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/slab.h>
#include <linux/mm_types.h>

#include <asm/asid.h>

#define reserved_asid(info, cpu) *per_cpu_ptr((info)->reserved, cpu)

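/*
 * An allocated context ID packs two fields: the rollover generation in the
 * bits above info->bits, and the hardware ASID in the low info->bits. The
 * allocation bitmap is indexed per context, i.e. by the low bits shifted
 * down by ctxt_shift, so each context owns asid_per_ctxt consecutive
 * hardware ASIDs.
 */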
#define ASID_MASK(info)                 (~GENMASK((info)->bits - 1, 0))
#define ASID_FIRST_VERSION(info)        (1UL << ((info)->bits))

#define asid2idx(info, asid)            (((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift)
#define idx2asid(info, idx)             (((idx) << (info)->ctxt_shift) & ~ASID_MASK(info))

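/*
 * Reset the allocator state on a generation rollover. Called with
 * info->lock held: clear the ASID map, re-reserve the ASID each CPU is
 * currently running with, and flag every CPU for a TLB flush on its next
 * context switch.
 */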
static void flush_context(struct asid_info *info)
{
        int i;
        u64 asid;

        /* Update the list of reserved ASIDs and the ASID bitmap. */
        bitmap_clear(info->map, 0, NUM_CTXT_ASIDS(info));

        for_each_possible_cpu(i) {
                asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);
                /*
                 * If this CPU has already been through a
                 * rollover, but hasn't run another task in
                 * the meantime, we must preserve its reserved
                 * ASID, as this is the only trace we have of
                 * the process it is still running.
                 */
                if (asid == 0)
                        asid = reserved_asid(info, i);
                __set_bit(asid2idx(info, asid), info->map);
                reserved_asid(info, i) = asid;
        }

        /*
         * Queue a TLB invalidation for each CPU to perform on next
         * context-switch
         */
        cpumask_setall(&info->flush_pending);
}

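/*
 * Check whether @asid is still reserved on any CPU. If it is, every
 * reserved copy is rewritten to @newasid so the mm keeps using the same
 * hardware ASID in the new generation. Returns true if a match was found.
 */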
static bool check_update_reserved_asid(struct asid_info *info, u64 asid,
                                       u64 newasid)
{
        int cpu;
        bool hit = false;

        /*
         * Iterate over the set of reserved ASIDs looking for a match.
         * If we find one, then we can update our mm to use newasid
         * (i.e. the same ASID in the current generation) but we can't
         * exit the loop early, since we need to ensure that all copies
         * of the old ASID are updated to reflect the mm. Failure to do
         * so could result in us missing the reserved ASID in a future
         * generation.
         */
        for_each_possible_cpu(cpu) {
                if (reserved_asid(info, cpu) == asid) {
                        hit = true;
                        reserved_asid(info, cpu) = newasid;
                }
        }

        return hit;
}

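/*
 * Allocate an ASID in the current generation for @mm, reusing the ASID in
 * *@pasid when possible. Called with info->lock held; may trigger a
 * rollover through flush_context() when the map is exhausted.
 */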
static u64 new_context(struct asid_info *info, atomic64_t *pasid,
                       struct mm_struct *mm)
{
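        /* Resume the free-slot search from the last allocated index; serialized by info->lock. */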
        static u32 cur_idx = 1;
        u64 asid = atomic64_read(pasid);
        u64 generation = atomic64_read(&info->generation);

        if (asid != 0) {
                u64 newasid = generation | (asid & ~ASID_MASK(info));

                /*
                 * If our current ASID was active during a rollover, we
                 * can continue to use it and this was just a false alarm.
                 */
                if (check_update_reserved_asid(info, asid, newasid))
                        return newasid;

                /*
                 * We had a valid ASID in a previous life, so try to re-use
                 * it if possible.
                 */
                if (!__test_and_set_bit(asid2idx(info, asid), info->map))
                        return newasid;
        }

        /*
         * Allocate a free ASID. If we can't find one, take a note of the
         * currently active ASIDs and mark the TLBs as requiring flushes.  We
         * always count from ASID #2 (index 1), as we use ASID #0 when setting
         * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
         * pairs.
         */
        asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), cur_idx);
        if (asid != NUM_CTXT_ASIDS(info))
                goto set_asid;

        /* We're out of ASIDs, so increment the global generation count */
        generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION(info),
                                                 &info->generation);
        flush_context(info);

        /* We have more ASIDs than CPUs, so this will always succeed */
        asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), 1);

set_asid:
        __set_bit(asid, info->map);
        cur_idx = asid;
        cpumask_clear(mm_cpumask(mm));
        return idx2asid(info, asid) | generation;
}

/*
 * Generate a new ASID for the context.
 *
 * @info: Pointer to the ASID allocator the context belongs to
 * @pasid: Pointer to the current ASID batch allocated. It will be updated
 * with the new ASID batch.
 * @cpu: current CPU ID. Must have been acquired through get_cpu()
 * @mm: The mm the ASID is allocated for; its CPU mask is updated accordingly
 */
void asid_new_context(struct asid_info *info, atomic64_t *pasid,
                      unsigned int cpu, struct mm_struct *mm)
{
        unsigned long flags;
        u64 asid;

        raw_spin_lock_irqsave(&info->lock, flags);
        /* Check that our ASID belongs to the current generation. */
        asid = atomic64_read(pasid);
        if ((asid ^ atomic64_read(&info->generation)) >> info->bits) {
                asid = new_context(info, pasid, mm);
                atomic64_set(pasid, asid);
        }

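        /*
         * flush_context() may have flagged this CPU during the last
         * rollover; if so, perform the deferred TLB invalidation before
         * activating the new ASID.
         */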
        if (cpumask_test_and_clear_cpu(cpu, &info->flush_pending))
                info->flush_cpu_ctxt_cb();

        atomic64_set(&active_asid(info, cpu), asid);
        cpumask_set_cpu(cpu, mm_cpumask(mm));
        raw_spin_unlock_irqrestore(&info->lock, flags);
}

/*
 * Initialize the ASID allocator
 *
 * @info: Pointer to the asid allocator structure
 * @bits: Number of bits in an ASID; the allocator manages 2^bits ASIDs
 * @asid_per_ctxt: Number of ASIDs to allocate per-context. ASIDs are
 * allocated contiguously for a given context. This value should be a power of
 * 2.
 * @flush_cpu_ctxt_cb: Callback invoked to flush the local context (e.g. the
 * TLB) when a flush queued by a rollover is still pending on the CPU.
 */
int asid_allocator_init(struct asid_info *info,
                        u32 bits, unsigned int asid_per_ctxt,
                        void (*flush_cpu_ctxt_cb)(void))
{
        info->bits = bits;
        info->ctxt_shift = ilog2(asid_per_ctxt);
        info->flush_cpu_ctxt_cb = flush_cpu_ctxt_cb;
        /*
         * Expect allocation after rollover to fail if we don't have at least
         * one more ASID than CPUs. ASID #0 is always reserved.
         */
        WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus());
        atomic64_set(&info->generation, ASID_FIRST_VERSION(info));
        info->map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
                            sizeof(*info->map), GFP_KERNEL);
        if (!info->map)
                return -ENOMEM;

        raw_spin_lock_init(&info->lock);

        return 0;
}
