arch/s390/include/asm/tlbflush.h


DEFINITIONS

This source file includes the following definitions.
  1. __tlb_flush_local
  2. __tlb_flush_idte
  3. __tlb_flush_global
  4. __tlb_flush_mm
  5. __tlb_flush_kernel
  6. __tlb_flush_mm_lazy
  7. flush_tlb_mm
  8. flush_tlb_range
  9. flush_tlb_kernel_range

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
        asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
        unsigned long opt;

        opt = IDTE_PTOA;
        if (MACHINE_HAS_TLB_GUEST)
                opt |= IDTE_GUEST_ASCE;
        /* Global TLB flush for the mm */
        asm volatile(
                "       .insn   rrf,0xb98e0000,0,%0,%1,0"
                : : "a" (opt), "a" (asce) : "cc");
}

void smp_ptlb_all(void);

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
        unsigned int dummy = 0;

        csp(&dummy, 0, 0);
}

/*
 * Flush TLB entries for a specific mm on all CPUs (when gmap is in use
 * this involves multiple ASCEs!).
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
        unsigned long gmap_asce;

        /*
         * If the machine has IDTE we prefer to do a per mm flush
         * on all cpus instead of doing a local flush if the mm
         * only ran on the local cpu.
         */
        preempt_disable();
        atomic_inc(&mm->context.flush_count);
        /* Reset TLB flush mask */
        cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
        barrier();
        gmap_asce = READ_ONCE(mm->context.gmap_asce);
        if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
                if (gmap_asce)
                        __tlb_flush_idte(gmap_asce);
                __tlb_flush_idte(mm->context.asce);
        } else {
                /* Global TLB flush */
                __tlb_flush_global();
        }
        atomic_dec(&mm->context.flush_count);
        preempt_enable();
}

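/*
 * Flush TLB entries for the kernel address space (init_mm) on all CPUs.
 */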
static inline void __tlb_flush_kernel(void)
{
        if (MACHINE_HAS_IDTE)
                __tlb_flush_idte(init_mm.context.asce);
        else
                __tlb_flush_global();
}

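/*
 * Flush the mm only if a flush was deferred (mm->context.flush_mm is set).
 */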
static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
        spin_lock(&mm->context.lock);
        if (mm->context.flush_mm) {
                mm->context.flush_mm = 0;
                __tlb_flush_mm(mm);
        }
        spin_unlock(&mm->context.lock);
}

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context's TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
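/* These variants are defined as no-ops; flush_tlb_mm and flush_tlb_range below do the actual flushing. */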
#define flush_tlb()                             do { } while (0)
#define flush_tlb_all()                         do { } while (0)
#define flush_tlb_page(vma, addr)               do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
        __tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
        __tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
                                          unsigned long end)
{
        __tlb_flush_kernel();
}

#endif /* _S390_TLBFLUSH_H */
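
The deferred-flush scheme used by __tlb_flush_mm_lazy above (a pending-flush flag checked and cleared under mm->context.lock) follows a generic pattern that can be sketched outside the kernel. Below is a minimal user-space illustration of that pattern only, not kernel code: the names ctx, ctx_flush_lazy and do_flush are hypothetical stand-ins, and a pthread mutex takes the place of the context spinlock.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for mm->context: a pending-flush flag plus a lock. */
struct ctx {
        pthread_mutex_t lock;
        bool flush_pending;
};

/* Stand-in for the expensive operation (__tlb_flush_mm in the header above). */
static void do_flush(struct ctx *c)
{
        (void)c;
        printf("flush performed\n");
}

/* Same shape as __tlb_flush_mm_lazy: flush only if a flush was deferred. */
static void ctx_flush_lazy(struct ctx *c)
{
        pthread_mutex_lock(&c->lock);
        if (c->flush_pending) {
                c->flush_pending = false;
                do_flush(c);
        }
        pthread_mutex_unlock(&c->lock);
}

int main(void)
{
        struct ctx c = { .lock = PTHREAD_MUTEX_INITIALIZER, .flush_pending = true };

        ctx_flush_lazy(&c);     /* pending flag set: performs the deferred flush */
        ctx_flush_lazy(&c);     /* nothing pending: returns without flushing */
        return 0;
}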
