arch/unicore32/include/asm/tlbflush.h


DEFINITIONS

This source file includes the following definitions.
  1. local_flush_tlb_all
  2. local_flush_tlb_mm
  3. local_flush_tlb_page
  4. local_flush_tlb_kernel_page
  5. flush_pmd_entry
  6. clean_pmd_entry

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/unicore32/include/asm/tlbflush.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 */
#ifndef __UNICORE_TLBFLUSH_H__
#define __UNICORE_TLBFLUSH_H__

#ifndef __ASSEMBLY__

#include <linux/sched.h>

extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long,
                                        struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);

/*
 *      TLB Management
 *      ==============
 *
 *      The arch/unicore32/mm/tlb-*.S files implement these methods.
 *
 *      The TLB specific code is expected to perform whatever tests it
 *      needs to determine if it should invalidate the TLB for each
 *      call.  Start addresses are inclusive and end addresses are
 *      exclusive; it is safe to round these addresses down.
 *
 *      flush_tlb_all()
 *
 *              Invalidate the entire TLB.
 *
 *      flush_tlb_mm(mm)
 *
 *              Invalidate all TLB entries in a particular address
 *              space.
 *              - mm    - mm_struct describing address space
 *
 *      flush_tlb_range(vma,start,end)
 *
 *              Invalidate a range of TLB entries in the specified
 *              address space.
 *              - vma   - vm_area_struct describing address range
 *              - start - start address (may not be aligned)
 *              - end   - end address (exclusive, may not be aligned)
 *
 *      flush_tlb_page(vma,vaddr)
 *
 *              Invalidate the specified page in the specified address
 *              space.
 *              - vma   - vm_area_struct describing address space
 *              - vaddr - virtual address (may not be aligned)
 *
 *      flush_tlb_kernel_page(kaddr)
 *
 *              Invalidate the TLB entry for the specified page.  The
 *              address will be in the kernel's virtual memory space.
 *              The implementation below invalidates both the I-TLB and
 *              the D-TLB entry for the page.
 *              - kaddr - kernel virtual memory address
 */

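/*
 * A note on the idiom used by every flush below (an assumption, inferred
 * from the pattern rather than documented here): each operation is a
 * single "movc" write to what appears to be the system-control
 * coprocessor p0, and the eight trailing nops pad the pipeline so the
 * coprocessor operation has taken effect before any instruction that
 * could depend on the new TLB state executes.
 */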
static inline void local_flush_tlb_all(void)
{
        const int zero = 0;

        /* TLB invalidate all */
        asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
                : : "r" (zero) : "cc");
}

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
        const int zero = 0;

        if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
                /* TLB invalidate all */
                asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
                        : : "r" (zero) : "cc");
        }
        put_cpu();
}
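
/*
 * Hypothetical caller sketch: core mm code reaches this helper through
 * the flush_tlb_mm alias defined below when a whole address space is
 * torn down, e.g.
 *
 *      flush_tlb_mm(mm);
 *
 * The get_cpu()/put_cpu() pair disables preemption across the
 * mm_cpumask() test and the flush, so the test refers to a stable
 * processor id.
 */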

static inline void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
#ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE
                /* iTLB invalidate page */
                asm("movc p0.c6, %0, #5; nop; nop; nop; nop; nop; nop; nop; nop"
                        : : "r" (uaddr & PAGE_MASK) : "cc");
                /* dTLB invalidate page */
                asm("movc p0.c6, %0, #3; nop; nop; nop; nop; nop; nop; nop; nop"
                        : : "r" (uaddr & PAGE_MASK) : "cc");
#else
                /* TLB invalidate all */
                asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
                        : : "r" (uaddr & PAGE_MASK) : "cc");
#endif
        }
}
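
/*
 * With CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE set, this helper and the
 * kernel-page variant below fall back to invalidating the whole TLB
 * instead of the single I-TLB/D-TLB entry, trading precision for a
 * simpler operation on parts where per-entry invalidation is disabled.
 */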

static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
{
#ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE
        /* iTLB invalidate page */
        asm("movc p0.c6, %0, #5; nop; nop; nop; nop; nop; nop; nop; nop"
                : : "r" (kaddr & PAGE_MASK) : "cc");
        /* dTLB invalidate page */
        asm("movc p0.c6, %0, #3; nop; nop; nop; nop; nop; nop; nop; nop"
                : : "r" (kaddr & PAGE_MASK) : "cc");
#else
        /* TLB invalidate all */
        asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
                : : "r" (kaddr & PAGE_MASK) : "cc");
#endif
}

/*
 *      flush_pmd_entry
 *
 *      Flush a PMD entry (word aligned, or double-word aligned) to
 *      RAM if the TLB for the CPU we are running on requires this.
 *      This is typically used when we are creating PMD entries.
 *
 *      clean_pmd_entry
 *
 *      Clean (but don't drain the write buffer) if the CPU requires
 *      these operations.  This is typically used when we are removing
 *      PMD entries.
 */
static inline void flush_pmd_entry(pmd_t *pmd)
{
#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
        /* flush dcache line, see dcacheline_flush in proc-macros.S */
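        /*
         * Sketch of the trick (an assumption, from reading the code):
         * the low 12 bits of the PMD address are extracted (shift up
         * by #20, then back down) and added to _stext, and four loads
         * 0x1000 apart then read kernel-text lines that index the same
         * dcache set as the PMD entry, evicting the dirty line to RAM
         * on a read-allocate cache.
         */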
        asm("mov        r1, %0 << #20\n"
                "ldw    r2, =_stext\n"
                "add    r2, r2, r1 >> #20\n"
                "ldw    r1, [r2+], #0x0000\n"
                "ldw    r1, [r2+], #0x1000\n"
                "ldw    r1, [r2+], #0x2000\n"
                "ldw    r1, [r2+], #0x3000\n"
                : : "r" (pmd) : "r1", "r2");
#else
        /* flush dcache all */
        asm("movc p0.c5, %0, #14; nop; nop; nop; nop; nop; nop; nop; nop"
                : : "r" (pmd) : "cc");
#endif
}

static inline void clean_pmd_entry(pmd_t *pmd)
{
#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
        /* clean dcache line */
        asm("movc p0.c5, %0, #11; nop; nop; nop; nop; nop; nop; nop; nop"
                : : "r" (__pa(pmd) & ~(L1_CACHE_BYTES - 1)) : "cc");
#else
        /* clean dcache all */
        asm("movc p0.c5, %0, #10; nop; nop; nop; nop; nop; nop; nop; nop"
                : : "r" (pmd) : "cc");
#endif
}
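
/*
 * A hypothetical caller sketch (set_pmd as in this port's pgtable code
 * is an assumption here), pairing a PMD write with the flush so the
 * hardware page-table walker sees the new entry:
 *
 *      set_pmd(pmdp, __pmd(pmdval));
 *      flush_pmd_entry(pmdp);
 */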

/*
 * Convert calls to our calling convention.
 */
#define local_flush_tlb_range(vma, start, end)  \
        __cpu_flush_user_tlb_range(start, end, vma)
#define local_flush_tlb_kernel_range(s, e)      \
        __cpu_flush_kern_tlb_range(s, e)

#define flush_tlb_all           local_flush_tlb_all
#define flush_tlb_mm            local_flush_tlb_mm
#define flush_tlb_page          local_flush_tlb_page
#define flush_tlb_kernel_page   local_flush_tlb_kernel_page
#define flush_tlb_range         local_flush_tlb_range
#define flush_tlb_kernel_range  local_flush_tlb_kernel_range
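
/*
 * The one-to-one mapping above works because this port has no SMP
 * support: the local (this-CPU) flush is, by construction, also the
 * global one.
 */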

/*
 * If PG_dcache_clean is not set for the page, we need to ensure that
 * any cache entries for the kernel's virtual memory range are written
 * back to the page.
 */
extern void update_mmu_cache(struct vm_area_struct *vma,
                unsigned long addr, pte_t *ptep);

extern void do_bad_area(unsigned long addr, unsigned int fsr,
                struct pt_regs *regs);

#endif  /* !__ASSEMBLY__ */

#endif  /* __UNICORE_TLBFLUSH_H__ */
