root/arch/xtensa/include/asm/cacheflush.h

/* [<][>][^][v][top][bottom][index][help] */

INCLUDED FROM


DEFINITIONS

This source file includes the following definitions.
  1. __flush_dcache_page
  2. __flush_dcache_range
  3. __flush_invalidate_dcache_page_alias
  4. __invalidate_dcache_page_alias
  5. __invalidate_icache_page_alias

   1 /*
   2  * This file is subject to the terms and conditions of the GNU General Public
   3  * License.  See the file "COPYING" in the main directory of this archive
   4  * for more details.
   5  *
   6  * (C) 2001 - 2013 Tensilica Inc.
   7  */
   8 
   9 #ifndef _XTENSA_CACHEFLUSH_H
  10 #define _XTENSA_CACHEFLUSH_H
  11 
  12 #include <linux/mm.h>
  13 #include <asm/processor.h>
  14 #include <asm/page.h>
  15 
  16 /*
   17  * Low-level routines for cache flushing.
  18  *
  19  * invalidate data or instruction cache:
  20  *
  21  * __invalidate_icache_all()
  22  * __invalidate_icache_page(adr)
  23  * __invalidate_dcache_page(adr)
  24  * __invalidate_icache_range(from,size)
  25  * __invalidate_dcache_range(from,size)
  26  *
  27  * flush data cache:
  28  *
  29  * __flush_dcache_page(adr)
  30  *
  31  * flush and invalidate data cache:
  32  *
  33  * __flush_invalidate_dcache_all()
  34  * __flush_invalidate_dcache_page(adr)
  35  * __flush_invalidate_dcache_range(from,size)
  36  *
  37  * specials for cache aliasing:
  38  *
  39  * __flush_invalidate_dcache_page_alias(vaddr,paddr)
  40  * __invalidate_dcache_page_alias(vaddr,paddr)
  41  * __invalidate_icache_page_alias(vaddr,paddr)
  42  */
  43 
/*
 * Prototypes for the low-level cache primitives implemented elsewhere in
 * the architecture code.  "Invalidate" discards cache lines without
 * writing dirty data back to memory.  Per the summary above, the _page
 * variants take a virtual address and the _range variants take
 * (start, size).
 */
  44 extern void __invalidate_dcache_all(void);
  45 extern void __invalidate_icache_all(void);
  46 extern void __invalidate_dcache_page(unsigned long);
  47 extern void __invalidate_icache_page(unsigned long);
  48 extern void __invalidate_icache_range(unsigned long, unsigned long);
  49 extern void __invalidate_dcache_range(unsigned long, unsigned long);
  50 
  51 #if XCHAL_DCACHE_IS_WRITEBACK
/* Writeback D-cache: dirty lines exist, so flushing is real work. */
  52 extern void __flush_invalidate_dcache_all(void);
  53 extern void __flush_dcache_page(unsigned long);
  54 extern void __flush_dcache_range(unsigned long, unsigned long);
  55 extern void __flush_invalidate_dcache_page(unsigned long);
  56 extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
  57 #else
/*
 * Non-writeback D-cache: memory is never behind the cache, so a pure
 * "flush" is a no-op and "flush + invalidate" degenerates to a plain
 * invalidate.
 */
  58 static inline void __flush_dcache_page(unsigned long va)
  59 {
  60 }
  61 static inline void __flush_dcache_range(unsigned long va, unsigned long sz)
  62 {
  63 }
  64 # define __flush_invalidate_dcache_all()        __invalidate_dcache_all()
  65 # define __flush_invalidate_dcache_page(p)      __invalidate_dcache_page(p)
  66 # define __flush_invalidate_dcache_range(p,s)   __invalidate_dcache_range(p,s)
  67 #endif
  68 
  69 #if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
/*
 * A cache way larger than a page makes aliasing possible: one physical
 * page can be cached under several virtual "colors".  These helpers
 * take a (virtual alias address, physical address) pair — see the
 * summary comment at the top of this file.
 */
  70 extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
  71 extern void __invalidate_dcache_page_alias(unsigned long, unsigned long);
  72 #else
/* No D-cache aliasing possible: the alias helpers are no-ops. */
  73 static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
  74                                                         unsigned long phys) { }
  75 static inline void __invalidate_dcache_page_alias(unsigned long virt,
  76                                                   unsigned long phys) { }
  77 #endif
/* Same way-size test, but for the instruction cache. */
  78 #if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
  79 extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
  80 #else
  81 static inline void __invalidate_icache_page_alias(unsigned long virt,
  82                                                 unsigned long phys) { }
  83 #endif
  84 
  85 /*
  86  * We have physically tagged caches - nothing to do here -
  87  * unless we have cache aliasing.
  88  *
  89  * Pages can get remapped. Because this might change the 'color' of that page,
  90  * we have to flush the cache before the PTE is changed.
  91  * (see also Documentation/core-api/cachetlb.rst)
  92  */
  93 
  94 #if defined(CONFIG_MMU) && \
  95         ((DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP))
  96 
/*
 * Cache maintenance has real work to do: either the D-cache can alias,
 * or we are on SMP.  With CONFIG_SMP the flush_* entry points are
 * out-of-line functions (presumably cross-CPU — defined in the arch
 * cache code, not visible here); on UP they map directly onto the
 * local_* implementations.
 */
  97 #ifdef CONFIG_SMP
  98 void flush_cache_all(void);
  99 void flush_cache_range(struct vm_area_struct*, ulong, ulong);
 100 void flush_icache_range(unsigned long start, unsigned long end);
 101 void flush_cache_page(struct vm_area_struct*,
 102                              unsigned long, unsigned long);
 103 #else
 104 #define flush_cache_all local_flush_cache_all
 105 #define flush_cache_range local_flush_cache_range
 106 #define flush_icache_range local_flush_icache_range
 107 #define flush_cache_page  local_flush_cache_page
 108 #endif
 109 
/* Flush+invalidate the whole D-cache, then invalidate the I-cache. */
 110 #define local_flush_cache_all()                                         \
 111         do {                                                            \
 112                 __flush_invalidate_dcache_all();                        \
 113                 __invalidate_icache_all();                              \
 114         } while (0)
 115 
/* No per-mm granularity here: mm-scoped requests flush everything. */
 116 #define flush_cache_mm(mm)              flush_cache_all()
 117 #define flush_cache_dup_mm(mm)          flush_cache_mm(mm)
 118 
/* vmap/vunmap ranges are likewise handled by a full flush. */
 119 #define flush_cache_vmap(start,end)     flush_cache_all()
 120 #define flush_cache_vunmap(start,end)   flush_cache_all()
 121 
/* flush_dcache_page() is a real function in this configuration. */
 122 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 123 extern void flush_dcache_page(struct page*);
 124 
 125 void local_flush_cache_range(struct vm_area_struct *vma,
 126                 unsigned long start, unsigned long end);
 127 void local_flush_cache_page(struct vm_area_struct *vma,
 128                 unsigned long address, unsigned long pfn);
 129 
 130 #else
 131 
/*
 * No aliasing and no SMP: the physically tagged caches need no
 * maintenance for these operations, so they are all no-ops — except
 * flush_icache_range, which must still make the I-cache consistent
 * with the D-cache after code has been written.
 */
 132 #define flush_cache_all()                               do { } while (0)
 133 #define flush_cache_mm(mm)                              do { } while (0)
 134 #define flush_cache_dup_mm(mm)                          do { } while (0)
 135 
 136 #define flush_cache_vmap(start,end)                     do { } while (0)
 137 #define flush_cache_vunmap(start,end)                   do { } while (0)
 138 
 139 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
 140 #define flush_dcache_page(page)                         do { } while (0)
 141 
 142 #define flush_icache_range local_flush_icache_range
 143 #define flush_cache_page(vma, addr, pfn)                do { } while (0)
 144 #define flush_cache_range(vma, start, end)              do { } while (0)
 145 
 146 #endif
 147 
 148 /* Ensure consistency between data and instruction cache. */
/*
 * Write the D-cache back over [start, end), then invalidate the
 * I-cache for the same range.  Note the low-level primitives take
 * (address, size), hence the (end) - (start) conversion.
 */
 149 #define local_flush_icache_range(start, end)                            \
 150         do {                                                            \
 151                 __flush_dcache_range(start, (end) - (start));           \
 152                 __invalidate_icache_range(start,(end) - (start));       \
 153         } while (0)
 154 
 155 /* This is not required, see Documentation/core-api/cachetlb.rst */
 156 #define flush_icache_page(vma,page)                     do { } while (0)
 157 
/* No per-mapping flush state to serialize on xtensa. */
 158 #define flush_dcache_mmap_lock(mapping)                 do { } while (0)
 159 #define flush_dcache_mmap_unlock(mapping)               do { } while (0)
 160 
 161 #if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
 162 
/*
 * With D-cache aliasing the kernel's mapping of the page may have a
 * different color than the user's, so the copy needs the out-of-line
 * alias-aware implementations.
 */
 163 extern void copy_to_user_page(struct vm_area_struct*, struct page*,
 164                 unsigned long, void*, const void*, unsigned long);
 165 extern void copy_from_user_page(struct vm_area_struct*, struct page*,
 166                 unsigned long, void*, const void*, unsigned long);
 167 
 168 #else
 169 
/*
 * No aliasing: copy directly, then write the new data back from the
 * D-cache and invalidate the corresponding I-cache lines, so freshly
 * written code (e.g. breakpoints poked by ptrace) becomes visible for
 * execution.
 */
 170 #define copy_to_user_page(vma, page, vaddr, dst, src, len)              \
 171         do {                                                            \
 172                 memcpy(dst, src, len);                                  \
 173                 __flush_dcache_range((unsigned long) dst, len);         \
 174                 __invalidate_icache_range((unsigned long) dst, len);    \
 175         } while (0)
 176 
/* Reading from the page needs no cache maintenance. */
 177 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 178         memcpy(dst, src, len)
 179 
 180 #endif
 181 
 182 #endif /* _XTENSA_CACHEFLUSH_H */

/* [<][>][^][v][top][bottom][index][help] */