root/arch/arm64/include/asm/cacheflush.h

DEFINITIONS

This source file includes the following definitions:
  1. flush_icache_range
  2. flush_cache_mm
  3. flush_cache_page
  4. flush_cache_range
  5. __flush_icache_all
  6. flush_cache_vmap
  7. flush_cache_vunmap

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/cacheflush.h
 *
 * Copyright (C) 1999-2002 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H

#include <linux/kgdb.h>
#include <linux/mm.h>

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1

/*
 *      MM Cache Management
 *      ===================
 *
 *      These methods are implemented in arch/arm64/mm/cache.S.
 *
 *      Start addresses are inclusive and end addresses are exclusive; start
 *      addresses should be rounded down, end addresses up.
 *
 *      See Documentation/core-api/cachetlb.rst for more information. Note that
 *      the implementation assumes a non-aliasing VIPT D-cache and an (aliasing)
 *      VIPT I-cache.
 *
 *      flush_cache_mm(mm)
 *
 *              Clean and invalidate all user space cache entries
 *              before a change of page tables.
 *
 *      flush_icache_range(start, end)
 *
 *              Ensure coherency between the I-cache and the D-cache in the
 *              region described by start, end.
 *              - start  - virtual start address
 *              - end    - virtual end address
 *
 *      invalidate_icache_range(start, end)
 *
 *              Invalidate the I-cache in the region described by start, end.
 *              - start  - virtual start address
 *              - end    - virtual end address
 *
 *      __flush_cache_user_range(start, end)
 *
 *              Ensure coherency between the I-cache and the D-cache in the
 *              region described by start, end.
 *              - start  - virtual start address
 *              - end    - virtual end address
 *
 *      __flush_dcache_area(kaddr, size)
 *
 *              Ensure that the data held in the page is written back.
 *              - kaddr  - page address
 *              - size   - region size
 */
extern void __flush_icache_range(unsigned long start, unsigned long end);
extern int  invalidate_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
extern void __inval_dcache_area(void *addr, size_t len);
extern void __clean_dcache_area_poc(void *addr, size_t len);
extern void __clean_dcache_area_pop(void *addr, size_t len);
extern void __clean_dcache_area_pou(void *addr, size_t len);
extern long __flush_cache_user_range(unsigned long start, unsigned long end);
extern void sync_icache_aliases(void *kaddr, unsigned long len);

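/*
 * Example (illustrative sketch only, not part of this header; the
 * function and message buffer are hypothetical): __flush_dcache_area()
 * is the typical tool for making CPU writes visible to an observer
 * that is not cache-coherent, such as firmware reading a shared
 * buffer with its caches disabled.
 */
static inline void publish_to_firmware_example(void *msg, size_t len)
{
        /* Clean and invalidate the range to the Point of Coherency. */
        __flush_dcache_area(msg, len);
}
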
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
        __flush_icache_range(start, end);

        /*
         * IPI all online CPUs so that they undergo a context synchronization
         * event and are forced to refetch the new instructions.
         */
#ifdef CONFIG_KGDB
        /*
         * KGDB performs cache maintenance with interrupts disabled, so we
         * will deadlock trying to IPI the secondary CPUs. In theory, we can
         * set CACHE_FLUSH_IS_SAFE to 0 to avoid this known issue, but that
         * just means that KGDB will elide the maintenance altogether! As it
         * turns out, KGDB uses IPIs to round up the secondary CPUs during
         * the patching operation, so we don't need extra IPIs here anyway.
         * In which case, add a KGDB-specific bodge and return early.
         */
        if (kgdb_connected && irqs_disabled())
                return;
#endif
        kick_all_cpus_sync();
}

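/*
 * Example (hypothetical sketch; 'patch_u32_example' is not a kernel
 * API): a code-patching caller writes the new instruction through the
 * D-side mapping, then uses flush_icache_range() to make the I-side
 * coherent and force other CPUs to refetch.
 */
static inline void patch_u32_example(u32 *addr, u32 insn)
{
        WRITE_ONCE(*addr, insn);
        flush_icache_range((unsigned long)addr,
                           (unsigned long)addr + sizeof(*addr));
}
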
/*
 * The D-cache is non-aliasing, so no maintenance is required when user
 * mappings change; these are therefore no-ops.
 */
static inline void flush_cache_mm(struct mm_struct *mm)
{
}

static inline void flush_cache_page(struct vm_area_struct *vma,
                                    unsigned long user_addr, unsigned long pfn)
{
}

static inline void flush_cache_range(struct vm_area_struct *vma,
                                     unsigned long start, unsigned long end)
{
}

/*
 * Cache maintenance functions used by the DMA API. Not to be used directly.
 */
extern void __dma_map_area(const void *, size_t, int);
extern void __dma_unmap_area(const void *, size_t, int);
extern void __dma_flush_area(const void *, size_t);

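/*
 * Example (illustrative sketch only; drivers must use the dma_map_*()
 * API rather than calling these helpers, and 'dma_prep_example' is
 * hypothetical): the DMA mapping core cleans a buffer that the CPU has
 * written before a device reads it. DMA_TO_DEVICE comes from
 * <linux/dma-direction.h>.
 */
static inline void dma_prep_example(const void *buf, size_t len)
{
        /* Clean the CPU's dirty lines so the device reads fresh data. */
        __dma_map_area(buf, len, DMA_TO_DEVICE);
}
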
/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
        unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
        do {                                                    \
                memcpy(dst, src, len);                          \
        } while (0)

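/*
 * Example (hypothetical sketch; 'poke_text_example' is not a kernel
 * API): the generic ptrace/access_process_vm() path uses
 * copy_to_user_page() when poking another task's text, e.g. to plant
 * a breakpoint. 'kaddr' would come from kmap() of 'page'.
 */
static inline void poke_text_example(struct vm_area_struct *vma,
                                     struct page *page, unsigned long uaddr,
                                     void *kaddr, const void *insn,
                                     unsigned long len)
{
        /* Write via the kernel alias, then restore D/I coherency. */
        copy_to_user_page(vma, page, uaddr,
                          kaddr + (uaddr & ~PAGE_MASK), insn, len);
}
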
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (i.e., page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

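/*
 * Example (sketch of the deferral pattern described above; the
 * function name is hypothetical): rather than flushing immediately,
 * the implementation can simply clear PG_dcache_clean and let the
 * fault path perform the maintenance when the page is next mapped
 * into user space.
 */
static inline void defer_dcache_flush_example(struct page *page)
{
        if (test_bit(PG_dcache_clean, &page->flags))
                clear_bit(PG_dcache_clean, &page->flags);
}
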
static inline void __flush_icache_all(void)
{
        /* CTR_EL0.DIC: I-cache invalidation is not needed for coherency. */
        if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
                return;

        /* Invalidate all I-caches to PoU, Inner Shareable domain. */
        asm("ic ialluis");
        dsb(ish);
}

#define flush_dcache_mmap_lock(mapping)         do { } while (0)
#define flush_dcache_mmap_unlock(mapping)       do { } while (0)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)     do { } while (0)

/*
 * Not required on AArch64 (PIPT or VIPT non-aliasing D-cache).
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}

int set_memory_valid(unsigned long addr, int numpages, int enable);

int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);

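/*
 * Example (sketch, assuming <asm/tlbflush.h> for
 * flush_tlb_kernel_range(); 'unmap_from_linear_map_example' is
 * hypothetical): the _noflush variants leave TLB invalidation to the
 * caller, so removing a page from the linear map looks like this.
 */
static inline int unmap_from_linear_map_example(struct page *page)
{
        unsigned long addr = (unsigned long)page_address(page);
        int ret = set_direct_map_invalid_noflush(page);

        if (!ret)
                flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
        return ret;
}
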
#endif /* __ASM_CACHEFLUSH_H */
