arch/nds32/mm/cacheflush.c


DEFINITIONS

This source file includes the following definitions:
  1. flush_icache_range
  2. flush_icache_page
  3. flush_icache_user_range
  4. update_mmu_cache
  5. aliasing
  6. kremap0
  7. kunmap01
  8. kremap1
  9. flush_cache_mm
  10. flush_cache_dup_mm
  11. flush_cache_range
  12. flush_cache_page
  13. flush_cache_vmap
  14. flush_cache_vunmap
  15. copy_user_page
  16. clear_user_page
  17. copy_user_highpage
  18. clear_user_highpage
  19. flush_dcache_page
  20. copy_to_user_page
  21. copy_from_user_page
  22. flush_anon_page
  23. flush_kernel_dcache_page
  24. flush_kernel_vmap_range
  25. invalidate_kernel_vmap_range

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm/shmparam.h>
#include <asm/cache_info.h>

extern struct cache_info L1_cache_info[2];

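/*
 * Write back and invalidate the caches over [start, end). The range is
 * rounded out to cache-line boundaries; note that the D-cache line size
 * is used for the rounding.
 */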
void flush_icache_range(unsigned long start, unsigned long end)
{
        unsigned long line_size, flags;
        line_size = L1_cache_info[DCACHE].line_size;
        start = start & ~(line_size - 1);
        end = (end + line_size - 1) & ~(line_size - 1);
        local_irq_save(flags);
        cpu_cache_wbinval_range(start, end, 1);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_icache_range);

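/*
 * Flush a whole page for I-cache coherency. The page is mapped with
 * kmap_atomic() so this also works for highmem pages; the I-cache is
 * only invalidated when the VMA is executable.
 */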
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
        unsigned long flags;
        unsigned long kaddr;
        local_irq_save(flags);
        kaddr = (unsigned long)kmap_atomic(page);
        cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
        kunmap_atomic((void *)kaddr);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_icache_page);

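/*
 * Flush only the len bytes at the page offset of addr, through a
 * temporary atomic mapping. Typically reached when the kernel writes
 * instructions into a user page, e.g. on behalf of ptrace.
 */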
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                             unsigned long addr, int len)
{
        unsigned long kaddr;
        kaddr = (unsigned long)kmap_atomic(page) + (addr & ~PAGE_MASK);
        flush_icache_range(kaddr, kaddr + len);
        kunmap_atomic((void *)kaddr);
}

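/*
 * Called after a PTE has been installed. Preload the TLB entry when the
 * faulting mm is the active one, then flush the page if it was marked
 * PG_dcache_dirty by an earlier flush_dcache_page() or if the mapping
 * is executable.
 */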
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
                      pte_t *pte)
{
        struct page *page;
        unsigned long pfn = pte_pfn(*pte);
        unsigned long flags;

        if (!pfn_valid(pfn))
                return;

        if (vma->vm_mm == current->active_mm) {
                local_irq_save(flags);
                __nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
                __nds32__tlbop_rwr(*pte);
                __nds32__isb();
                local_irq_restore(flags);
        }
        page = pfn_to_page(pfn);

        if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) ||
            (vma->vm_flags & VM_EXEC)) {
                unsigned long kaddr;
                local_irq_save(flags);
                kaddr = (unsigned long)kmap_atomic(page);
                cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
                kunmap_atomic((void *)kaddr);
                local_irq_restore(flags);
        }
}

#ifdef CONFIG_CPU_CACHE_ALIASING
extern pte_t va_present(struct mm_struct *mm, unsigned long addr);

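/*
 * Return non-zero when addr and page land in different cache colours,
 * i.e. when their page-aligned addresses differ within an SHMLBA-sized
 * window.
 */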
static inline unsigned long aliasing(unsigned long addr, unsigned long page)
{
        return ((addr & PAGE_MASK) ^ page) & (SHMLBA - 1);
}

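/*
 * Worked example, assuming 4 KiB pages and SHMLBA == 0x4000 (the real
 * value comes from <asm/shmparam.h>; these numbers are illustrative
 * only): for addr == 0x10003000 and page == 0xc0001000,
 * (0x10003000 ^ 0xc0001000) & 0x3fff == 0x2000, which is non-zero, so
 * the two virtual addresses use different colours and the caller must
 * flush through a colour-matched alias.
 *
 * kremap0()/kremap1() below provide such aliases: each installs a
 * locked TLB entry mapping physical address pa at a fixed scratch
 * virtual address that carries the same colour bits as uaddr. Two
 * separate windows (BASE_ADDR0/BASE_ADDR1) exist so that a source and
 * a destination page can be mapped at the same time; kunmap01() tears
 * down either mapping.
 */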
static inline unsigned long kremap0(unsigned long uaddr, unsigned long pa)
{
        unsigned long kaddr, pte;

#define BASE_ADDR0 0xffffc000
        kaddr = BASE_ADDR0 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
        pte = (pa | PAGE_KERNEL);
        __nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
        __nds32__tlbop_rwlk(pte);
        __nds32__isb();
        return kaddr;
}

static inline void kunmap01(unsigned long kaddr)
{
        __nds32__tlbop_unlk(kaddr);
        __nds32__tlbop_inv(kaddr);
        __nds32__isb();
}

static inline unsigned long kremap1(unsigned long uaddr, unsigned long pa)
{
        unsigned long kaddr, pte;

#define BASE_ADDR1 0xffff8000
        kaddr = BASE_ADDR1 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
        pte = (pa | PAGE_KERNEL);
        __nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
        __nds32__tlbop_rwlk(pte);
        __nds32__isb();
        return kaddr;
}

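/*
 * On an aliasing cache a single mm cannot be flushed by colour, so
 * write back and invalidate the whole D-cache and invalidate the whole
 * I-cache.
 */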
void flush_cache_mm(struct mm_struct *mm)
{
        unsigned long flags;

        local_irq_save(flags);
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();
        local_irq_restore(flags);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
        /* Intentionally empty: no flush is needed when duplicating an mm. */
}

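/*
 * For ranges larger than eight pages a full flush is cheaper than a
 * page-by-page walk; otherwise flush each page that has a present PTE
 * (va_present()), invalidating the I-cache too for executable VMAs.
 */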
void flush_cache_range(struct vm_area_struct *vma,
                       unsigned long start, unsigned long end)
{
        unsigned long flags;

        if ((end - start) > 8 * PAGE_SIZE) {
                cpu_dcache_wbinval_all();
                if (vma->vm_flags & VM_EXEC)
                        cpu_icache_inval_all();
                return;
        }
        local_irq_save(flags);
        while (start < end) {
                if (va_present(vma->vm_mm, start))
                        cpu_cache_wbinval_page(start, vma->vm_flags & VM_EXEC);
                start += PAGE_SIZE;
        }
        local_irq_restore(flags);
}

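/*
 * Flush one user page by remapping its physical frame at a scratch
 * address with the same cache colour as addr and flushing it there.
 */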
void flush_cache_page(struct vm_area_struct *vma,
                      unsigned long addr, unsigned long pfn)
{
        unsigned long vto, flags;

        local_irq_save(flags);
        vto = kremap0(addr, pfn << PAGE_SHIFT);
        cpu_cache_wbinval_page(vto, vma->vm_flags & VM_EXEC);
        kunmap01(vto);
        local_irq_restore(flags);
}

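/*
 * vmalloc/vmap mappings may alias their underlying pages at any colour,
 * so both helpers fall back to flushing the caches completely.
 */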
void flush_cache_vmap(unsigned long start, unsigned long end)
{
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();
}

void flush_cache_vunmap(unsigned long start, unsigned long end)
{
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();
}

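/*
 * copy_user_page()/clear_user_page() work on pages that already have a
 * kernel mapping: the lines at the user address vaddr are written back
 * and invalidated before the operation, and the kernel destination's
 * lines afterwards.
 */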
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *to)
{
        cpu_dcache_wbinval_page((unsigned long)vaddr);
        cpu_icache_inval_page((unsigned long)vaddr);
        copy_page(vto, vfrom);
        cpu_dcache_wbinval_page((unsigned long)vto);
        cpu_icache_inval_page((unsigned long)vto);
}

void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
        cpu_dcache_wbinval_page((unsigned long)vaddr);
        cpu_icache_inval_page((unsigned long)vaddr);
        clear_page(addr);
        cpu_dcache_wbinval_page((unsigned long)addr);
        cpu_icache_inval_page((unsigned long)addr);
}

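/*
 * Copy a (possibly highmem) page destined for the user address vaddr.
 * The source is first written back through its kernel address if that
 * address aliases vaddr, then both pages are mapped at colour-matched
 * scratch addresses and copied, keeping the user-visible lines
 * coherent.
 */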
void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        unsigned long vto, vfrom, flags, kfrom, pfrom, pto;
        kfrom = ((unsigned long)page_address(from) & PAGE_MASK);
        pto = page_to_phys(to);
        pfrom = page_to_phys(from);

        local_irq_save(flags);
        if (aliasing(vaddr, (unsigned long)kfrom))
                cpu_dcache_wb_page((unsigned long)kfrom);
        vto = kremap0(vaddr, pto);
        vfrom = kremap1(vaddr, pfrom);
        copy_page((void *)vto, (void *)vfrom);
        kunmap01(vfrom);
        kunmap01(vto);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(copy_user_highpage);

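/*
 * Zero a page for user space. If the page's kernel address aliases
 * vaddr, its lines are invalidated first; the clear itself goes through
 * a colour-matched scratch mapping.
 */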
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        unsigned long vto, flags, kto;

        kto = ((unsigned long)page_address(page) & PAGE_MASK);

        local_irq_save(flags);
        if (aliasing(kto, vaddr) && kto != 0) {
                cpu_dcache_inval_page(kto);
                cpu_icache_inval_page(kto);
        }
        vto = kremap0(vaddr, page_to_phys(page));
        clear_page((void *)vto);
        kunmap01(vto);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(clear_user_highpage);

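/*
 * If the page belongs to a mapping with no user mappings, defer the
 * flush by setting PG_dcache_dirty (update_mmu_cache() will catch it).
 * Otherwise flush the kernel alias now, and additionally the
 * user-colour alias when the two colours differ.
 */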
void flush_dcache_page(struct page *page)
{
        struct address_space *mapping;

        mapping = page_mapping(page);
        if (mapping && !mapping_mapped(mapping))
                set_bit(PG_dcache_dirty, &page->flags);
        else {
                unsigned long kaddr, flags;

                kaddr = (unsigned long)page_address(page);
                local_irq_save(flags);
                cpu_dcache_wbinval_page(kaddr);
                if (mapping) {
                        unsigned long vaddr, kto;

                        vaddr = page->index << PAGE_SHIFT;
                        if (aliasing(vaddr, kaddr)) {
                                kto = kremap0(vaddr, page_to_phys(page));
                                cpu_dcache_wbinval_page(kto);
                                kunmap01(kto);
                        }
                }
                local_irq_restore(flags);
        }
}
EXPORT_SYMBOL(flush_dcache_page);

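/*
 * Copy len bytes into a user page on the kernel's behalf (e.g. for
 * ptrace pokes) through a colour-matched mapping. For executable VMAs
 * the affected lines are written back and invalidated so the I-cache
 * picks up the new instructions.
 */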
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, void *src, int len)
{
        unsigned long line_size, start, end, vto, flags;

        local_irq_save(flags);
        vto = kremap0(vaddr, page_to_phys(page));
        dst = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
        memcpy(dst, src, len);
        if (vma->vm_flags & VM_EXEC) {
                line_size = L1_cache_info[DCACHE].line_size;
                start = (unsigned long)dst & ~(line_size - 1);
                end = ((unsigned long)dst + len + line_size - 1) &
                      ~(line_size - 1);
                cpu_cache_wbinval_range(start, end, 1);
        }
        kunmap01(vto);
        local_irq_restore(flags);
}

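/*
 * Read len bytes from a user page through a colour-matched mapping so
 * the kernel sees the same data the user mapping sees.
 */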
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, void *src, int len)
{
        unsigned long vto, flags;

        local_irq_save(flags);
        vto = kremap0(vaddr, page_to_phys(page));
        src = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
        memcpy(dst, src, len);
        kunmap01(vto);
        local_irq_restore(flags);
}

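/*
 * Anonymous pages may be mapped to user space at a colour that differs
 * from their kernel address. Flush the I-cache at the user address for
 * executable VMAs, and the D-cache through a colour-matched alias when
 * the colours differ. Only the active mm is handled here.
 */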
void flush_anon_page(struct vm_area_struct *vma,
                     struct page *page, unsigned long vaddr)
{
        unsigned long kaddr, flags, ktmp;
        if (!PageAnon(page))
                return;

        if (vma->vm_mm != current->active_mm)
                return;

        local_irq_save(flags);
        if (vma->vm_flags & VM_EXEC)
                cpu_icache_inval_page(vaddr & PAGE_MASK);
        kaddr = (unsigned long)page_address(page);
        if (aliasing(vaddr, kaddr)) {
                ktmp = kremap0(vaddr, page_to_phys(page));
                cpu_dcache_wbinval_page(ktmp);
                kunmap01(ktmp);
        }
        local_irq_restore(flags);
}

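/*
 * Write back and invalidate the kernel alias of a page the kernel has
 * modified via kmap(), so user mappings will observe the update.
 */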
void flush_kernel_dcache_page(struct page *page)
{
        unsigned long flags;
        local_irq_save(flags);
        cpu_dcache_wbinval_page((unsigned long)page_address(page));
        local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

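/*
 * Write back (flush_kernel_vmap_range) or discard
 * (invalidate_kernel_vmap_range) the vmap alias of a range, used around
 * I/O to pages that are also accessed through other aliases.
 */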
void flush_kernel_vmap_range(void *addr, int size)
{
        unsigned long flags;
        local_irq_save(flags);
        cpu_dcache_wb_range((unsigned long)addr, (unsigned long)addr + size);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *addr, int size)
{
        unsigned long flags;
        local_irq_save(flags);
        cpu_dcache_inval_range((unsigned long)addr, (unsigned long)addr + size);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
#endif
