arch/arm/include/asm/tlbflush.h


DEFINITIONS

This source file includes the following definitions:
  1. __local_flush_tlb_all
  2. local_flush_tlb_all
  3. __flush_tlb_all
  4. __local_flush_tlb_mm
  5. local_flush_tlb_mm
  6. __flush_tlb_mm
  7. __local_flush_tlb_page
  8. local_flush_tlb_page
  9. __flush_tlb_page
  10. __local_flush_tlb_kernel_page
  11. local_flush_tlb_kernel_page
  12. __flush_tlb_kernel_page
  13. __local_flush_bp_all
  14. local_flush_bp_all
  15. __flush_bp_all
  16. flush_pmd_entry
  17. clean_pmd_entry
  18. update_mmu_cache
  19. local_flush_tlb_all
  20. local_flush_tlb_mm
  21. local_flush_tlb_page
  22. local_flush_tlb_kernel_page
  23. local_flush_tlb_range
  24. local_flush_tlb_kernel_range
  25. local_flush_bp_all
  26. erratum_a15_798181_init
  27. erratum_a15_798181

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/tlbflush.h
 *
 *  Copyright (C) 1999-2003 Russell King
 */
#ifndef _ASMARM_TLBFLUSH_H
#define _ASMARM_TLBFLUSH_H

#ifndef __ASSEMBLY__
# include <linux/mm_types.h>
#endif

#ifdef CONFIG_MMU

#include <asm/glue.h>

#define TLB_V4_U_PAGE   (1 << 1)
#define TLB_V4_D_PAGE   (1 << 2)
#define TLB_V4_I_PAGE   (1 << 3)
#define TLB_V6_U_PAGE   (1 << 4)
#define TLB_V6_D_PAGE   (1 << 5)
#define TLB_V6_I_PAGE   (1 << 6)

#define TLB_V4_U_FULL   (1 << 9)
#define TLB_V4_D_FULL   (1 << 10)
#define TLB_V4_I_FULL   (1 << 11)
#define TLB_V6_U_FULL   (1 << 12)
#define TLB_V6_D_FULL   (1 << 13)
#define TLB_V6_I_FULL   (1 << 14)

#define TLB_V6_U_ASID   (1 << 16)
#define TLB_V6_D_ASID   (1 << 17)
#define TLB_V6_I_ASID   (1 << 18)

#define TLB_V6_BP       (1 << 19)

/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
#define TLB_V7_UIS_PAGE (1 << 20)
#define TLB_V7_UIS_FULL (1 << 21)
#define TLB_V7_UIS_ASID (1 << 22)
#define TLB_V7_UIS_BP   (1 << 23)

#define TLB_BARRIER     (1 << 28)
#define TLB_L2CLEAN_FR  (1 << 29)               /* Feroceon */
#define TLB_DCLEAN      (1 << 30)
#define TLB_WB          (1 << 31)

/*
 *      MMU TLB Model
 *      =============
 *
 *      We have the following to choose from:
 *        v4    - ARMv4 without write buffer
 *        v4wb  - ARMv4 with write buffer without I TLB flush entry instruction
 *        v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
 *        fr    - Feroceon (v4wbi with non-outer-cacheable page table walks)
 *        fa    - Faraday (v4 with write buffer with UTLB)
 *        v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
 *        v7wbi - identical to v6wbi
 */
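
/*
 * Illustrative note (not part of the original header): the CONFIG_CPU_TLB_*
 * blocks below select the model at preprocessing time.  The first selected
 * option defines _TLB with its model name; any further selected option
 * finds _TLB already defined and sets MULTI_TLB instead.  For example, with
 * CONFIG_CPU_TLB_V4WBI and CONFIG_CPU_TLB_V6 both enabled:
 *
 *      v4wbi block:  _TLB undefined  ->  #define _TLB v4wbi
 *      v6 block:     _TLB defined    ->  #define MULTI_TLB 1
 *
 * With MULTI_TLB set, the flush entry points are dispatched through the
 * cpu_tlb function table at run time rather than bound to one model's
 * functions at build time.
 */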
#undef _TLB
#undef MULTI_TLB

#ifdef CONFIG_SMP_ON_UP
#define MULTI_TLB 1
#endif

#define v4_tlb_flags    (TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_V4WT
# define v4_possible_flags      v4_tlb_flags
# define v4_always_flags        v4_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4
# endif
#else
# define v4_possible_flags      0
# define v4_always_flags        (-1UL)
#endif

#define fa_tlb_flags    (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
                         TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_FA
# define fa_possible_flags      fa_tlb_flags
# define fa_always_flags        fa_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB fa
# endif
#else
# define fa_possible_flags      0
# define fa_always_flags        (-1UL)
#endif

#define v4wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \
                         TLB_V4_I_FULL | TLB_V4_D_FULL | \
                         TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WBI
# define v4wbi_possible_flags   v4wbi_tlb_flags
# define v4wbi_always_flags     v4wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wbi
# endif
#else
# define v4wbi_possible_flags   0
# define v4wbi_always_flags     (-1UL)
#endif

#define fr_tlb_flags    (TLB_WB | TLB_DCLEAN | TLB_L2CLEAN_FR | \
                         TLB_V4_I_FULL | TLB_V4_D_FULL | \
                         TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_FEROCEON
# define fr_possible_flags      fr_tlb_flags
# define fr_always_flags        fr_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wbi
# endif
#else
# define fr_possible_flags      0
# define fr_always_flags        (-1UL)
#endif

#define v4wb_tlb_flags  (TLB_WB | TLB_DCLEAN | \
                         TLB_V4_I_FULL | TLB_V4_D_FULL | \
                         TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WB
# define v4wb_possible_flags    v4wb_tlb_flags
# define v4wb_always_flags      v4wb_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wb
# endif
#else
# define v4wb_possible_flags    0
# define v4wb_always_flags      (-1UL)
#endif

#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
                         TLB_V6_I_FULL | TLB_V6_D_FULL | \
                         TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
                         TLB_V6_I_ASID | TLB_V6_D_ASID | \
                         TLB_V6_BP)

#ifdef CONFIG_CPU_TLB_V6
# define v6wbi_possible_flags   v6wbi_tlb_flags
# define v6wbi_always_flags     v6wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v6wbi
# endif
#else
# define v6wbi_possible_flags   0
# define v6wbi_always_flags     (-1UL)
#endif

#define v7wbi_tlb_flags_smp     (TLB_WB | TLB_BARRIER | \
                                 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
                                 TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
#define v7wbi_tlb_flags_up      (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
                                 TLB_V6_U_FULL | TLB_V6_U_PAGE | \
                                 TLB_V6_U_ASID | TLB_V6_BP)

#ifdef CONFIG_CPU_TLB_V7

# ifdef CONFIG_SMP_ON_UP
#  define v7wbi_possible_flags  (v7wbi_tlb_flags_smp | v7wbi_tlb_flags_up)
#  define v7wbi_always_flags    (v7wbi_tlb_flags_smp & v7wbi_tlb_flags_up)
# elif defined(CONFIG_SMP)
#  define v7wbi_possible_flags  v7wbi_tlb_flags_smp
#  define v7wbi_always_flags    v7wbi_tlb_flags_smp
# else
#  define v7wbi_possible_flags  v7wbi_tlb_flags_up
#  define v7wbi_always_flags    v7wbi_tlb_flags_up
# endif
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v7wbi
# endif
#else
# define v7wbi_possible_flags   0
# define v7wbi_always_flags     (-1UL)
#endif

#ifndef _TLB
#error Unknown TLB model
#endif

#ifndef __ASSEMBLY__

#include <linux/sched.h>

struct cpu_tlb_fns {
        void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
        void (*flush_kern_range)(unsigned long, unsigned long);
        unsigned long tlb_flags;
};

/*
 * Select the calling method
 */
#ifdef MULTI_TLB

#define __cpu_flush_user_tlb_range      cpu_tlb.flush_user_range
#define __cpu_flush_kern_tlb_range      cpu_tlb.flush_kern_range

#else

#define __cpu_flush_user_tlb_range      __glue(_TLB,_flush_user_tlb_range)
#define __cpu_flush_kern_tlb_range      __glue(_TLB,_flush_kern_tlb_range)

extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);

#endif
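
/*
 * Illustrative expansion (assuming a build where only CONFIG_CPU_TLB_V7 is
 * selected, so _TLB is v7wbi and MULTI_TLB is unset): __glue() pastes its
 * two arguments together, giving
 *
 *      __cpu_flush_user_tlb_range  ->  v7wbi_flush_user_tlb_range
 *      __cpu_flush_kern_tlb_range  ->  v7wbi_flush_kern_tlb_range
 *
 * so calls bind directly to the assembly routines in arch/arm/mm/tlb-v7.S
 * with no run-time indirection.
 */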

extern struct cpu_tlb_fns cpu_tlb;

#define __cpu_tlb_flags                 cpu_tlb.tlb_flags

/*
 *      TLB Management
 *      ==============
 *
 *      The arch/arm/mm/tlb-*.S files implement these methods.
 *
 *      The TLB specific code is expected to perform whatever tests it
 *      needs to determine if it should invalidate the TLB for each
 *      call.  Start addresses are inclusive and end addresses are
 *      exclusive; it is safe to round these addresses down.
 *
 *      flush_tlb_all()
 *
 *              Invalidate the entire TLB.
 *
 *      flush_tlb_mm(mm)
 *
 *              Invalidate all TLB entries in a particular address
 *              space.
 *              - mm    - mm_struct describing address space
 *
 *      flush_tlb_range(mm,start,end)
 *
 *              Invalidate a range of TLB entries in the specified
 *              address space.
 *              - mm    - mm_struct describing address space
 *              - start - start address (may not be aligned)
 *              - end   - end address (exclusive, may not be aligned)
 *
 *      flush_tlb_page(vaddr,vma)
 *
 *              Invalidate the specified page in the specified address range.
 *              - vaddr - virtual address (may not be aligned)
 *              - vma   - vma_struct describing address range
 *
 *      flush_kern_tlb_page(kaddr)
 *
 *              Invalidate the TLB entry for the specified page.  The address
 *              will be in the kernel's virtual memory space.  Current uses
 *              only require the D-TLB to be invalidated.
 *              - kaddr - Kernel virtual memory address
 */

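/*
 * A minimal usage sketch (illustrative only; vma, mm, addr, ptep and
 * pteval are assumed to come from the caller's context).  Generic mm code
 * calls these interfaces after changing page tables, e.g.:
 *
 *      set_pte_at(mm, addr, ptep, pteval);
 *      flush_tlb_page(vma, addr);      // drop the stale user entry
 *
 *      flush_tlb_mm(mm);               // after tearing down an entire
 *                                      // address space
 */
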
/*
 * We optimise the code below by:
 *  - building a set of TLB flags that might be set in __cpu_tlb_flags
 *  - building a set of TLB flags that will always be set in __cpu_tlb_flags
 *  - if we're going to need __cpu_tlb_flags, access it once and only once
 *
 * This allows us to build optimal assembly for the single-CPU type case,
 * and as close to optimal as the compiler constraints allow for the
 * multi-CPU case.  We could do better for the multi-CPU case if the
 * compiler implemented the "%?" method, but this has been discontinued
 * due to too many people getting it wrong.
 */
#define possible_tlb_flags      (v4_possible_flags | \
                                 v4wbi_possible_flags | \
                                 fr_possible_flags | \
                                 v4wb_possible_flags | \
                                 fa_possible_flags | \
                                 v6wbi_possible_flags | \
                                 v7wbi_possible_flags)

#define always_tlb_flags        (v4_always_flags & \
                                 v4wbi_always_flags & \
                                 fr_always_flags & \
                                 v4wb_always_flags & \
                                 fa_always_flags & \
                                 v6wbi_always_flags & \
                                 v7wbi_always_flags)

#define tlb_flag(f)     ((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))

#define __tlb_op(f, insnarg, arg)                                       \
        do {                                                            \
                if (always_tlb_flags & (f))                             \
                        asm("mcr " insnarg                              \
                            : : "r" (arg) : "cc");                      \
                else if (possible_tlb_flags & (f))                      \
                        asm("tst %1, %2\n\t"                            \
                            "mcrne " insnarg                            \
                            : : "r" (arg), "r" (__tlb_flag), "Ir" (f)   \
                            : "cc");                                    \
        } while (0)

#define tlb_op(f, regs, arg)    __tlb_op(f, "p15, 0, %0, " regs, arg)
#define tlb_l2_op(f, regs, arg) __tlb_op(f, "p15, 1, %0, " regs, arg)
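
/*
 * Worked example (illustrative): in a kernel built for a single TLB model
 * whose flags include TLB_V4_U_FULL, that flag lands in always_tlb_flags,
 * so
 *
 *      tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
 *
 * compiles to one unconditional "mcr p15, 0, %0, c8, c7, 0".  In a
 * multi-model kernel the flag is only in possible_tlb_flags, and the same
 * call emits a "tst" of the run-time __tlb_flag followed by a conditional
 * "mcrne", deferring the decision to the CPU actually booted.
 */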

static inline void __local_flush_tlb_all(void)
{
        const int zero = 0;
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
        tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
        tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
}

static inline void local_flush_tlb_all(void)
{
        const int zero = 0;
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        if (tlb_flag(TLB_WB))
                dsb(nshst);

        __local_flush_tlb_all();
        tlb_op(TLB_V7_UIS_FULL, "c8, c7, 0", zero);

        if (tlb_flag(TLB_BARRIER)) {
                dsb(nsh);
                isb();
        }
}

static inline void __flush_tlb_all(void)
{
        const int zero = 0;
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        if (tlb_flag(TLB_WB))
                dsb(ishst);

        __local_flush_tlb_all();
        tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);

        if (tlb_flag(TLB_BARRIER)) {
                dsb(ish);
                isb();
        }
}
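
/*
 * Summary note (illustrative): the local_* variants above act on the
 * calling CPU only, so they use non-shareable barriers (dsb nshst/nsh)
 * and the local TLBIALL encoding (c8, c7, 0).  The __flush_* variants
 * must be seen by every CPU in the inner-shareable domain, so they use
 * ishst/ish barriers and the inner-shareable TLBIALLIS encoding
 * (c8, c3, 0).
 */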

static inline void __local_flush_tlb_mm(struct mm_struct *mm)
{
        const int zero = 0;
        const int asid = ASID(mm);
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
                if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
                        tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
                        tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
                        tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
                }
        }

        tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid);
        tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid);
        tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid);
}

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
        const int asid = ASID(mm);
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        if (tlb_flag(TLB_WB))
                dsb(nshst);

        __local_flush_tlb_mm(mm);
        tlb_op(TLB_V7_UIS_ASID, "c8, c7, 2", asid);

        if (tlb_flag(TLB_BARRIER))
                dsb(nsh);
}

static inline void __flush_tlb_mm(struct mm_struct *mm)
{
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        if (tlb_flag(TLB_WB))
                dsb(ishst);

        __local_flush_tlb_mm(mm);
#ifdef CONFIG_ARM_ERRATA_720789
        tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", 0);
#else
        tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", ASID(mm));
#endif

        if (tlb_flag(TLB_BARRIER))
                dsb(ish);
}

static inline void
__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
        const int zero = 0;
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

        if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
            cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
                tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);
                tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);
                if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
                        asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
        }

        tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr);
        tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr);
        tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr);
}

static inline void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

        if (tlb_flag(TLB_WB))
                dsb(nshst);

        __local_flush_tlb_page(vma, uaddr);
        tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", uaddr);

        if (tlb_flag(TLB_BARRIER))
                dsb(nsh);
}

static inline void
__flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

        if (tlb_flag(TLB_WB))
                dsb(ishst);

        __local_flush_tlb_page(vma, uaddr);
#ifdef CONFIG_ARM_ERRATA_720789
        tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK);
#else
        tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", uaddr);
#endif

        if (tlb_flag(TLB_BARRIER))
                dsb(ish);
}

static inline void __local_flush_tlb_kernel_page(unsigned long kaddr)
{
        const int zero = 0;
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
        tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
        tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
        if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
                asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");

        tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr);
        tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr);
        tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr);
}

static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
{
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        kaddr &= PAGE_MASK;

        if (tlb_flag(TLB_WB))
                dsb(nshst);

        __local_flush_tlb_kernel_page(kaddr);
        tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", kaddr);

        if (tlb_flag(TLB_BARRIER)) {
                dsb(nsh);
                isb();
        }
}

static inline void __flush_tlb_kernel_page(unsigned long kaddr)
{
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        kaddr &= PAGE_MASK;

        if (tlb_flag(TLB_WB))
                dsb(ishst);

        __local_flush_tlb_kernel_page(kaddr);
        tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr);

        if (tlb_flag(TLB_BARRIER)) {
                dsb(ish);
                isb();
        }
}

/*
 * Branch predictor maintenance is paired with full TLB invalidation, so
 * there is no need for any barriers here.
 */
static inline void __local_flush_bp_all(void)
{
        const int zero = 0;
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        if (tlb_flag(TLB_V6_BP))
                asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
}

static inline void local_flush_bp_all(void)
{
        const int zero = 0;
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        __local_flush_bp_all();
        if (tlb_flag(TLB_V7_UIS_BP))
                asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
}

static inline void __flush_bp_all(void)
{
        const int zero = 0;
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        __local_flush_bp_all();
        if (tlb_flag(TLB_V7_UIS_BP))
                asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
}

/*
 *      flush_pmd_entry
 *
 *      Flush a PMD entry (word aligned, or double-word aligned) to
 *      RAM if the TLB for the CPU we are running on requires this.
 *      This is typically used when we are creating PMD entries.
 *
 *      clean_pmd_entry
 *
 *      Clean (but don't drain the write buffer) if the CPU requires
 *      these operations.  This is typically used when we are removing
 *      PMD entries.
 */
static inline void flush_pmd_entry(void *pmd)
{
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        tlb_op(TLB_DCLEAN, "c7, c10, 1  @ flush_pmd", pmd);
        tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1  @ L2 flush_pmd", pmd);

        if (tlb_flag(TLB_WB))
                dsb(ishst);
}

static inline void clean_pmd_entry(void *pmd)
{
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        tlb_op(TLB_DCLEAN, "c7, c10, 1  @ flush_pmd", pmd);
        tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1  @ L2 flush_pmd", pmd);
}
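
/*
 * Usage sketch (illustrative; pmdp, phys and prot are assumed to be
 * provided by the caller).  Page-table setup code cleans a newly written
 * entry out to the point where the hardware walker can see it, when the
 * TLB model requires it:
 *
 *      *pmdp = __pmd(phys | prot);     // write the section entry
 *      flush_pmd_entry(pmdp);          // clean it past the D-cache
 */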

#undef tlb_op
#undef tlb_flag
#undef always_tlb_flags
#undef possible_tlb_flags

/*
 * Convert calls to our calling convention.
 */
#define local_flush_tlb_range(vma,start,end)    __cpu_flush_user_tlb_range(start,end,vma)
#define local_flush_tlb_kernel_range(s,e)       __cpu_flush_kern_tlb_range(s,e)

#ifndef CONFIG_SMP
#define flush_tlb_all           local_flush_tlb_all
#define flush_tlb_mm            local_flush_tlb_mm
#define flush_tlb_page          local_flush_tlb_page
#define flush_tlb_kernel_page   local_flush_tlb_kernel_page
#define flush_tlb_range         local_flush_tlb_range
#define flush_tlb_kernel_range  local_flush_tlb_kernel_range
#define flush_bp_all            local_flush_bp_all
#else
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
extern void flush_tlb_kernel_page(unsigned long kaddr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void flush_bp_all(void);
#endif

/*
 * If PG_dcache_clean is not set for the page, we need to ensure that any
 * cache entries for the kernel's virtual memory range are written
 * back to the page.  On ARMv6 and later, the cache coherency is handled via
 * the set_pte_at() function.
 */
#if __LINUX_ARM_ARCH__ < 6
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
        pte_t *ptep);
#else
static inline void update_mmu_cache(struct vm_area_struct *vma,
                                    unsigned long addr, pte_t *ptep)
{
}
#endif

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#endif  /* __ASSEMBLY__ */

#elif defined(CONFIG_SMP)       /* !CONFIG_MMU */

#ifndef __ASSEMBLY__
static inline void local_flush_tlb_all(void) { }
static inline void local_flush_tlb_mm(struct mm_struct *mm) { }
static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) { }
static inline void local_flush_tlb_kernel_page(unsigned long kaddr) { }
static inline void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { }
static inline void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) { }
static inline void local_flush_bp_all(void) { }

extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
extern void flush_tlb_kernel_page(unsigned long kaddr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void flush_bp_all(void);
#endif  /* __ASSEMBLY__ */

#endif  /* CONFIG_MMU */

#ifndef __ASSEMBLY__
#ifdef CONFIG_ARM_ERRATA_798181
extern void erratum_a15_798181_init(void);
#else
static inline void erratum_a15_798181_init(void) {}
#endif
extern bool (*erratum_a15_798181_handler)(void);

static inline bool erratum_a15_798181(void)
{
        if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) &&
                erratum_a15_798181_handler))
                return erratum_a15_798181_handler();
        return false;
}
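
/*
 * Usage sketch (illustrative): SMP TLB maintenance code can use this
 * predicate to decide whether the Cortex-A15 798181 workaround path is
 * needed before relying on hardware TLB broadcast:
 *
 *      if (erratum_a15_798181())
 *              ipi_tlb_broadcast();    // hypothetical IPI-based fallback
 */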
#endif  /* __ASSEMBLY__ */

#endif  /* _ASMARM_TLBFLUSH_H */
