root/include/asm-generic/tlb.h


DEFINITIONS

This source file includes the following definitions.
  1. __tlb_adjust_range
  2. __tlb_reset_range
  3. tlb_flush
  4. tlb_update_vma_flags
  5. tlb_end_vma
  6. tlb_flush
  7. tlb_update_vma_flags
  8. tlb_update_vma_flags
  9. tlb_flush_mmu_tlbonly
  10. tlb_remove_page_size
  11. __tlb_remove_page
  12. tlb_remove_page
  13. tlb_change_page_size
  14. tlb_get_unmap_shift
  15. tlb_get_unmap_size
  16. tlb_start_vma
  17. tlb_end_vma

   1 /* SPDX-License-Identifier: GPL-2.0-or-later */
   2 /* include/asm-generic/tlb.h
   3  *
   4  *      Generic TLB shootdown code
   5  *
   6  * Copyright 2001 Red Hat, Inc.
   7  * Based on code from mm/memory.c Copyright Linus Torvalds and others.
   8  *
   9  * Copyright 2011 Red Hat, Inc., Peter Zijlstra
  10  */
  11 #ifndef _ASM_GENERIC__TLB_H
  12 #define _ASM_GENERIC__TLB_H
  13 
  14 #include <linux/mmu_notifier.h>
  15 #include <linux/swap.h>
  16 #include <asm/pgalloc.h>
  17 #include <asm/tlbflush.h>
  18 #include <asm/cacheflush.h>
  19 
  20 /*
  21  * Blindly accessing user memory from NMI context can be dangerous
  22  * if we're in the middle of switching the current user task or switching
  23  * the loaded mm.
  24  */
  25 #ifndef nmi_uaccess_okay
  26 # define nmi_uaccess_okay() true
  27 #endif
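     /*
      * An illustrative sketch (not a formal contract): code that
      * opportunistically accesses user memory from NMI context, e.g. a
      * profiler unwinding user stacks, is expected to bail out when this
      * returns false:
      *
      *        if (!nmi_uaccess_okay())
      *                return -EFAULT;
      *        ... proceed with the pagefault-disabled user access ...
      */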
  28 
  29 #ifdef CONFIG_MMU
  30 
  31 /*
  32  * Generic MMU-gather implementation.
  33  *
  34  * The mmu_gather data structure is used by the mm code to implement the
  35  * correct and efficient ordering of freeing pages and TLB invalidations.
  36  *
  37  * This correct ordering is:
  38  *
  39  *  1) unhook page
  40  *  2) TLB invalidate page
  41  *  3) free page
  42  *
  43  * That is, we must never free a page before we have ensured there are no live
  44  * translations left to it. Otherwise it might be possible to observe (or
  45  * worse, change) the page content after it has been reused.
  46  *
  47  * The mmu_gather API consists of:
  48  *
  49  *  - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
  50  *
  51  *    Finish in particular will issue a (final) TLB invalidate and free
  52  *    all (remaining) queued pages.
  53  *
  54  *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
  55  *
  56  *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
  57  *    there are large holes between the VMAs.
  58  *
  59  *  - tlb_remove_page() / __tlb_remove_page()
  60  *  - tlb_remove_page_size() / __tlb_remove_page_size()
  61  *
  62  *    __tlb_remove_page_size() is the basic primitive that queues a page for
  63  *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
  64  *    boolean indicating if the queue is (now) full and a call to
  65  *    tlb_flush_mmu() is required.
  66  *
  67  *    tlb_remove_page() and tlb_remove_page_size() imply the call to
  68  *    tlb_flush_mmu() when required and have no return value.
  69  *
  70  *  - tlb_change_page_size()
  71  *
  72  *    call before __tlb_remove_page*() to set the current page-size; implies a
  73  *    possible tlb_flush_mmu() call.
  74  *
  75  *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
  76  *
  77  *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
  78  *                              related state, like the range)
  79  *
  80  *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
  81  *                      whatever pages are still batched.
  82  *
  83  *  - mmu_gather::fullmm
  84  *
  85  *    A flag set by tlb_gather_mmu() to indicate we're going to free
  86  *    the entire mm; this allows a number of optimizations.
  87  *
  88  *    - We can ignore tlb_{start,end}_vma(), because we don't
  89  *      care about ranges. Everything will be shot down.
  90  *
  91  *    - (RISC) architectures that use ASIDs can cycle to a new ASID
  92  *      and delay the invalidation until ASID space runs out.
  93  *
  94  *  - mmu_gather::need_flush_all
  95  *
  96  *    A flag that can be set by the arch code if it wants to force
  97  *    flush the entire TLB irrespective of the range. For instance
  98  *    x86-PAE needs this when changing top-level entries.
  99  *
 100  * And allows the architecture to provide and implement tlb_flush():
 101  *
 102  * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
 103  * use of:
 104  *
 105  *  - mmu_gather::start / mmu_gather::end
 106  *
 107  *    which provides the range that needs to be flushed to cover the pages to
 108  *    be freed.
 109  *
 110  *  - mmu_gather::freed_tables
 111  *
 112  *    set when we freed page table pages
 113  *
 114  *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
 115  *
 116  *    returns the smallest TLB entry size unmapped in this range.
 117  *
 118  * If an architecture does not provide tlb_flush() a default implementation
 119  * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
 120  * specified, in which case we'll default to flush_tlb_mm().
 121  *
 122  * Additionally there are a few opt-in features:
 123  *
 124  *  HAVE_MMU_GATHER_PAGE_SIZE
 125  *
 126  *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
 127  *  changes the size and provides mmu_gather::page_size to tlb_flush().
 128  *
 129  *  HAVE_RCU_TABLE_FREE
 130  *
 131  *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
 132  *  for page directories (__p*_free_tlb()). This provides separate freeing of
 133  *  the page-table pages themselves in a semi-RCU fashion (see comment below).
 134  *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
 135  *  and therefore doesn't naturally serialize with software page-table walkers.
 136  *
 137  *  When used, an architecture is expected to provide __tlb_remove_table()
 138  *  which does the actual freeing of these pages.
 139  *
 140  *  MMU_GATHER_NO_RANGE
 141  *
 142  *  Use this if your architecture lacks an efficient flush_tlb_range().
 143  */
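     /*
      * An illustrative sketch of the typical call sequence (not part of this
      * header); it assumes the tlb_gather_mmu()/tlb_finish_mmu() prototypes
      * declared in <linux/mm.h> for this kernel, and it elides the actual
      * page-table walk, locking and error handling:
      *
      *        struct mmu_gather tlb;
      *        struct vm_area_struct *vma;
      *
      *        tlb_gather_mmu(&tlb, mm, start, end);
      *        for (vma = find_vma(mm, start); vma && vma->vm_start < end;
      *             vma = vma->vm_next) {
      *                tlb_start_vma(&tlb, vma);
      *                ... walk the page tables, calling tlb_remove_tlb_entry()
      *                    and tlb_remove_page() for each mapped page ...
      *                tlb_end_vma(&tlb, vma);
      *        }
      *        tlb_finish_mmu(&tlb, start, end);
      */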
 144 
 145 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 146 /*
 147  * Semi RCU freeing of the page directories.
 148  *
 149  * This is needed by some architectures to implement software pagetable walkers.
 150  *
 151  * gup_fast() and other software pagetable walkers do a lockless page-table
 152  * walk and therefore need some synchronization with the freeing of the page
 153  * directories. The chosen means to accomplish that is by disabling IRQs over
 154  * the walk.
 155  *
 156  * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 157  * since we unlink the page, flush TLBs, then free the page. Because the
 158  * disabling of IRQs delays the completion of the TLB flush, we can never
 159  * observe an already freed page.
 160  *
 161  * Architectures that do not have this (PPC) need to delay the freeing by some
 162  * other means; this is that means.
 163  *
 164  * What we do is batch the freed directory pages (tables) and RCU free them.
 165  * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 166  * holds off grace periods.
 167  *
 168  * However, in order to batch these pages we need to allocate storage; this
 169  * allocation is deep inside the MM code and can thus easily fail under memory
 170  * pressure. To guarantee progress we fall back to single-table freeing; see
 171  * the implementation of tlb_remove_table_one().
 172  *
 173  */
 174 struct mmu_table_batch {
 175         struct rcu_head         rcu;
 176         unsigned int            nr;
 177         void                    *tables[0];
 178 };
 179 
 180 #define MAX_TABLE_BATCH         \
 181         ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
 182 
 183 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 184 
 185 /*
 186  * This allows an architecture that does not use the linux page-tables for
 187  * hardware to skip the TLBI when freeing page tables.
 188  */
 189 #ifndef tlb_needs_table_invalidate
 190 #define tlb_needs_table_invalidate() (true)
 191 #endif
 192 
 193 #else
 194 
 195 #ifdef tlb_needs_table_invalidate
 196 #error tlb_needs_table_invalidate() requires HAVE_RCU_TABLE_FREE
 197 #endif
 198 
 199 #endif /* CONFIG_HAVE_RCU_TABLE_FREE */
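     /*
      * A minimal sketch of how an architecture selecting HAVE_RCU_TABLE_FREE
      * might wire this up in its asm/tlb.h (illustrative only; real
      * architectures differ in their destructor and accounting details):
      *
      *        static inline void __tlb_remove_table(void *_table)
      *        {
      *                free_page_and_swap_cache((struct page *)_table);
      *        }
      *
      *        #define __pte_free_tlb(tlb, pte, addr) \
      *                tlb_remove_table((tlb), (pte))
      */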
 200 
 201 
 202 #ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
 203 /*
 204  * If we can't allocate a page to make a big batch of page pointers
 205  * to work on, then just handle a few from the on-stack structure.
 206  */
 207 #define MMU_GATHER_BUNDLE       8
 208 
 209 struct mmu_gather_batch {
 210         struct mmu_gather_batch *next;
 211         unsigned int            nr;
 212         unsigned int            max;
 213         struct page             *pages[0];
 214 };
 215 
 216 #define MAX_GATHER_BATCH        \
 217         ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
 218 
 219 /*
 220  * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 221  * lockups for non-preemptible kernels on huge machines when a lot of memory
 222  * is zapped during unmapping.
 223  * 10K pages freed at once should be safe even without a preemption point.
 224  */
 225 #define MAX_GATHER_BATCH_COUNT  (10000UL/MAX_GATHER_BATCH)
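     /*
      * For a feel of the numbers, assuming a 64-bit build with 4K pages (so
      * sizeof(struct mmu_gather_batch) == 16 and sizeof(void *) == 8):
      *
      *        MAX_GATHER_BATCH       == (4096 - 16) / 8 == 510 pages per batch
      *        MAX_GATHER_BATCH_COUNT == 10000 / 510     == 19 batches
      *
      * i.e. an upper bound of roughly 19 * 510 == 9690 queued pages before a
      * flush is forced.
      */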
 226 
 227 extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
 228                                    int page_size);
 229 #endif
 230 
 231 /*
 232  * struct mmu_gather is an opaque type used by the mm code for passing around
 233  * any data needed by arch specific code for tlb_remove_page.
 234  */
 235 struct mmu_gather {
 236         struct mm_struct        *mm;
 237 
 238 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 239         struct mmu_table_batch  *batch;
 240 #endif
 241 
 242         unsigned long           start;
 243         unsigned long           end;
 244         /*
 245          * we are in the middle of an operation to clear
 246          * a full mm and can make some optimizations
 247          */
 248         unsigned int            fullmm : 1;
 249 
 250         /*
 251          * we have performed an operation which
 252          * requires a complete flush of the tlb
 253          */
 254         unsigned int            need_flush_all : 1;
 255 
 256         /*
 257          * we have removed page directories
 258          */
 259         unsigned int            freed_tables : 1;
 260 
 261         /*
 262          * at which levels have we cleared entries?
 263          */
 264         unsigned int            cleared_ptes : 1;
 265         unsigned int            cleared_pmds : 1;
 266         unsigned int            cleared_puds : 1;
 267         unsigned int            cleared_p4ds : 1;
 268 
 269         /*
 270          * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
 271          */
 272         unsigned int            vma_exec : 1;
 273         unsigned int            vma_huge : 1;
 274 
 275         unsigned int            batch_count;
 276 
 277 #ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
 278         struct mmu_gather_batch *active;
 279         struct mmu_gather_batch local;
 280         struct page             *__pages[MMU_GATHER_BUNDLE];
 281 
 282 #ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
 283         unsigned int page_size;
 284 #endif
 285 #endif
 286 };
 287 
 288 void arch_tlb_gather_mmu(struct mmu_gather *tlb,
 289         struct mm_struct *mm, unsigned long start, unsigned long end);
 290 void tlb_flush_mmu(struct mmu_gather *tlb);
 291 void arch_tlb_finish_mmu(struct mmu_gather *tlb,
 292                          unsigned long start, unsigned long end, bool force);
 293 
 294 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
 295                                       unsigned long address,
 296                                       unsigned int range_size)
 297 {
 298         tlb->start = min(tlb->start, address);
 299         tlb->end = max(tlb->end, address + range_size);
 300 }
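     /*
      * E.g. (assuming 4K pages) __tlb_adjust_range(tlb, 0x1000, PAGE_SIZE)
      * followed by __tlb_adjust_range(tlb, 0x5000, PAGE_SIZE) leaves the
      * to-be-flushed range at [0x1000, 0x6000), covering the hole in between.
      */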
 301 
 302 static inline void __tlb_reset_range(struct mmu_gather *tlb)
 303 {
 304         if (tlb->fullmm) {
 305                 tlb->start = tlb->end = ~0;
 306         } else {
 307                 tlb->start = TASK_SIZE;
 308                 tlb->end = 0;
 309         }
 310         tlb->freed_tables = 0;
 311         tlb->cleared_ptes = 0;
 312         tlb->cleared_pmds = 0;
 313         tlb->cleared_puds = 0;
 314         tlb->cleared_p4ds = 0;
 315         /*
 316          * Do not reset mmu_gather::vma_* fields here, we do not
 317          * call into tlb_start_vma() again to set them if there is an
 318          * intermediate flush.
 319          */
 320 }
 321 
 322 #ifdef CONFIG_MMU_GATHER_NO_RANGE
 323 
 324 #if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
 325 #error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
 326 #endif
 327 
 328 /*
 329  * When an architecture does not have an efficient means of flushing a range
 330  * of TLB entries, there is no point in doing intermediate flushes on
 331  * tlb_end_vma() to keep the range small. We equally don't have to worry about
 332  * page granularity or other things.
 333  *
 334  * All we need to do is issue a full flush for any !0 range.
 335  */
 336 static inline void tlb_flush(struct mmu_gather *tlb)
 337 {
 338         if (tlb->end)
 339                 flush_tlb_mm(tlb->mm);
 340 }
 341 
 342 static inline void
 343 tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
 344 
 345 #define tlb_end_vma tlb_end_vma
 346 static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
 347 
 348 #else /* CONFIG_MMU_GATHER_NO_RANGE */
 349 
 350 #ifndef tlb_flush
 351 
 352 #if defined(tlb_start_vma) || defined(tlb_end_vma)
 353 #error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
 354 #endif
 355 
 356 /*
 357  * When an architecture does not provide its own tlb_flush() implementation
 358  * but does have a reasonably efficient flush_tlb_range() implementation,
 359  * use that.
 360  */
 361 static inline void tlb_flush(struct mmu_gather *tlb)
 362 {
 363         if (tlb->fullmm || tlb->need_flush_all) {
 364                 flush_tlb_mm(tlb->mm);
 365         } else if (tlb->end) {
 366                 struct vm_area_struct vma = {
 367                         .vm_mm = tlb->mm,
 368                         .vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
 369                                     (tlb->vma_huge ? VM_HUGETLB : 0),
 370                 };
 371 
 372                 flush_tlb_range(&vma, tlb->start, tlb->end);
 373         }
 374 }
 375 
 376 static inline void
 377 tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
 378 {
 379         /*
 380          * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
 381          * mips-4k) flush only large pages.
 382          *
 383          * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
 384          * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
 385          * range.
 386          *
 387          * We rely on tlb_end_vma() to issue a flush, such that when we reset
 388          * these values the batch is empty.
 389          */
 390         tlb->vma_huge = !!(vma->vm_flags & VM_HUGETLB);
 391         tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
 392 }
 393 
 394 #else
 395 
 396 static inline void
 397 tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
 398 
 399 #endif
 400 
 401 #endif /* CONFIG_MMU_GATHER_NO_RANGE */
 402 
 403 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 404 {
 405         if (!tlb->end)
 406                 return;
 407 
 408         tlb_flush(tlb);
 409         mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
 410         __tlb_reset_range(tlb);
 411 }
 412 
 413 static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 414                                         struct page *page, int page_size)
 415 {
 416         if (__tlb_remove_page_size(tlb, page, page_size))
 417                 tlb_flush_mmu(tlb);
 418 }
 419 
 420 static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 421 {
 422         return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
 423 }
 424 
 425 /* tlb_remove_page
 426  *      Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 427  *      required.
 428  */
 429 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 430 {
 431         return tlb_remove_page_size(tlb, page, PAGE_SIZE);
 432 }
 433 
 434 static inline void tlb_change_page_size(struct mmu_gather *tlb,
 435                                                      unsigned int page_size)
 436 {
 437 #ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
 438         if (tlb->page_size && tlb->page_size != page_size) {
 439                 if (!tlb->fullmm)
 440                         tlb_flush_mmu(tlb);
 441         }
 442 
 443         tlb->page_size = page_size;
 444 #endif
 445 }
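     /*
      * An illustrative usage note, mirroring what the generic unmap paths do:
      * callers announce the size they are about to operate on before queueing
      * any pages of that size, e.g.
      *
      *        tlb_change_page_size(tlb, PAGE_SIZE);            PTE-level zap
      *        tlb_change_page_size(tlb, huge_page_size(h));    hugetlb unmap
      *
      * so that, with HAVE_MMU_GATHER_PAGE_SIZE, a size change forces a flush
      * and tlb_flush() always sees a consistent mmu_gather::page_size.
      */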
 446 
 447 static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
 448 {
 449         if (tlb->cleared_ptes)
 450                 return PAGE_SHIFT;
 451         if (tlb->cleared_pmds)
 452                 return PMD_SHIFT;
 453         if (tlb->cleared_puds)
 454                 return PUD_SHIFT;
 455         if (tlb->cleared_p4ds)
 456                 return P4D_SHIFT;
 457 
 458         return PAGE_SHIFT;
 459 }
 460 
 461 static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
 462 {
 463         return 1UL << tlb_get_unmap_shift(tlb);
 464 }
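     /*
      * A sketch of how an architecture-provided tlb_flush() might use this as
      * its invalidation stride (illustrative; flush_tlb_range_stride() is a
      * hypothetical helper, not an existing kernel interface):
      *
      *        static inline void tlb_flush(struct mmu_gather *tlb)
      *        {
      *                unsigned long stride = tlb_get_unmap_size(tlb);
      *
      *                if (tlb->fullmm || tlb->need_flush_all)
      *                        flush_tlb_mm(tlb->mm);
      *                else if (tlb->end)
      *                        flush_tlb_range_stride(tlb->mm, tlb->start,
      *                                               tlb->end, stride);
      *        }
      */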
 465 
 466 /*
 467  * For tlb vma handling, we can optimise these away when we're doing a
 468  * full MM flush.  When we're doing a munmap,
 469  * the vmas are adjusted to only cover the region to be torn down.
 470  */
 471 #ifndef tlb_start_vma
 472 static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 473 {
 474         if (tlb->fullmm)
 475                 return;
 476 
 477         tlb_update_vma_flags(tlb, vma);
 478         flush_cache_range(vma, vma->vm_start, vma->vm_end);
 479 }
 480 #endif
 481 
 482 #ifndef tlb_end_vma
 483 static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 484 {
 485         if (tlb->fullmm)
 486                 return;
 487 
 488         /*
 489          * Do a TLB flush and reset the range at VMA boundaries; this avoids
 490          * the ranges growing with the unused space between consecutive VMAs,
 491          * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
 492          * this.
 493          */
 494         tlb_flush_mmu_tlbonly(tlb);
 495 }
 496 #endif
 497 
 498 #ifndef __tlb_remove_tlb_entry
 499 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 500 #endif
 501 
 502 /**
 503  * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 504  *
 505  * Record the fact that PTEs were really unmapped by updating the range,
 506  * so we can later optimise away the tlb invalidate.   This helps when
 507  * userspace is unmapping already-unmapped pages, which happens quite a lot.
 508  */
 509 #define tlb_remove_tlb_entry(tlb, ptep, address)                \
 510         do {                                                    \
 511                 __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
 512                 tlb->cleared_ptes = 1;                          \
 513                 __tlb_remove_tlb_entry(tlb, ptep, address);     \
 514         } while (0)
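     /*
      * A heavily simplified sketch of a PTE zap loop using the above, loosely
      * modelled on zap_pte_range(); it elides swap entries, rmap, dirty and
      * accessed-bit accounting, and the flush-and-retry handling needed when
      * the batch fills up:
      *
      *        pte_t *start_pte, *pte;
      *        spinlock_t *ptl;
      *
      *        start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
      *        do {
      *                pte_t ptent = *pte;
      *                struct page *page;
      *
      *                if (!pte_present(ptent))
      *                        continue;
      *                page = vm_normal_page(vma, addr, ptent);
      *                ptent = ptep_get_and_clear_full(mm, addr, pte,
      *                                                tlb->fullmm);
      *                tlb_remove_tlb_entry(tlb, pte, addr);
      *                if (page && __tlb_remove_page(tlb, page))
      *                        break;
      *        } while (pte++, addr += PAGE_SIZE, addr != end);
      *        pte_unmap_unlock(start_pte, ptl);
      */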
 515 
 516 #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)        \
 517         do {                                                    \
 518                 unsigned long _sz = huge_page_size(h);          \
 519                 __tlb_adjust_range(tlb, address, _sz);          \
 520                 if (_sz == PMD_SIZE)                            \
 521                         tlb->cleared_pmds = 1;                  \
 522                 else if (_sz == PUD_SIZE)                       \
 523                         tlb->cleared_puds = 1;                  \
 524                 __tlb_remove_tlb_entry(tlb, ptep, address);     \
 525         } while (0)
 526 
 527 /**
 528  * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 529  * This is a nop so far, because only x86 needs it.
 530  */
 531 #ifndef __tlb_remove_pmd_tlb_entry
 532 #define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
 533 #endif
 534 
 535 #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)                    \
 536         do {                                                            \
 537                 __tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);       \
 538                 tlb->cleared_pmds = 1;                                  \
 539                 __tlb_remove_pmd_tlb_entry(tlb, pmdp, address);         \
 540         } while (0)
 541 
 542 /**
 543  * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 544  * invalidation. This is a nop so far, because only x86 needs it.
 545  */
 546 #ifndef __tlb_remove_pud_tlb_entry
 547 #define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
 548 #endif
 549 
 550 #define tlb_remove_pud_tlb_entry(tlb, pudp, address)                    \
 551         do {                                                            \
 552                 __tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE);       \
 553                 tlb->cleared_puds = 1;                                  \
 554                 __tlb_remove_pud_tlb_entry(tlb, pudp, address);         \
 555         } while (0)
 556 
 557 /*
 558  * For things like page table caches (i.e. caching addresses "inside" the
 559  * page tables, like x86 does), for legacy reasons, flushing an
 560  * individual page had better flush the page table caches behind it. This
 561  * is definitely how x86 works, for example. And if you have an
 562  * architected non-legacy page table cache (which I'm not aware of
 563  * anybody actually doing), you're going to have some architecturally
 564  * explicit flushing for that, likely *separate* from a regular TLB entry
 565  * flush, and thus you'd need more than just some range expansion..
 566  *
 567  * So if we ever find an architecture that would want something that odd, I
 568  * think it is up to that architecture to do its own odd thing, not cause
 569  * pain for others:
 570  * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 571  *
 572  * For now, w.r.t. the page table cache, mark the range_size as PAGE_SIZE.
 573  */
 574 
 575 #ifndef pte_free_tlb
 576 #define pte_free_tlb(tlb, ptep, address)                        \
 577         do {                                                    \
 578                 __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
 579                 tlb->freed_tables = 1;                          \
 580                 tlb->cleared_pmds = 1;                          \
 581                 __pte_free_tlb(tlb, ptep, address);             \
 582         } while (0)
 583 #endif
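     /*
      * A sketch of the intended use, loosely modelled on free_pte_range() in
      * mm/memory.c: detach the PTE page from the PMD first, then hand it to
      * pte_free_tlb() so it is only freed once the TLB (and, with
      * HAVE_RCU_TABLE_FREE, any concurrent lockless walkers) are done with it:
      *
      *        pgtable_t token = pmd_pgtable(*pmd);
      *
      *        pmd_clear(pmd);
      *        pte_free_tlb(tlb, token, addr);
      *        mm_dec_nr_ptes(tlb->mm);
      */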
 584 
 585 #ifndef pmd_free_tlb
 586 #define pmd_free_tlb(tlb, pmdp, address)                        \
 587         do {                                                    \
 588                 __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
 589                 tlb->freed_tables = 1;                          \
 590                 tlb->cleared_puds = 1;                          \
 591                 __pmd_free_tlb(tlb, pmdp, address);             \
 592         } while (0)
 593 #endif
 594 
 595 #ifndef __ARCH_HAS_4LEVEL_HACK
 596 #ifndef pud_free_tlb
 597 #define pud_free_tlb(tlb, pudp, address)                        \
 598         do {                                                    \
 599                 __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
 600                 tlb->freed_tables = 1;                          \
 601                 tlb->cleared_p4ds = 1;                          \
 602                 __pud_free_tlb(tlb, pudp, address);             \
 603         } while (0)
 604 #endif
 605 #endif
 606 
 607 #ifndef __ARCH_HAS_5LEVEL_HACK
 608 #ifndef p4d_free_tlb
 609 #define p4d_free_tlb(tlb, pudp, address)                        \
 610         do {                                                    \
 611                 __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
 612                 tlb->freed_tables = 1;                          \
 613                 __p4d_free_tlb(tlb, pudp, address);             \
 614         } while (0)
 615 #endif
 616 #endif
 617 
 618 #endif /* CONFIG_MMU */
 619 
 620 #endif /* _ASM_GENERIC__TLB_H */
