root/arch/parisc/include/asm/pgtable.h

DEFINITIONS

This source file includes the following definitions:
  1. purge_tlb_entries
  2. pmd_clear
  3. pgd_clear
  4. pgd_none
  5. pgd_bad
  6. pgd_present
  7. pgd_clear
  8. pte_dirty
  9. pte_young
  10. pte_write
  11. pte_special
  12. pte_mkclean
  13. pte_mkold
  14. pte_wrprotect
  15. pte_mkdirty
  16. pte_mkyoung
  17. pte_mkwrite
  18. pte_mkspecial
  19. pfn_pte
  20. pte_modify
  21. pgd_spinlock
  22. ptep_test_and_clear_young
  23. ptep_get_and_clear
  24. ptep_set_wrprotect

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_PGTABLE_H
#define _PARISC_PGTABLE_H

#include <asm/page.h>
#include <asm-generic/4level-fixup.h>

#include <asm/fixmap.h>

#ifndef __ASSEMBLY__
/*
 * we simulate an x86-style page table for the linux mm code
 */

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/processor.h>
#include <asm/cache.h>

static inline spinlock_t *pgd_spinlock(pgd_t *);

/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory.  For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
 * require a hash-, or multi-level tree-lookup or something of that
 * sort) but it guarantees to return TRUE only if accessing the page
 * at that address does not cause an error.  Note that there may be
 * addresses for which kern_addr_valid() returns FALSE even though an
 * access would not cause an error (e.g., this is typically true for
 * memory mapped I/O regions).
 *
 * XXX Need to implement this for parisc.
 */
#define kern_addr_valid(addr)   (1)

/* This is for the serialization of PxTLB broadcasts. At least on the N class
 * systems, only one PxTLB inter processor broadcast can be active at any one
 * time on the Merced bus.
 *
 * PTE updates are protected by locks in the PMD.
 */
extern spinlock_t pa_tlb_flush_lock;
extern spinlock_t pa_swapper_pg_lock;
#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
extern int pa_serialize_tlb_flushes;
#else
#define pa_serialize_tlb_flushes        (0)
#endif

#define purge_tlb_start(flags)  do { \
        if (pa_serialize_tlb_flushes)   \
                spin_lock_irqsave(&pa_tlb_flush_lock, flags); \
        else \
                local_irq_save(flags);  \
        } while (0)
#define purge_tlb_end(flags)    do { \
        if (pa_serialize_tlb_flushes)   \
                spin_unlock_irqrestore(&pa_tlb_flush_lock, flags); \
        else \
                local_irq_restore(flags); \
        } while (0)

/* Purge data and instruction TLB entries. The TLB purge instructions
 * are slow on SMP machines since the purge must be broadcast to all CPUs.
 */

static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
        unsigned long flags;

        purge_tlb_start(flags);
        mtsp(mm->context, 1);
        pdtlb(addr);
        pitlb(addr);
        purge_tlb_end(flags);
}

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval)                                 \
        do {                                                    \
                *(pteptr) = (pteval);                           \
        } while (0)

#define set_pte_at(mm, addr, ptep, pteval)                      \
        do {                                                    \
                pte_t old_pte;                                  \
                unsigned long flags;                            \
                spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);\
                old_pte = *ptep;                                \
                set_pte(ptep, pteval);                          \
                purge_tlb_entries(mm, addr);                    \
                spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);\
        } while (0)
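
/* The lock/update/purge pattern above keeps the PTE store and the purge
 * of any stale TLB entry atomic with respect to other writers of the
 * same page table.  A minimal usage sketch (hypothetical caller, for
 * illustration only):
 *
 *      pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *      set_pte_at(mm, vaddr, ptep, pte);
 *
 * Both the store and the TLB purge happen under pgd_spinlock((mm)->pgd).
 */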

#endif /* !__ASSEMBLY__ */

#define pte_ERROR(e) \
        printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))

/* This is the size of the initially mapped kernel memory */
#if defined(CONFIG_64BIT)
#define KERNEL_INITIAL_ORDER    26      /* 1<<26 = 64MB */
#else
#define KERNEL_INITIAL_ORDER    25      /* 1<<25 = 32MB */
#endif
#define KERNEL_INITIAL_SIZE     (1 << KERNEL_INITIAL_ORDER)

#if CONFIG_PGTABLE_LEVELS == 3
#define PGD_ORDER       1 /* Number of pages per pgd */
#define PMD_ORDER       1 /* Number of pages per pmd */
#define PGD_ALLOC_ORDER (2 + 1) /* first pgd contains pmd */
#else
#define PGD_ORDER       1 /* Number of pages per pgd */
#define PGD_ALLOC_ORDER (PGD_ORDER + 1)
#endif

/* Definitions for 3rd level (we use PLD here for Page Lower directory
 * because PTE_SHIFT is used lower down to mean shift that has to be
 * done to get usable bits out of the PTE) */
#define PLD_SHIFT       PAGE_SHIFT
#define PLD_SIZE        PAGE_SIZE
#define BITS_PER_PTE    (PAGE_SHIFT - BITS_PER_PTE_ENTRY)
#define PTRS_PER_PTE    (1UL << BITS_PER_PTE)
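
/* Worked example (illustrative; BITS_PER_PTE_ENTRY comes from
 * asm/page.h): with 4kB pages on 64-bit, PAGE_SHIFT is 12 and a pte_t
 * is 8 bytes (BITS_PER_PTE_ENTRY == 3), so BITS_PER_PTE = 12 - 3 = 9
 * and PTRS_PER_PTE = 512; one page of PTEs then maps 512 * 4kB = 2MB,
 * which is PMD_SIZE below.
 */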

/* Definitions for 2nd level */
#define PMD_SHIFT       (PLD_SHIFT + BITS_PER_PTE)
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))
#if CONFIG_PGTABLE_LEVELS == 3
#define BITS_PER_PMD    (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
#else
#define __PAGETABLE_PMD_FOLDED 1
#define BITS_PER_PMD    0
#endif
#define PTRS_PER_PMD    (1UL << BITS_PER_PMD)

/* Definitions for 1st level */
#define PGDIR_SHIFT     (PMD_SHIFT + BITS_PER_PMD)
#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG
#define BITS_PER_PGD    (BITS_PER_LONG - PGDIR_SHIFT)
#else
#define BITS_PER_PGD    (PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY)
#endif
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD    (1UL << BITS_PER_PGD)
#define USER_PTRS_PER_PGD       PTRS_PER_PGD

#ifdef CONFIG_64BIT
#define MAX_ADDRBITS    (PGDIR_SHIFT + BITS_PER_PGD)
#define MAX_ADDRESS     (1UL << MAX_ADDRBITS)
#define SPACEID_SHIFT   (MAX_ADDRBITS - 32)
#else
#define MAX_ADDRBITS    (BITS_PER_LONG)
#define MAX_ADDRESS     (1UL << MAX_ADDRBITS)
#define SPACEID_SHIFT   0
#endif

/* This calculates the number of initial pages we need for the initial
 * page tables */
#if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT)
# define PT_INITIAL     (1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))
#else
# define PT_INITIAL     (1)  /* all initial PTEs fit into one page */
#endif
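
/* Worked example (same illustrative 64-bit/4kB assumptions as above):
 * KERNEL_INITIAL_ORDER = 26 and PMD_SHIFT = 12 + 9 = 21, so PT_INITIAL =
 * 1 << (26 - 21) = 32 pages of PTEs, each mapping 2MB, which covers the
 * 64MB initial kernel mapping.
 */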

/*
 * pgd entries used up by user/kernel:
 */

#define FIRST_USER_ADDRESS      0UL

/* NB: The tlb miss handlers make certain assumptions about the order */
/*     of the following bits, so be careful (One example, bits 25-31  */
/*     are moved together in one instruction).                        */

#define _PAGE_READ_BIT     31   /* (0x001) read access allowed */
#define _PAGE_WRITE_BIT    30   /* (0x002) write access allowed */
#define _PAGE_EXEC_BIT     29   /* (0x004) execute access allowed */
#define _PAGE_GATEWAY_BIT  28   /* (0x008) privilege promotion allowed */
#define _PAGE_DMB_BIT      27   /* (0x010) Data Memory Break enable (B bit) */
#define _PAGE_DIRTY_BIT    26   /* (0x020) Page Dirty (D bit) */
#define _PAGE_REFTRAP_BIT  25   /* (0x040) Page Ref. Trap enable (T bit) */
#define _PAGE_NO_CACHE_BIT 24   /* (0x080) Uncached Page (U bit) */
#define _PAGE_ACCESSED_BIT 23   /* (0x100) Software: Page Accessed */
#define _PAGE_PRESENT_BIT  22   /* (0x200) Software: translation valid */
#define _PAGE_HPAGE_BIT    21   /* (0x400) Software: Huge Page */
#define _PAGE_USER_BIT     20   /* (0x800) Software: User accessible page */

/* N.B. The bits are defined in terms of a 32 bit word above, so the */
/*      following macro is ok for both 32 and 64 bit.                */

#define xlate_pabit(x) (31 - x)
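
/* Example: PA-RISC numbers bits big-endian style (bit 0 is the MSB of a
 * 32-bit word), so xlate_pabit() turns a PA bit number into an ordinary
 * shift count.  _PAGE_READ_BIT = 31 gives xlate_pabit(31) = 0, hence
 * _PAGE_READ = 1 << 0 = 0x001; _PAGE_USER_BIT = 20 gives 1 << 11 =
 * 0x800, matching the hex values noted in the table above.
 */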

/* This defines the shift to the usable bits in the PTE; it is set so
 * that the valid bits _PAGE_PRESENT_BIT and _PAGE_USER_BIT are set
 * to zero */
#define PTE_SHIFT               xlate_pabit(_PAGE_USER_BIT)

/* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
#define PFN_PTE_SHIFT           12

#define _PAGE_READ     (1 << xlate_pabit(_PAGE_READ_BIT))
#define _PAGE_WRITE    (1 << xlate_pabit(_PAGE_WRITE_BIT))
#define _PAGE_RW       (_PAGE_READ | _PAGE_WRITE)
#define _PAGE_EXEC     (1 << xlate_pabit(_PAGE_EXEC_BIT))
#define _PAGE_GATEWAY  (1 << xlate_pabit(_PAGE_GATEWAY_BIT))
#define _PAGE_DMB      (1 << xlate_pabit(_PAGE_DMB_BIT))
#define _PAGE_DIRTY    (1 << xlate_pabit(_PAGE_DIRTY_BIT))
#define _PAGE_REFTRAP  (1 << xlate_pabit(_PAGE_REFTRAP_BIT))
#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
#define _PAGE_PRESENT  (1 << xlate_pabit(_PAGE_PRESENT_BIT))
#define _PAGE_HUGE     (1 << xlate_pabit(_PAGE_HPAGE_BIT))
#define _PAGE_USER     (1 << xlate_pabit(_PAGE_USER_BIT))

#define _PAGE_TABLE     (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO (_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_KERNEL_EXEC       (_PAGE_KERNEL_RO | _PAGE_EXEC)
#define _PAGE_KERNEL_RWX        (_PAGE_KERNEL_EXEC | _PAGE_WRITE)
#define _PAGE_KERNEL            (_PAGE_KERNEL_RO | _PAGE_WRITE)

/* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
 * are page-aligned, we don't care about the PAGE_OFFSET bits, except
 * for a few meta-information bits, so we shift the address to be
 * able to effectively address 40/42/44-bits of physical address space
 * depending on 4k/16k/64k PAGE_SIZE */
#define _PxD_PRESENT_BIT   31
#define _PxD_ATTACHED_BIT  30
#define _PxD_VALID_BIT     29

#define PxD_FLAG_PRESENT  (1 << xlate_pabit(_PxD_PRESENT_BIT))
#define PxD_FLAG_ATTACHED (1 << xlate_pabit(_PxD_ATTACHED_BIT))
#define PxD_FLAG_VALID    (1 << xlate_pabit(_PxD_VALID_BIT))
#define PxD_FLAG_MASK     (0xf)
#define PxD_FLAG_SHIFT    (4)
#define PxD_VALUE_SHIFT   (PFN_PTE_SHIFT-PxD_FLAG_SHIFT)
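
/* Worked example (4kB page case): PxD_VALUE_SHIFT = 12 - 4 = 8, so a
 * pgd/pmd entry stores (physical address >> 8) with the PxD flags in
 * the low four bits.  pmd_address() below recovers the pointer as
 * (entry & ~PxD_FLAG_MASK) << 8, so a 32-bit entry can reach
 * 2^(32 + 8) = 2^40 bytes of physical address space - the "40 bits"
 * case from the comment above.
 */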

#ifndef __ASSEMBLY__

#define PAGE_NONE       __pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE)
/* Others seem to make this executable, I don't know if that's correct
   or not.  The stack is mapped this way though so this is necessary
   in the short term - dhd@linuxcare.com, 2000-08-08 */
#define PAGE_READONLY   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ)
#define PAGE_WRITEONLY  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE)
#define PAGE_EXECREAD   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC)
#define PAGE_COPY       PAGE_EXECREAD
#define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define PAGE_KERNEL     __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC        __pgprot(_PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO  __pgprot(_PAGE_KERNEL_RO)
#define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
#define PAGE_GATEWAY    __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_GATEWAY | _PAGE_READ)
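
/* PAGE_GATEWAY uses the PA-RISC gateway mechanism: a branch into a page
 * marked with _PAGE_GATEWAY may promote the privilege level, which is
 * how user space enters the kernel's syscall gateway page without
 * taking a trap.
 */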

/*
 * We could have an execute only page using "gateway - promote to priv
 * level 3", but that is kind of silly. So, the way things are defined
 * now, we must always have read permission for pages with execute
 * permission. For the fun of it we'll go ahead and support write only
 * pages.
 */

         /*xwr*/
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  __P000 /* copy on write */
#define __P011  __P001 /* copy on write */
#define __P100  PAGE_EXECREAD
#define __P101  PAGE_EXECREAD
#define __P110  __P100 /* copy on write */
#define __P111  __P101 /* copy on write */

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_WRITEONLY
#define __S011  PAGE_SHARED
#define __S100  PAGE_EXECREAD
#define __S101  PAGE_EXECREAD
#define __S110  PAGE_RWX
#define __S111  PAGE_RWX
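
/* Note on the "copy on write" entries above: a private mapping that
 * asks for PROT_WRITE is initially installed without write permission
 * (e.g. __P011 maps to PAGE_READONLY).  The first store then faults,
 * and the generic mm code copies the page and remaps it writable -
 * that fault-and-copy is what implements copy-on-write for MAP_PRIVATE
 * mappings.
 */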

extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */

/* initial page tables for 0-8MB for kernel */

extern pte_t pg0[];

/* zero page used for uninitialized stuff */

extern unsigned long *empty_zero_page;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#define pte_none(x)     (pte_val(x) == 0)
#define pte_present(x)  (pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, xp)  set_pte_at(mm, addr, xp, __pte(0))

#define pmd_flag(x)     (pmd_val(x) & PxD_FLAG_MASK)
#define pmd_address(x)  ((unsigned long)(pmd_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pgd_flag(x)     (pgd_val(x) & PxD_FLAG_MASK)
#define pgd_address(x)  ((unsigned long)(pgd_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)

#if CONFIG_PGTABLE_LEVELS == 3
/* The first entry of the permanent pmd is not there if it contains
 * the gateway marker */
#define pmd_none(x)     (!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)
#else
#define pmd_none(x)     (!pmd_val(x))
#endif
#define pmd_bad(x)      (!(pmd_flag(x) & PxD_FLAG_VALID))
#define pmd_present(x)  (pmd_flag(x) & PxD_FLAG_PRESENT)
static inline void pmd_clear(pmd_t *pmd)
{
#if CONFIG_PGTABLE_LEVELS == 3
        if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
                /* This is the entry pointing to the permanent pmd
                 * attached to the pgd; cannot clear it */
                __pmd_val_set(*pmd, PxD_FLAG_ATTACHED);
        else
#endif
                __pmd_val_set(*pmd, 0);
}

#if CONFIG_PGTABLE_LEVELS == 3
#define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_address(pgd)))
#define pgd_page(pgd)   virt_to_page((void *)pgd_page_vaddr(pgd))

/* For 64 bit we have three level tables */

#define pgd_none(x)     (!pgd_val(x))
#define pgd_bad(x)      (!(pgd_flag(x) & PxD_FLAG_VALID))
#define pgd_present(x)  (pgd_flag(x) & PxD_FLAG_PRESENT)
static inline void pgd_clear(pgd_t *pgd)
{
#if CONFIG_PGTABLE_LEVELS == 3
        if (pgd_flag(*pgd) & PxD_FLAG_ATTACHED)
                /* This is the permanent pmd attached to the pgd; cannot
                 * free it */
                return;
#endif
        __pgd_val_set(*pgd, 0);
}
#else
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd)           { return 0; }
static inline int pgd_bad(pgd_t pgd)            { return 0; }
static inline int pgd_present(pgd_t pgd)        { return 1; }
static inline void pgd_clear(pgd_t *pgdp)       { }
#endif

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)          { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)          { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)          { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_special(pte_t pte)        { return 0; }

static inline pte_t pte_mkclean(pte_t pte)      { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)        { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)    { pte_val(pte) &= ~_PAGE_WRITE; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)      { pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)      { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)      { pte_val(pte) |= _PAGE_WRITE; return pte; }
static inline pte_t pte_mkspecial(pte_t pte)    { return pte; }

/*
 * Huge pte definitions.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define pte_huge(pte)           (pte_val(pte) & _PAGE_HUGE)
#define pte_mkhuge(pte)         (__pte(pte_val(pte) | \
                                 (parisc_requires_coherency() ? 0 : _PAGE_HUGE)))
#else
#define pte_huge(pte)           (0)
#define pte_mkhuge(pte)         (pte)
#endif

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define __mk_pte(addr,pgprot) \
({                                                                      \
        pte_t __pte;                                                    \
                                                                        \
        pte_val(__pte) = ((((addr)>>PAGE_SHIFT)<<PFN_PTE_SHIFT) + pgprot_val(pgprot));  \
                                                                        \
        __pte;                                                          \
})

#define mk_pte(page, pgprot)    pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
        pte_t pte;
        pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot);
        return pte;
}
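
/* Illustrative round trip: pfn_pte(0x1234, PAGE_KERNEL) places the page
 * frame number in the high bits (0x1234 << PFN_PTE_SHIFT) and the
 * protection bits below it; pte_pfn() further down recovers 0x1234 by
 * shifting the value back.
 */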

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
        return pte;
}

/* Permanent address of a page.  On parisc we don't have highmem. */

#define pte_pfn(x)              (pte_val(x) >> PFN_PTE_SHIFT)

#define pte_page(pte)           (pfn_to_page(pte_pfn(pte)))

#define pmd_page_vaddr(pmd)     ((unsigned long) __va(pmd_address(pmd)))

#define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))
#define pmd_page(pmd)   virt_to_page((void *)__pmd_page(pmd))

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) \
        ((mm)->pgd + ((address) >> PGDIR_SHIFT))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */

#if CONFIG_PGTABLE_LEVELS == 3
#define pmd_index(addr)         (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
#define pmd_offset(dir,address) \
        ((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(address))
#else
#define pmd_offset(dir,addr) ((pmd_t *) dir)
#endif

/* Find an entry in the third-level page table.. */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
#define pte_offset_kernel(pmd, address) \
        ((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
#define pte_offset_map(pmd, address)    pte_offset_kernel(pmd, address)
#define pte_unmap(pte)                  do { } while (0)
#define pte_unmap_nested(pte)           do { } while (0)

extern void paging_init (void);

/* Used for deferring calls to flush_dcache_page() */

#define PG_dcache_dirty         PG_arch_1

extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);

/* Encode and de-code a swap entry */

#define __swp_type(x)                   ((x).val & 0x1f)
#define __swp_offset(x)                 ((((x).val >> 6) & 0x7) | \
                                         (((x).val >> 8) & ~0x7))
#define __swp_entry(type, offset)       ((swp_entry_t) { (type) | \
                                          ((offset & 0x7) << 6) | \
                                          ((offset & ~0x7) << 8) })
#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val })
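
/* Worked example (illustrative): the swap type occupies bits 0-4 and
 * the offset is split around the software flag bits so that
 * _PAGE_PRESENT stays clear in a swapped-out PTE.  For type 5,
 * offset 10: val = 5 | ((10 & 0x7) << 6) | ((10 & ~0x7) << 8) = 0x885;
 * decoding gives __swp_type = 0x885 & 0x1f = 5 and __swp_offset =
 * ((0x885 >> 6) & 0x7) | ((0x885 >> 8) & ~0x7) = 2 | 8 = 10.
 */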

static inline spinlock_t *pgd_spinlock(pgd_t *pgd)
{
        if (unlikely(pgd == swapper_pg_dir))
                return &pa_swapper_pg_lock;
        return (spinlock_t *)((char *)pgd + (PAGE_SIZE << (PGD_ALLOC_ORDER - 1)));
}
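
/* The per-process page table lock lives in the same allocation as the
 * pgd itself: pgd_alloc() (see asm/pgalloc.h) allocates
 * 1 << PGD_ALLOC_ORDER pages, and the spinlock sits
 * PAGE_SIZE << (PGD_ALLOC_ORDER - 1) bytes in, past the page tables
 * proper.  swapper_pg_dir is allocated statically without that extra
 * space, so it uses the global pa_swapper_pg_lock instead.
 */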

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
        pte_t pte;
        unsigned long flags;

        if (!pte_young(*ptep))
                return 0;

        spin_lock_irqsave(pgd_spinlock(vma->vm_mm->pgd), flags);
        pte = *ptep;
        if (!pte_young(pte)) {
                spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
                return 0;
        }
        set_pte(ptep, pte_mkold(pte));
        purge_tlb_entries(vma->vm_mm, addr);
        spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
        return 1;
}
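
/* Note the double-checked pattern above: the unlocked pte_young() test
 * avoids taking the lock and issuing the (potentially broadcast) TLB
 * purge when the accessed bit is already clear; the second test under
 * the lock catches a clear that raced in between.
 */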

struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_t old_pte;
        unsigned long flags;

        spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
        old_pte = *ptep;
        set_pte(ptep, __pte(0));
        purge_tlb_entries(mm, addr);
        spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);

        return old_pte;
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        unsigned long flags;

        spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
        set_pte(ptep, pte_wrprotect(*ptep));
        purge_tlb_entries(mm, addr);
        spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
}

#define pte_same(A,B)   (pte_val(A) == pte_val(B))

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

#endif /* !__ASSEMBLY__ */


/* TLB page size encoding - see table 3-1 in parisc20.pdf */
#define _PAGE_SIZE_ENCODING_4K          0
#define _PAGE_SIZE_ENCODING_16K         1
#define _PAGE_SIZE_ENCODING_64K         2
#define _PAGE_SIZE_ENCODING_256K        3
#define _PAGE_SIZE_ENCODING_1M          4
#define _PAGE_SIZE_ENCODING_4M          5
#define _PAGE_SIZE_ENCODING_16M         6
#define _PAGE_SIZE_ENCODING_64M         7

#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4K
#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16K
#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_64K
#endif


#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)

/* We provide our own get_unmapped_area to provide cache coherency */

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

#endif /* _PARISC_PGTABLE_H */
