root/arch/sparc/include/asm/pgtable_64.h

DEFINITIONS

This source file includes the following definitions:
  1. pfn_pte
  2. pfn_pmd
  3. pte_pfn
  4. pte_modify
  5. pmd_modify
  6. pgprot_noncached
  7. __pte_default_huge_mask
  8. pte_mkhuge
  9. is_default_hugetlb_pte
  10. is_hugetlb_pmd
  11. is_hugetlb_pud
  12. pmd_mkhuge
  13. is_hugetlb_pte
  14. pte_mkdirty
  15. pte_mkclean
  16. pte_mkwrite
  17. pte_wrprotect
  18. pte_mkold
  19. pte_mkyoung
  20. pte_mkspecial
  21. pte_mkmcd
  22. pte_mknotmcd
  23. pte_young
  24. pte_dirty
  25. pte_write
  26. pte_exec
  27. pte_present
  28. pte_accessible
  29. pte_special
  30. pmd_large
  31. pmd_pfn
  32. pmd_write
  33. pmd_dirty
  34. pmd_young
  35. pmd_trans_huge
  36. pmd_mkold
  37. pmd_wrprotect
  38. pmd_mkdirty
  39. pmd_mkclean
  40. pmd_mkyoung
  41. pmd_mkwrite
  42. pmd_pgprot
  43. pmd_present
  44. set_pmd_at
  45. pmd_set
  46. __pmd_page
  47. pud_page_vaddr
  48. pud_large
  49. pud_pfn
  50. maybe_tlb_batch_add
  51. pmdp_huge_get_and_clear
  52. __set_pte_at
  53. arch_do_swap_page
  54. arch_unmap_one
  55. io_remap_pfn_range
  56. __untagged_addr
  57. pte_access_permitted

   1 /* SPDX-License-Identifier: GPL-2.0 */
   2 /*
   3  * pgtable.h: SpitFire page table operations.
   4  *
   5  * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
   6  * Copyright 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
   7  */
   8 
   9 #ifndef _SPARC64_PGTABLE_H
  10 #define _SPARC64_PGTABLE_H
  11 
  12 /* This file contains the functions and defines necessary to modify and use
  13  * the SpitFire page tables.
  14  */
  15 
  16 #include <asm-generic/5level-fixup.h>
  17 #include <linux/compiler.h>
  18 #include <linux/const.h>
  19 #include <asm/types.h>
  20 #include <asm/spitfire.h>
  21 #include <asm/asi.h>
  22 #include <asm/adi.h>
  23 #include <asm/page.h>
  24 #include <asm/processor.h>
  25 
   26 /* The kernel image occupies 0x400000 to 0x6000000 (4MB --> 96MB).
  27  * The page copy blockops can use 0x6000000 to 0x8000000.
  28  * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
  29  * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
  30  * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
  31  * The vmalloc area spans 0x100000000 to 0x200000000.
   32  * Since modules need to be in the lowest 32 bits of the address space,
  33  * we place them right before the OBP area from 0x10000000 to 0xf0000000.
  34  * There is a single static kernel PMD which maps from 0x0 to address
  35  * 0x400000000.
  36  */
  37 #define TLBTEMP_BASE            _AC(0x0000000006000000,UL)
  38 #define TSBMAP_8K_BASE          _AC(0x0000000008000000,UL)
  39 #define TSBMAP_4M_BASE          _AC(0x0000000008400000,UL)
  40 #define MODULES_VADDR           _AC(0x0000000010000000,UL)
  41 #define MODULES_LEN             _AC(0x00000000e0000000,UL)
  42 #define MODULES_END             _AC(0x00000000f0000000,UL)
  43 #define LOW_OBP_ADDRESS         _AC(0x00000000f0000000,UL)
  44 #define HI_OBP_ADDRESS          _AC(0x0000000100000000,UL)
  45 #define VMALLOC_START           _AC(0x0000000100000000,UL)
  46 #define VMEMMAP_BASE            VMALLOC_END
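
/* A minimal compile-time sanity check (illustrative, not in the original
 * header): the module region constants above must tile exactly against
 * the OBP window.
 */
#if (MODULES_VADDR + MODULES_LEN) != MODULES_END
#error MODULES_VADDR + MODULES_LEN must equal MODULES_END
#endif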
  47 
  48 /* PMD_SHIFT determines the size of the area a second-level page
  49  * table can map
  50  */
  51 #define PMD_SHIFT       (PAGE_SHIFT + (PAGE_SHIFT-3))
  52 #define PMD_SIZE        (_AC(1,UL) << PMD_SHIFT)
  53 #define PMD_MASK        (~(PMD_SIZE-1))
  54 #define PMD_BITS        (PAGE_SHIFT - 3)
  55 
  56 /* PUD_SHIFT determines the size of the area a third-level page
  57  * table can map
  58  */
  59 #define PUD_SHIFT       (PMD_SHIFT + PMD_BITS)
  60 #define PUD_SIZE        (_AC(1,UL) << PUD_SHIFT)
  61 #define PUD_MASK        (~(PUD_SIZE-1))
  62 #define PUD_BITS        (PAGE_SHIFT - 3)
  63 
  64 /* PGDIR_SHIFT determines what a fourth-level page table entry can map */
  65 #define PGDIR_SHIFT     (PUD_SHIFT + PUD_BITS)
  66 #define PGDIR_SIZE      (_AC(1,UL) << PGDIR_SHIFT)
  67 #define PGDIR_MASK      (~(PGDIR_SIZE-1))
  68 #define PGDIR_BITS      (PAGE_SHIFT - 3)
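
/* Worked example with 8K pages (PAGE_SHIFT == 13): each level contributes
 * PAGE_SHIFT - 3 == 10 bits, so PMD_SHIFT == 23 (a PMD entry maps 8MB),
 * PUD_SHIFT == 33 (a PUD entry maps 8GB) and PGDIR_SHIFT == 43 (a PGD
 * entry maps 8TB), giving PGDIR_SHIFT + PGDIR_BITS == 53 as checked below.
 */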
  69 
  70 #if (MAX_PHYS_ADDRESS_BITS > PGDIR_SHIFT + PGDIR_BITS)
  71 #error MAX_PHYS_ADDRESS_BITS exceeds what kernel page tables can support
  72 #endif
  73 
  74 #if (PGDIR_SHIFT + PGDIR_BITS) != 53
  75 #error Page table parameters do not cover virtual address space properly.
  76 #endif
  77 
  78 #if (PMD_SHIFT != HPAGE_SHIFT)
  79 #error PMD_SHIFT must equal HPAGE_SHIFT for transparent huge pages.
  80 #endif
  81 
  82 #ifndef __ASSEMBLY__
  83 
  84 extern unsigned long VMALLOC_END;
  85 
  86 #define vmemmap                 ((struct page *)VMEMMAP_BASE)
  87 
  88 #include <linux/sched.h>
  89 
  90 bool kern_addr_valid(unsigned long addr);
  91 
  92 /* Entries per page directory level. */
  93 #define PTRS_PER_PTE    (1UL << (PAGE_SHIFT-3))
  94 #define PTRS_PER_PMD    (1UL << PMD_BITS)
  95 #define PTRS_PER_PUD    (1UL << PUD_BITS)
  96 #define PTRS_PER_PGD    (1UL << PGDIR_BITS)
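
/* With 8K pages each of the counts above is 1UL << 10 == 1024; a table of
 * 1024 eight-byte entries is exactly one 8K page, which is what lets each
 * level fit in a single page.
 */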
  97 
   98 /* The kernel has a separate 44-bit address space. */
  99 #define FIRST_USER_ADDRESS      0UL
 100 
 101 #define pmd_ERROR(e)                                                    \
 102         pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n",             \
 103                __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
 104 #define pud_ERROR(e)                                                    \
 105         pr_err("%s:%d: bad pud %p(%016lx) seen at (%pS)\n",             \
 106                __FILE__, __LINE__, &(e), pud_val(e), __builtin_return_address(0))
 107 #define pgd_ERROR(e)                                                    \
 108         pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n",             \
 109                __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))
 110 
 111 #endif /* !(__ASSEMBLY__) */
 112 
 113 /* PTE bits which are the same in SUN4U and SUN4V format.  */
 114 #define _PAGE_VALID       _AC(0x8000000000000000,UL) /* Valid TTE            */
 115 #define _PAGE_R           _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/
 116 #define _PAGE_SPECIAL     _AC(0x0200000000000000,UL) /* Special page         */
 117 #define _PAGE_PMD_HUGE    _AC(0x0100000000000000,UL) /* Huge page            */
 118 #define _PAGE_PUD_HUGE    _PAGE_PMD_HUGE
 119 
 120 /* SUN4U pte bits... */
 121 #define _PAGE_SZ4MB_4U    _AC(0x6000000000000000,UL) /* 4MB Page             */
 122 #define _PAGE_SZ512K_4U   _AC(0x4000000000000000,UL) /* 512K Page            */
 123 #define _PAGE_SZ64K_4U    _AC(0x2000000000000000,UL) /* 64K Page             */
 124 #define _PAGE_SZ8K_4U     _AC(0x0000000000000000,UL) /* 8K Page              */
 125 #define _PAGE_NFO_4U      _AC(0x1000000000000000,UL) /* No Fault Only        */
 126 #define _PAGE_IE_4U       _AC(0x0800000000000000,UL) /* Invert Endianness    */
 127 #define _PAGE_SOFT2_4U    _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
 128 #define _PAGE_SPECIAL_4U  _AC(0x0200000000000000,UL) /* Special page         */
 129 #define _PAGE_PMD_HUGE_4U _AC(0x0100000000000000,UL) /* Huge page            */
 130 #define _PAGE_RES1_4U     _AC(0x0002000000000000,UL) /* Reserved             */
 131 #define _PAGE_SZ32MB_4U   _AC(0x0001000000000000,UL) /* (Panther) 32MB page  */
 132 #define _PAGE_SZ256MB_4U  _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
 133 #define _PAGE_SZALL_4U    _AC(0x6001000000000000,UL) /* All pgsz bits        */
 134 #define _PAGE_SN_4U       _AC(0x0000800000000000,UL) /* (Cheetah) Snoop      */
 135 #define _PAGE_RES2_4U     _AC(0x0000780000000000,UL) /* Reserved             */
 136 #define _PAGE_PADDR_4U    _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13]  */
 137 #define _PAGE_SOFT_4U     _AC(0x0000000000001F80,UL) /* Software bits:       */
 138 #define _PAGE_EXEC_4U     _AC(0x0000000000001000,UL) /* Executable SW bit    */
 139 #define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty)     */
 140 #define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd)     */
 141 #define _PAGE_READ_4U     _AC(0x0000000000000200,UL) /* Readable SW Bit      */
 142 #define _PAGE_WRITE_4U    _AC(0x0000000000000100,UL) /* Writable SW Bit      */
 143 #define _PAGE_PRESENT_4U  _AC(0x0000000000000080,UL) /* Present              */
 144 #define _PAGE_L_4U        _AC(0x0000000000000040,UL) /* Locked TTE           */
 145 #define _PAGE_CP_4U       _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */
 146 #define _PAGE_CV_4U       _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */
 147 #define _PAGE_E_4U        _AC(0x0000000000000008,UL) /* side-Effect          */
 148 #define _PAGE_P_4U        _AC(0x0000000000000004,UL) /* Privileged Page      */
 149 #define _PAGE_W_4U        _AC(0x0000000000000002,UL) /* Writable             */
 150 
 151 /* SUN4V pte bits... */
 152 #define _PAGE_NFO_4V      _AC(0x4000000000000000,UL) /* No Fault Only        */
 153 #define _PAGE_SOFT2_4V    _AC(0x3F00000000000000,UL) /* Software bits, set 2 */
 154 #define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL) /* Modified (dirty)     */
 155 #define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd)     */
 156 #define _PAGE_READ_4V     _AC(0x0800000000000000,UL) /* Readable SW Bit      */
 157 #define _PAGE_WRITE_4V    _AC(0x0400000000000000,UL) /* Writable SW Bit      */
 158 #define _PAGE_SPECIAL_4V  _AC(0x0200000000000000,UL) /* Special page         */
 159 #define _PAGE_PMD_HUGE_4V _AC(0x0100000000000000,UL) /* Huge page            */
 160 #define _PAGE_PADDR_4V    _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13]         */
 161 #define _PAGE_IE_4V       _AC(0x0000000000001000,UL) /* Invert Endianness    */
 162 #define _PAGE_E_4V        _AC(0x0000000000000800,UL) /* side-Effect          */
 163 #define _PAGE_CP_4V       _AC(0x0000000000000400,UL) /* Cacheable in P-Cache */
 164 #define _PAGE_CV_4V       _AC(0x0000000000000200,UL) /* Cacheable in V-Cache */
  165 /* On M7, bit 9 instead enables MCD memory corruption detection */
 166 #define _PAGE_MCD_4V      _AC(0x0000000000000200,UL) /* Memory Corruption    */
 167 #define _PAGE_P_4V        _AC(0x0000000000000100,UL) /* Privileged Page      */
 168 #define _PAGE_EXEC_4V     _AC(0x0000000000000080,UL) /* Executable Page      */
 169 #define _PAGE_W_4V        _AC(0x0000000000000040,UL) /* Writable             */
 170 #define _PAGE_SOFT_4V     _AC(0x0000000000000030,UL) /* Software bits        */
 171 #define _PAGE_PRESENT_4V  _AC(0x0000000000000010,UL) /* Present              */
 172 #define _PAGE_RESV_4V     _AC(0x0000000000000008,UL) /* Reserved             */
 173 #define _PAGE_SZ16GB_4V   _AC(0x0000000000000007,UL) /* 16GB Page            */
 174 #define _PAGE_SZ2GB_4V    _AC(0x0000000000000006,UL) /* 2GB Page             */
 175 #define _PAGE_SZ256MB_4V  _AC(0x0000000000000005,UL) /* 256MB Page           */
 176 #define _PAGE_SZ32MB_4V   _AC(0x0000000000000004,UL) /* 32MB Page            */
 177 #define _PAGE_SZ4MB_4V    _AC(0x0000000000000003,UL) /* 4MB Page             */
 178 #define _PAGE_SZ512K_4V   _AC(0x0000000000000002,UL) /* 512K Page            */
 179 #define _PAGE_SZ64K_4V    _AC(0x0000000000000001,UL) /* 64K Page             */
 180 #define _PAGE_SZ8K_4V     _AC(0x0000000000000000,UL) /* 8K Page              */
 181 #define _PAGE_SZALL_4V    _AC(0x0000000000000007,UL) /* All pgsz bits        */
 182 
 183 #define _PAGE_SZBITS_4U _PAGE_SZ8K_4U
 184 #define _PAGE_SZBITS_4V _PAGE_SZ8K_4V
 185 
 186 #if REAL_HPAGE_SHIFT != 22
 187 #error REAL_HPAGE_SHIFT and _PAGE_SZHUGE_foo must match up
 188 #endif
 189 
 190 #define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U
 191 #define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V
 192 
 193 /* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
 194 #define __P000  __pgprot(0)
 195 #define __P001  __pgprot(0)
 196 #define __P010  __pgprot(0)
 197 #define __P011  __pgprot(0)
 198 #define __P100  __pgprot(0)
 199 #define __P101  __pgprot(0)
 200 #define __P110  __pgprot(0)
 201 #define __P111  __pgprot(0)
 202 
 203 #define __S000  __pgprot(0)
 204 #define __S001  __pgprot(0)
 205 #define __S010  __pgprot(0)
 206 #define __S011  __pgprot(0)
 207 #define __S100  __pgprot(0)
 208 #define __S101  __pgprot(0)
 209 #define __S110  __pgprot(0)
 210 #define __S111  __pgprot(0)
 211 
 212 #ifndef __ASSEMBLY__
 213 
 214 pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);
 215 
 216 unsigned long pte_sz_bits(unsigned long size);
 217 
 218 extern pgprot_t PAGE_KERNEL;
 219 extern pgprot_t PAGE_KERNEL_LOCKED;
 220 extern pgprot_t PAGE_COPY;
 221 extern pgprot_t PAGE_SHARED;
 222 
 223 /* XXX This ugliness is for the atyfb driver's sparc mmap() support. XXX */
 224 extern unsigned long _PAGE_IE;
 225 extern unsigned long _PAGE_E;
 226 extern unsigned long _PAGE_CACHE;
 227 
 228 extern unsigned long pg_iobits;
 229 extern unsigned long _PAGE_ALL_SZ_BITS;
 230 
 231 extern struct page *mem_map_zero;
 232 #define ZERO_PAGE(vaddr)        (mem_map_zero)
 233 
 234 /* PFNs are real physical page numbers.  However, mem_map only begins to record
 235  * per-page information starting at pfn_base.  This is to handle systems where
 236  * the first physical page in the machine is at some huge physical address,
 237  * such as 4GB.   This is common on a partitioned E10000, for example.
 238  */
 239 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
 240 {
 241         unsigned long paddr = pfn << PAGE_SHIFT;
 242 
 243         BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
 244         return __pte(paddr | pgprot_val(prot));
 245 }
 246 #define mk_pte(page, pgprot)    pfn_pte(page_to_pfn(page), (pgprot))
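
/* Usage sketch (illustrative): a caller builds a PTE for a pfn directly,
 * e.g. pte_t pte = pfn_pte(pfn, PAGE_KERNEL); the BUILD_BUG_ON() above
 * guarantees that leaving the size field zero selects the 8K page size
 * in both the sun4u and sun4v layouts.
 */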
 247 
 248 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 249 static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
 250 {
 251         pte_t pte = pfn_pte(page_nr, pgprot);
 252 
 253         return __pmd(pte_val(pte));
 254 }
 255 #define mk_pmd(page, pgprot)    pfn_pmd(page_to_pfn(page), (pgprot))
 256 #endif
 257 
 258 /* This one can be done with two shifts.  */
 259 static inline unsigned long pte_pfn(pte_t pte)
 260 {
 261         unsigned long ret;
 262 
 263         __asm__ __volatile__(
 264         "\n661: sllx            %1, %2, %0\n"
 265         "       srlx            %0, %3, %0\n"
 266         "       .section        .sun4v_2insn_patch, \"ax\"\n"
 267         "       .word           661b\n"
 268         "       sllx            %1, %4, %0\n"
 269         "       srlx            %0, %5, %0\n"
 270         "       .previous\n"
 271         : "=r" (ret)
 272         : "r" (pte_val(pte)),
 273           "i" (21), "i" (21 + PAGE_SHIFT),
 274           "i" (8), "i" (8 + PAGE_SHIFT));
 275 
 276         return ret;
 277 }
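
/* Equivalent C for the two patched variants above (a sketch, not part of
 * the original):
 *
 *      sun4u: return (pte_val(pte) << 21) >> (21 + PAGE_SHIFT);
 *      sun4v: return (pte_val(pte) <<  8) >> ( 8 + PAGE_SHIFT);
 *
 * The left shift strips the bits above the physical address field
 * (_PAGE_PADDR_4U is pa[42:13], _PAGE_PADDR_4V is paddr[55:13]) and the
 * right shift then drops the 13 page-offset bits, leaving the pfn.
 */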
 278 #define pte_page(x) pfn_to_page(pte_pfn(x))
 279 
 280 static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
 281 {
 282         unsigned long mask, tmp;
 283 
 284         /* SUN4U: 0x630107ffffffec38 (negated == 0x9cfef800000013c7)
 285          * SUN4V: 0x33ffffffffffee07 (negated == 0xcc000000000011f8)
 286          *
 287          * Even if we use negation tricks the result is still a 6
 288          * instruction sequence, so don't try to play fancy and just
 289          * do the most straightforward implementation.
 290          *
 291          * Note: We encode this into 3 sun4v 2-insn patch sequences.
 292          */
 293 
 294         BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
 295         __asm__ __volatile__(
 296         "\n661: sethi           %%uhi(%2), %1\n"
 297         "       sethi           %%hi(%2), %0\n"
 298         "\n662: or              %1, %%ulo(%2), %1\n"
 299         "       or              %0, %%lo(%2), %0\n"
 300         "\n663: sllx            %1, 32, %1\n"
 301         "       or              %0, %1, %0\n"
 302         "       .section        .sun4v_2insn_patch, \"ax\"\n"
 303         "       .word           661b\n"
 304         "       sethi           %%uhi(%3), %1\n"
 305         "       sethi           %%hi(%3), %0\n"
 306         "       .word           662b\n"
 307         "       or              %1, %%ulo(%3), %1\n"
 308         "       or              %0, %%lo(%3), %0\n"
 309         "       .word           663b\n"
 310         "       sllx            %1, 32, %1\n"
 311         "       or              %0, %1, %0\n"
 312         "       .previous\n"
 313         "       .section        .sun_m7_2insn_patch, \"ax\"\n"
 314         "       .word           661b\n"
 315         "       sethi           %%uhi(%4), %1\n"
 316         "       sethi           %%hi(%4), %0\n"
 317         "       .word           662b\n"
 318         "       or              %1, %%ulo(%4), %1\n"
 319         "       or              %0, %%lo(%4), %0\n"
 320         "       .word           663b\n"
 321         "       sllx            %1, 32, %1\n"
 322         "       or              %0, %1, %0\n"
 323         "       .previous\n"
 324         : "=r" (mask), "=r" (tmp)
 325         : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
 326                _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
 327                _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
 328           "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
 329                _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
 330                _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V),
 331           "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
 332                _PAGE_CP_4V | _PAGE_E_4V |
 333                _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));
 334 
 335         return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
 336 }
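
/* Usage sketch (illustrative): generic mm code calls this on protection
 * changes, e.g. pte = pte_modify(pte, vma->vm_page_prot); everything in
 * the computed mask (physical address, dirty/accessed bits, cacheability,
 * special/huge and page-size bits) is kept from the old pte, and only the
 * remaining bits come from prot.
 */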
 337 
 338 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 339 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 340 {
 341         pte_t pte = __pte(pmd_val(pmd));
 342 
 343         pte = pte_modify(pte, newprot);
 344 
 345         return __pmd(pte_val(pte));
 346 }
 347 #endif
 348 
 349 static inline pgprot_t pgprot_noncached(pgprot_t prot)
 350 {
 351         unsigned long val = pgprot_val(prot);
 352 
 353         __asm__ __volatile__(
 354         "\n661: andn            %0, %2, %0\n"
 355         "       or              %0, %3, %0\n"
 356         "       .section        .sun4v_2insn_patch, \"ax\"\n"
 357         "       .word           661b\n"
 358         "       andn            %0, %4, %0\n"
 359         "       or              %0, %5, %0\n"
 360         "       .previous\n"
 361         "       .section        .sun_m7_2insn_patch, \"ax\"\n"
 362         "       .word           661b\n"
 363         "       andn            %0, %6, %0\n"
 364         "       or              %0, %5, %0\n"
 365         "       .previous\n"
 366         : "=r" (val)
 367         : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
 368                      "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V),
 369                      "i" (_PAGE_CP_4V));
 370 
 371         return __pgprot(val);
 372 }
 373 /* Various pieces of code check for platform support by ifdef testing
 374  * on "pgprot_noncached".  That's broken and should be fixed, but for
 375  * now...
 376  */
 377 #define pgprot_noncached pgprot_noncached
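
/* Typical driver usage (illustrative): uncache a mapping before remapping
 * device memory, e.g.
 *
 *      vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *
 * followed by the io_remap_pfn_range() helper defined later in this file.
 */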
 378 
 379 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 380 extern pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
 381                                 struct page *page, int writable);
 382 #define arch_make_huge_pte arch_make_huge_pte
 383 static inline unsigned long __pte_default_huge_mask(void)
 384 {
 385         unsigned long mask;
 386 
 387         __asm__ __volatile__(
 388         "\n661: sethi           %%uhi(%1), %0\n"
 389         "       sllx            %0, 32, %0\n"
 390         "       .section        .sun4v_2insn_patch, \"ax\"\n"
 391         "       .word           661b\n"
 392         "       mov             %2, %0\n"
 393         "       nop\n"
 394         "       .previous\n"
 395         : "=r" (mask)
 396         : "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));
 397 
 398         return mask;
 399 }
 400 
 401 static inline pte_t pte_mkhuge(pte_t pte)
 402 {
 403         return __pte(pte_val(pte) | __pte_default_huge_mask());
 404 }
 405 
 406 static inline bool is_default_hugetlb_pte(pte_t pte)
 407 {
 408         unsigned long mask = __pte_default_huge_mask();
 409 
 410         return (pte_val(pte) & mask) == mask;
 411 }
 412 
 413 static inline bool is_hugetlb_pmd(pmd_t pmd)
 414 {
 415         return !!(pmd_val(pmd) & _PAGE_PMD_HUGE);
 416 }
 417 
 418 static inline bool is_hugetlb_pud(pud_t pud)
 419 {
 420         return !!(pud_val(pud) & _PAGE_PUD_HUGE);
 421 }
 422 
 423 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 424 static inline pmd_t pmd_mkhuge(pmd_t pmd)
 425 {
 426         pte_t pte = __pte(pmd_val(pmd));
 427 
 428         pte = pte_mkhuge(pte);
 429         pte_val(pte) |= _PAGE_PMD_HUGE;
 430 
 431         return __pmd(pte_val(pte));
 432 }
 433 #endif
 434 #else
 435 static inline bool is_hugetlb_pte(pte_t pte)
 436 {
 437         return false;
 438 }
 439 #endif
 440 
 441 static inline pte_t pte_mkdirty(pte_t pte)
 442 {
 443         unsigned long val = pte_val(pte), tmp;
 444 
 445         __asm__ __volatile__(
 446         "\n661: or              %0, %3, %0\n"
 447         "       nop\n"
 448         "\n662: nop\n"
 449         "       nop\n"
 450         "       .section        .sun4v_2insn_patch, \"ax\"\n"
 451         "       .word           661b\n"
 452         "       sethi           %%uhi(%4), %1\n"
 453         "       sllx            %1, 32, %1\n"
 454         "       .word           662b\n"
 455         "       or              %1, %%lo(%4), %1\n"
 456         "       or              %0, %1, %0\n"
 457         "       .previous\n"
 458         : "=r" (val), "=r" (tmp)
 459         : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
 460           "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));
 461 
 462         return __pte(val);
 463 }
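
/* The pte helpers here all follow the same boot-time patching pattern;
 * the sequence above is equivalent to this illustrative C:
 *
 *      val |= (tlb_type == hypervisor)
 *              ? (_PAGE_MODIFIED_4V | _PAGE_W_4V)
 *              : (_PAGE_MODIFIED_4U | _PAGE_W_4U);
 *
 * except that the sun4u/sun4v choice is made once by the instruction
 * patcher instead of on every call.
 */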
 464 
 465 static inline pte_t pte_mkclean(pte_t pte)
 466 {
 467         unsigned long val = pte_val(pte), tmp;
 468 
 469         __asm__ __volatile__(
 470         "\n661: andn            %0, %3, %0\n"
 471         "       nop\n"
 472         "\n662: nop\n"
 473         "       nop\n"
 474         "       .section        .sun4v_2insn_patch, \"ax\"\n"
 475         "       .word           661b\n"
 476         "       sethi           %%uhi(%4), %1\n"
 477         "       sllx            %1, 32, %1\n"
 478         "       .word           662b\n"
 479         "       or              %1, %%lo(%4), %1\n"
 480         "       andn            %0, %1, %0\n"
 481         "       .previous\n"
 482         : "=r" (val), "=r" (tmp)
 483         : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
 484           "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));
 485 
 486         return __pte(val);
 487 }
 488 
 489 static inline pte_t pte_mkwrite(pte_t pte)
 490 {
 491         unsigned long val = pte_val(pte), mask;
 492 
 493         __asm__ __volatile__(
 494         "\n661: mov             %1, %0\n"
 495         "       nop\n"
 496         "       .section        .sun4v_2insn_patch, \"ax\"\n"
 497         "       .word           661b\n"
 498         "       sethi           %%uhi(%2), %0\n"
 499         "       sllx            %0, 32, %0\n"
 500         "       .previous\n"
 501         : "=r" (mask)
 502         : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));
 503 
 504         return __pte(val | mask);
 505 }
 506 
 507 static inline pte_t pte_wrprotect(pte_t pte)
 508 {
 509         unsigned long val = pte_val(pte), tmp;
 510 
 511         __asm__ __volatile__(
 512         "\n661: andn            %0, %3, %0\n"
 513         "       nop\n"
 514         "\n662: nop\n"
 515         "       nop\n"
 516         "       .section        .sun4v_2insn_patch, \"ax\"\n"
 517         "       .word           661b\n"
 518         "       sethi           %%uhi(%4), %1\n"
 519         "       sllx            %1, 32, %1\n"
 520         "       .word           662b\n"
 521         "       or              %1, %%lo(%4), %1\n"
 522         "       andn            %0, %1, %0\n"
 523         "       .previous\n"
 524         : "=r" (val), "=r" (tmp)
 525         : "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U),
 526           "i" (_PAGE_WRITE_4V | _PAGE_W_4V));
 527 
 528         return __pte(val);
 529 }
 530 
 531 static inline pte_t pte_mkold(pte_t pte)
 532 {
 533         unsigned long mask;
 534 
 535         __asm__ __volatile__(
 536         "\n661: mov             %1, %0\n"
 537         "       nop\n"
 538         "       .section        .sun4v_2insn_patch, \"ax\"\n"
 539         "       .word           661b\n"
 540         "       sethi           %%uhi(%2), %0\n"
 541         "       sllx            %0, 32, %0\n"
 542         "       .previous\n"
 543         : "=r" (mask)
 544         : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
 545 
 546         mask |= _PAGE_R;
 547 
 548         return __pte(pte_val(pte) & ~mask);
 549 }
 550 
 551 static inline pte_t pte_mkyoung(pte_t pte)
 552 {
 553         unsigned long mask;
 554 
 555         __asm__ __volatile__(
 556         "\n661: mov             %1, %0\n"
 557         "       nop\n"
 558         "       .section        .sun4v_2insn_patch, \"ax\"\n"
 559         "       .word           661b\n"
 560         "       sethi           %%uhi(%2), %0\n"
 561         "       sllx            %0, 32, %0\n"
 562         "       .previous\n"
 563         : "=r" (mask)
 564         : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
 565 
 566         mask |= _PAGE_R;
 567 
 568         return __pte(pte_val(pte) | mask);
 569 }
 570 
 571 static inline pte_t pte_mkspecial(pte_t pte)
 572 {
 573         pte_val(pte) |= _PAGE_SPECIAL;
 574         return pte;
 575 }
 576 
 577 static inline pte_t pte_mkmcd(pte_t pte)
 578 {
 579         pte_val(pte) |= _PAGE_MCD_4V;
 580         return pte;
 581 }
 582 
 583 static inline pte_t pte_mknotmcd(pte_t pte)
 584 {
 585         pte_val(pte) &= ~_PAGE_MCD_4V;
 586         return pte;
 587 }
 588 
 589 static inline unsigned long pte_young(pte_t pte)
 590 {
 591         unsigned long mask;
 592 
 593         __asm__ __volatile__(
 594         "\n661: mov             %1, %0\n"
 595         "       nop\n"
 596         "       .section        .sun4v_2insn_patch, \"ax\"\n"
 597         "       .word           661b\n"
 598         "       sethi           %%uhi(%2), %0\n"
 599         "       sllx            %0, 32, %0\n"
 600         "       .previous\n"
 601         : "=r" (mask)
 602         : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
 603 
 604         return (pte_val(pte) & mask);
 605 }
 606 
 607 static inline unsigned long pte_dirty(pte_t pte)
 608 {
 609         unsigned long mask;
 610 
 611         __asm__ __volatile__(
 612         "\n661: mov             %1, %0\n"
 613         "       nop\n"
 614         "       .section        .sun4v_2insn_patch, \"ax\"\n"
 615         "       .word           661b\n"
 616         "       sethi           %%uhi(%2), %0\n"
 617         "       sllx            %0, 32, %0\n"
 618         "       .previous\n"
 619         : "=r" (mask)
 620         : "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));
 621 
 622         return (pte_val(pte) & mask);
 623 }
 624 
 625 static inline unsigned long pte_write(pte_t pte)
 626 {
 627         unsigned long mask;
 628 
 629         __asm__ __volatile__(
 630         "\n661: mov             %1, %0\n"
 631         "       nop\n"
 632         "       .section        .sun4v_2insn_patch, \"ax\"\n"
 633         "       .word           661b\n"
 634         "       sethi           %%uhi(%2), %0\n"
 635         "       sllx            %0, 32, %0\n"
 636         "       .previous\n"
 637         : "=r" (mask)
 638         : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));
 639 
 640         return (pte_val(pte) & mask);
 641 }
 642 
 643 static inline unsigned long pte_exec(pte_t pte)
 644 {
 645         unsigned long mask;
 646 
 647         __asm__ __volatile__(
 648         "\n661: sethi           %%hi(%1), %0\n"
 649         "       .section        .sun4v_1insn_patch, \"ax\"\n"
 650         "       .word           661b\n"
 651         "       mov             %2, %0\n"
 652         "       .previous\n"
 653         : "=r" (mask)
 654         : "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V));
 655 
 656         return (pte_val(pte) & mask);
 657 }
 658 
 659 static inline unsigned long pte_present(pte_t pte)
 660 {
 661         unsigned long val = pte_val(pte);
 662 
 663         __asm__ __volatile__(
 664         "\n661: and             %0, %2, %0\n"
 665         "       .section        .sun4v_1insn_patch, \"ax\"\n"
 666         "       .word           661b\n"
 667         "       and             %0, %3, %0\n"
 668         "       .previous\n"
 669         : "=r" (val)
 670         : "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V));
 671 
 672         return val;
 673 }
 674 
 675 #define pte_accessible pte_accessible
 676 static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
 677 {
 678         return pte_val(a) & _PAGE_VALID;
 679 }
 680 
 681 static inline unsigned long pte_special(pte_t pte)
 682 {
 683         return pte_val(pte) & _PAGE_SPECIAL;
 684 }
 685 
 686 static inline unsigned long pmd_large(pmd_t pmd)
 687 {
 688         pte_t pte = __pte(pmd_val(pmd));
 689 
 690         return pte_val(pte) & _PAGE_PMD_HUGE;
 691 }
 692 
 693 static inline unsigned long pmd_pfn(pmd_t pmd)
 694 {
 695         pte_t pte = __pte(pmd_val(pmd));
 696 
 697         return pte_pfn(pte);
 698 }
 699 
 700 #define pmd_write pmd_write
 701 static inline unsigned long pmd_write(pmd_t pmd)
 702 {
 703         pte_t pte = __pte(pmd_val(pmd));
 704 
 705         return pte_write(pte);
 706 }
 707 
 708 #define pud_write(pud)  pte_write(__pte(pud_val(pud)))
 709 
 710 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 711 static inline unsigned long pmd_dirty(pmd_t pmd)
 712 {
 713         pte_t pte = __pte(pmd_val(pmd));
 714 
 715         return pte_dirty(pte);
 716 }
 717 
 718 static inline unsigned long pmd_young(pmd_t pmd)
 719 {
 720         pte_t pte = __pte(pmd_val(pmd));
 721 
 722         return pte_young(pte);
 723 }
 724 
 725 static inline unsigned long pmd_trans_huge(pmd_t pmd)
 726 {
 727         pte_t pte = __pte(pmd_val(pmd));
 728 
 729         return pte_val(pte) & _PAGE_PMD_HUGE;
 730 }
 731 
 732 static inline pmd_t pmd_mkold(pmd_t pmd)
 733 {
 734         pte_t pte = __pte(pmd_val(pmd));
 735 
 736         pte = pte_mkold(pte);
 737 
 738         return __pmd(pte_val(pte));
 739 }
 740 
 741 static inline pmd_t pmd_wrprotect(pmd_t pmd)
 742 {
 743         pte_t pte = __pte(pmd_val(pmd));
 744 
 745         pte = pte_wrprotect(pte);
 746 
 747         return __pmd(pte_val(pte));
 748 }
 749 
 750 static inline pmd_t pmd_mkdirty(pmd_t pmd)
 751 {
 752         pte_t pte = __pte(pmd_val(pmd));
 753 
 754         pte = pte_mkdirty(pte);
 755 
 756         return __pmd(pte_val(pte));
 757 }
 758 
 759 static inline pmd_t pmd_mkclean(pmd_t pmd)
 760 {
 761         pte_t pte = __pte(pmd_val(pmd));
 762 
 763         pte = pte_mkclean(pte);
 764 
 765         return __pmd(pte_val(pte));
 766 }
 767 
 768 static inline pmd_t pmd_mkyoung(pmd_t pmd)
 769 {
 770         pte_t pte = __pte(pmd_val(pmd));
 771 
 772         pte = pte_mkyoung(pte);
 773 
 774         return __pmd(pte_val(pte));
 775 }
 776 
 777 static inline pmd_t pmd_mkwrite(pmd_t pmd)
 778 {
 779         pte_t pte = __pte(pmd_val(pmd));
 780 
 781         pte = pte_mkwrite(pte);
 782 
 783         return __pmd(pte_val(pte));
 784 }
 785 
 786 static inline pgprot_t pmd_pgprot(pmd_t entry)
 787 {
 788         unsigned long val = pmd_val(entry);
 789 
 790         return __pgprot(val);
 791 }
 792 #endif
 793 
 794 static inline int pmd_present(pmd_t pmd)
 795 {
 796         return pmd_val(pmd) != 0UL;
 797 }
 798 
 799 #define pmd_none(pmd)                   (!pmd_val(pmd))
 800 
 801 /* pmd_bad() is only called on non-trans-huge PMDs.  Our encoding is
 802  * very simple, it's just the physical address.  PTE tables are of
 803  * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and
 804  * the top bits outside of the range of any physical address size we
  805  * support are clear as well.  We also validate the physical address itself.
 806  */
 807 #define pmd_bad(pmd)                    (pmd_val(pmd) & ~PAGE_MASK)
 808 
 809 #define pud_none(pud)                   (!pud_val(pud))
 810 
 811 #define pud_bad(pud)                    (pud_val(pud) & ~PAGE_MASK)
 812 
 813 #define pgd_none(pgd)                   (!pgd_val(pgd))
 814 
 815 #define pgd_bad(pgd)                    (pgd_val(pgd) & ~PAGE_MASK)
 816 
 817 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 818 void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 819                 pmd_t *pmdp, pmd_t pmd);
 820 #else
 821 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 822                               pmd_t *pmdp, pmd_t pmd)
 823 {
 824         *pmdp = pmd;
 825 }
 826 #endif
 827 
 828 static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
 829 {
 830         unsigned long val = __pa((unsigned long) (ptep));
 831 
 832         pmd_val(*pmdp) = val;
 833 }
 834 
 835 #define pud_set(pudp, pmdp)     \
 836         (pud_val(*(pudp)) = (__pa((unsigned long) (pmdp))))
 837 static inline unsigned long __pmd_page(pmd_t pmd)
 838 {
 839         pte_t pte = __pte(pmd_val(pmd));
 840         unsigned long pfn;
 841 
 842         pfn = pte_pfn(pte);
 843 
 844         return ((unsigned long) __va(pfn << PAGE_SHIFT));
 845 }
 846 
 847 static inline unsigned long pud_page_vaddr(pud_t pud)
 848 {
 849         pte_t pte = __pte(pud_val(pud));
 850         unsigned long pfn;
 851 
 852         pfn = pte_pfn(pte);
 853 
 854         return ((unsigned long) __va(pfn << PAGE_SHIFT));
 855 }
 856 
 857 #define pmd_page(pmd)                   virt_to_page((void *)__pmd_page(pmd))
 858 #define pud_page(pud)                   virt_to_page((void *)pud_page_vaddr(pud))
 859 #define pmd_clear(pmdp)                 (pmd_val(*(pmdp)) = 0UL)
 860 #define pud_present(pud)                (pud_val(pud) != 0U)
 861 #define pud_clear(pudp)                 (pud_val(*(pudp)) = 0UL)
 862 #define pgd_page_vaddr(pgd)             \
 863         ((unsigned long) __va(pgd_val(pgd)))
 864 #define pgd_present(pgd)                (pgd_val(pgd) != 0U)
 865 #define pgd_clear(pgdp)                 (pgd_val(*(pgdp)) = 0UL)
 866 
 867 /* only used by the stubbed out hugetlb gup code, should never be called */
 868 #define pgd_page(pgd)                   NULL
 869 
 870 static inline unsigned long pud_large(pud_t pud)
 871 {
 872         pte_t pte = __pte(pud_val(pud));
 873 
 874         return pte_val(pte) & _PAGE_PMD_HUGE;
 875 }
 876 
 877 static inline unsigned long pud_pfn(pud_t pud)
 878 {
 879         pte_t pte = __pte(pud_val(pud));
 880 
 881         return pte_pfn(pte);
 882 }
 883 
 884 /* Same in both SUN4V and SUN4U.  */
 885 #define pte_none(pte)                   (!pte_val(pte))
 886 
 887 #define pgd_set(pgdp, pudp)     \
 888         (pgd_val(*(pgdp)) = (__pa((unsigned long) (pudp))))
 889 
 890 /* to find an entry in a page-table-directory. */
 891 #define pgd_index(address)      (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
 892 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
 893 
 894 /* to find an entry in a kernel page-table-directory */
 895 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 896 
 897 /* Find an entry in the third-level page table.. */
 898 #define pud_index(address)      (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
 899 #define pud_offset(pgdp, address)       \
 900         ((pud_t *) pgd_page_vaddr(*(pgdp)) + pud_index(address))
 901 
 902 /* Find an entry in the second-level page table.. */
 903 #define pmd_offset(pudp, address)       \
 904         ((pmd_t *) pud_page_vaddr(*(pudp)) + \
 905          (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)))
 906 
 907 /* Find an entry in the third-level page table.. */
 908 #define pte_index(dir, address) \
 909         ((pte_t *) __pmd_page(*(dir)) + \
 910          ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
 911 #define pte_offset_kernel               pte_index
 912 #define pte_offset_map                  pte_index
 913 #define pte_unmap(pte)                  do { } while (0)
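
/* Illustrative kernel-address walk using the helpers above (no error
 * handling, assumes a mapped address):
 *
 *      pgd_t *pgd = pgd_offset_k(addr);
 *      pud_t *pud = pud_offset(pgd, addr);
 *      pmd_t *pmd = pmd_offset(pud, addr);
 *      pte_t *pte = pte_offset_kernel(pmd, addr);
 */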
 914 
 915 /* We cannot include <linux/mm_types.h> at this point yet: */
 916 extern struct mm_struct init_mm;
 917 
 918 /* Actual page table PTE updates.  */
 919 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 920                    pte_t *ptep, pte_t orig, int fullmm,
 921                    unsigned int hugepage_shift);
 922 
  923 static inline void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
  924                                        pte_t *ptep, pte_t orig, int fullmm,
  925                                        unsigned int hugepage_shift)
 926 {
 927         /* It is more efficient to let flush_tlb_kernel_range()
 928          * handle init_mm tlb flushes.
 929          *
 930          * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
 931          *             and SUN4V pte layout, so this inline test is fine.
 932          */
 933         if (likely(mm != &init_mm) && pte_accessible(mm, orig))
 934                 tlb_batch_add(mm, vaddr, ptep, orig, fullmm, hugepage_shift);
 935 }
 936 
 937 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
 938 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 939                                             unsigned long addr,
 940                                             pmd_t *pmdp)
 941 {
 942         pmd_t pmd = *pmdp;
 943         set_pmd_at(mm, addr, pmdp, __pmd(0UL));
 944         return pmd;
 945 }
 946 
 947 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 948                              pte_t *ptep, pte_t pte, int fullmm)
 949 {
 950         pte_t orig = *ptep;
 951 
 952         *ptep = pte;
 953         maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm, PAGE_SHIFT);
 954 }
 955 
 956 #define set_pte_at(mm,addr,ptep,pte)    \
 957         __set_pte_at((mm), (addr), (ptep), (pte), 0)
 958 
 959 #define pte_clear(mm,addr,ptep)         \
 960         set_pte_at((mm), (addr), (ptep), __pte(0UL))
 961 
 962 #define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
 963 #define pte_clear_not_present_full(mm,addr,ptep,fullmm) \
 964         __set_pte_at((mm), (addr), (ptep), __pte(0UL), (fullmm))
 965 
 966 #ifdef DCACHE_ALIASING_POSSIBLE
 967 #define __HAVE_ARCH_MOVE_PTE
 968 #define move_pte(pte, prot, old_addr, new_addr)                         \
 969 ({                                                                      \
 970         pte_t newpte = (pte);                                           \
 971         if (tlb_type != hypervisor && pte_present(pte)) {               \
 972                 unsigned long this_pfn = pte_pfn(pte);                  \
 973                                                                         \
 974                 if (pfn_valid(this_pfn) &&                              \
 975                     (((old_addr) ^ (new_addr)) & (1 << 13)))            \
 976                         flush_dcache_page_all(current->mm,              \
 977                                               pfn_to_page(this_pfn));   \
 978         }                                                               \
 979         newpte;                                                         \
 980 })
 981 #endif
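
/* In move_pte() above, ((old_addr) ^ (new_addr)) & (1 << 13) tests whether
 * the page's D-cache color (the address bit just above the 8K page offset)
 * changes across the move; only a color change can create an illegal
 * D-cache alias that requires the flush.
 */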
 982 
 983 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 984 
 985 void paging_init(void);
 986 unsigned long find_ecache_flush_span(unsigned long size);
 987 
 988 struct seq_file;
 989 void mmu_info(struct seq_file *);
 990 
 991 struct vm_area_struct;
 992 void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 993 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 994 void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
 995                           pmd_t *pmd);
 996 
 997 #define __HAVE_ARCH_PMDP_INVALIDATE
 998 extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 999                             pmd_t *pmdp);
1000 
1001 #define __HAVE_ARCH_PGTABLE_DEPOSIT
1002 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1003                                 pgtable_t pgtable);
1004 
1005 #define __HAVE_ARCH_PGTABLE_WITHDRAW
1006 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
1007 #endif
1008 
1009 /* Encode and de-code a swap entry */
1010 #define __swp_type(entry)       (((entry).val >> PAGE_SHIFT) & 0xffUL)
1011 #define __swp_offset(entry)     ((entry).val >> (PAGE_SHIFT + 8UL))
1012 #define __swp_entry(type, offset)       \
1013         ( (swp_entry_t) \
1014           { \
1015                 (((long)(type) << PAGE_SHIFT) | \
1016                  ((long)(offset) << (PAGE_SHIFT + 8UL))) \
1017           } )
1018 #define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) })
1019 #define __swp_entry_to_pte(x)           ((pte_t) { (x).val })
1020 
1021 int page_in_phys_avail(unsigned long paddr);
1022 
1023 /*
 1024  * For sparc32 and sparc64, the pfn in io_remap_pfn_range() carries <iospace> in
1025  * its high 4 bits.  These macros/functions put it there or get it from there.
1026  */
1027 #define MK_IOSPACE_PFN(space, pfn)      (pfn | (space << (BITS_PER_LONG - 4)))
1028 #define GET_IOSPACE(pfn)                (pfn >> (BITS_PER_LONG - 4))
1029 #define GET_PFN(pfn)                    (pfn & 0x0fffffffffffffffUL)
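
/* Worked example (BITS_PER_LONG == 64): MK_IOSPACE_PFN(2, pfn) places the
 * iospace tag 2 in bits [63:60]; GET_IOSPACE() recovers it and GET_PFN()
 * masks it back off.
 */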
1030 
1031 int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
1032                     unsigned long, pgprot_t);
1033 
1034 void adi_restore_tags(struct mm_struct *mm, struct vm_area_struct *vma,
1035                       unsigned long addr, pte_t pte);
1036 
1037 int adi_save_tags(struct mm_struct *mm, struct vm_area_struct *vma,
1038                   unsigned long addr, pte_t oldpte);
1039 
1040 #define __HAVE_ARCH_DO_SWAP_PAGE
1041 static inline void arch_do_swap_page(struct mm_struct *mm,
1042                                      struct vm_area_struct *vma,
1043                                      unsigned long addr,
1044                                      pte_t pte, pte_t oldpte)
1045 {
1046         /* If this is a new page being mapped in, there can be no
1047          * ADI tags stored away for this page. Skip looking for
 1048          * stored tags.
1049          */
1050         if (pte_none(oldpte))
1051                 return;
1052 
1053         if (adi_state.enabled && (pte_val(pte) & _PAGE_MCD_4V))
1054                 adi_restore_tags(mm, vma, addr, pte);
1055 }
1056 
1057 #define __HAVE_ARCH_UNMAP_ONE
1058 static inline int arch_unmap_one(struct mm_struct *mm,
1059                                  struct vm_area_struct *vma,
1060                                  unsigned long addr, pte_t oldpte)
1061 {
1062         if (adi_state.enabled && (pte_val(oldpte) & _PAGE_MCD_4V))
1063                 return adi_save_tags(mm, vma, addr, oldpte);
1064         return 0;
1065 }
1066 
1067 static inline int io_remap_pfn_range(struct vm_area_struct *vma,
1068                                      unsigned long from, unsigned long pfn,
1069                                      unsigned long size, pgprot_t prot)
1070 {
1071         unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
1072         int space = GET_IOSPACE(pfn);
1073         unsigned long phys_base;
1074 
1075         phys_base = offset | (((unsigned long) space) << 32UL);
1076 
1077         return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
1078 }
1079 #define io_remap_pfn_range io_remap_pfn_range 
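
/* Here the iospace tag packed by MK_IOSPACE_PFN() is unpacked and folded
 * into the physical address above bit 32 before handing off to the
 * generic remap_pfn_range().
 */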
1080 
1081 static inline unsigned long __untagged_addr(unsigned long start)
1082 {
1083         if (adi_capable()) {
1084                 long addr = start;
1085 
1086                 /* If userspace has passed a versioned address, kernel
1087                  * will not find it in the VMAs since it does not store
1088                  * the version tags in the list of VMAs. Storing version
 1089  * tags in the list of VMAs is impractical since they can be
 1090  * changed at any time from userspace without dropping into
 1091  * the kernel. Any address search in VMAs will be done with
1092                  * non-versioned addresses. Ensure the ADI version bits
1093                  * are dropped here by sign extending the last bit before
1094                  * ADI bits. IOMMU does not implement version tags.
1095                  */
1096                 return (addr << (long)adi_nbits()) >> (long)adi_nbits();
1097         }
1098 
1099         return start;
1100 }
1101 #define untagged_addr(addr) \
1102         ((__typeof__(addr))(__untagged_addr((unsigned long)(addr))))
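
/* Worked example (illustrative, assuming adi_nbits() == 4): a pointer
 * 0x5000000000001000 carrying ADI version 5 in bits [63:60] becomes
 * ((long)0x5000000000001000 << 4) >> 4 == 0x0000000000001000, since the
 * arithmetic shift replicates bit 59 over the tag bits.
 */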
1103 
1104 static inline bool pte_access_permitted(pte_t pte, bool write)
1105 {
1106         u64 prot;
1107 
1108         if (tlb_type == hypervisor) {
1109                 prot = _PAGE_PRESENT_4V | _PAGE_P_4V;
1110                 if (write)
1111                         prot |= _PAGE_WRITE_4V;
1112         } else {
1113                 prot = _PAGE_PRESENT_4U | _PAGE_P_4U;
1114                 if (write)
1115                         prot |= _PAGE_WRITE_4U;
1116         }
1117 
1118         return (pte_val(pte) & (prot | _PAGE_SPECIAL)) == prot;
1119 }
1120 #define pte_access_permitted pte_access_permitted
1121 
1122 #include <asm/tlbflush.h>
1123 #include <asm-generic/pgtable.h>
1124 
1125 /* We provide our own get_unmapped_area to cope with VA holes and
1126  * SHM area cache aliasing for userland.
1127  */
1128 #define HAVE_ARCH_UNMAPPED_AREA
1129 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1130 
 1131 /* We provide a special get_unmapped_area for framebuffer mmaps to try to use
 1132  * the largest alignment possible so that larger PTEs can be used.
1133  */
1134 unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
1135                                    unsigned long, unsigned long,
1136                                    unsigned long);
1137 #define HAVE_ARCH_FB_UNMAPPED_AREA
1138 
1139 void sun4v_register_fault_status(void);
1140 void sun4v_ktsb_register(void);
1141 void __init cheetah_ecache_flush_init(void);
1142 void sun4v_patch_tlb_handlers(void);
1143 
1144 extern unsigned long cmdline_memory_size;
1145 
1146 asmlinkage void do_sparc64_fault(struct pt_regs *regs);
1147 
1148 #endif /* !(__ASSEMBLY__) */
1149 
1150 #endif /* !(_SPARC64_PGTABLE_H) */
