/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2002 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2002 Maciej W. Rozycki
 */
#ifndef _ASM_PGTABLE_BITS_H
#define _ASM_PGTABLE_BITS_H

/*
 * Note that we shift the lower 32 bits of each EntryLo[01] entry
 * 6 bits to the left.  That way we can convert the PFN into the
 * physical address by a single 'and' operation and gain 6 additional
 * bits for storing information which isn't present in a normal
 * MIPS page table.
 *
 * Similar to the Alpha port, we need to keep track of the ref
 * and mod bits in software.  We have a software "yeah you can read
 * from this page" bit, and a hardware one which actually lets the
 * process read from the page.  By the same token we have a software
 * writable bit and the real hardware one which actually lets the
 * process write to the page; this gives us a mod bit via the
 * hardware dirty bit.
 *
 * Certain revisions of the R4000 and R5000 have a bug where if a
 * certain sequence occurs in the last 3 instructions of an executable
 * page, and the following page is not mapped, the CPU can do
 * unpredictable things.  The code (when it is written) to deal with
 * this problem will be in the update_mmu_cache() code for the r4k.
 */
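/*
 * Illustration of the idea (an editorial sketch, not a definition used
 * below): in the R4K-style layouts the hardware fields sit at or above
 * _PAGE_GLOBAL in EntryLo order, so pte_to_entrylo() at the bottom of
 * this file can build an EntryLo value essentially as
 *
 *	entrylo = pte_val >> _PAGE_GLOBAL_SHIFT;
 *
 * with only the RI/XI bits (on RIXI-capable cores) needing an extra
 * rotate into the top of the register.
 */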
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

/*
 * The following bits are implemented by the TLB hardware
 */
#define _PAGE_NO_EXEC_SHIFT	0
#define _PAGE_NO_EXEC		(1 << _PAGE_NO_EXEC_SHIFT)
#define _PAGE_NO_READ_SHIFT	(_PAGE_NO_EXEC_SHIFT + 1)
#define _PAGE_NO_READ		(1 << _PAGE_NO_READ_SHIFT)
#define _PAGE_GLOBAL_SHIFT	(_PAGE_NO_READ_SHIFT + 1)
#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
#define _PAGE_VALID_SHIFT	(_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
#define _PAGE_DIRTY_SHIFT	(_PAGE_VALID_SHIFT + 1)
#define _PAGE_DIRTY		(1 << _PAGE_DIRTY_SHIFT)
#define _CACHE_SHIFT		(_PAGE_DIRTY_SHIFT + 1)
#define _CACHE_MASK		(7 << _CACHE_SHIFT)

/*
 * The following bits are implemented in software
 */
#define _PAGE_PRESENT_SHIFT	(24)
#define _PAGE_PRESENT		(1 << _PAGE_PRESENT_SHIFT)
#define _PAGE_READ_SHIFT	(_PAGE_PRESENT_SHIFT + 1)
#define _PAGE_READ		(1 << _PAGE_READ_SHIFT)
#define _PAGE_WRITE_SHIFT	(_PAGE_READ_SHIFT + 1)
#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED_SHIFT	(_PAGE_WRITE_SHIFT + 1)
#define _PAGE_ACCESSED		(1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED_SHIFT	(_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED		(1 << _PAGE_MODIFIED_SHIFT)

#define _PFN_SHIFT		(PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)

/*
 * Bits for extended EntryLo0/EntryLo1 registers
 */
#define _PFNX_MASK		0xffffff

#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

/*
 * The following bits are implemented in software
 */
#define _PAGE_PRESENT_SHIFT	(0)
#define _PAGE_PRESENT		(1 << _PAGE_PRESENT_SHIFT)
#define _PAGE_READ_SHIFT	(_PAGE_PRESENT_SHIFT + 1)
#define _PAGE_READ		(1 << _PAGE_READ_SHIFT)
#define _PAGE_WRITE_SHIFT	(_PAGE_READ_SHIFT + 1)
#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED_SHIFT	(_PAGE_WRITE_SHIFT + 1)
#define _PAGE_ACCESSED		(1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED_SHIFT	(_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED		(1 << _PAGE_MODIFIED_SHIFT)

/*
 * The following bits are implemented by the TLB hardware
 */
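/*
 * Editorial note (based on the R3000-style EntryLo layout): the "+ 4" gap
 * below places _PAGE_GLOBAL at bit 8, _PAGE_VALID at bit 9, _PAGE_DIRTY at
 * bit 10 and the uncached bit at bit 11, i.e. at the positions of the G, V,
 * D and N fields of EntryLo on these cores, with _PFN_SHIFT == PAGE_SHIFT
 * putting the PFN at bit 12 and up.  The software bits then occupy the low
 * byte, outside the hardware-defined EntryLo fields.
 */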
#define _PAGE_GLOBAL_SHIFT	(_PAGE_MODIFIED_SHIFT + 4)
#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
#define _PAGE_VALID_SHIFT	(_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
#define _PAGE_DIRTY_SHIFT	(_PAGE_VALID_SHIFT + 1)
#define _PAGE_DIRTY		(1 << _PAGE_DIRTY_SHIFT)
#define _CACHE_UNCACHED_SHIFT	(_PAGE_DIRTY_SHIFT + 1)
#define _CACHE_UNCACHED		(1 << _CACHE_UNCACHED_SHIFT)
#define _CACHE_MASK		_CACHE_UNCACHED

#define _PFN_SHIFT		PAGE_SHIFT

#else
/*
 * Below are the "Normal" R4K cases
 */

/*
 * The following bits are implemented in software
 */
#define _PAGE_PRESENT_SHIFT	0
#define _PAGE_PRESENT		(1 << _PAGE_PRESENT_SHIFT)
/* R2 or later cores check for RI/XI support to determine _PAGE_READ */
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
#define _PAGE_WRITE_SHIFT	(_PAGE_PRESENT_SHIFT + 1)
#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
#else
#define _PAGE_READ_SHIFT	(_PAGE_PRESENT_SHIFT + 1)
#define _PAGE_READ		(1 << _PAGE_READ_SHIFT)
#define _PAGE_WRITE_SHIFT	(_PAGE_READ_SHIFT + 1)
#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
#endif
#define _PAGE_ACCESSED_SHIFT	(_PAGE_WRITE_SHIFT + 1)
#define _PAGE_ACCESSED		(1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED_SHIFT	(_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED		(1 << _PAGE_MODIFIED_SHIFT)

#if defined(CONFIG_64BIT) && defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
/* Huge TLB page */
#define _PAGE_HUGE_SHIFT	(_PAGE_MODIFIED_SHIFT + 1)
#define _PAGE_HUGE		(1 << _PAGE_HUGE_SHIFT)
#define _PAGE_SPLITTING_SHIFT	(_PAGE_HUGE_SHIFT + 1)
#define _PAGE_SPLITTING		(1 << _PAGE_SPLITTING_SHIFT)
#endif /* CONFIG_64BIT && CONFIG_MIPS_HUGE_TLB_SUPPORT */

#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
/* XI - page cannot be executed */
#ifdef _PAGE_SPLITTING_SHIFT
#define _PAGE_NO_EXEC_SHIFT	(_PAGE_SPLITTING_SHIFT + 1)
#else
#define _PAGE_NO_EXEC_SHIFT	(_PAGE_MODIFIED_SHIFT + 1)
#endif
#define _PAGE_NO_EXEC		(cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)

/* RI - page cannot be read */
#define _PAGE_READ_SHIFT	(_PAGE_NO_EXEC_SHIFT + 1)
#define _PAGE_READ		(cpu_has_rixi ? 0 : (1 << _PAGE_READ_SHIFT))
#define _PAGE_NO_READ_SHIFT	_PAGE_READ_SHIFT
#define _PAGE_NO_READ		(cpu_has_rixi ? (1 << _PAGE_READ_SHIFT) : 0)
#endif	/* defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) */

#if defined(_PAGE_NO_READ_SHIFT)
#define _PAGE_GLOBAL_SHIFT	(_PAGE_NO_READ_SHIFT + 1)
#elif defined(_PAGE_SPLITTING_SHIFT)
#define _PAGE_GLOBAL_SHIFT	(_PAGE_SPLITTING_SHIFT + 1)
#else
#define _PAGE_GLOBAL_SHIFT	(_PAGE_MODIFIED_SHIFT + 1)
#endif
#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)

#define _PAGE_VALID_SHIFT	(_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
#define _PAGE_DIRTY_SHIFT	(_PAGE_VALID_SHIFT + 1)
#define _PAGE_DIRTY		(1 << _PAGE_DIRTY_SHIFT)
#define _CACHE_SHIFT		(_PAGE_DIRTY_SHIFT + 1)
#define _CACHE_MASK		(7 << _CACHE_SHIFT)

#define _PFN_SHIFT		(PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)

#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

#ifndef _PAGE_NO_EXEC
#define _PAGE_NO_EXEC		0
#endif
#ifndef _PAGE_NO_READ
#define _PAGE_NO_READ		0
#endif

#define _PAGE_SILENT_READ	_PAGE_VALID
#define _PAGE_SILENT_WRITE	_PAGE_DIRTY

#define _PFN_MASK		(~((1 << (_PFN_SHIFT)) - 1))

/*
 * The final layouts of the PTE bits are:
 *
 *   64-bit, R1 or earlier:	CCC D V G [S H] M A W R P
 *   32-bit, R1 or earlier:	CCC D V G M A W R P
 *   64-bit, R2 or later:	CCC D V G RI/R XI [S H] M A W P
 *   32-bit, R2 or later:	CCC D V G RI/R XI M A W P
 */

#ifndef __ASSEMBLY__
/*
 * pte_to_entrylo converts a page table entry (PTE) into a Mips
 * entrylo0/1 value.
 */
static inline uint64_t pte_to_entrylo(unsigned long pte_val)
{
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	if (cpu_has_rixi) {
		int sa;
#ifdef CONFIG_32BIT
		sa = 31 - _PAGE_NO_READ_SHIFT;
#else
		sa = 63 - _PAGE_NO_READ_SHIFT;
#endif
		/*
		 * C has no way to express that this is a DSRL
		 * _PAGE_NO_EXEC_SHIFT followed by a ROTR 2.  Luckily
		 * in the fast path this is done in assembly
		 */
		return (pte_val >> _PAGE_GLOBAL_SHIFT) |
			((pte_val & (_PAGE_NO_EXEC | _PAGE_NO_READ)) << sa);
	}
#endif

	return pte_val >> _PAGE_GLOBAL_SHIFT;
}
#endif
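/*
 * Worked example (assuming a 32-bit R2 or later core with RIXI): since
 * _PAGE_NO_READ sits one bit above _PAGE_NO_EXEC and sa is
 * 31 - _PAGE_NO_READ_SHIFT, the OR above lands _PAGE_NO_READ in EntryLo
 * bit 31 (RI) and _PAGE_NO_EXEC in bit 30 (XI), while the plain shift by
 * _PAGE_GLOBAL_SHIFT puts the G, V, D and cache bits into EntryLo bits
 * 0..5 where the TLB expects them.
 */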

/*
 * Cache attributes
 */
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

#define _CACHE_CACHABLE_NONCOHERENT 0
#define _CACHE_UNCACHED_ACCELERATED _CACHE_UNCACHED

#elif defined(CONFIG_CPU_SB1)

/* No penalty for being coherent on the SB1, so just
   use it for "noncoherent" spaces, too.  Shouldn't hurt. */

#define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT)

#elif defined(CONFIG_CPU_LOONGSON3)

/* Using COHERENT flag for NONCOHERENT doesn't hurt. */

#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT)  /* LOONGSON */
#define _CACHE_CACHABLE_COHERENT    (3<<_CACHE_SHIFT)  /* LOONGSON-3 */

#elif defined(CONFIG_MACH_INGENIC)

/* Ingenic uses the WA bit to achieve write-combine memory writes */
#define _CACHE_UNCACHED_ACCELERATED (1<<_CACHE_SHIFT)

#endif

#ifndef _CACHE_CACHABLE_NO_WA
#define _CACHE_CACHABLE_NO_WA		(0<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_WA
#define _CACHE_CACHABLE_WA		(1<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_UNCACHED
#define _CACHE_UNCACHED			(2<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_NONCOHERENT
#define _CACHE_CACHABLE_NONCOHERENT	(3<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_CE
#define _CACHE_CACHABLE_CE		(4<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_COW
#define _CACHE_CACHABLE_COW		(5<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_CUW
#define _CACHE_CACHABLE_CUW		(6<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_UNCACHED_ACCELERATED
#define _CACHE_UNCACHED_ACCELERATED	(7<<_CACHE_SHIFT)
#endif

#define __READABLE	(_PAGE_SILENT_READ | _PAGE_READ | _PAGE_ACCESSED)
#define __WRITEABLE	(_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED)

#define _PAGE_CHG_MASK	(_PAGE_ACCESSED | _PAGE_MODIFIED |	\
			 _PFN_MASK | _CACHE_MASK)

#endif /* _ASM_PGTABLE_BITS_H */