#ifndef _ASM_POWERPC_TLBFLUSH_H
#define _ASM_POWERPC_TLBFLUSH_H

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - local_flush_tlb_mm(mm) flushes the specified mm context on
 *                           the local processor
 *  - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#ifdef __KERNEL__

#ifdef CONFIG_PPC_MMU_NOHASH
/*
 * TLB flushing for software loaded TLB chips
 *
 * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
 * flush_tlb_kernel_range are best implemented as tlbia vs
 * specific tlbie's
 */

struct vm_area_struct;
struct mm_struct;

#define MMU_NO_CONTEXT		((unsigned int)-1)

extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

extern void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
				   int tsize, int ind);

#ifdef CONFIG_SMP
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			     int tsize, int ind);
#else
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
#define __flush_tlb_page(mm, addr, p, i)	__local_flush_tlb_page(mm, addr, p, i)
#endif
#define flush_tlb_page_nohash(vma, addr)	flush_tlb_page(vma, addr)

#elif defined(CONFIG_PPC_STD_MMU_32)

/*
 * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
 */
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	flush_tlb_mm(mm);
}

#elif defined(CONFIG_PPC_STD_MMU_64)

#define MMU_NO_CONTEXT	0

/*
 * TLB flushing for 64-bit hash-MMU CPUs
 */

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR 192

struct ppc64_tlb_batch {
	int			active;
	unsigned long		index;
	struct mm_struct	*mm;
	real_pte_t		pte[PPC64_TLB_BATCH_NR];
	unsigned long		vpn[PPC64_TLB_BATCH_NR];
	unsigned int		psize;
	int			ssize;
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
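/*
 * Lazy MMU mode collects hash-PTE invalidations in the per-cpu
 * ppc64_tlb_batch and issues them in one go from __flush_tlb_pending(),
 * which reduces tlbie traffic when many PTEs are updated back to back.
 *
 * Illustrative sketch only, not part of this header: generic mm code
 * brackets a run of PTE updates roughly like this, with the batch being
 * filled from the PTE update paths in between:
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (addr = start; addr < end; addr += PAGE_SIZE)
 *		... update the PTE for addr ...
 *	arch_leave_lazy_mmu_mode();	/. flushes any batched entries ./
 */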

static inline void arch_enter_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);

	batch->active = 1;
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
}

#define arch_flush_lazy_mmu_mode()	do {} while (0)


extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
			    int ssize, unsigned long flags);
extern void flush_hash_range(unsigned long number, int local);
extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
				pmd_t *pmdp, unsigned int psize, int ssize,
				unsigned long flags);

/*
 * On the 64-bit hash MMU, translations are invalidated when the
 * corresponding hash PTEs are flushed (flush_hash_page/flush_hash_range),
 * so the generic flush_tlb_* hooks are no-ops here.
 */
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
}

/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
				     unsigned long end);
extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
				unsigned long addr);
#else
#error Unsupported MMU type
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TLBFLUSH_H */