#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>

/*
 * Smarter SMP flushing macros.
 * c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway).
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

struct flush_tlb_info {
	struct mm_struct *flush_mm;
	unsigned long flush_start;
	unsigned long flush_end;
};

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
		/*
		 * This gets called in the idle path where RCU
		 * functions differently.  Tracing normally
		 * uses RCU, so we have to call the tracepoint
		 * specially here.
		 */
		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 *  1a1) set cpu_tlbstate to TLBSTATE_OK
 *	Now the tlb flush IPI handler flush_tlb_func() won't call leave_mm()
 *	if cpu0 was in lazy tlb mode.
 *  1a2) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 *  1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 *  1a4) change cr3.
 *  1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but flush_tlb_func() ignores flush ipis for the wrong
 *	mm, and in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 *  1b1) set cpu_tlbstate to TLBSTATE_OK
 *  1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 *  1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %esp, ie current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %esp, ie. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */
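
/*
 * To make the 1b) path above concrete, here is a minimal sketch of what
 * the lazy side of switch_mm() is expected to do.  This is an
 * illustration of the protocol only, not the actual switch_mm()
 * implementation (which lives in <asm/mmu_context.h> and differs in
 * detail):
 *
 *	this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);	(1b1)
 *	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(mm)))	(1b2)
 *		load_cr3(mm->pgd);				(1b3)
 *
 * If the bit was clear, leave_mm() ran while this cpu was lazy, so the
 * cpu may hold stale translations; reloading cr3 flushes them.
 */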

/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
	struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;
	if (!f->flush_end)
		f->flush_end = f->flush_start + PAGE_SIZE;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
		if (f->flush_end == TLB_FLUSH_ALL) {
			local_flush_tlb();
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
		} else {
			unsigned long addr;
			unsigned long nr_pages =
				(f->flush_end - f->flush_start) / PAGE_SIZE;
			addr = f->flush_start;
			while (addr < f->flush_end) {
				__flush_tlb_single(addr);
				addr += PAGE_SIZE;
			}
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
		}
	} else
		leave_mm(smp_processor_id());
}

void native_flush_tlb_others(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	struct flush_tlb_info info;
	info.flush_mm = mm;
	info.flush_start = start;
	info.flush_end = end;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func,
						&info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}
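
/*
 * Note on how remote flushes are requested: the local flush paths below
 * do not call native_flush_tlb_others() directly, they go through the
 * flush_tlb_others() hook from <asm/tlbflush.h>.  On bare metal that
 * hook resolves to this function; paravirtualized guests (e.g. Xen) may
 * install their own implementation.  Roughly (a sketch of the default
 * wiring, not a definitive call chain):
 *
 *	flush_tlb_others(mask, mm, start, end)
 *		-> native_flush_tlb_others(mask, mm, start, end)
 */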

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);

	/* This is an implicit full barrier that synchronizes with switch_mm. */
	local_flush_tlb();

	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	/* do a global flush by default */
	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

	preempt_disable();
	if (current->active_mm != mm) {
		/* Synchronize with switch_mm. */
		smp_mb();

		goto out;
	}

	if (!current->mm) {
		leave_mm(smp_processor_id());

		/* Synchronize with switch_mm. */
		smp_mb();

		goto out;
	}

	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
		base_pages_to_flush = (end - start) >> PAGE_SHIFT;

	/*
	 * Both branches below are implicit full barriers (MOV to CR or
	 * INVLPG) that synchronize with switch_mm.
	 */
	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
		base_pages_to_flush = TLB_FLUSH_ALL;
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
	} else {
		/* flush the range one page at a time with 'invlpg' */
		for (addr = start; addr < end; addr += PAGE_SIZE) {
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
			__flush_tlb_single(addr);
		}
	}
	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
	if (base_pages_to_flush == TLB_FLUSH_ALL) {
		start = 0UL;
		end = TLB_FLUSH_ALL;
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, end);
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm) {
			/*
			 * Implicit full barrier (INVLPG) that synchronizes
			 * with switch_mm.
			 */
			__flush_tlb_one(start);
		} else {
			leave_mm(smp_processor_id());

			/* Synchronize with switch_mm. */
			smp_mb();
		}
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}

static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* flush the range one page at a time with 'invlpg' */
	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Use the same ceiling as for user space flushes; a bit conservative */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;
		info.flush_start = start;
		info.flush_end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}
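
/*
 * Typical use of flush_tlb_kernel_range(): after kernel page table
 * entries have been torn down, e.g. in the vmalloc/vmap teardown path.
 * A minimal sketch (illustration only, not a callsite copied verbatim
 * from mm/vmalloc.c):
 *
 *	unmap_kernel_range_noflush(addr, size);
 *	flush_tlb_kernel_range(addr, addr + size);
 *
 * The unmap clears the PTEs; the flush then drops any stale kernel
 * translations on all cpus.
 */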

static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%lu\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);
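
/*
 * Usage note (a sketch; the path assumes debugfs is mounted at the usual
 * /sys/kernel/debug): the ceiling can be read and tuned at run time
 * through the file created above, e.g.
 *
 *	cat /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *	echo 64 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *
 * Ranged flushes larger than the ceiling fall back to a full TLB flush;
 * see Documentation/x86/tlb.txt for the trade-offs.
 */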