/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/hugetlb.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
#include <hv/hypervisor.h>

/* From tlbflush.h */
DEFINE_PER_CPU(int, current_asid);
int min_asid, max_asid;

/*
 * Note that we flush the L1I (for VM_EXEC pages) as well as the TLB
 * so that when we are unmapping an executable page, we also flush it.
 * Combined with flushing the L1I at context switch time, this means
 * we don't have to do any other icache flushes.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	HV_Remote_ASID asids[NR_CPUS];
	int i = 0, cpu;

	/* Build the (x, y, ASID) tuple for each cpu using this mm. */
	for_each_cpu(cpu, mm_cpumask(mm)) {
		HV_Remote_ASID *asid = &asids[i++];
		asid->y = cpu / smp_topology.width;
		asid->x = cpu % smp_topology.width;
		asid->asid = per_cpu(current_asid, cpu);
	}
	flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(mm),
		     0, 0, 0, NULL, asids, i);
}

void flush_tlb_current_task(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_page_mm(struct vm_area_struct *vma, struct mm_struct *mm,
		       unsigned long va)
{
	unsigned long size = vma_kernel_pagesize(vma);
	int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;

	flush_remote(0, cache, mm_cpumask(mm),
		     va, size, size, mm_cpumask(mm), NULL, 0);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	flush_tlb_page_mm(vma, vma->vm_mm, va);
}
EXPORT_SYMBOL(flush_tlb_page);

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	unsigned long size = vma_kernel_pagesize(vma);
	struct mm_struct *mm = vma->vm_mm;
	int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;

	flush_remote(0, cache, mm_cpumask(mm), start, end - start, size,
		     mm_cpumask(mm), NULL, 0);
}

void flush_tlb_all(void)
{
	int i;

	/* Walk every virtual address range the hypervisor reports. */
	for (i = 0; ; ++i) {
		HV_VirtAddrRange r = hv_inquire_virtual(i);

		if (r.size == 0)
			break;
		/* Flush small-page mappings, evicting the L1I as well. */
		flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask,
			     r.start, r.size, PAGE_SIZE, cpu_online_mask,
			     NULL, 0);
		/* Then flush any huge-page mappings in the same range. */
		flush_remote(0, 0, NULL,
			     r.start, r.size, HPAGE_SIZE, cpu_online_mask,
			     NULL, 0);
	}
}

/*
 * Callers need to flush the L1I themselves if necessary, e.g. for
 * kernel module unload. Otherwise we assume callers are not using
 * executable pgprot_t's. Using EVICT_L1I means that dataplane cpus
 * will get an unnecessary interrupt otherwise.
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_remote(0, 0, NULL,
		     start, end - start, PAGE_SIZE, cpu_online_mask, NULL, 0);
}
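
/*
 * Illustrative usage sketch (not part of the original file): how the
 * flush interfaces above are typically driven by callers. The
 * example_* helpers are hypothetical and the page-table manipulation
 * is elided; only the flush calls themselves are this file's API.
 * Guarded by #if 0 so it is never built.
 */
#if 0
static void example_unmap_user_page(struct vm_area_struct *vma,
				    unsigned long va)
{
	/* ...clear the PTE mapping va here... */

	/*
	 * Broadcast the TLB invalidate to every cpu in the mm's
	 * cpumask; if the VMA is VM_EXEC this also evicts the L1I,
	 * per the icache-flush strategy described above.
	 */
	flush_tlb_page(vma, va);
}

static void example_remap_kernel_range(unsigned long start,
				       unsigned long end)
{
	/* ...rewrite the kernel PTEs for [start, end) here... */

	/*
	 * Kernel-range flush: TLB only, no L1I evict, so a caller
	 * with executable mappings (e.g. module unload) must flush
	 * the icache itself, as the comment above notes.
	 */
	flush_tlb_kernel_range(start, end);
}
#endif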