Lines matching refs:vma (mm/nommu.c)
137 struct vm_area_struct *vma; in kobjsize() local
139 vma = find_vma(current->mm, (unsigned long)objp); in kobjsize()
140 if (vma) in kobjsize()
141 return vma->vm_end - vma->vm_start; in kobjsize()
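The kobjsize() hits above are its fallback path. A minimal sketch of the whole function as it reads in nommu kernels of roughly this vintage (details vary by version):

unsigned int kobjsize(const void *objp)
{
        struct page *page;

        /* NULL and invalid pointers have no size */
        if (!objp || !virt_addr_valid(objp))
                return 0;

        page = virt_to_head_page(objp);

        /* slab objects know their own size */
        if (PageSlab(page))
                return ksize(objp);

        /* on nommu a pointer can land inside a user mapping; if it
         * does, report the size of the enclosing VMA */
        if (!PageCompound(page)) {
                struct vm_area_struct *vma;

                vma = find_vma(current->mm, (unsigned long)objp);
                if (vma)
                        return vma->vm_end - vma->vm_start;
        }

        /* last resort: the size of the underlying page allocation */
        return PAGE_SIZE << compound_order(page);
}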
156 struct vm_area_struct *vma; in __get_user_pages() local
169 vma = find_vma(mm, start); in __get_user_pages()
170 if (!vma) in __get_user_pages()
174 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) || in __get_user_pages()
175 !(vm_flags & vma->vm_flags)) in __get_user_pages()
184 vmas[i] = vma; in __get_user_pages()
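Those __get_user_pages() lines are most of the nommu story: with no page tables, GUP is one find_vma() per page plus a permission check. A sketch of the loop, paraphrased from the source (error unwinding simplified; get_page() stands in for the page_cache_get() spelling of older trees):

        /* work out the permissions the caller needs; FOLL_FORCE relaxes
         * the test to the VM_MAY* bits */
        vm_flags  = (foll_flags & FOLL_WRITE) ?
                        (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
        vm_flags &= (foll_flags & FOLL_FORCE) ?
                        (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

        for (i = 0; i < nr_pages; i++) {
                vma = find_vma(mm, start);
                if (!vma)
                        goto finish_or_fault;

                /* refuse device mappings and anything lacking permission */
                if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
                    !(vm_flags & vma->vm_flags))
                        goto finish_or_fault;

                if (pages) {
                        /* the user address is directly usable on nommu */
                        pages[i] = virt_to_page(start);
                        if (pages[i])
                                get_page(pages[i]);
                }
                if (vmas)
                        vmas[i] = vma;
                start = (start + PAGE_SIZE) & PAGE_MASK;
        }

        return i;

finish_or_fault:
        return i ? : -EFAULT;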
261 int follow_pfn(struct vm_area_struct *vma, unsigned long address, in follow_pfn() argument
264 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_pfn()
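follow_pfn() is short enough to reconstruct in full: on nommu the virtual address is the physical address, so once the VM_IO/VM_PFNMAP check passes the pfn falls out by a shift. A sketch:

int follow_pfn(struct vm_area_struct *vma, unsigned long address,
        unsigned long *pfn)
{
        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
                return -EINVAL;

        /* no translation: addresses map 1:1 on nommu */
        *pfn = address >> PAGE_SHIFT;
        return 0;
}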
297 struct vm_area_struct *vma; in vmalloc_user() local
300 vma = find_vma(current->mm, (unsigned long)ret); in vmalloc_user()
301 if (vma) in vmalloc_user()
302 vma->vm_flags |= VM_USERMAP; in vmalloc_user()
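The vmalloc_user() hits show why VM_USERMAP exists: the freshly vmalloc'd block is looked up in the kernel's own mm and tagged so that remap_vmalloc_range() (line 1880 below) will later accept it. A sketch, assuming the mmap_sem-era locking of these kernels:

void *vmalloc_user(unsigned long size)
{
        void *ret;

        ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
                        PAGE_KERNEL);
        if (ret) {
                struct vm_area_struct *vma;

                down_write(&current->mm->mmap_sem);
                vma = find_vma(current->mm, (unsigned long)ret);
                if (vma)
                        vma->vm_flags |= VM_USERMAP; /* mappable to userspace */
                up_write(&current->mm->mmap_sem);
        }

        return ret;
}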
531 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, in vm_insert_page() argument
722 static void protect_vma(struct vm_area_struct *vma, unsigned long flags) in protect_vma() argument
725 struct mm_struct *mm = vma->vm_mm; in protect_vma()
726 long start = vma->vm_start & PAGE_MASK; in protect_vma()
727 while (start < vma->vm_end) { in protect_vma()
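protect_vma() only does real work when the platform has a memory protection unit; elsewhere it compiles away. A sketch, assuming protect_page() and update_protections() are the arch-supplied MPU hooks behind the CONFIG_MPU guard in the source:

#ifdef CONFIG_MPU
static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
{
        struct mm_struct *mm = vma->vm_mm;
        long start = vma->vm_start & PAGE_MASK;

        /* apply the protection bits one page at a time */
        while (start < vma->vm_end) {
                protect_page(mm, start, flags);
                start += PAGE_SIZE;
        }
        update_protections(mm);
}
#else
#define protect_vma(vma, flags) do { } while (0)
#endif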
741 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) in add_vma_to_mm() argument
747 kenter(",%p", vma); in add_vma_to_mm()
749 BUG_ON(!vma->vm_region); in add_vma_to_mm()
752 vma->vm_mm = mm; in add_vma_to_mm()
754 protect_vma(vma, vma->vm_flags); in add_vma_to_mm()
757 if (vma->vm_file) { in add_vma_to_mm()
758 mapping = vma->vm_file->f_mapping; in add_vma_to_mm()
762 vma_interval_tree_insert(vma, &mapping->i_mmap); in add_vma_to_mm()
776 if (vma->vm_start < pvma->vm_start) in add_vma_to_mm()
778 else if (vma->vm_start > pvma->vm_start) { in add_vma_to_mm()
781 } else if (vma->vm_end < pvma->vm_end) in add_vma_to_mm()
783 else if (vma->vm_end > pvma->vm_end) { in add_vma_to_mm()
786 } else if (vma < pvma) in add_vma_to_mm()
788 else if (vma > pvma) { in add_vma_to_mm()
795 rb_link_node(&vma->vm_rb, parent, p); in add_vma_to_mm()
796 rb_insert_color(&vma->vm_rb, &mm->mm_rb); in add_vma_to_mm()
803 __vma_link_list(mm, vma, prev, parent); in add_vma_to_mm()
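The comparison cascade at lines 776-788 is the insertion walk for the mm's red-black tree. VMAs are keyed by vm_start, ties broken by vm_end, and finally by the address of the vma structure itself so that even identical ranges get a total order; rb_prev is carried along so the new VMA can also be threaded onto the sorted vm_next list. Roughly:

        parent = rb_prev = NULL;
        p = &mm->mm_rb.rb_node;
        while (*p) {
                parent = *p;
                pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

                if (vma->vm_start < pvma->vm_start)
                        p = &(*p)->rb_left;
                else if (vma->vm_start > pvma->vm_start) {
                        rb_prev = parent;
                        p = &(*p)->rb_right;
                } else if (vma->vm_end < pvma->vm_end)
                        p = &(*p)->rb_left;
                else if (vma->vm_end > pvma->vm_end) {
                        rb_prev = parent;
                        p = &(*p)->rb_right;
                } else if (vma < pvma)  /* identical range: order by address */
                        p = &(*p)->rb_left;
                else if (vma > pvma) {
                        rb_prev = parent;
                        p = &(*p)->rb_right;
                } else
                        BUG();          /* the same VMA inserted twice */
        }

        rb_link_node(&vma->vm_rb, parent, p);
        rb_insert_color(&vma->vm_rb, &mm->mm_rb);

        /* and thread it onto the address-ordered linked list */
        prev = NULL;
        if (rb_prev)
                prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
        __vma_link_list(mm, vma, prev, parent);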
809 static void delete_vma_from_mm(struct vm_area_struct *vma) in delete_vma_from_mm() argument
813 struct mm_struct *mm = vma->vm_mm; in delete_vma_from_mm()
816 kenter("%p", vma); in delete_vma_from_mm()
818 protect_vma(vma, 0); in delete_vma_from_mm()
823 if (curr->vmacache[i] == vma) { in delete_vma_from_mm()
830 if (vma->vm_file) { in delete_vma_from_mm()
831 mapping = vma->vm_file->f_mapping; in delete_vma_from_mm()
835 vma_interval_tree_remove(vma, &mapping->i_mmap); in delete_vma_from_mm()
841 rb_erase(&vma->vm_rb, &mm->mm_rb); in delete_vma_from_mm()
843 if (vma->vm_prev) in delete_vma_from_mm()
844 vma->vm_prev->vm_next = vma->vm_next; in delete_vma_from_mm()
846 mm->mmap = vma->vm_next; in delete_vma_from_mm()
848 if (vma->vm_next) in delete_vma_from_mm()
849 vma->vm_next->vm_prev = vma->vm_prev; in delete_vma_from_mm()
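delete_vma_from_mm() undoes all three data structures at once: the per-task VMA cache, the file's interval tree, and the mm's rb-tree plus linked list. A condensed sketch (kenter() tracing dropped; i_mmap_lock_write() as in kernels around v4.0, older trees take i_mmap_mutex directly):

        struct task_struct *curr = current;
        int i;

        protect_vma(vma, 0);            /* drop any MPU protection */

        mm->map_count--;
        for (i = 0; i < VMACACHE_SIZE; i++) {
                /* if the vma is cached, invalidate the entire cache */
                if (curr->vmacache[i] == vma) {
                        vmacache_invalidate(mm);
                        break;
                }
        }

        /* remove the VMA from the file's interval tree */
        if (vma->vm_file) {
                struct address_space *mapping = vma->vm_file->f_mapping;

                i_mmap_lock_write(mapping);
                flush_dcache_mmap_lock(mapping);
                vma_interval_tree_remove(vma, &mapping->i_mmap);
                flush_dcache_mmap_unlock(mapping);
                i_mmap_unlock_write(mapping);
        }

        /* and from the mm's tree and list */
        rb_erase(&vma->vm_rb, &mm->mm_rb);

        if (vma->vm_prev)
                vma->vm_prev->vm_next = vma->vm_next;
        else
                mm->mmap = vma->vm_next;

        if (vma->vm_next)
                vma->vm_next->vm_prev = vma->vm_prev;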
855 static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) in delete_vma() argument
857 kenter("%p", vma); in delete_vma()
858 if (vma->vm_ops && vma->vm_ops->close) in delete_vma()
859 vma->vm_ops->close(vma); in delete_vma()
860 if (vma->vm_file) in delete_vma()
861 fput(vma->vm_file); in delete_vma()
862 put_nommu_region(vma->vm_region); in delete_vma()
863 kmem_cache_free(vm_area_cachep, vma); in delete_vma()
872 struct vm_area_struct *vma; in find_vma() local
875 vma = vmacache_find(mm, addr); in find_vma()
876 if (likely(vma)) in find_vma()
877 return vma; in find_vma()
881 for (vma = mm->mmap; vma; vma = vma->vm_next) { in find_vma()
882 if (vma->vm_start > addr) in find_vma()
884 if (vma->vm_end > addr) { in find_vma()
885 vmacache_update(addr, vma); in find_vma()
886 return vma; in find_vma()
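find_vma() on nommu is small enough to give in full: a per-task cache probe first, then a linear walk of the address-ordered list with an early exit, since once vm_start has passed addr no later VMA can contain it. Reconstructed:

struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma;

        /* check the per-task cache first */
        vma = vmacache_find(mm, addr);
        if (likely(vma))
                return vma;

        /* trawl the address-ordered list */
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (vma->vm_start > addr)
                        return NULL;
                if (vma->vm_end > addr) {
                        vmacache_update(addr, vma);
                        return vma;
                }
        }

        return NULL;
}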
907 int expand_stack(struct vm_area_struct *vma, unsigned long address) in expand_stack() argument
920 struct vm_area_struct *vma; in find_vma_exact() local
924 vma = vmacache_find_exact(mm, addr, end); in find_vma_exact()
925 if (vma) in find_vma_exact()
926 return vma; in find_vma_exact()
930 for (vma = mm->mmap; vma; vma = vma->vm_next) { in find_vma_exact()
931 if (vma->vm_start < addr) in find_vma_exact()
933 if (vma->vm_start > addr) in find_vma_exact()
935 if (vma->vm_end == end) { in find_vma_exact()
936 vmacache_update(addr, vma); in find_vma_exact()
937 return vma; in find_vma_exact()
1152 static int do_mmap_shared_file(struct vm_area_struct *vma) in do_mmap_shared_file() argument
1156 ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); in do_mmap_shared_file()
1158 vma->vm_region->vm_top = vma->vm_region->vm_end; in do_mmap_shared_file()
1173 static int do_mmap_private(struct vm_area_struct *vma, in do_mmap_private() argument
1187 ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); in do_mmap_private()
1190 BUG_ON(!(vma->vm_flags & VM_MAYSHARE)); in do_mmap_private()
1191 vma->vm_region->vm_top = vma->vm_region->vm_end; in do_mmap_private()
1225 region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY; in do_mmap_private()
1230 vma->vm_start = region->vm_start; in do_mmap_private()
1231 vma->vm_end = region->vm_start + len; in do_mmap_private()
1233 if (vma->vm_file) { in do_mmap_private()
1238 fpos = vma->vm_pgoff; in do_mmap_private()
1243 ret = __vfs_read(vma->vm_file, base, len, &fpos); in do_mmap_private()
1259 region->vm_start = vma->vm_start = 0; in do_mmap_private()
1260 region->vm_end = vma->vm_end = 0; in do_mmap_private()
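The do_mmap_private() hits around line 1243 are the copy path: when the file's mmap() op can't map the object directly, a physically contiguous buffer is allocated, the mapping is flagged VM_MAPPED_COPY, and the file contents are read into the buffer. A sketch of the read-in step, assuming the __vfs_read()-era kernel this listing comes from:

        if (vma->vm_file) {
                /* read the contents of the file into the copy */
                mm_segment_t old_fs;
                loff_t fpos;

                fpos = vma->vm_pgoff;
                fpos <<= PAGE_SHIFT;

                old_fs = get_fs();
                set_fs(KERNEL_DS);      /* base points at kernel memory */
                ret = __vfs_read(vma->vm_file, base, len, &fpos);
                set_fs(old_fs);

                if (ret < 0)
                        goto error_free;

                /* zero the tail that the file didn't cover */
                if (ret < len)
                        memset(base + ret, 0, len - ret);
        }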
1282 struct vm_area_struct *vma; in do_mmap_pgoff() local
1314 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); in do_mmap_pgoff()
1315 if (!vma) in do_mmap_pgoff()
1322 INIT_LIST_HEAD(&vma->anon_vma_chain); in do_mmap_pgoff()
1323 vma->vm_flags = vm_flags; in do_mmap_pgoff()
1324 vma->vm_pgoff = pgoff; in do_mmap_pgoff()
1328 vma->vm_file = get_file(file); in do_mmap_pgoff()
1380 vma->vm_region = pregion; in do_mmap_pgoff()
1383 vma->vm_start = start; in do_mmap_pgoff()
1384 vma->vm_end = start + len; in do_mmap_pgoff()
1388 vma->vm_flags |= VM_MAPPED_COPY; in do_mmap_pgoff()
1391 ret = do_mmap_shared_file(vma); in do_mmap_pgoff()
1393 vma->vm_region = NULL; in do_mmap_pgoff()
1394 vma->vm_start = 0; in do_mmap_pgoff()
1395 vma->vm_end = 0; in do_mmap_pgoff()
1429 vma->vm_start = region->vm_start = addr; in do_mmap_pgoff()
1430 vma->vm_end = region->vm_end = addr + len; in do_mmap_pgoff()
1435 vma->vm_region = region; in do_mmap_pgoff()
1440 if (file && vma->vm_flags & VM_SHARED) in do_mmap_pgoff()
1441 ret = do_mmap_shared_file(vma); in do_mmap_pgoff()
1443 ret = do_mmap_private(vma, region, len, capabilities); in do_mmap_pgoff()
1449 if (!vma->vm_file && !(flags & MAP_UNINITIALIZED)) in do_mmap_pgoff()
1454 result = vma->vm_start; in do_mmap_pgoff()
1459 add_vma_to_mm(current->mm, vma); in do_mmap_pgoff()
1463 if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) { in do_mmap_pgoff()
1479 if (vma->vm_file) in do_mmap_pgoff()
1480 fput(vma->vm_file); in do_mmap_pgoff()
1481 kmem_cache_free(vm_area_cachep, vma); in do_mmap_pgoff()
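The pregion lines (1380 onwards) are the nommu-specific heart of do_mmap_pgoff(): a MAP_SHARED request whose file and page range match an existing vm_region allocates no new storage, it just takes another reference on that region. Only when no region matches does the code fall through to lines 1429-1443 and back a fresh region via do_mmap_shared_file() or do_mmap_private(). Roughly, for the sharing case (candidate matching, locking, and error unwinding elided):

        pregion->vm_usage++;            /* taken when the candidate matched */
        vma->vm_region = pregion;

        /* place the new VMA at the right offset inside the shared region */
        start = pregion->vm_start;
        start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
        vma->vm_start = start;
        vma->vm_end = start + len;

        if (pregion->vm_flags & VM_MAPPED_COPY)
                vma->vm_flags |= VM_MAPPED_COPY; /* sharing a private copy */
        else {
                ret = do_mmap_shared_file(vma);
                if (ret < 0) {
                        /* failed: drop the shared region and error out */
                        vma->vm_region = NULL;
                        vma->vm_start = 0;
                        vma->vm_end = 0;
                        pregion->vm_usage--;
                }
        }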
1559 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, in split_vma() argument
1570 if (vma->vm_file) in split_vma()
1587 *new = *vma; in split_vma()
1588 *region = *vma->vm_region; in split_vma()
1591 npages = (addr - vma->vm_start) >> PAGE_SHIFT; in split_vma()
1603 delete_vma_from_mm(vma); in split_vma()
1605 delete_nommu_region(vma->vm_region); in split_vma()
1607 vma->vm_region->vm_start = vma->vm_start = addr; in split_vma()
1608 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages; in split_vma()
1610 vma->vm_region->vm_end = vma->vm_end = addr; in split_vma()
1611 vma->vm_region->vm_top = addr; in split_vma()
1613 add_nommu_region(vma->vm_region); in split_vma()
1616 add_vma_to_mm(mm, vma); in split_vma()
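split_vma() on nommu must clone both the VMA and its backing vm_region, pull the original out of every structure, trim the two halves, and reinsert both. Condensed (allocation-failure paths elided):

        *new = *vma;
        *region = *vma->vm_region;
        new->vm_region = region;

        npages = (addr - vma->vm_start) >> PAGE_SHIFT;

        if (new_below) {
                region->vm_top = region->vm_end = new->vm_end = addr;
        } else {
                region->vm_start = new->vm_start = addr;
                region->vm_pgoff = new->vm_pgoff += npages;
        }

        /* rewire the trees: both halves leave and re-enter the region
         * tree and the mm's VMA structures with their new bounds */
        delete_vma_from_mm(vma);
        down_write(&nommu_region_sem);
        delete_nommu_region(vma->vm_region);
        if (new_below) {
                vma->vm_region->vm_start = vma->vm_start = addr;
                vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
        } else {
                vma->vm_region->vm_end = vma->vm_end = addr;
                vma->vm_region->vm_top = addr;
        }
        add_nommu_region(vma->vm_region);
        add_nommu_region(new->vm_region);
        up_write(&nommu_region_sem);
        add_vma_to_mm(mm, vma);
        add_vma_to_mm(mm, new);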
1626 struct vm_area_struct *vma, in shrink_vma() argument
1635 delete_vma_from_mm(vma); in shrink_vma()
1636 if (from > vma->vm_start) in shrink_vma()
1637 vma->vm_end = from; in shrink_vma()
1639 vma->vm_start = to; in shrink_vma()
1640 add_vma_to_mm(mm, vma); in shrink_vma()
1643 region = vma->vm_region; in shrink_vma()
1668 struct vm_area_struct *vma; in do_munmap() local
1681 vma = find_vma(mm, start); in do_munmap()
1682 if (!vma) { in do_munmap()
1696 if (vma->vm_file) { in do_munmap()
1698 if (start > vma->vm_start) { in do_munmap()
1702 if (end == vma->vm_end) in do_munmap()
1704 vma = vma->vm_next; in do_munmap()
1705 } while (vma); in do_munmap()
1710 if (start == vma->vm_start && end == vma->vm_end) in do_munmap()
1712 if (start < vma->vm_start || end > vma->vm_end) { in do_munmap()
1720 if (end != vma->vm_end && end & ~PAGE_MASK) { in do_munmap()
1724 if (start != vma->vm_start && end != vma->vm_end) { in do_munmap()
1725 ret = split_vma(mm, vma, start, 1); in do_munmap()
1731 return shrink_vma(mm, vma, start, end); in do_munmap()
1735 delete_vma_from_mm(vma); in do_munmap()
1736 delete_vma(mm, vma); in do_munmap()
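The do_munmap() hits trace its three outcomes: erase a whole VMA, split then shrink, or refuse. File-backed VMAs may not be split on nommu (their region may be shared), so the file branch only accepts whole-VMA unmaps. Condensed:

        vma = find_vma(mm, start);
        if (!vma)
                return -EINVAL;

        if (vma->vm_file) {
                /* file-backed: only whole-VMA unmaps are allowed */
                do {
                        if (start > vma->vm_start)
                                return -EINVAL;
                        if (end == vma->vm_end)
                                goto erase_whole_vma;
                        vma = vma->vm_next;
                } while (vma);
                return -EINVAL;
        }

        /* anonymous: the chunk must be a subset of the VMA found */
        if (start == vma->vm_start && end == vma->vm_end)
                goto erase_whole_vma;
        if (start < vma->vm_start || end > vma->vm_end)
                return -EINVAL;
        if (end != vma->vm_end && end & ~PAGE_MASK)
                return -EINVAL;
        if (start != vma->vm_start && end != vma->vm_end) {
                ret = split_vma(mm, vma, start, 1); /* detach the lower part */
                if (ret < 0)
                        return ret;
        }
        return shrink_vma(mm, vma, start, end);

erase_whole_vma:
        delete_vma_from_mm(vma);
        delete_vma(mm, vma);
        return 0;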
1764 struct vm_area_struct *vma; in exit_mmap() local
1773 while ((vma = mm->mmap)) { in exit_mmap()
1774 mm->mmap = vma->vm_next; in exit_mmap()
1775 delete_vma_from_mm(vma); in exit_mmap()
1776 delete_vma(mm, vma); in exit_mmap()
1802 struct vm_area_struct *vma; in do_mremap() local
1816 vma = find_vma_exact(current->mm, addr, old_len); in do_mremap()
1817 if (!vma) in do_mremap()
1820 if (vma->vm_end != vma->vm_start + old_len) in do_mremap()
1823 if (vma->vm_flags & VM_MAYSHARE) in do_mremap()
1826 if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start) in do_mremap()
1830 vma->vm_end = vma->vm_start + new_len; in do_mremap()
1831 return vma->vm_start; in do_mremap()
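do_mremap() shows how constrained remapping is without an MMU: the mapping cannot move and cannot be shared, and it may only grow into slack space its vm_region already owns (space left over from the original allocation). Nearly in full:

static unsigned long do_mremap(unsigned long addr,
                        unsigned long old_len, unsigned long new_len,
                        unsigned long flags, unsigned long new_addr)
{
        struct vm_area_struct *vma;

        old_len = PAGE_ALIGN(old_len);
        new_len = PAGE_ALIGN(new_len);
        if (old_len == 0 || new_len == 0)
                return (unsigned long) -EINVAL;

        if (addr & ~PAGE_MASK)
                return -EINVAL;

        if (flags & MREMAP_FIXED && new_addr != addr)
                return (unsigned long) -EINVAL;

        vma = find_vma_exact(current->mm, addr, old_len);
        if (!vma)
                return (unsigned long) -EINVAL;

        if (vma->vm_end != vma->vm_start + old_len)
                return (unsigned long) -EFAULT;

        if (vma->vm_flags & VM_MAYSHARE)
                return (unsigned long) -EPERM;

        if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
                return (unsigned long) -ENOMEM;

        /* all checks complete - resize in place */
        vma->vm_end = vma->vm_start + new_len;
        return vma->vm_start;
}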
1846 struct page *follow_page_mask(struct vm_area_struct *vma, in follow_page_mask() argument
1854 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range() argument
1860 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; in remap_pfn_range()
1865 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) in vm_iomap_memory() argument
1868 unsigned long vm_len = vma->vm_end - vma->vm_start; in vm_iomap_memory()
1870 pfn += vma->vm_pgoff; in vm_iomap_memory()
1871 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); in vm_iomap_memory()
1875 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, in remap_vmalloc_range() argument
1878 unsigned int size = vma->vm_end - vma->vm_start; in remap_vmalloc_range()
1880 if (!(vma->vm_flags & VM_USERMAP)) in remap_vmalloc_range()
1883 vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT)); in remap_vmalloc_range()
1884 vma->vm_end = vma->vm_start + size; in remap_vmalloc_range()
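remap_vmalloc_range() closes the loop opened by vmalloc_user() at line 302: only buffers tagged VM_USERMAP may be handed to userspace, and "remapping" is just pointing the VMA straight at the vmalloc buffer. In full, roughly:

int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
                        unsigned long pgoff)
{
        unsigned int size = vma->vm_end - vma->vm_start;

        if (!(vma->vm_flags & VM_USERMAP))
                return -EINVAL;

        /* no page tables to fill: just retarget the VMA */
        vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
        vma->vm_end = vma->vm_start + size;

        return 0;
}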
1997 int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in filemap_fault() argument
2004 void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf) in filemap_map_pages() argument
2013 struct vm_area_struct *vma; in __access_remote_vm() local
2018 vma = find_vma(mm, addr); in __access_remote_vm()
2019 if (vma) { in __access_remote_vm()
2021 if (addr + len >= vma->vm_end) in __access_remote_vm()
2022 len = vma->vm_end - addr; in __access_remote_vm()
2025 if (write && vma->vm_flags & VM_MAYWRITE) in __access_remote_vm()
2026 copy_to_user_page(vma, NULL, addr, in __access_remote_vm()
2028 else if (!write && vma->vm_flags & VM_MAYREAD) in __access_remote_vm()
2029 copy_from_user_page(vma, NULL, addr, in __access_remote_vm()
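__access_remote_vm(), the backend of ptrace peeking and poking, needs no page-table walk either: the remote address is directly addressable, so after clamping the length to the VMA the access is a cache-coherent copy via the copy_*_user_page() helpers. Sketch:

        down_read(&mm->mmap_sem);

        /* the access must start within one of the target's mappings */
        vma = find_vma(mm, addr);
        if (vma) {
                /* don't overrun the end of this mapping */
                if (addr + len >= vma->vm_end)
                        len = vma->vm_end - addr;

                /* only copy in the directions the mapping permits */
                if (write && vma->vm_flags & VM_MAYWRITE)
                        copy_to_user_page(vma, NULL, addr,
                                          (void *) addr, buf, len);
                else if (!write && vma->vm_flags & VM_MAYREAD)
                        copy_from_user_page(vma, NULL, addr,
                                            buf, (void *) addr, len);
                else
                        len = 0;
        } else {
                len = 0;
        }

        up_read(&mm->mmap_sem);
        return len;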
2093 struct vm_area_struct *vma; in nommu_shrink_inode_mappings() local
2105 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) { in nommu_shrink_inode_mappings()
2108 if (vma->vm_flags & VM_SHARED) { in nommu_shrink_inode_mappings()
2121 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) { in nommu_shrink_inode_mappings()
2122 if (!(vma->vm_flags & VM_SHARED)) in nommu_shrink_inode_mappings()
2125 region = vma->vm_region; in nommu_shrink_inode_mappings()
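Finally, nommu_shrink_inode_mappings() is the truncate helper: shared nommu mappings alias the page cache directly, so a file may only shrink if no shared mapping still covers the doomed range, after which each shared region's bookkeeping is trimmed. Condensed (the nommu_region_sem and i_mmap locking elided):

        pgoff_t low, high;
        size_t r_size, r_top;

        /* the page range the truncate will destroy */
        low = newsize >> PAGE_SHIFT;
        high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        /* any shared mapping still covering the dead zone blocks the shrink */
        vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
                if (vma->vm_flags & VM_SHARED)
                        return -ETXTBSY; /* not quite the right error, but close */
        }

        /* trim every shared region's accounting back to the new size */
        vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
                if (!(vma->vm_flags & VM_SHARED))
                        continue;

                region = vma->vm_region;
                r_size = region->vm_top - region->vm_start;
                r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;

                if (r_top > newsize) {
                        region->vm_top -= r_top - newsize;
                        if (region->vm_end > region->vm_top)
                                region->vm_end = region->vm_top;
                }
        }

        return 0;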