Lines Matching refs:vma in fs/proc/task_mmu.c
127 m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma) in m_next_vma() argument
129 if (vma == priv->tail_vma) in m_next_vma()
131 return vma->vm_next ?: priv->tail_vma; in m_next_vma()
134 static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma) in m_cache_vma() argument
137 m->version = m_next_vma(m->private, vma) ? vma->vm_start : -1UL; in m_cache_vma()
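
m_next_vma() walks the (pre-maple-tree) singly linked VMA list and, after the last real VMA, yields priv->tail_vma, the gate VMA, exactly once before terminating; `vm_next ?: tail_vma` is the GNU conditional with the middle operand omitted. m_cache_vma() records the just-shown VMA's start in m->version while more VMAs remain (else -1UL) so a later read can resume there. A minimal userspace model of the sentinel walk, with hypothetical node/iter types standing in for vm_area_struct and proc_maps_private:

    #include <stdio.h>

    /* Hypothetical stand-ins for vm_area_struct and proc_maps_private. */
    struct node { int val; struct node *next; };
    struct iter { struct node *tail; };   /* sentinel shown once at the end */

    /* Mirrors m_next_vma(): stop after the sentinel; after the last real
     * node, emit the sentinel exactly once. */
    static struct node *next_node(struct iter *it, struct node *n)
    {
        if (n == it->tail)
            return NULL;
        return n->next ? n->next : it->tail;  /* the kernel spells this ?: */
    }

    int main(void)
    {
        struct node tail = { 99, NULL };   /* plays the gate VMA */
        struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct iter it = { &tail };
        struct node *n;

        for (n = &a; n; n = next_node(&it, n))
            printf("%d\n", n->val);        /* prints 1 2 3 99 */
        return 0;
    }
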
145 struct vm_area_struct *vma; in m_start() local
165 vma = find_vma(mm, last_addr); in m_start()
166 if (vma && (vma = m_next_vma(priv, vma))) in m_start()
167 return vma; in m_start()
172 for (vma = mm->mmap; pos; pos--) { in m_start()
173 m->version = vma->vm_start; in m_start()
174 vma = vma->vm_next; in m_start()
176 return vma; in m_start()
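
m_start() is the seq_file resume path for /proc/<pid>/maps and friends: when m->version holds the address cached by m_cache_vma(), find_vma() re-locates the last-shown VMA (the first one with vm_end above that address) and m_next_vma() steps past it; with no cached address, the fallback loop replays pos entries from mm->mmap. Userspace exercises this whenever the file is read in pieces: roughly, each read(2) that exhausts the seq_file's buffered output re-enters m_start(). A small Linux-only demo forcing short reads:

    #include <stdio.h>
    #include <unistd.h>
    #include <fcntl.h>

    /* Read /proc/self/maps 64 bytes at a time; whenever the seq_file
     * buffer drains, the kernel re-enters m_start(), which resumes at
     * the address cached in m->version by m_cache_vma(). */
    int main(void)
    {
        char buf[64];
        ssize_t n;
        int fd = open("/proc/self/maps", O_RDONLY);

        if (fd < 0) { perror("open"); return 1; }
        while ((n = read(fd, buf, sizeof(buf))) > 0)
            fwrite(buf, 1, (size_t)n, stdout);
        close(fd);
        return 0;
    }
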
250 struct vm_area_struct *vma, bool is_pid) in pid_of_stack() argument
259 task = task_of_stack(task, vma, is_pid); in pid_of_stack()
269 show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) in show_map_vma() argument
271 struct mm_struct *mm = vma->vm_mm; in show_map_vma()
272 struct file *file = vma->vm_file; in show_map_vma()
274 vm_flags_t flags = vma->vm_flags; in show_map_vma()
282 struct inode *inode = file_inode(vma->vm_file); in show_map_vma()
285 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; in show_map_vma()
289 start = vma->vm_start; in show_map_vma()
290 if (stack_guard_page_start(vma, start)) in show_map_vma()
292 end = vma->vm_end; in show_map_vma()
293 if (stack_guard_page_end(vma, end)) in show_map_vma()
317 if (vma->vm_ops && vma->vm_ops->name) { in show_map_vma()
318 name = vma->vm_ops->name(vma); in show_map_vma()
323 name = arch_vma_name(vma); in show_map_vma()
332 if (vma->vm_start <= mm->brk && in show_map_vma()
333 vma->vm_end >= mm->start_brk) { in show_map_vma()
338 tid = pid_of_stack(priv, vma, is_pid); in show_map_vma()
344 if (!is_pid || (vma->vm_start <= mm->start_stack && in show_map_vma()
345 vma->vm_end >= mm->start_stack)) { in show_map_vma()
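
show_map_vma() produces one maps line per VMA: start-end (trimmed by the stack-guard-page checks), permissions from vm_flags, file offset, device, inode, and then a name, preferring vm_ops->name(), then arch_vma_name(), then the [heap] and [stack]/[stack:tid] annotations computed against mm->brk/start_brk and mm->start_stack (with pid_of_stack() supplying the thread id). A sketch that parses those fixed fields back out in userspace:

    #include <stdio.h>

    /* Parse the fields show_map_vma() prints for each VMA:
     * start-end perms offset maj:min inode [path or annotation] */
    int main(void)
    {
        FILE *f = fopen("/proc/self/maps", "r");
        unsigned long start, end, off, ino;
        unsigned maj, min;
        char perms[5], path[4096], line[4352];

        if (!f) { perror("fopen"); return 1; }
        while (fgets(line, sizeof(line), f)) {
            path[0] = '\0';
            if (sscanf(line, "%lx-%lx %4s %lx %x:%x %lu %4095[^\n]",
                       &start, &end, perms, &off, &maj, &min, &ino, path) >= 7)
                printf("%12lx..%-12lx %s %s\n", start, end, perms, path);
        }
        fclose(f);
        return 0;
    }
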
487 struct vm_area_struct *vma = walk->vma; in smaps_pte_entry() local
491 page = vm_normal_page(vma, addr, *pte); in smaps_pte_entry()
511 struct vm_area_struct *vma = walk->vma; in smaps_pmd_entry() local
515 page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP); in smaps_pmd_entry()
532 struct vm_area_struct *vma = walk->vma; in smaps_pte_range() local
536 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { in smaps_pte_range()
549 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in smaps_pte_range()
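
smaps_pte_range() splits each pmd two ways: if pmd_trans_huge_lock() grants the huge-pmd lock (the == 1 return convention of this kernel era), smaps_pmd_entry() accounts one pmd-sized page resolved via follow_trans_huge_pmd(); otherwise pte_offset_map_lock() maps the pte page and smaps_pte_entry() accounts each base page via vm_normal_page(). A userspace model of that two-level accounting, with x86-64's 4 KiB/2 MiB sizes hardcoded and a hypothetical is_huge() predicate in place of the real page-table checks:

    #include <stdio.h>
    #include <stdbool.h>

    #define PAGE_SZ (4UL << 10)   /* base page, like PAGE_SIZE */
    #define PMD_SZ  (2UL << 20)   /* pmd-mapped huge page */

    /* Model of smaps_pte_range(): account a whole huge pmd at once, or
     * walk the base pages under the (modelled-away) pte lock. */
    static unsigned long account(unsigned long start, unsigned long end,
                                 bool (*is_huge)(unsigned long))
    {
        unsigned long a, p, rss = 0;

        for (a = start; a < end; a += PMD_SZ) {
            if (is_huge(a)) {                /* pmd_trans_huge_lock() path */
                rss += PMD_SZ;               /* one smaps_pmd_entry() call */
            } else {                         /* pte_offset_map_lock() path */
                for (p = a; p < a + PMD_SZ && p < end; p += PAGE_SZ)
                    rss += PAGE_SZ;          /* one smaps_pte_entry() each */
            }
        }
        return rss;
    }

    static bool first_pmd_is_huge(unsigned long a) { return a < PMD_SZ; }

    int main(void)
    {
        printf("rss: %lu kB\n", account(0, 4 * PMD_SZ, first_pmd_is_huge) >> 10);
        return 0;
    }
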
557 static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma) in show_smap_vma_flags() argument
605 if (vma->vm_flags & (1UL << i)) { in show_smap_vma_flags()
615 struct vm_area_struct *vma = v; in show_smap() local
619 .mm = vma->vm_mm, in show_smap()
625 walk_page_vma(vma, &smaps_walk); in show_smap()
627 show_map_vma(m, vma, is_pid); in show_smap()
644 (vma->vm_end - vma->vm_start) >> 10, in show_smap()
655 vma_kernel_pagesize(vma) >> 10, in show_smap()
656 vma_mmu_pagesize(vma) >> 10, in show_smap()
657 (vma->vm_flags & VM_LOCKED) ? in show_smap()
660 show_smap_vma_flags(m, vma); in show_smap()
661 m_cache_vma(m, vma); in show_smap()
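
show_smap() reuses show_map_vma() for the header line, then prints the totals accumulated by the smaps page walk: the VMA size ((vm_end - vm_start) >> 10), the per-page counters, vma_kernel_pagesize()/vma_mmu_pagesize(), the VM_LOCKED figure, and the flag letters from show_smap_vma_flags(), finishing with m_cache_vma() so the next read resumes correctly. Summing two of the printed fields from userspace:

    #include <stdio.h>

    /* Total the Rss:/Pss: lines that show_smap() prints per VMA. */
    int main(void)
    {
        FILE *f = fopen("/proc/self/smaps", "r");
        char line[512];
        unsigned long kb, rss = 0, pss = 0;

        if (!f) { perror("fopen"); return 1; }
        while (fgets(line, sizeof(line), f)) {
            if (sscanf(line, "Rss: %lu kB", &kb) == 1)
                rss += kb;
            else if (sscanf(line, "Pss: %lu kB", &kb) == 1)
                pss += kb;
        }
        fclose(f);
        printf("Rss total: %lu kB, Pss total: %lu kB\n", rss, pss);
        return 0;
    }
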
744 static inline void clear_soft_dirty(struct vm_area_struct *vma, in clear_soft_dirty() argument
762 set_pte_at(vma->vm_mm, addr, pte, ptent); in clear_soft_dirty()
765 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, in clear_soft_dirty_pmd() argument
773 if (vma->vm_flags & VM_SOFTDIRTY) in clear_soft_dirty_pmd()
774 vma->vm_flags &= ~VM_SOFTDIRTY; in clear_soft_dirty_pmd()
776 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); in clear_soft_dirty_pmd()
781 static inline void clear_soft_dirty(struct vm_area_struct *vma, in clear_soft_dirty() argument
786 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, in clear_soft_dirty_pmd() argument
796 struct vm_area_struct *vma = walk->vma; in clear_refs_pte_range() local
801 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { in clear_refs_pte_range()
803 clear_soft_dirty_pmd(vma, addr, pmd); in clear_refs_pte_range()
810 pmdp_test_and_clear_young(vma, addr, pmd); in clear_refs_pte_range()
820 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in clear_refs_pte_range()
825 clear_soft_dirty(vma, addr, pte); in clear_refs_pte_range()
832 page = vm_normal_page(vma, addr, ptent); in clear_refs_pte_range()
837 ptep_test_and_clear_young(vma, addr, pte); in clear_refs_pte_range()
849 struct vm_area_struct *vma = walk->vma; in clear_refs_test_walk() local
851 if (vma->vm_flags & VM_PFNMAP) in clear_refs_test_walk()
860 if (cp->type == CLEAR_REFS_ANON && vma->vm_file) in clear_refs_test_walk()
862 if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file) in clear_refs_test_walk()
873 struct vm_area_struct *vma; in clear_refs_write() local
925 for (vma = mm->mmap; vma; vma = vma->vm_next) { in clear_refs_write()
926 if (!(vma->vm_flags & VM_SOFTDIRTY)) in clear_refs_write()
930 for (vma = mm->mmap; vma; vma = vma->vm_next) { in clear_refs_write()
931 vma->vm_flags &= ~VM_SOFTDIRTY; in clear_refs_write()
932 vma_set_page_prot(vma); in clear_refs_write()
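
clear_refs_write() dispatches on the digit written to /proc/<pid>/clear_refs; clear_refs_test_walk() filters VMAs per type (VM_PFNMAP always skipped, CLEAR_REFS_ANON skips file-backed, CLEAR_REFS_MAPPED skips anonymous), and for CLEAR_REFS_SOFT_DIRTY the loops above strip VM_SOFTDIRTY and refresh page protections via vma_set_page_prot() while clear_soft_dirty()/clear_soft_dirty_pmd() scrub the per-entry bits. Arming the tracking from userspace:

    #include <stdio.h>
    #include <unistd.h>
    #include <fcntl.h>

    /* Write "4" (CLEAR_REFS_SOFT_DIRTY) so clear_refs_write() clears
     * VM_SOFTDIRTY on every VMA and soft-dirty on every pte/pmd; pages
     * written after this show up as soft-dirty in /proc/self/pagemap. */
    int main(void)
    {
        int fd = open("/proc/self/clear_refs", O_WRONLY);

        if (fd < 0) { perror("open"); return 1; }
        if (write(fd, "4", 1) != 1) { perror("write"); close(fd); return 1; }
        close(fd);
        puts("soft-dirty bits cleared");
        return 0;
    }
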
1013 struct vm_area_struct *vma = find_vma(walk->mm, addr); in pagemap_pte_hole() local
1018 if (vma) in pagemap_pte_hole()
1019 hole_end = min(end, vma->vm_start); in pagemap_pte_hole()
1029 if (!vma) in pagemap_pte_hole()
1033 if (vma->vm_flags & VM_SOFTDIRTY) in pagemap_pte_hole()
1035 for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) { in pagemap_pte_hole()
1046 struct vm_area_struct *vma, unsigned long addr, pte_t pte) in pte_to_pagemap_entry() argument
1055 page = vm_normal_page(vma, addr, pte); in pte_to_pagemap_entry()
1069 if (vma->vm_flags & VM_SOFTDIRTY) in pte_to_pagemap_entry()
1077 if ((vma->vm_flags & VM_SOFTDIRTY)) in pte_to_pagemap_entry()
1108 struct vm_area_struct *vma = walk->vma; in pagemap_pte_range() local
1114 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { in pagemap_pte_range()
1117 if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd)) in pagemap_pte_range()
1148 pte_to_pagemap_entry(&pme, pm, vma, addr, *pte); in pagemap_pte_range()
1179 struct vm_area_struct *vma = walk->vma; in pagemap_hugetlb_range() local
1184 if (vma->vm_flags & VM_SOFTDIRTY) in pagemap_hugetlb_range()
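
pte_to_pagemap_entry() and pagemap_pte_range() encode one 64-bit word per virtual page, with pagemap_pte_hole() filling unmapped gaps and the VM_SOFTDIRTY tests making every page of a soft-dirty VMA report as soft-dirty. The documented layout (Documentation/vm/pagemap.txt) puts the PFN in bits 0-54, soft-dirty in bit 55, swapped in bit 62 and present in bit 63. Decoding the entry for one address (newer kernels zero the PFN for unprivileged readers):

    #include <stdio.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <fcntl.h>

    /* Fetch the pagemap word for one virtual address: each page has an
     * 8-byte entry at offset (vaddr / pagesize) * 8, produced by
     * pte_to_pagemap_entry() on the kernel side. */
    int main(void)
    {
        long psz = sysconf(_SC_PAGESIZE);
        char probe = 0;                      /* some mapped page to query */
        uint64_t ent;
        int fd = open("/proc/self/pagemap", O_RDONLY);
        off_t off = (off_t)((uintptr_t)&probe / (uintptr_t)psz) * 8;

        if (fd < 0) { perror("open"); return 1; }
        if (pread(fd, &ent, sizeof(ent), off) != sizeof(ent)) {
            perror("pread"); close(fd); return 1;
        }
        printf("present=%d swapped=%d soft-dirty=%d pfn=0x%llx\n",
               (int)(ent >> 63 & 1), (int)(ent >> 62 & 1),
               (int)(ent >> 55 & 1),
               (unsigned long long)(ent & ((1ULL << 55) - 1)));
        close(fd);
        return 0;
    }
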
1389 static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, in can_gather_numa_stats() argument
1398 page = vm_normal_page(vma, addr, pte); in can_gather_numa_stats()
1416 struct vm_area_struct *vma = walk->vma; in gather_pte_stats() local
1421 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { in gather_pte_stats()
1425 page = can_gather_numa_stats(huge_pte, vma, addr); in gather_pte_stats()
1437 struct page *page = can_gather_numa_stats(*pte, vma, addr); in gather_pte_stats()
1480 struct vm_area_struct *vma = v; in show_numa_map() local
1482 struct file *file = vma->vm_file; in show_numa_map()
1483 struct mm_struct *mm = vma->vm_mm; in show_numa_map()
1500 pol = __get_vma_policy(vma, vma->vm_start); in show_numa_map()
1508 seq_printf(m, "%08lx %s", vma->vm_start, buffer); in show_numa_map()
1513 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { in show_numa_map()
1516 pid_t tid = pid_of_stack(proc_priv, vma, is_pid); in show_numa_map()
1522 if (!is_pid || (vma->vm_start <= mm->start_stack && in show_numa_map()
1523 vma->vm_end >= mm->start_stack)) in show_numa_map()
1530 if (is_vm_hugetlb_page(vma)) in show_numa_map()
1534 walk_page_vma(vma, &walk); in show_numa_map()
1554 if (md->active < md->pages && !is_vm_hugetlb_page(vma)) in show_numa_map()
1564 seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10); in show_numa_map()
1567 m_cache_vma(m, vma); in show_numa_map()
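
show_numa_map() emits one /proc/<pid>/numa_maps line per VMA: the start address, the mempolicy string from __get_vma_policy(), a file/heap/stack annotation mirroring the maps file, then the per-node counts gathered by gather_pte_stats() via can_gather_numa_stats(), plus kernelpagesize_kB for hugetlb mappings, again ending with m_cache_vma(). Dumping it from userspace (the file exists only with CONFIG_NUMA):

    #include <stdio.h>

    /* Print the per-VMA policy lines show_numa_map() produces, one per
     * mapping: "start policy [annotations] N<node>=<pages> ...". */
    int main(void)
    {
        FILE *f = fopen("/proc/self/numa_maps", "r");
        char line[1024];

        if (!f) { perror("fopen (needs CONFIG_NUMA)"); return 1; }
        while (fgets(line, sizeof(line), f))
            fputs(line, stdout);
        fclose(f);
        return 0;
    }
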