Lines matching refs:mm (each entry: source line number, the matching line, and the enclosing function)
22 void task_mem(struct seq_file *m, struct mm_struct *mm) in task_mem() argument
34 hiwater_vm = total_vm = mm->total_vm; in task_mem()
35 if (hiwater_vm < mm->hiwater_vm) in task_mem()
36 hiwater_vm = mm->hiwater_vm; in task_mem()
37 hiwater_rss = total_rss = get_mm_rss(mm); in task_mem()
38 if (hiwater_rss < mm->hiwater_rss) in task_mem()
39 hiwater_rss = mm->hiwater_rss; in task_mem()
41 data = mm->total_vm - mm->shared_vm - mm->stack_vm; in task_mem()
42 text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10; in task_mem()
43 lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text; in task_mem()
44 swap = get_mm_counter(mm, MM_SWAPENTS); in task_mem()
45 ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes); in task_mem()
46 pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm); in task_mem()
63 mm->locked_vm << (PAGE_SHIFT-10), in task_mem()
64 mm->pinned_vm << (PAGE_SHIFT-10), in task_mem()
68 mm->stack_vm << (PAGE_SHIFT-10), text, lib, in task_mem()
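task_mem() reports everything in KiB: counters kept in pages are converted with value << (PAGE_SHIFT - 10), and byte-granular ranges such as the text segment use >> 10, as on line 42 above. A minimal user-space sketch of that arithmetic, assuming the common 4 KiB page size (PAGE_SHIFT == 12); the counter values are invented for illustration:

    #include <stdio.h>

    #define PAGE_SHIFT 12                        /* assumed: 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
            unsigned long total_vm = 12345;      /* hypothetical counter, in pages */
            unsigned long start_code = 0x400000, end_code = 0x401234;

            /* pages -> KiB: shift left by PAGE_SHIFT - 10 (multiply by 4 here) */
            printf("VmSize: %8lu kB\n", total_vm << (PAGE_SHIFT - 10));

            /* bytes -> KiB for the text segment */
            printf("VmExe:  %8lu kB\n",
                   (PAGE_ALIGN(end_code) - (start_code & PAGE_MASK)) >> 10);
            return 0;
    }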
74 unsigned long task_vsize(struct mm_struct *mm) in task_vsize() argument
76 return PAGE_SIZE * mm->total_vm; in task_vsize()
79 unsigned long task_statm(struct mm_struct *mm, in task_statm() argument
83 *shared = get_mm_counter(mm, MM_FILEPAGES); in task_statm()
84 *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) in task_statm()
86 *data = mm->total_vm - mm->shared_vm; in task_statm()
87 *resident = *shared + get_mm_counter(mm, MM_ANONPAGES); in task_statm()
88 return mm->total_vm; in task_statm()
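task_statm() fills the /proc/<pid>/statm fields from the same counters: size is total_vm, shared is the file-backed RSS, resident is file-backed plus anonymous RSS, and data is total_vm minus shared_vm. A small sketch of those relationships with invented page counts; in the kernel the inputs come from get_mm_counter() and the mm_struct fields shown above:

    #include <stdio.h>

    /* Hypothetical per-mm counters, in pages (stand-ins for get_mm_counter()). */
    struct mm_counters {
            unsigned long total_vm, shared_vm;
            unsigned long file_pages, anon_pages;
    };

    int main(void)
    {
            struct mm_counters mm = { 5000, 1200, 800, 1500 };
            unsigned long shared   = mm.file_pages;               /* MM_FILEPAGES   */
            unsigned long resident = shared + mm.anon_pages;      /* + MM_ANONPAGES */
            unsigned long data     = mm.total_vm - mm.shared_vm;

            /* statm reports pages: size resident shared text lib data dt */
            printf("%lu %lu %lu - - %lu 0\n", mm.total_vm, resident, shared, data);
            return 0;
    }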
119 struct mm_struct *mm = priv->mm; in vma_stop() local
122 up_read(&mm->mmap_sem); in vma_stop()
123 mmput(mm); in vma_stop()
144 struct mm_struct *mm; in m_start() local
156 mm = priv->mm; in m_start()
157 if (!mm || !atomic_inc_not_zero(&mm->mm_users)) in m_start()
160 down_read(&mm->mmap_sem); in m_start()
162 priv->tail_vma = get_gate_vma(mm); in m_start()
165 vma = find_vma(mm, last_addr); in m_start()
171 if (pos < mm->map_count) { in m_start()
172 for (vma = mm->mmap; pos; pos--) { in m_start()
180 if (pos == mm->map_count && priv->tail_vma) in m_start()
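m_start() and vma_stop() bracket each read of the seq_file: the iterator only proceeds if it can pin the address space with atomic_inc_not_zero(&mm->mm_users), then holds mmap_sem for reading while it positions itself with find_vma() or a linear walk of mm->mmap, and vma_stop() releases both again. A condensed sketch of that lifecycle, not independently buildable and with the last_addr/tail_vma handling and error paths trimmed; names follow the kernel code above:

    static void *example_start(struct proc_maps_private *priv, loff_t pos)
    {
            struct mm_struct *mm = priv->mm;
            struct vm_area_struct *vma;

            /* mm_count was taken at open time; mm_users may already have
             * dropped to zero if the task exited, so only proceed if we
             * can still pin the address space. */
            if (!mm || !atomic_inc_not_zero(&mm->mm_users))
                    return NULL;

            down_read(&mm->mmap_sem);        /* VMA list is stable from here */

            for (vma = mm->mmap; vma && pos; vma = vma->vm_next)
                    pos--;                   /* linear walk to entry 'pos' */
            return vma;
    }

    static void example_stop(struct proc_maps_private *priv)
    {
            up_read(&priv->mm->mmap_sem);
            mmput(priv->mm);                 /* drop the mm_users reference */
    }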
220 priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); in proc_maps_open()
221 if (IS_ERR(priv->mm)) { in proc_maps_open()
222 int err = PTR_ERR(priv->mm); in proc_maps_open()
236 if (priv->mm) in proc_map_release()
237 mmdrop(priv->mm); in proc_map_release()
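Two different reference counts keep this safe across the open/read/release lifetime: proc_maps_open() pins the mm_struct itself via proc_mem_open() (paired with mmdrop() in proc_map_release()), while each read additionally pins the address space via mm_users (paired with mmput() in vma_stop(), as above). A hedged sketch of the open/release pair, omitting the unrelated seq_file setup the real functions do:

    static int example_maps_open(struct inode *inode, struct proc_maps_private *priv)
    {
            /* Takes a reference on mm->mm_count, so the struct stays around
             * even after the task exits; it does NOT pin the page tables. */
            priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
            if (IS_ERR(priv->mm))
                    return PTR_ERR(priv->mm);
            return 0;
    }

    static void example_maps_release(struct proc_maps_private *priv)
    {
            if (priv->mm)
                    mmdrop(priv->mm);        /* pairs with proc_mem_open() */
    }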
271 struct mm_struct *mm = vma->vm_mm; in show_map_vma() local
327 if (!mm) { in show_map_vma()
332 if (vma->vm_start <= mm->brk && in show_map_vma()
333 vma->vm_end >= mm->start_brk) { in show_map_vma()
344 if (!is_pid || (vma->vm_start <= mm->start_stack && in show_map_vma()
345 vma->vm_end >= mm->start_stack)) { in show_map_vma()
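show_map_vma() decides the [heap] and [stack] annotations with a simple interval-overlap test against the brk and start_stack markers kept in the mm_struct. A small user-space sketch of that test with made-up addresses; the real code also handles the !mm (special mapping) case and per-thread stacks:

    #include <stdio.h>

    struct range { unsigned long start, end; };    /* stand-in for a VMA */

    /* A VMA is labelled [heap] when it overlaps [start_brk, brk]. */
    static int is_heap(struct range vma, unsigned long start_brk, unsigned long brk)
    {
            return vma.start <= brk && vma.end >= start_brk;
    }

    /* [stack] uses the same shape of test against start_stack. */
    static int is_stack(struct range vma, unsigned long start_stack)
    {
            return vma.start <= start_stack && vma.end >= start_stack;
    }

    int main(void)
    {
            struct range vma = { 0x602000, 0x624000 };          /* hypothetical */
            printf("%s\n", is_heap(vma, 0x602000, 0x61f000) ? "[heap]" : "");
            printf("%s\n", is_stack(vma, 0x7fff0000UL) ? "[stack]" : "");
            return 0;
    }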
619 .mm = vma->vm_mm, in show_smap()
872 struct mm_struct *mm; in clear_refs_write() local
900 mm = get_task_mm(task); in clear_refs_write()
901 if (mm) { in clear_refs_write()
908 .mm = mm, in clear_refs_write()
917 down_write(&mm->mmap_sem); in clear_refs_write()
918 reset_mm_hiwater_rss(mm); in clear_refs_write()
919 up_write(&mm->mmap_sem); in clear_refs_write()
923 down_read(&mm->mmap_sem); in clear_refs_write()
925 for (vma = mm->mmap; vma; vma = vma->vm_next) { in clear_refs_write()
928 up_read(&mm->mmap_sem); in clear_refs_write()
929 down_write(&mm->mmap_sem); in clear_refs_write()
930 for (vma = mm->mmap; vma; vma = vma->vm_next) { in clear_refs_write()
934 downgrade_write(&mm->mmap_sem); in clear_refs_write()
937 mmu_notifier_invalidate_range_start(mm, 0, -1); in clear_refs_write()
941 mmu_notifier_invalidate_range_end(mm, 0, -1); in clear_refs_write()
942 flush_tlb_mm(mm); in clear_refs_write()
943 up_read(&mm->mmap_sem); in clear_refs_write()
945 mmput(mm); in clear_refs_write()
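clear_refs_write() shows the heavier locking dance: it takes mmap_sem for reading to walk the VMAs, but if it has to strip VM_SOFTDIRTY from vma->vm_flags it must drop the read lock, retake it for writing, clear the flags, and then downgrade_write() back to read mode before the page-table walk; the walk is bracketed by mmu_notifier_invalidate_range_{start,end}(mm, 0, -1) and followed by flush_tlb_mm(). A condensed, non-buildable sketch of that ordering, with the actual pte-clearing walk elided and any_vma_has_softdirty() as a hypothetical helper:

    static void example_clear_refs(struct mm_struct *mm, bool soft_dirty)
    {
            struct vm_area_struct *vma;

            down_read(&mm->mmap_sem);
            if (soft_dirty && any_vma_has_softdirty(mm)) {   /* hypothetical helper */
                    /* vm_flags may only be changed under the write lock */
                    up_read(&mm->mmap_sem);
                    down_write(&mm->mmap_sem);
                    for (vma = mm->mmap; vma; vma = vma->vm_next)
                            vma->vm_flags &= ~VM_SOFTDIRTY;
                    downgrade_write(&mm->mmap_sem);          /* back to read mode */
            }
            if (soft_dirty)
                    mmu_notifier_invalidate_range_start(mm, 0, -1);

            /* ... walk the page tables, clearing referenced/soft-dirty bits ... */

            if (soft_dirty)
                    mmu_notifier_invalidate_range_end(mm, 0, -1);
            flush_tlb_mm(mm);
            up_read(&mm->mmap_sem);
            mmput(mm);                                       /* from get_task_mm() */
    }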
1013 struct vm_area_struct *vma = find_vma(walk->mm, addr); in pagemap_pte_hole()
1144 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in pagemap_pte_range()
1231 struct mm_struct *mm; in pagemap_read() local
1260 mm = mm_access(task, PTRACE_MODE_READ); in pagemap_read()
1261 ret = PTR_ERR(mm); in pagemap_read()
1262 if (!mm || IS_ERR(mm)) in pagemap_read()
1270 pagemap_walk.mm = mm; in pagemap_read()
1298 down_read(&mm->mmap_sem); in pagemap_read()
1300 up_read(&mm->mmap_sem); in pagemap_read()
1317 mmput(mm); in pagemap_read()
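pagemap_read() gets the target's mm via mm_access(task, PTRACE_MODE_READ), which can return NULL (no mm at all) as well as an ERR_PTR on permission failure, hence the two-part check on line 1262; it then walks the address space in chunks, taking mmap_sem for reading around each walk and dropping the mm with mmput() at the end. A shortened, non-buildable sketch of that access pattern; the buffer handling and walk callbacks are omitted and CHUNK is an assumed chunk size:

    static long example_pagemap_read(struct task_struct *task,
                                     unsigned long start, unsigned long end)
    {
            struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);

            if (!mm || IS_ERR(mm))                   /* no mm, or no permission */
                    return IS_ERR(mm) ? PTR_ERR(mm) : 0;

            while (start < end) {
                    unsigned long chunk_end = min(start + CHUNK, end);

                    down_read(&mm->mmap_sem);
                    /* ... walk_page_range(start, chunk_end, ...) fills the buffer ... */
                    up_read(&mm->mmap_sem);

                    /* copy this chunk to user space outside the lock, then advance */
                    start = chunk_end;
            }
            mmput(mm);
            return 0;
    }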
1435 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in gather_pte_stats()
1483 struct mm_struct *mm = vma->vm_mm; in show_numa_map() local
1488 .mm = mm, in show_numa_map()
1494 if (!mm) in show_numa_map()
1513 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { in show_numa_map()
1522 if (!is_pid || (vma->vm_start <= mm->start_stack && in show_numa_map()
1523 vma->vm_end >= mm->start_stack)) in show_numa_map()