Lines Matching refs:mm  (uses of the identifier mm in mm/huge_memory.c; each entry gives the source line number, the matching line, the enclosing function, and whether mm appears there as an argument, a local, or a struct member)

86 	struct mm_struct *mm;  member
717 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm, in __do_huge_pmd_anonymous_page() argument
728 if (mem_cgroup_try_charge(page, mm, gfp, &memcg)) in __do_huge_pmd_anonymous_page()
731 pgtable = pte_alloc_one(mm, haddr); in __do_huge_pmd_anonymous_page()
745 ptl = pmd_lock(mm, pmd); in __do_huge_pmd_anonymous_page()
750 pte_free(mm, pgtable); in __do_huge_pmd_anonymous_page()
758 pgtable_trans_huge_deposit(mm, pmd, pgtable); in __do_huge_pmd_anonymous_page()
759 set_pmd_at(mm, haddr, pmd, entry); in __do_huge_pmd_anonymous_page()
760 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); in __do_huge_pmd_anonymous_page()
761 atomic_long_inc(&mm->nr_ptes); in __do_huge_pmd_anonymous_page()
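
The entries at 745-761 above are the locked tail of __do_huge_pmd_anonymous_page(), which shows the core pattern most of this listing revolves around: take the per-pmd lock, deposit a preallocated page table, install the huge pmd, and bump the mm counters. A rough sketch of that fragment follows; the listed calls are from the index above, while the error handling and the rmap/memcg/LRU calls in between are recalled from this era's source and should be checked against the real file.

	ptl = pmd_lock(mm, pmd);			/* 745: take the per-pmd spinlock */
	if (unlikely(!pmd_none(*pmd))) {
		/* Lost the race with another fault: undo and bail. */
		spin_unlock(ptl);
		mem_cgroup_cancel_charge(page, memcg);
		put_page(page);
		pte_free(mm, pgtable);			/* 750: free the preallocated page table */
	} else {
		pmd_t entry;

		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		page_add_new_anon_rmap(page, vma, haddr);
		mem_cgroup_commit_charge(page, memcg, false);
		lru_cache_add_active_or_unevictable(page, vma);
		pgtable_trans_huge_deposit(mm, pmd, pgtable);	/* 758: stash pgtable for a later split */
		set_pmd_at(mm, haddr, pmd, entry);		/* 759: install the huge pmd */
		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);	/* 760 */
		atomic_long_inc(&mm->nr_ptes);			/* 761 */
		spin_unlock(ptl);
	}
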
774 static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, in set_huge_zero_page() argument
783 pgtable_trans_huge_deposit(mm, pmd, pgtable); in set_huge_zero_page()
784 set_pmd_at(mm, haddr, pmd, entry); in set_huge_zero_page()
785 atomic_long_inc(&mm->nr_ptes); in set_huge_zero_page()
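
set_huge_zero_page() (774-785 above) is small enough that the references nearly give the whole body. A sketch, with the parts not shown in the listing (the pmd_none() check and the zero-page pmd construction) filled in from memory of the same era and therefore to be treated as approximate:

	static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
			struct page *zero_page)
	{
		pmd_t entry;

		if (!pmd_none(*pmd))
			return false;
		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		pgtable_trans_huge_deposit(mm, pmd, pgtable);	/* 783 */
		set_pmd_at(mm, haddr, pmd, entry);		/* 784 */
		atomic_long_inc(&mm->nr_ptes);			/* 785 */
		return true;
	}
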
789 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_huge_pmd_anonymous_page() argument
803 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm) && in do_huge_pmd_anonymous_page()
809 pgtable = pte_alloc_one(mm, haddr); in do_huge_pmd_anonymous_page()
814 pte_free(mm, pgtable); in do_huge_pmd_anonymous_page()
818 ptl = pmd_lock(mm, pmd); in do_huge_pmd_anonymous_page()
819 set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd, in do_huge_pmd_anonymous_page()
823 pte_free(mm, pgtable); in do_huge_pmd_anonymous_page()
834 if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page, gfp))) { in do_huge_pmd_anonymous_page()
919 void huge_pmd_set_accessed(struct mm_struct *mm, in huge_pmd_set_accessed() argument
929 ptl = pmd_lock(mm, pmd); in huge_pmd_set_accessed()
973 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm, in do_huge_pmd_wp_page_fallback() argument
1001 mem_cgroup_try_charge(pages[i], mm, GFP_KERNEL, in do_huge_pmd_wp_page_fallback()
1027 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); in do_huge_pmd_wp_page_fallback()
1029 ptl = pmd_lock(mm, pmd); in do_huge_pmd_wp_page_fallback()
1037 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in do_huge_pmd_wp_page_fallback()
1038 pmd_populate(mm, &_pmd, pgtable); in do_huge_pmd_wp_page_fallback()
1051 set_pte_at(mm, haddr, pte, entry); in do_huge_pmd_wp_page_fallback()
1057 pmd_populate(mm, pmd, pgtable); in do_huge_pmd_wp_page_fallback()
1061 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in do_huge_pmd_wp_page_fallback()
1071 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in do_huge_pmd_wp_page_fallback()
1082 int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_huge_pmd_wp_page() argument
1094 ptl = pmd_lockptr(mm, pmd); in do_huge_pmd_wp_page()
1129 ret = do_huge_pmd_wp_page_fallback(mm, vma, address, in do_huge_pmd_wp_page()
1141 if (unlikely(mem_cgroup_try_charge(new_page, mm, huge_gfp, &memcg))) { in do_huge_pmd_wp_page()
1163 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); in do_huge_pmd_wp_page()
1181 set_pmd_at(mm, haddr, pmd, entry); in do_huge_pmd_wp_page()
1184 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); in do_huge_pmd_wp_page()
1195 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in do_huge_pmd_wp_page()
1208 struct mm_struct *mm = vma->vm_mm; in follow_trans_huge_pmd() local
1211 assert_spin_locked(pmd_lockptr(mm, pmd)); in follow_trans_huge_pmd()
1259 int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_huge_pmd_numa_page() argument
1276 ptl = pmd_lock(mm, pmdp); in do_huge_pmd_numa_page()
1355 migrated = migrate_misplaced_transhuge_page(mm, vma, in do_huge_pmd_numa_page()
1371 set_pmd_at(mm, haddr, pmdp, pmd); in do_huge_pmd_numa_page()
1403 orig_pmd = pmdp_get_and_clear_full(tlb->mm, addr, pmd, in zap_huge_pmd()
1406 pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd); in zap_huge_pmd()
1408 atomic_long_dec(&tlb->mm->nr_ptes); in zap_huge_pmd()
1415 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); in zap_huge_pmd()
1417 atomic_long_dec(&tlb->mm->nr_ptes); in zap_huge_pmd()
1421 pte_free(tlb->mm, pgtable); in zap_huge_pmd()
1436 struct mm_struct *mm = vma->vm_mm; in move_huge_pmd() local
1459 new_ptl = pmd_lockptr(mm, new_pmd); in move_huge_pmd()
1462 pmd = pmdp_get_and_clear(mm, old_addr, old_pmd); in move_huge_pmd()
1467 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); in move_huge_pmd()
1468 pgtable_trans_huge_deposit(mm, new_pmd, pgtable); in move_huge_pmd()
1470 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); in move_huge_pmd()
1488 struct mm_struct *mm = vma->vm_mm; in change_huge_pmd() local
1508 entry = pmdp_get_and_clear_notify(mm, addr, pmd); in change_huge_pmd()
1513 set_pmd_at(mm, addr, pmd, entry); in change_huge_pmd()
1557 struct mm_struct *mm, in page_check_address_pmd() argument
1569 pgd = pgd_offset(mm, address); in page_check_address_pmd()
1577 *ptl = pmd_lock(mm, pmd); in page_check_address_pmd()
1606 struct mm_struct *mm = vma->vm_mm; in __split_huge_page_splitting() local
1614 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); in __split_huge_page_splitting()
1615 pmd = page_check_address_pmd(page, mm, address, in __split_huge_page_splitting()
1630 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in __split_huge_page_splitting()
1754 struct mm_struct *mm = vma->vm_mm; in __split_huge_page_map() local
1761 pmd = page_check_address_pmd(page, mm, address, in __split_huge_page_map()
1764 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in __split_huge_page_map()
1765 pmd_populate(mm, &_pmd, pgtable); in __split_huge_page_map()
1786 set_pte_at(mm, haddr, pte, entry); in __split_huge_page_map()
1818 pmd_populate(mm, pmd, pgtable); in __split_huge_page_map()
1998 static struct mm_slot *get_mm_slot(struct mm_struct *mm) in get_mm_slot() argument
2002 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm) in get_mm_slot()
2003 if (mm == mm_slot->mm) in get_mm_slot()
2009 static void insert_to_mm_slots_hash(struct mm_struct *mm, in insert_to_mm_slots_hash() argument
2012 mm_slot->mm = mm; in insert_to_mm_slots_hash()
2013 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm); in insert_to_mm_slots_hash()
2016 static inline int khugepaged_test_exit(struct mm_struct *mm) in khugepaged_test_exit() argument
2018 return atomic_read(&mm->mm_users) == 0; in khugepaged_test_exit()
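
The khugepaged bookkeeping helpers referenced at 1998-2018 (and the mm member at 86) are almost fully spelled out by the listing. A sketch is below; the mm_slot fields other than mm, and the mm_slots_hash declaration, are assumptions based on the usual layout of this code rather than lines from the index.

	#define MM_SLOTS_HASH_BITS 10
	static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

	struct mm_slot {
		struct hlist_node hash;		/* link in mm_slots_hash */
		struct list_head mm_node;	/* link in khugepaged's scan list */
		struct mm_struct *mm;		/* 86: the mm this slot tracks */
	};

	static struct mm_slot *get_mm_slot(struct mm_struct *mm)
	{
		struct mm_slot *mm_slot;

		hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
			if (mm == mm_slot->mm)		/* 2003 */
				return mm_slot;

		return NULL;
	}

	static void insert_to_mm_slots_hash(struct mm_struct *mm,
					    struct mm_slot *mm_slot)
	{
		mm_slot->mm = mm;				/* 2012 */
		hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);	/* 2013 */
	}

	/*
	 * khugepaged only pins the mm via mm_count (2047/2091 below);
	 * mm_users == 0 means the address space is being torn down and
	 * the slot should be dropped.
	 */
	static inline int khugepaged_test_exit(struct mm_struct *mm)
	{
		return atomic_read(&mm->mm_users) == 0;	/* 2018 */
	}
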
2021 int __khugepaged_enter(struct mm_struct *mm) in __khugepaged_enter() argument
2031 VM_BUG_ON_MM(khugepaged_test_exit(mm), mm); in __khugepaged_enter()
2032 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { in __khugepaged_enter()
2038 insert_to_mm_slots_hash(mm, mm_slot); in __khugepaged_enter()
2047 atomic_inc(&mm->mm_count); in __khugepaged_enter()
2074 void __khugepaged_exit(struct mm_struct *mm) in __khugepaged_exit() argument
2080 mm_slot = get_mm_slot(mm); in __khugepaged_exit()
2089 clear_bit(MMF_VM_HUGEPAGE, &mm->flags); in __khugepaged_exit()
2091 mmdrop(mm); in __khugepaged_exit()
2101 down_write(&mm->mmap_sem); in __khugepaged_exit()
2102 up_write(&mm->mmap_sem); in __khugepaged_exit()
2334 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm, in khugepaged_alloc_page() argument
2346 up_read(&mm->mmap_sem); in khugepaged_alloc_page()
2402 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm, in khugepaged_alloc_page() argument
2406 up_read(&mm->mmap_sem); in khugepaged_alloc_page()
2426 static void collapse_huge_page(struct mm_struct *mm, in collapse_huge_page() argument
2451 new_page = khugepaged_alloc_page(hpage, gfp, mm, vma, address, node); in collapse_huge_page()
2455 if (unlikely(mem_cgroup_try_charge(new_page, mm, in collapse_huge_page()
2464 down_write(&mm->mmap_sem); in collapse_huge_page()
2465 if (unlikely(khugepaged_test_exit(mm))) in collapse_huge_page()
2468 vma = find_vma(mm, address); in collapse_huge_page()
2477 pmd = mm_find_pmd(mm, address); in collapse_huge_page()
2484 pte_ptl = pte_lockptr(mm, pmd); in collapse_huge_page()
2488 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); in collapse_huge_page()
2489 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ in collapse_huge_page()
2498 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in collapse_huge_page()
2513 pmd_populate(mm, pmd, pmd_pgtable(_pmd)); in collapse_huge_page()
2545 pgtable_trans_huge_deposit(mm, pmd, pgtable); in collapse_huge_page()
2546 set_pmd_at(mm, address, pmd, _pmd); in collapse_huge_page()
2554 up_write(&mm->mmap_sem); in collapse_huge_page()
2562 static int khugepaged_scan_pmd(struct mm_struct *mm, in khugepaged_scan_pmd() argument
2578 pmd = mm_find_pmd(mm, address); in khugepaged_scan_pmd()
2583 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in khugepaged_scan_pmd()
2632 collapse_huge_page(mm, address, hpage, vma, node); in khugepaged_scan_pmd()
2640 struct mm_struct *mm = mm_slot->mm; in collect_mm_slot() local
2644 if (khugepaged_test_exit(mm)) { in collect_mm_slot()
2657 mmdrop(mm); in collect_mm_slot()
2667 struct mm_struct *mm; in khugepaged_scan_mm_slot() local
2684 mm = mm_slot->mm; in khugepaged_scan_mm_slot()
2685 down_read(&mm->mmap_sem); in khugepaged_scan_mm_slot()
2686 if (unlikely(khugepaged_test_exit(mm))) in khugepaged_scan_mm_slot()
2689 vma = find_vma(mm, khugepaged_scan.address); in khugepaged_scan_mm_slot()
2696 if (unlikely(khugepaged_test_exit(mm))) { in khugepaged_scan_mm_slot()
2718 if (unlikely(khugepaged_test_exit(mm))) in khugepaged_scan_mm_slot()
2724 ret = khugepaged_scan_pmd(mm, vma, in khugepaged_scan_mm_slot()
2738 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */ in khugepaged_scan_mm_slot()
2747 if (khugepaged_test_exit(mm) || !vma) { in khugepaged_scan_mm_slot()
2857 struct mm_struct *mm = vma->vm_mm; in __split_huge_zero_page_pmd() local
2865 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in __split_huge_zero_page_pmd()
2866 pmd_populate(mm, &_pmd, pgtable); in __split_huge_zero_page_pmd()
2874 set_pte_at(mm, haddr, pte, entry); in __split_huge_zero_page_pmd()
2878 pmd_populate(mm, pmd, pgtable); in __split_huge_zero_page_pmd()
2887 struct mm_struct *mm = vma->vm_mm; in __split_huge_page_pmd() local
2897 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); in __split_huge_page_pmd()
2898 ptl = pmd_lock(mm, pmd); in __split_huge_page_pmd()
2901 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in __split_huge_page_pmd()
2907 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in __split_huge_page_pmd()
2914 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in __split_huge_page_pmd()
2929 void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address, in split_huge_page_pmd_mm() argument
2934 vma = find_vma(mm, address); in split_huge_page_pmd_mm()
2939 static void split_huge_page_address(struct mm_struct *mm, in split_huge_page_address() argument
2948 pgd = pgd_offset(mm, address); in split_huge_page_address()
2963 split_huge_page_pmd_mm(mm, address, pmd); in split_huge_page_address()
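
The listing ends with the split helpers (2929-2963). A sketch of how they chain together: split_huge_page_pmd_mm() resolves the vma for an mm/address pair, and split_huge_page_address() walks pgd/pud/pmd down to the entry to split. The page-table presence checks between the listed pgd_offset() and split_huge_page_pmd_mm() calls are filled in from memory and may differ slightly from the actual source.

	void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
			pmd_t *pmd)
	{
		struct vm_area_struct *vma;

		vma = find_vma(mm, address);		/* 2934 */
		BUG_ON(vma == NULL);
		split_huge_page_pmd(vma, address, pmd);
	}

	static void split_huge_page_address(struct mm_struct *mm,
					    unsigned long address)
	{
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;

		VM_BUG_ON(address & ~HPAGE_PMD_MASK);	/* must be huge-page aligned */

		pgd = pgd_offset(mm, address);		/* 2948 */
		if (!pgd_present(*pgd))
			return;

		pud = pud_offset(pgd, address);
		if (!pud_present(*pud))
			return;

		pmd = pmd_offset(pud, address);
		if (!pmd_present(*pmd))
			return;
		/*
		 * The caller holds mmap_sem for writing, so a huge pmd
		 * cannot materialize from under us.
		 */
		split_huge_page_pmd_mm(mm, address, pmd);	/* 2963 */
	}
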