Lines matching refs:mm — identifier cross-reference over the s390 page-table and guest-mapping (gmap) helpers; the line numbers and fragments are consistent with arch/s390/mm/pgtable.c of a pre-4.8 kernel.
34 unsigned long *crst_table_alloc(struct mm_struct *mm) in crst_table_alloc() argument
43 void crst_table_free(struct mm_struct *mm, unsigned long *table) in crst_table_free() argument
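These two helpers are the allocate/free pair for the multi-page region/segment (CRST) tables. A minimal caller sketch, assuming the real s390 helpers crst_table_init() and _REGION2_ENTRY_EMPTY; the surrounding error path is illustrative only:

	unsigned long *table;

	table = crst_table_alloc(mm);	/* 4 pages of 8-byte entries */
	if (!table)
		return -ENOMEM;
	crst_table_init(table, _REGION2_ENTRY_EMPTY);	/* all entries invalid */
	/* ... on a failure path, release it again: */
	crst_table_free(mm, table);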
50 struct mm_struct *mm = arg; in __crst_table_upgrade() local
52 if (current->active_mm == mm) { in __crst_table_upgrade()
54 set_user_asce(mm); in __crst_table_upgrade()
59 int crst_table_upgrade(struct mm_struct *mm, unsigned long limit) in crst_table_upgrade() argument
68 table = crst_table_alloc(mm); in crst_table_upgrade()
71 spin_lock_bh(&mm->page_table_lock); in crst_table_upgrade()
72 if (mm->context.asce_limit < limit) { in crst_table_upgrade()
73 pgd = (unsigned long *) mm->pgd; in crst_table_upgrade()
74 if (mm->context.asce_limit <= (1UL << 31)) { in crst_table_upgrade()
76 mm->context.asce_limit = 1UL << 42; in crst_table_upgrade()
77 mm->context.asce_bits = _ASCE_TABLE_LENGTH | in crst_table_upgrade()
82 mm->context.asce_limit = 1UL << 53; in crst_table_upgrade()
83 mm->context.asce_bits = _ASCE_TABLE_LENGTH | in crst_table_upgrade()
88 pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd); in crst_table_upgrade()
89 mm->pgd = (pgd_t *) table; in crst_table_upgrade()
90 mm->task_size = mm->context.asce_limit; in crst_table_upgrade()
94 spin_unlock_bh(&mm->page_table_lock); in crst_table_upgrade()
96 crst_table_free(mm, table); in crst_table_upgrade()
97 if (mm->context.asce_limit < limit) in crst_table_upgrade()
100 on_each_cpu(__crst_table_upgrade, mm, 0); in crst_table_upgrade()
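The upgrade path above (lines 59–100) grows the ASCE one level at a time under page_table_lock, reparents the old top table via pgd_populate(), and finally broadcasts the new ASCE with on_each_cpu() so every CPU running this mm reloads it. A hedged sketch of the classic trigger in the s390 mmap path, modeled on s390_mmap_check(); exact checks vary by kernel version:

	/* Grow the hierarchy before a mapping above the current limit
	 * is created; 1UL << 53 asks for a region-first (4-level) table. */
	if (!is_compat_task() && (addr + len) >= TASK_SIZE)
		return crst_table_upgrade(current->mm, 1UL << 53);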
104 void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) in crst_table_downgrade() argument
108 if (current->active_mm == mm) { in crst_table_downgrade()
110 __tlb_flush_mm(mm); in crst_table_downgrade()
112 while (mm->context.asce_limit > limit) { in crst_table_downgrade()
113 pgd = mm->pgd; in crst_table_downgrade()
116 mm->context.asce_limit = 1UL << 42; in crst_table_downgrade()
117 mm->context.asce_bits = _ASCE_TABLE_LENGTH | in crst_table_downgrade()
122 mm->context.asce_limit = 1UL << 31; in crst_table_downgrade()
123 mm->context.asce_bits = _ASCE_TABLE_LENGTH | in crst_table_downgrade()
130 mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN); in crst_table_downgrade()
131 mm->task_size = mm->context.asce_limit; in crst_table_downgrade()
132 crst_table_free(mm, (unsigned long *) pgd); in crst_table_downgrade()
134 if (current->active_mm == mm) in crst_table_downgrade()
135 set_user_asce(mm); in crst_table_downgrade()
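crst_table_downgrade walks in the opposite direction, peeling off one top-level table per loop iteration (following _REGION_ENTRY_ORIGIN back to the lower table) and reloading the user ASCE if the mm is currently active. A hypothetical call site, shrinking a compat task back to a single 2 GB segment table at exec time:

	/* Illustrative only: a 31-bit binary never addresses beyond 2 GB. */
	if (is_compat_task() && mm->context.asce_limit > (1UL << 31))
		crst_table_downgrade(mm, 1UL << 31);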
147 struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit) in gmap_alloc() argument
178 gmap->mm = mm; in gmap_alloc()
190 down_write(&mm->mmap_sem); in gmap_alloc()
191 list_add(&gmap->list, &mm->context.gmap_list); in gmap_alloc()
192 up_write(&mm->mmap_sem); in gmap_alloc()
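gmap_alloc builds a guest address space on top of a host mm and links it into mm->context.gmap_list under mmap_sem (lines 190–192). A sketch modeled on the KVM/s390 VM-creation path; the kvm->arch field names and the 16 TB limit are assumptions borrowed from kvm-s390 of the same era:

	/* Create a guest mapping for up to 16 TB of guest real memory. */
	kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
	if (!kvm->arch.gmap)
		return -ENOMEM;
	/* ... and on VM destruction: */
	gmap_free(kvm->arch.gmap);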
205 __tlb_flush_asce(gmap->mm, gmap->asce); in gmap_flush_tlb()
244 __tlb_flush_asce(gmap->mm, gmap->asce); in gmap_free()
253 down_write(&gmap->mm->mmap_sem); in gmap_free()
255 up_write(&gmap->mm->mmap_sem); in gmap_free()
295 spin_lock(&gmap->mm->page_table_lock); in gmap_alloc_table()
303 spin_unlock(&gmap->mm->page_table_lock); in gmap_alloc_table()
384 down_write(&gmap->mm->mmap_sem); in gmap_unmap_segment()
387 up_write(&gmap->mm->mmap_sem); in gmap_unmap_segment()
416 down_write(&gmap->mm->mmap_sem); in gmap_map_segment()
426 up_write(&gmap->mm->mmap_sem); in gmap_map_segment()
470 down_read(&gmap->mm->mmap_sem); in gmap_translate()
472 up_read(&gmap->mm->mmap_sem); in gmap_translate()
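These entry points manage the guest-absolute to user-virtual window: gmap_map_segment installs a range, gmap_unmap_segment removes one (both under mmap_sem held for write), and gmap_translate resolves a single address under the read lock. A sketch of wiring up a memory slot; the mem-> field names follow the KVM memslot structures and are an assumption here:

	/* Map a userspace range at the slot's guest-physical offset. */
	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		return rc;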
483 static void gmap_unlink(struct mm_struct *mm, unsigned long *table, in gmap_unlink() argument
489 list_for_each_entry(gmap, &mm->context.gmap_list, list) { in gmap_unlink()
509 struct mm_struct *mm; in __gmap_link() local
545 mm = gmap->mm; in __gmap_link()
546 pgd = pgd_offset(mm, vmaddr); in __gmap_link()
559 ptl = pmd_lock(mm, pmd); in __gmap_link()
589 down_read(&gmap->mm->mmap_sem); in gmap_fault()
595 if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags)) { in gmap_fault()
601 up_read(&gmap->mm->mmap_sem); in gmap_fault()
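gmap_fault resolves a guest address under the mmap_sem read lock and, when the host page is not yet present, pulls it in with fixup_user_fault() (line 595). A minimal fault-in wrapper built on it; the wrapper itself is hypothetical:

	static int fault_in_guest_page(struct gmap *gmap, unsigned long gaddr,
				       bool writable)
	{
		return gmap_fault(gmap, gaddr, writable ? FAULT_FLAG_WRITE : 0);
	}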
606 static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm) in gmap_zap_swap_entry() argument
609 dec_mm_counter(mm, MM_SWAPENTS); in gmap_zap_swap_entry()
614 dec_mm_counter(mm, MM_ANONPAGES); in gmap_zap_swap_entry()
616 dec_mm_counter(mm, MM_FILEPAGES); in gmap_zap_swap_entry()
638 ptep = get_locked_pte(gmap->mm, vmaddr, &ptl); in __gmap_zap()
650 gmap_zap_swap_entry(pte_to_swp_entry(pte), gmap->mm); in __gmap_zap()
651 pte_clear(gmap->mm, vmaddr, ptep); in __gmap_zap()
664 down_read(&gmap->mm->mmap_sem); in gmap_discard()
675 vma = find_vma(gmap->mm, vmaddr); in gmap_discard()
679 up_read(&gmap->mm->mmap_sem); in gmap_discard()
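gmap_discard translates each guest segment back to a host VMA (line 675) and zaps the backing page range, which is what implements the guest's page-release hypercall (DIAG 0x10). A hedged sketch of such a handler body:

	/* Release the guest page range [start, end). */
	gmap_discard(vcpu->arch.gmap, start, end);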
731 down_read(&gmap->mm->mmap_sem); in gmap_ipte_notify()
740 if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) { in gmap_ipte_notify()
748 ptep = get_locked_pte(gmap->mm, addr, &ptl); in gmap_ipte_notify()
761 up_read(&gmap->mm->mmap_sem); in gmap_ipte_notify()
775 void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte) in gmap_do_ipte_notify() argument
785 list_for_each_entry(gmap, &mm->context.gmap_list, list) { in gmap_do_ipte_notify()
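gmap_ipte_notify arms the notification bit on a guest range after faulting it in writable; gmap_do_ipte_notify is the other half, invoked when the host invalidates a marked PTE, and fans out to every gmap attached to the mm (line 785). A sketch of the consumer side, assuming the gmap_notifier registration API of the same kernel era:

	static void my_gmap_notifier(struct gmap *gmap, unsigned long gaddr)
	{
		/* e.g. kick the vcpu whose watched page was invalidated */
	}

	static struct gmap_notifier gnotifier = {
		.notifier_call = my_gmap_notifier,
	};

	gmap_register_ipte_notifier(&gnotifier);
	rc = gmap_ipte_notify(gmap, gaddr, PAGE_SIZE);	/* watch one page */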
803 static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm) in page_table_alloc_pgste() argument
832 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, in set_guest_storage_key() argument
839 down_read(&mm->mmap_sem); in set_guest_storage_key()
841 ptep = get_locked_pte(mm, addr, &ptl); in set_guest_storage_key()
843 up_read(&mm->mmap_sem); in set_guest_storage_key()
849 if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) { in set_guest_storage_key()
850 up_read(&mm->mmap_sem); in set_guest_storage_key()
880 up_read(&mm->mmap_sem); in set_guest_storage_key()
885 unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr) in get_guest_storage_key() argument
893 down_read(&mm->mmap_sem); in get_guest_storage_key()
894 ptep = get_locked_pte(mm, addr, &ptl); in get_guest_storage_key()
896 up_read(&mm->mmap_sem); in get_guest_storage_key()
919 up_read(&mm->mmap_sem); in get_guest_storage_key()
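Both storage-key accessors take mmap_sem themselves and fault the page in writable if necessary (lines 839–850), so a caller only supplies the mm and address. A sketch modeled on the SSKE/ISKE intercept handlers; the final 0 selects the normal (quiescing) variant per this era's signature, an assumption:

	unsigned long key;
	int rc;

	rc = set_guest_storage_key(current->mm, vmaddr, new_key, 0);
	if (rc < 0)
		return rc;
	key = get_guest_storage_key(current->mm, vmaddr);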
965 static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm) in page_table_alloc_pgste() argument
974 static inline void gmap_unlink(struct mm_struct *mm, unsigned long *table, in gmap_unlink() argument
995 unsigned long *page_table_alloc(struct mm_struct *mm) in page_table_alloc() argument
1001 if (mm_alloc_pgste(mm)) in page_table_alloc()
1002 return page_table_alloc_pgste(mm); in page_table_alloc()
1004 spin_lock_bh(&mm->context.list_lock); in page_table_alloc()
1006 if (!list_empty(&mm->context.pgtable_list)) { in page_table_alloc()
1007 page = list_first_entry(&mm->context.pgtable_list, in page_table_alloc()
1014 spin_unlock_bh(&mm->context.list_lock); in page_table_alloc()
1025 spin_lock_bh(&mm->context.list_lock); in page_table_alloc()
1026 list_add(&page->lru, &mm->context.pgtable_list); in page_table_alloc()
1034 spin_unlock_bh(&mm->context.list_lock); in page_table_alloc()
1038 void page_table_free(struct mm_struct *mm, unsigned long *table) in page_table_free() argument
1048 spin_lock_bh(&mm->context.list_lock); in page_table_free()
1053 list_add(&page->lru, &mm->context.pgtable_list); in page_table_free()
1054 spin_unlock_bh(&mm->context.list_lock); in page_table_free()
1080 struct mm_struct *mm; in page_table_free_rcu() local
1084 mm = tlb->mm; in page_table_free_rcu()
1087 gmap_unlink(mm, table, vmaddr); in page_table_free_rcu()
1093 spin_lock_bh(&mm->context.list_lock); in page_table_free_rcu()
1098 list_add_tail(&page->lru, &mm->context.pgtable_list); in page_table_free_rcu()
1099 spin_unlock_bh(&mm->context.list_lock); in page_table_free_rcu()
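These three are the backends behind the s390 pte allocation interface: a 4 KB page carries two 2 KB page tables tracked on mm->context.pgtable_list, and freeing from an mmu_gather goes through the RCU variant so concurrent walkers still see a valid table. The asm/pgalloc.h wrappers have roughly this shape (hedged against version drift):

	#define pte_alloc_one(mm, vmaddr)  ((pte_t *) page_table_alloc(mm))
	#define pte_free(mm, pte)          page_table_free(mm, (unsigned long *) pte)

	static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
					unsigned long address)
	{
		page_table_free_rcu(tlb, (unsigned long *) pte, address);
	}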
1161 tlb->mm->context.flush_mm = 1; in tlb_remove_table()
1166 __tlb_flush_mm_lazy(tlb->mm); in tlb_remove_table()
1186 static inline void thp_split_mm(struct mm_struct *mm) in thp_split_mm() argument
1190 for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) { in thp_split_mm()
1195 mm->def_flags |= VM_NOHUGEPAGE; in thp_split_mm()
1198 static inline void thp_split_mm(struct mm_struct *mm) in thp_split_mm() argument
1208 struct mm_struct *mm = current->mm; in s390_enable_sie() local
1211 if (mm_has_pgste(mm)) in s390_enable_sie()
1214 if (!mm_alloc_pgste(mm)) in s390_enable_sie()
1216 down_write(&mm->mmap_sem); in s390_enable_sie()
1217 mm->context.has_pgste = 1; in s390_enable_sie()
1219 thp_split_mm(mm); in s390_enable_sie()
1220 up_write(&mm->mmap_sem); in s390_enable_sie()
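s390_enable_sie converts an mm into one whose page tables carry PGSTEs (page status table extensions), which the SIE instruction requires; because extended tables cannot be retrofitted under live huge mappings, thp_split_mm (lines 1186–1195) first splits every THP mapping and sets VM_NOHUGEPAGE. KVM calls it once at VM creation, roughly:

	int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
	{
		int rc;

		rc = s390_enable_sie();	/* -EINVAL if PGSTEs are unavailable */
		if (rc)
			return rc;
		/* ... */
	}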
1242 ptep_flush_direct(walk->mm, addr, pte); in __s390_enable_skey()
1258 struct mm_struct *mm = current->mm; in s390_enable_skey() local
1262 down_write(&mm->mmap_sem); in s390_enable_skey()
1263 if (mm_use_skey(mm)) in s390_enable_skey()
1266 mm->context.use_skey = 1; in s390_enable_skey()
1267 for (vma = mm->mmap; vma; vma = vma->vm_next) { in s390_enable_skey()
1270 mm->context.use_skey = 0; in s390_enable_skey()
1275 mm->def_flags &= ~VM_MERGEABLE; in s390_enable_skey()
1277 walk.mm = mm; in s390_enable_skey()
1281 up_write(&mm->mmap_sem); in s390_enable_skey()
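Storage keys are enabled lazily: s390_enable_skey marks the mm, strips VM_MERGEABLE from every VMA (KSM page sharing and per-page keys do not mix), and walks all PTEs to flush the cached key state. A sketch of the lazy trigger in an instruction-intercept handler; the handler name is illustrative:

	static int handle_key_instruction(struct kvm_vcpu *vcpu)
	{
		int rc;

		if (!mm_use_skey(current->mm)) {
			rc = s390_enable_skey();  /* first key use in this VM */
			if (rc)
				return rc;
		}
		/* ... perform SSKE/ISKE on the target page ... */
		return 0;
	}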
1300 void s390_reset_cmma(struct mm_struct *mm) in s390_reset_cmma() argument
1304 down_write(&mm->mmap_sem); in s390_reset_cmma()
1305 walk.mm = mm; in s390_reset_cmma()
1307 up_write(&mm->mmap_sem); in s390_reset_cmma()
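s390_reset_cmma runs a page-table walk over the whole mm to clear the CMMA (collaborative memory management) usage state, giving the guest a clean slate. A hedged sketch of the ioctl-side call; the attribute case name follows kvm-s390 and is an assumption:

	case KVM_S390_VM_MEM_CLR_CMMA:	/* assumed attribute name */
		s390_reset_cmma(kvm->arch.gmap->mm);
		break;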
1320 pte = get_locked_pte(gmap->mm, address, &ptl); in gmap_test_and_clear_dirty()
1324 if (ptep_test_and_clear_user_dirty(gmap->mm, address, pte)) in gmap_test_and_clear_dirty()
1374 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, in pgtable_trans_huge_deposit() argument
1379 assert_spin_locked(pmd_lockptr(mm, pmdp)); in pgtable_trans_huge_deposit()
1382 if (!pmd_huge_pte(mm, pmdp)) in pgtable_trans_huge_deposit()
1385 list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp)); in pgtable_trans_huge_deposit()
1386 pmd_huge_pte(mm, pmdp) = pgtable; in pgtable_trans_huge_deposit()
1389 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) in pgtable_trans_huge_withdraw() argument
1395 assert_spin_locked(pmd_lockptr(mm, pmdp)); in pgtable_trans_huge_withdraw()
1398 pgtable = pmd_huge_pte(mm, pmdp); in pgtable_trans_huge_withdraw()
1401 pmd_huge_pte(mm, pmdp) = NULL; in pgtable_trans_huge_withdraw()
1403 pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next; in pgtable_trans_huge_withdraw()
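The deposit/withdraw pair parks a preallocated pte page under a huge pmd (chained through a list head when several are deposited, lines 1385 and 1403) so a later THP split never has to allocate memory. A simplified sketch of the generic pattern that drives these hooks from mm/huge_memory.c:

	/* At map/collapse time: stash the spare pte table under the pmd. */
	pgtable = pte_alloc_one(mm, haddr);
	spin_lock(pmd_lockptr(mm, pmd));
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	spin_unlock(pmd_lockptr(mm, pmd));

	/* At zap/split time: take it back and free or reuse it. */
	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pte_free(mm, pgtable);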