Searched refs:pmd (Results 1 - 200 of 319) sorted by relevance

/linux-4.1.27/drivers/md/
dm-thin-metadata.c 206 struct dm_pool_metadata *pmd; member in struct:dm_thin_device
362 static int superblock_lock_zero(struct dm_pool_metadata *pmd, superblock_lock_zero() argument
365 return dm_bm_write_lock_zero(pmd->bm, THIN_SUPERBLOCK_LOCATION, superblock_lock_zero()
369 static int superblock_lock(struct dm_pool_metadata *pmd, superblock_lock() argument
372 return dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION, superblock_lock()
403 static void __setup_btree_details(struct dm_pool_metadata *pmd) __setup_btree_details() argument
405 pmd->info.tm = pmd->tm; __setup_btree_details()
406 pmd->info.levels = 2; __setup_btree_details()
407 pmd->info.value_type.context = pmd->data_sm; __setup_btree_details()
408 pmd->info.value_type.size = sizeof(__le64); __setup_btree_details()
409 pmd->info.value_type.inc = data_block_inc; __setup_btree_details()
410 pmd->info.value_type.dec = data_block_dec; __setup_btree_details()
411 pmd->info.value_type.equal = data_block_equal; __setup_btree_details()
413 memcpy(&pmd->nb_info, &pmd->info, sizeof(pmd->nb_info)); __setup_btree_details()
414 pmd->nb_info.tm = pmd->nb_tm; __setup_btree_details()
416 pmd->tl_info.tm = pmd->tm; __setup_btree_details()
417 pmd->tl_info.levels = 1; __setup_btree_details()
418 pmd->tl_info.value_type.context = &pmd->bl_info; __setup_btree_details()
419 pmd->tl_info.value_type.size = sizeof(__le64); __setup_btree_details()
420 pmd->tl_info.value_type.inc = subtree_inc; __setup_btree_details()
421 pmd->tl_info.value_type.dec = subtree_dec; __setup_btree_details()
422 pmd->tl_info.value_type.equal = subtree_equal; __setup_btree_details()
424 pmd->bl_info.tm = pmd->tm; __setup_btree_details()
425 pmd->bl_info.levels = 1; __setup_btree_details()
426 pmd->bl_info.value_type.context = pmd->data_sm; __setup_btree_details()
427 pmd->bl_info.value_type.size = sizeof(__le64); __setup_btree_details()
428 pmd->bl_info.value_type.inc = data_block_inc; __setup_btree_details()
429 pmd->bl_info.value_type.dec = data_block_dec; __setup_btree_details()
430 pmd->bl_info.value_type.equal = data_block_equal; __setup_btree_details()
432 pmd->details_info.tm = pmd->tm; __setup_btree_details()
433 pmd->details_info.levels = 1; __setup_btree_details()
434 pmd->details_info.value_type.context = NULL; __setup_btree_details()
435 pmd->details_info.value_type.size = sizeof(struct disk_device_details); __setup_btree_details()
436 pmd->details_info.value_type.inc = NULL; __setup_btree_details()
437 pmd->details_info.value_type.dec = NULL; __setup_btree_details()
438 pmd->details_info.value_type.equal = NULL; __setup_btree_details()
441 static int save_sm_roots(struct dm_pool_metadata *pmd) save_sm_roots() argument
446 r = dm_sm_root_size(pmd->metadata_sm, &len); save_sm_roots()
450 r = dm_sm_copy_root(pmd->metadata_sm, &pmd->metadata_space_map_root, len); save_sm_roots()
454 r = dm_sm_root_size(pmd->data_sm, &len); save_sm_roots()
458 return dm_sm_copy_root(pmd->data_sm, &pmd->data_space_map_root, len); save_sm_roots()
461 static void copy_sm_roots(struct dm_pool_metadata *pmd, copy_sm_roots() argument
465 &pmd->metadata_space_map_root, copy_sm_roots()
466 sizeof(pmd->metadata_space_map_root)); copy_sm_roots()
469 &pmd->data_space_map_root, copy_sm_roots()
470 sizeof(pmd->data_space_map_root)); copy_sm_roots()
473 static int __write_initial_superblock(struct dm_pool_metadata *pmd) __write_initial_superblock() argument
478 sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT; __write_initial_superblock()
483 r = dm_sm_commit(pmd->data_sm); __write_initial_superblock()
487 r = save_sm_roots(pmd); __write_initial_superblock()
491 r = dm_tm_pre_commit(pmd->tm); __write_initial_superblock()
495 r = superblock_lock_zero(pmd, &sblock); __write_initial_superblock()
508 copy_sm_roots(pmd, disk_super); __write_initial_superblock()
510 disk_super->data_mapping_root = cpu_to_le64(pmd->root); __write_initial_superblock()
511 disk_super->device_details_root = cpu_to_le64(pmd->details_root); __write_initial_superblock()
514 disk_super->data_block_size = cpu_to_le32(pmd->data_block_size); __write_initial_superblock()
516 return dm_tm_commit(pmd->tm, sblock); __write_initial_superblock()
519 static int __format_metadata(struct dm_pool_metadata *pmd) __format_metadata() argument
523 r = dm_tm_create_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION, __format_metadata()
524 &pmd->tm, &pmd->metadata_sm); __format_metadata()
530 pmd->data_sm = dm_sm_disk_create(pmd->tm, 0); __format_metadata()
531 if (IS_ERR(pmd->data_sm)) { __format_metadata()
533 r = PTR_ERR(pmd->data_sm); __format_metadata()
537 pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm); __format_metadata()
538 if (!pmd->nb_tm) { __format_metadata()
544 __setup_btree_details(pmd); __format_metadata()
546 r = dm_btree_empty(&pmd->info, &pmd->root); __format_metadata()
550 r = dm_btree_empty(&pmd->details_info, &pmd->details_root); __format_metadata()
556 r = __write_initial_superblock(pmd); __format_metadata()
563 dm_tm_destroy(pmd->nb_tm); __format_metadata()
565 dm_sm_destroy(pmd->data_sm); __format_metadata()
567 dm_tm_destroy(pmd->tm); __format_metadata()
568 dm_sm_destroy(pmd->metadata_sm); __format_metadata()
574 struct dm_pool_metadata *pmd) __check_incompat_features()
588 if (get_disk_ro(pmd->bdev->bd_disk)) __check_incompat_features()
601 static int __open_metadata(struct dm_pool_metadata *pmd) __open_metadata() argument
607 r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION, __open_metadata()
617 if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) { __open_metadata()
620 (unsigned long long)pmd->data_block_size); __open_metadata()
625 r = __check_incompat_features(disk_super, pmd); __open_metadata()
629 r = dm_tm_open_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION, __open_metadata()
632 &pmd->tm, &pmd->metadata_sm); __open_metadata()
638 pmd->data_sm = dm_sm_disk_open(pmd->tm, disk_super->data_space_map_root, __open_metadata()
640 if (IS_ERR(pmd->data_sm)) { __open_metadata()
642 r = PTR_ERR(pmd->data_sm); __open_metadata()
646 pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm); __open_metadata()
647 if (!pmd->nb_tm) { __open_metadata()
653 __setup_btree_details(pmd); __open_metadata()
657 dm_sm_destroy(pmd->data_sm); __open_metadata()
659 dm_tm_destroy(pmd->tm); __open_metadata()
660 dm_sm_destroy(pmd->metadata_sm); __open_metadata()
667 static int __open_or_format_metadata(struct dm_pool_metadata *pmd, bool format_device) __open_or_format_metadata() argument
671 r = __superblock_all_zeroes(pmd->bm, &unformatted); __open_or_format_metadata()
676 return format_device ? __format_metadata(pmd) : -EPERM; __open_or_format_metadata()
678 return __open_metadata(pmd); __open_or_format_metadata()
681 static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool format_device) __create_persistent_data_objects() argument
685 pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT, __create_persistent_data_objects()
688 if (IS_ERR(pmd->bm)) { __create_persistent_data_objects()
690 return PTR_ERR(pmd->bm); __create_persistent_data_objects()
693 r = __open_or_format_metadata(pmd, format_device); __create_persistent_data_objects()
695 dm_block_manager_destroy(pmd->bm); __create_persistent_data_objects()
700 static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd) __destroy_persistent_data_objects() argument
702 dm_sm_destroy(pmd->data_sm); __destroy_persistent_data_objects()
703 dm_sm_destroy(pmd->metadata_sm); __destroy_persistent_data_objects()
704 dm_tm_destroy(pmd->nb_tm); __destroy_persistent_data_objects()
705 dm_tm_destroy(pmd->tm); __destroy_persistent_data_objects()
706 dm_block_manager_destroy(pmd->bm); __destroy_persistent_data_objects()
709 static int __begin_transaction(struct dm_pool_metadata *pmd) __begin_transaction() argument
719 r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION, __begin_transaction()
725 pmd->time = le32_to_cpu(disk_super->time); __begin_transaction()
726 pmd->root = le64_to_cpu(disk_super->data_mapping_root); __begin_transaction()
727 pmd->details_root = le64_to_cpu(disk_super->device_details_root); __begin_transaction()
728 pmd->trans_id = le64_to_cpu(disk_super->trans_id); __begin_transaction()
729 pmd->flags = le32_to_cpu(disk_super->flags); __begin_transaction()
730 pmd->data_block_size = le32_to_cpu(disk_super->data_block_size); __begin_transaction()
736 static int __write_changed_details(struct dm_pool_metadata *pmd) __write_changed_details() argument
743 list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) { __write_changed_details()
755 r = dm_btree_insert(&pmd->details_info, pmd->details_root, __write_changed_details()
756 &key, &details, &pmd->details_root); __write_changed_details()
771 static int __commit_transaction(struct dm_pool_metadata *pmd) __commit_transaction() argument
783 r = __write_changed_details(pmd); __commit_transaction()
787 r = dm_sm_commit(pmd->data_sm); __commit_transaction()
791 r = dm_tm_pre_commit(pmd->tm); __commit_transaction()
795 r = dm_sm_root_size(pmd->metadata_sm, &metadata_len); __commit_transaction()
799 r = dm_sm_root_size(pmd->data_sm, &data_len); __commit_transaction()
803 r = save_sm_roots(pmd); __commit_transaction()
807 r = superblock_lock(pmd, &sblock); __commit_transaction()
812 disk_super->time = cpu_to_le32(pmd->time); __commit_transaction()
813 disk_super->data_mapping_root = cpu_to_le64(pmd->root); __commit_transaction()
814 disk_super->device_details_root = cpu_to_le64(pmd->details_root); __commit_transaction()
815 disk_super->trans_id = cpu_to_le64(pmd->trans_id); __commit_transaction()
816 disk_super->flags = cpu_to_le32(pmd->flags); __commit_transaction()
818 copy_sm_roots(pmd, disk_super); __commit_transaction()
820 return dm_tm_commit(pmd->tm, sblock); __commit_transaction()
828 struct dm_pool_metadata *pmd; dm_pool_metadata_open() local
830 pmd = kmalloc(sizeof(*pmd), GFP_KERNEL); dm_pool_metadata_open()
831 if (!pmd) { dm_pool_metadata_open()
836 init_rwsem(&pmd->root_lock); dm_pool_metadata_open()
837 pmd->time = 0; dm_pool_metadata_open()
838 INIT_LIST_HEAD(&pmd->thin_devices); dm_pool_metadata_open()
839 pmd->read_only = false; dm_pool_metadata_open()
840 pmd->fail_io = false; dm_pool_metadata_open()
841 pmd->bdev = bdev; dm_pool_metadata_open()
842 pmd->data_block_size = data_block_size; dm_pool_metadata_open()
844 r = __create_persistent_data_objects(pmd, format_device); dm_pool_metadata_open()
846 kfree(pmd); dm_pool_metadata_open()
850 r = __begin_transaction(pmd); dm_pool_metadata_open()
852 if (dm_pool_metadata_close(pmd) < 0) dm_pool_metadata_open()
857 return pmd; dm_pool_metadata_open()
860 int dm_pool_metadata_close(struct dm_pool_metadata *pmd) dm_pool_metadata_close() argument
866 down_read(&pmd->root_lock); dm_pool_metadata_close()
867 list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) { dm_pool_metadata_close()
875 up_read(&pmd->root_lock); dm_pool_metadata_close()
878 DMERR("attempt to close pmd when %u device(s) are still open", dm_pool_metadata_close()
883 if (!pmd->read_only && !pmd->fail_io) { dm_pool_metadata_close()
884 r = __commit_transaction(pmd); dm_pool_metadata_close()
890 if (!pmd->fail_io) dm_pool_metadata_close()
891 __destroy_persistent_data_objects(pmd); dm_pool_metadata_close()
893 kfree(pmd); dm_pool_metadata_close()
902 static int __open_device(struct dm_pool_metadata *pmd, __open_device() argument
914 list_for_each_entry(td2, &pmd->thin_devices, list) __open_device()
930 r = dm_btree_lookup(&pmd->details_info, pmd->details_root, __open_device()
941 details_le.transaction_id = cpu_to_le64(pmd->trans_id); __open_device()
942 details_le.creation_time = cpu_to_le32(pmd->time); __open_device()
943 details_le.snapshotted_time = cpu_to_le32(pmd->time); __open_device()
950 (*td)->pmd = pmd; __open_device()
960 list_add(&(*td)->list, &pmd->thin_devices); __open_device()
970 static int __create_thin(struct dm_pool_metadata *pmd, __create_thin() argument
980 r = dm_btree_lookup(&pmd->details_info, pmd->details_root, __create_thin()
988 r = dm_btree_empty(&pmd->bl_info, &dev_root); __create_thin()
997 r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root); __create_thin()
999 dm_btree_del(&pmd->bl_info, dev_root); __create_thin()
1003 r = __open_device(pmd, dev, 1, &td); __create_thin()
1005 dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root); __create_thin()
1006 dm_btree_del(&pmd->bl_info, dev_root); __create_thin()
1014 int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev) dm_pool_create_thin() argument
1018 down_write(&pmd->root_lock); dm_pool_create_thin()
1019 if (!pmd->fail_io) dm_pool_create_thin()
1020 r = __create_thin(pmd, dev); dm_pool_create_thin()
1021 up_write(&pmd->root_lock); dm_pool_create_thin()
1026 static int __set_snapshot_details(struct dm_pool_metadata *pmd, __set_snapshot_details() argument
1033 r = __open_device(pmd, origin, 0, &td); __set_snapshot_details()
1047 static int __create_snap(struct dm_pool_metadata *pmd, __create_snap() argument
1058 r = dm_btree_lookup(&pmd->details_info, pmd->details_root, __create_snap()
1064 r = dm_btree_lookup(&pmd->tl_info, pmd->root, &key, &value); __create_snap()
1070 dm_tm_inc(pmd->tm, origin_root); __create_snap()
1076 r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root); __create_snap()
1078 dm_tm_dec(pmd->tm, origin_root); __create_snap()
1082 pmd->time++; __create_snap()
1084 r = __open_device(pmd, dev, 1, &td); __create_snap()
1088 r = __set_snapshot_details(pmd, td, origin, pmd->time); __create_snap()
1097 dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root); __create_snap()
1098 dm_btree_remove(&pmd->details_info, pmd->details_root, __create_snap()
1099 &key, &pmd->details_root); __create_snap()
1103 int dm_pool_create_snap(struct dm_pool_metadata *pmd, dm_pool_create_snap() argument
1109 down_write(&pmd->root_lock); dm_pool_create_snap()
1110 if (!pmd->fail_io) dm_pool_create_snap()
1111 r = __create_snap(pmd, dev, origin); dm_pool_create_snap()
1112 up_write(&pmd->root_lock); dm_pool_create_snap()
1117 static int __delete_device(struct dm_pool_metadata *pmd, dm_thin_id dev) __delete_device() argument
1124 r = __open_device(pmd, dev, 0, &td); __delete_device()
1135 r = dm_btree_remove(&pmd->details_info, pmd->details_root, __delete_device()
1136 &key, &pmd->details_root); __delete_device()
1140 r = dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root); __delete_device()
1147 int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd, dm_pool_delete_thin_device() argument
1152 down_write(&pmd->root_lock); dm_pool_delete_thin_device()
1153 if (!pmd->fail_io) dm_pool_delete_thin_device()
1154 r = __delete_device(pmd, dev); dm_pool_delete_thin_device()
1155 up_write(&pmd->root_lock); dm_pool_delete_thin_device()
1160 int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd, dm_pool_set_metadata_transaction_id() argument
1166 down_write(&pmd->root_lock); dm_pool_set_metadata_transaction_id()
1168 if (pmd->fail_io) dm_pool_set_metadata_transaction_id()
1171 if (pmd->trans_id != current_id) { dm_pool_set_metadata_transaction_id()
1176 pmd->trans_id = new_id; dm_pool_set_metadata_transaction_id()
1180 up_write(&pmd->root_lock); dm_pool_set_metadata_transaction_id()
1185 int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd, dm_pool_get_metadata_transaction_id() argument
1190 down_read(&pmd->root_lock); dm_pool_get_metadata_transaction_id()
1191 if (!pmd->fail_io) { dm_pool_get_metadata_transaction_id()
1192 *result = pmd->trans_id; dm_pool_get_metadata_transaction_id()
1195 up_read(&pmd->root_lock); dm_pool_get_metadata_transaction_id()
1200 static int __reserve_metadata_snap(struct dm_pool_metadata *pmd) __reserve_metadata_snap() argument
1210 dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION); __reserve_metadata_snap()
1211 r = dm_tm_shadow_block(pmd->tm, THIN_SUPERBLOCK_LOCATION, __reserve_metadata_snap()
1224 dm_tm_dec(pmd->tm, held_root); __reserve_metadata_snap()
1225 dm_tm_unlock(pmd->tm, copy); __reserve_metadata_snap()
1240 dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->data_mapping_root)); __reserve_metadata_snap()
1241 dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->device_details_root)); __reserve_metadata_snap()
1242 dm_tm_unlock(pmd->tm, copy); __reserve_metadata_snap()
1247 r = superblock_lock(pmd, &sblock); __reserve_metadata_snap()
1249 dm_tm_dec(pmd->tm, held_root); __reserve_metadata_snap()
1259 int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd) dm_pool_reserve_metadata_snap() argument
1263 down_write(&pmd->root_lock); dm_pool_reserve_metadata_snap()
1264 if (!pmd->fail_io) dm_pool_reserve_metadata_snap()
1265 r = __reserve_metadata_snap(pmd); dm_pool_reserve_metadata_snap()
1266 up_write(&pmd->root_lock); dm_pool_reserve_metadata_snap()
1271 static int __release_metadata_snap(struct dm_pool_metadata *pmd) __release_metadata_snap() argument
1278 r = superblock_lock(pmd, &sblock); __release_metadata_snap()
1293 r = dm_tm_read_lock(pmd->tm, held_root, &sb_validator, &copy); __release_metadata_snap()
1298 dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root)); __release_metadata_snap()
1299 dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root)); __release_metadata_snap()
1300 dm_sm_dec_block(pmd->metadata_sm, held_root); __release_metadata_snap()
1302 return dm_tm_unlock(pmd->tm, copy); __release_metadata_snap()
1305 int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd) dm_pool_release_metadata_snap() argument
1309 down_write(&pmd->root_lock); dm_pool_release_metadata_snap()
1310 if (!pmd->fail_io) dm_pool_release_metadata_snap()
1311 r = __release_metadata_snap(pmd); dm_pool_release_metadata_snap()
1312 up_write(&pmd->root_lock); dm_pool_release_metadata_snap()
1317 static int __get_metadata_snap(struct dm_pool_metadata *pmd, __get_metadata_snap() argument
1324 r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION, __get_metadata_snap()
1335 int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd, dm_pool_get_metadata_snap() argument
1340 down_read(&pmd->root_lock); dm_pool_get_metadata_snap()
1341 if (!pmd->fail_io) dm_pool_get_metadata_snap()
1342 r = __get_metadata_snap(pmd, result); dm_pool_get_metadata_snap()
1343 up_read(&pmd->root_lock); dm_pool_get_metadata_snap()
1348 int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev, dm_pool_open_thin_device() argument
1353 down_write(&pmd->root_lock); dm_pool_open_thin_device()
1354 if (!pmd->fail_io) dm_pool_open_thin_device()
1355 r = __open_device(pmd, dev, 0, td); dm_pool_open_thin_device()
1356 up_write(&pmd->root_lock); dm_pool_open_thin_device()
1363 down_write(&td->pmd->root_lock); dm_pool_close_thin_device()
1365 up_write(&td->pmd->root_lock); dm_pool_close_thin_device()
1391 struct dm_pool_metadata *pmd = td->pmd; dm_thin_find_block() local
1395 if (pmd->fail_io) dm_thin_find_block()
1398 down_read(&pmd->root_lock); dm_thin_find_block()
1401 info = &pmd->info; dm_thin_find_block()
1403 info = &pmd->nb_info; dm_thin_find_block()
1405 r = dm_btree_lookup(info, pmd->root, keys, &value); dm_thin_find_block()
1418 up_read(&pmd->root_lock); dm_thin_find_block()
1427 struct dm_pool_metadata *pmd = td->pmd; __insert() local
1430 value = cpu_to_le64(pack_block_time(data_block, pmd->time)); __insert()
1433 r = dm_btree_insert_notify(&pmd->info, pmd->root, keys, &value, __insert()
1434 &pmd->root, &inserted); __insert()
1450 down_write(&td->pmd->root_lock); dm_thin_insert_block()
1451 if (!td->pmd->fail_io) dm_thin_insert_block()
1453 up_write(&td->pmd->root_lock); dm_thin_insert_block()
1461 struct dm_pool_metadata *pmd = td->pmd; __remove() local
1464 r = dm_btree_remove(&pmd->info, pmd->root, keys, &pmd->root); __remove()
1478 down_write(&td->pmd->root_lock); dm_thin_remove_block()
1479 if (!td->pmd->fail_io) dm_thin_remove_block()
1481 up_write(&td->pmd->root_lock); dm_thin_remove_block()
1486 int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result) dm_pool_block_is_used() argument
1491 down_read(&pmd->root_lock); dm_pool_block_is_used()
1492 r = dm_sm_get_count(pmd->data_sm, b, &ref_count); dm_pool_block_is_used()
1495 up_read(&pmd->root_lock); dm_pool_block_is_used()
1504 down_read(&td->pmd->root_lock); dm_thin_changed_this_transaction()
1506 up_read(&td->pmd->root_lock); dm_thin_changed_this_transaction()
1511 bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd) dm_pool_changed_this_transaction() argument
1516 down_read(&pmd->root_lock); dm_pool_changed_this_transaction()
1517 list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) { dm_pool_changed_this_transaction()
1523 up_read(&pmd->root_lock); dm_pool_changed_this_transaction()
1532 down_read(&td->pmd->root_lock); dm_thin_aborted_changes()
1534 up_read(&td->pmd->root_lock); dm_thin_aborted_changes()
1539 int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result) dm_pool_alloc_data_block() argument
1543 down_write(&pmd->root_lock); dm_pool_alloc_data_block()
1544 if (!pmd->fail_io) dm_pool_alloc_data_block()
1545 r = dm_sm_new_block(pmd->data_sm, result); dm_pool_alloc_data_block()
1546 up_write(&pmd->root_lock); dm_pool_alloc_data_block()
1551 int dm_pool_commit_metadata(struct dm_pool_metadata *pmd) dm_pool_commit_metadata() argument
1555 down_write(&pmd->root_lock); dm_pool_commit_metadata()
1556 if (pmd->fail_io) dm_pool_commit_metadata()
1559 r = __commit_transaction(pmd); dm_pool_commit_metadata()
1566 r = __begin_transaction(pmd); dm_pool_commit_metadata()
1568 up_write(&pmd->root_lock); dm_pool_commit_metadata()
1572 static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd) __set_abort_with_changes_flags() argument
1576 list_for_each_entry(td, &pmd->thin_devices, list) __set_abort_with_changes_flags()
1580 int dm_pool_abort_metadata(struct dm_pool_metadata *pmd) dm_pool_abort_metadata() argument
1584 down_write(&pmd->root_lock); dm_pool_abort_metadata()
1585 if (pmd->fail_io) dm_pool_abort_metadata()
1588 __set_abort_with_changes_flags(pmd); dm_pool_abort_metadata()
1589 __destroy_persistent_data_objects(pmd); dm_pool_abort_metadata()
1590 r = __create_persistent_data_objects(pmd, false); dm_pool_abort_metadata()
1592 pmd->fail_io = true; dm_pool_abort_metadata()
1595 up_write(&pmd->root_lock); dm_pool_abort_metadata()
1600 int dm_pool_get_free_block_count(struct dm_pool_metadata *pmd, dm_block_t *result) dm_pool_get_free_block_count() argument
1604 down_read(&pmd->root_lock); dm_pool_get_free_block_count()
1605 if (!pmd->fail_io) dm_pool_get_free_block_count()
1606 r = dm_sm_get_nr_free(pmd->data_sm, result); dm_pool_get_free_block_count()
1607 up_read(&pmd->root_lock); dm_pool_get_free_block_count()
1612 int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd, dm_pool_get_free_metadata_block_count() argument
1617 down_read(&pmd->root_lock); dm_pool_get_free_metadata_block_count()
1618 if (!pmd->fail_io) dm_pool_get_free_metadata_block_count()
1619 r = dm_sm_get_nr_free(pmd->metadata_sm, result); dm_pool_get_free_metadata_block_count()
1620 up_read(&pmd->root_lock); dm_pool_get_free_metadata_block_count()
1625 int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd, dm_pool_get_metadata_dev_size() argument
1630 down_read(&pmd->root_lock); dm_pool_get_metadata_dev_size()
1631 if (!pmd->fail_io) dm_pool_get_metadata_dev_size()
1632 r = dm_sm_get_nr_blocks(pmd->metadata_sm, result); dm_pool_get_metadata_dev_size()
1633 up_read(&pmd->root_lock); dm_pool_get_metadata_dev_size()
1638 int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result) dm_pool_get_data_dev_size() argument
1642 down_read(&pmd->root_lock); dm_pool_get_data_dev_size()
1643 if (!pmd->fail_io) dm_pool_get_data_dev_size()
1644 r = dm_sm_get_nr_blocks(pmd->data_sm, result); dm_pool_get_data_dev_size()
1645 up_read(&pmd->root_lock); dm_pool_get_data_dev_size()
1653 struct dm_pool_metadata *pmd = td->pmd; dm_thin_get_mapped_count() local
1655 down_read(&pmd->root_lock); dm_thin_get_mapped_count()
1656 if (!pmd->fail_io) { dm_thin_get_mapped_count()
1660 up_read(&pmd->root_lock); dm_thin_get_mapped_count()
1670 struct dm_pool_metadata *pmd = td->pmd; __highest_block() local
1672 r = dm_btree_lookup(&pmd->tl_info, pmd->root, &td->id, &value_le); __highest_block()
1678 return dm_btree_find_highest_key(&pmd->bl_info, thin_root, result); __highest_block()
1685 struct dm_pool_metadata *pmd = td->pmd; dm_thin_get_highest_mapped_block() local
1687 down_read(&pmd->root_lock); dm_thin_get_highest_mapped_block()
1688 if (!pmd->fail_io) dm_thin_get_highest_mapped_block()
1690 up_read(&pmd->root_lock); dm_thin_get_highest_mapped_block()
1715 int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) dm_pool_resize_data_dev() argument
1719 down_write(&pmd->root_lock); dm_pool_resize_data_dev()
1720 if (!pmd->fail_io) dm_pool_resize_data_dev()
1721 r = __resize_space_map(pmd->data_sm, new_count); dm_pool_resize_data_dev()
1722 up_write(&pmd->root_lock); dm_pool_resize_data_dev()
1727 int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) dm_pool_resize_metadata_dev() argument
1731 down_write(&pmd->root_lock); dm_pool_resize_metadata_dev()
1732 if (!pmd->fail_io) dm_pool_resize_metadata_dev()
1733 r = __resize_space_map(pmd->metadata_sm, new_count); dm_pool_resize_metadata_dev()
1734 up_write(&pmd->root_lock); dm_pool_resize_metadata_dev()
1739 void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd) dm_pool_metadata_read_only() argument
1741 down_write(&pmd->root_lock); dm_pool_metadata_read_only()
1742 pmd->read_only = true; dm_pool_metadata_read_only()
1743 dm_bm_set_read_only(pmd->bm); dm_pool_metadata_read_only()
1744 up_write(&pmd->root_lock); dm_pool_metadata_read_only()
1747 void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd) dm_pool_metadata_read_write() argument
1749 down_write(&pmd->root_lock); dm_pool_metadata_read_write()
1750 pmd->read_only = false; dm_pool_metadata_read_write()
1751 dm_bm_set_read_write(pmd->bm); dm_pool_metadata_read_write()
1752 up_write(&pmd->root_lock); dm_pool_metadata_read_write()
1755 int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, dm_pool_register_metadata_threshold() argument
1762 down_write(&pmd->root_lock); dm_pool_register_metadata_threshold()
1763 r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context); dm_pool_register_metadata_threshold()
1764 up_write(&pmd->root_lock); dm_pool_register_metadata_threshold()
1769 int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd) dm_pool_metadata_set_needs_check() argument
1775 down_write(&pmd->root_lock); dm_pool_metadata_set_needs_check()
1776 pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG; dm_pool_metadata_set_needs_check()
1778 r = superblock_lock(pmd, &sblock); dm_pool_metadata_set_needs_check()
1785 disk_super->flags = cpu_to_le32(pmd->flags); dm_pool_metadata_set_needs_check()
1789 up_write(&pmd->root_lock); dm_pool_metadata_set_needs_check()
1793 bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd) dm_pool_metadata_needs_check() argument
1797 down_read(&pmd->root_lock); dm_pool_metadata_needs_check()
1798 needs_check = pmd->flags & THIN_METADATA_NEEDS_CHECK_FLAG; dm_pool_metadata_needs_check()
1799 up_read(&pmd->root_lock); dm_pool_metadata_needs_check()
1804 void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd) dm_pool_issue_prefetches() argument
1806 down_read(&pmd->root_lock); dm_pool_issue_prefetches()
1807 if (!pmd->fail_io) dm_pool_issue_prefetches()
1808 dm_tm_issue_prefetches(pmd->tm); dm_pool_issue_prefetches()
1809 up_read(&pmd->root_lock); dm_pool_issue_prefetches()
573 __check_incompat_features(struct thin_disk_superblock *disk_super, struct dm_pool_metadata *pmd) __check_incompat_features() argument
dm-thin-metadata.h 48 int dm_pool_metadata_close(struct dm_pool_metadata *pmd);
61 int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev);
69 int dm_pool_create_snap(struct dm_pool_metadata *pmd, dm_thin_id dev,
77 int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd,
84 int dm_pool_commit_metadata(struct dm_pool_metadata *pmd);
94 int dm_pool_abort_metadata(struct dm_pool_metadata *pmd);
99 int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
103 int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
115 int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd);
116 int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd);
118 int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
128 int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev,
152 int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result);
167 bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd);
176 int dm_pool_get_free_block_count(struct dm_pool_metadata *pmd,
179 int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
182 int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
185 int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
187 int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
193 int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
194 int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
200 void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd);
201 void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd);
203 int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
211 int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd);
212 bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd);
217 void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd);
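
The dm-thin-metadata hits above repeat one locking idiom: each public dm_pool_*()/dm_thin_*() entry point takes pmd->root_lock, skips the work once pmd->fail_io has been set, and delegates to a __-prefixed helper that expects the lock to be held. Below is a minimal sketch of that wrapper; dm_pool_example_op() and __example_op() are invented names, and the shape is taken from the dm_pool_create_thin()/dm_pool_delete_thin_device() hits above rather than from any real function.

/* Sketch only: both function names here are hypothetical. */
static int __example_op(struct dm_pool_metadata *pmd, dm_thin_id dev);	/* lock held by caller */

int dm_pool_example_op(struct dm_pool_metadata *pmd, dm_thin_id dev)
{
	int r = -EINVAL;

	down_write(&pmd->root_lock);		/* serialise against every other metadata op */
	if (!pmd->fail_io)			/* metadata already failed: leave it alone */
		r = __example_op(pmd, dev);	/* the real b-tree work, lock already held */
	up_write(&pmd->root_lock);

	return r;
}
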
/linux-4.1.27/arch/s390/mm/
hugetlbpage.c 13 pmd_t pmd; __pte_to_pmd() local
16 * Convert encoding pte bits pmd bits __pte_to_pmd()
36 pmd_val(pmd) = pte_val(pte) & PAGE_MASK; __pte_to_pmd()
37 pmd_val(pmd) |= (pte_val(pte) & _PAGE_READ) >> 4; __pte_to_pmd()
38 pmd_val(pmd) |= (pte_val(pte) & _PAGE_WRITE) >> 4; __pte_to_pmd()
39 pmd_val(pmd) |= (pte_val(pte) & _PAGE_INVALID) >> 5; __pte_to_pmd()
40 pmd_val(pmd) |= (pte_val(pte) & _PAGE_PROTECT); __pte_to_pmd()
41 pmd_val(pmd) |= (pte_val(pte) & _PAGE_DIRTY) << 10; __pte_to_pmd()
42 pmd_val(pmd) |= (pte_val(pte) & _PAGE_YOUNG) << 10; __pte_to_pmd()
44 pmd_val(pmd) = _SEGMENT_ENTRY_INVALID; __pte_to_pmd()
45 return pmd; __pte_to_pmd()
48 static inline pte_t __pmd_to_pte(pmd_t pmd) __pmd_to_pte() argument
53 * Convert encoding pmd bits pte bits __pmd_to_pte()
72 if (pmd_present(pmd)) { __pmd_to_pte()
73 pte_val(pte) = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN_LARGE; __pmd_to_pte()
75 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_READ) << 4; __pmd_to_pte()
76 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) << 4; __pmd_to_pte()
77 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) << 5; __pmd_to_pte()
78 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT); __pmd_to_pte()
79 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) >> 10; __pmd_to_pte()
80 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) >> 10; __pmd_to_pte()
89 pmd_t pmd; set_huge_pte_at() local
91 pmd = __pte_to_pmd(pte); set_huge_pte_at()
94 pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN; set_huge_pte_at()
95 pmd_val(pmd) |= pte_page(pte)[1].index; set_huge_pte_at()
97 pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE; set_huge_pte_at()
98 *(pmd_t *) ptep = pmd; set_huge_pte_at()
104 pmd_t pmd; huge_ptep_get() local
106 pmd = *(pmd_t *) ptep; huge_ptep_get()
107 if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) { huge_ptep_get()
108 origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN; huge_ptep_get()
109 pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN; huge_ptep_get()
110 pmd_val(pmd) |= *(unsigned long *) origin; huge_ptep_get()
112 pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG | _SEGMENT_ENTRY_DIRTY; huge_ptep_get()
114 return __pmd_to_pte(pmd); huge_ptep_get()
201 int pmd_huge(pmd_t pmd) pmd_huge() argument
206 return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE); pmd_huge()
gup.c 20 static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr, gup_pte_range() argument
29 ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr); gup_pte_range()
51 static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, gup_huge_pmd() argument
60 if ((pmd_val(pmd) & mask) != result) gup_huge_pmd()
62 VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT)); gup_huge_pmd()
65 head = pmd_page(pmd); gup_huge_pmd()
81 if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) { gup_huge_pmd()
106 pmd_t *pmdp, pmd; gup_pmd_range() local
113 pmd = *pmdp; gup_pmd_range()
121 * splitting bit in the pmd. Returning zero will take gup_pmd_range()
123 * if the pmd is still in splitting state. gup_pmd_range()
125 if (pmd_none(pmd) || pmd_trans_splitting(pmd)) gup_pmd_range()
127 if (unlikely(pmd_large(pmd))) { gup_pmd_range()
128 if (!gup_huge_pmd(pmdp, pmd, addr, next, gup_pmd_range()
131 } else if (!gup_pte_range(pmdp, pmd, addr, next, gup_pmd_range()
dump_pagetables.c 111 pmd_t *pmd, unsigned long addr) walk_pte_level()
119 pte = pte_offset_kernel(pmd, addr); walk_pte_level()
130 pmd_t *pmd; walk_pmd_level() local
135 pmd = pmd_offset(pud, addr); walk_pmd_level()
136 if (!pmd_none(*pmd)) { walk_pmd_level()
137 if (pmd_large(*pmd)) { walk_pmd_level()
138 prot = pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT; walk_pmd_level()
141 walk_pte_level(m, st, pmd, addr); walk_pmd_level()
110 walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t *pmd, unsigned long addr) walk_pte_level() argument
pageattr.c 129 pmd_t *pmd; __kernel_map_pages() local
136 pmd = pmd_offset(pud, address); __kernel_map_pages()
137 pte = pte_offset_kernel(pmd, address); __kernel_map_pages()
vmem.c 50 pmd_t *pmd = NULL; vmem_pmd_alloc() local
52 pmd = vmem_alloc_pages(2); vmem_pmd_alloc()
53 if (!pmd) vmem_pmd_alloc()
55 clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4); vmem_pmd_alloc()
56 return pmd; vmem_pmd_alloc()
/linux-4.1.27/arch/sh/mm/
hugetlbpage.c 29 pmd_t *pmd; huge_pte_alloc() local
36 pmd = pmd_alloc(mm, pud, addr); huge_pte_alloc()
37 if (pmd) huge_pte_alloc()
38 pte = pte_alloc_map(mm, NULL, pmd, addr); huge_pte_alloc()
49 pmd_t *pmd; huge_pte_offset() local
56 pmd = pmd_offset(pud, addr); huge_pte_offset()
57 if (pmd) huge_pte_offset()
58 pte = pte_offset_map(pmd, addr); huge_pte_offset()
70 int pmd_huge(pmd_t pmd) pmd_huge() argument
pgtable.c 43 void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pud_populate() argument
45 set_pud(pud, __pud((unsigned long)pmd)); pud_populate()
53 void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
55 kmem_cache_free(pmd_cachep, pmd); pmd_free()
init.c 49 pmd_t *pmd; __get_pte_phys() local
63 pmd = pmd_alloc(NULL, pud, addr); __get_pte_phys()
64 if (unlikely(!pmd)) { __get_pte_phys()
65 pmd_ERROR(*pmd); __get_pte_phys()
69 return pte_offset_kernel(pmd, addr); __get_pte_phys()
129 pmd_t *pmd; one_md_table_init() local
131 pmd = alloc_bootmem_pages(PAGE_SIZE); one_md_table_init()
132 pud_populate(&init_mm, pud, pmd); one_md_table_init()
133 BUG_ON(pmd != pmd_offset(pud, 0)); one_md_table_init()
139 static pte_t * __init one_page_table_init(pmd_t *pmd) one_page_table_init() argument
141 if (pmd_none(*pmd)) { one_page_table_init()
145 pmd_populate_kernel(&init_mm, pmd, pte); one_page_table_init()
146 BUG_ON(pte != pte_offset_kernel(pmd, 0)); one_page_table_init()
149 return pte_offset_kernel(pmd, 0); one_page_table_init()
152 static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd, page_table_kmap_check() argument
163 pmd_t *pmd; page_table_range_init() local
177 pmd = one_md_table_init(pud); page_table_range_init()
179 pmd += k; page_table_range_init()
181 for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { page_table_range_init()
182 pte = page_table_kmap_check(one_page_table_init(pmd), page_table_range_init()
183 pmd, vaddr, pte); page_table_range_init()
tlbex_32.c 27 pmd_t *pmd; handle_tlbmiss() local
48 pmd = pmd_offset(pud, address); handle_tlbmiss()
49 if (pmd_none_or_clear_bad(pmd)) handle_tlbmiss()
51 pte = pte_offset_kernel(pmd, address); handle_tlbmiss()
fault.c 77 pmd_t *pmd; show_pte() local
101 pmd = pmd_offset(pud, addr); show_pte()
103 printk(", *pmd=%0*Lx", (u32)(sizeof(*pmd) * 2), show_pte()
104 (u64)pmd_val(*pmd)); show_pte()
106 if (pmd_none(*pmd)) show_pte()
109 if (pmd_bad(*pmd)) { show_pte()
115 if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT))) show_pte()
118 pte = pte_offset_kernel(pmd, addr); show_pte()
131 pmd_t *pmd, *pmd_k; vmalloc_sync_one() local
147 pmd = pmd_offset(pud, address); vmalloc_sync_one()
152 if (!pmd_present(*pmd)) vmalloc_sync_one()
153 set_pmd(pmd, *pmd_k); vmalloc_sync_one()
160 BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); vmalloc_sync_one()
tlbex_64.c 48 pmd_t *pmd; handle_tlbmiss() local
65 pmd = pmd_offset(pud, address); handle_tlbmiss()
66 if (pmd_none(*pmd) || !pmd_present(*pmd)) handle_tlbmiss()
69 pte = pte_offset_kernel(pmd, address); handle_tlbmiss()
gup.c 74 static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, gup_pte_range() argument
96 ptep = pte_offset_map(&pmd, addr); gup_pte_range()
127 pmd_t pmd = *pmdp; gup_pmd_range() local
130 if (pmd_none(pmd)) gup_pmd_range()
132 if (!gup_pte_range(pmd, addr, next, write, pages, nr)) gup_pmd_range()
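
The sh/mm hits above (handle_tlbmiss(), show_pte(), gup_pmd_range()) all reach the pmd the same way: descend pgd, then pud, then pmd, validating each level before dereferencing the next. A minimal sketch of that walk follows, assuming the standard pgd_offset()/pud_offset()/pmd_offset()/pte_offset_kernel() helpers (pmd_offset() and pte_offset_kernel() are visible in the hits; the others are their usual companions); walk_to_pte() itself is an invented name.

/* Sketch: walk_to_pte() is hypothetical; the level-by-level checks mirror
 * the handle_tlbmiss()/show_pte() hits above. */
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);	/* top-level entry covering addr */
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);		/* the pmd level this search is about */
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);	/* kernel mapping, so no kmap needed */
}
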
/linux-4.1.27/arch/parisc/include/asm/
pgalloc.h 15 * allocate the first pmd adjacent to the pgd. This means that we can
16 * subtract a constant offset to get to it. The pmd and pgd sizes are
17 * arranged so that a single pmd covers 4GB (giving a full 64-bit
31 /* Populate first pmd with allocated memory. We mark it pgd_alloc()
33 * pmd entry may not be cleared. */ pgd_alloc()
38 /* The first pmd entry also is marked with _PAGE_GATEWAY as pgd_alloc()
39 * a signal that this pmd may not be freed */ pgd_alloc()
56 /* Three Level Page Table Support for pmd's */
58 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) pgd_populate() argument
61 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT)); pgd_populate()
66 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, pmd_alloc_one() local
68 if (pmd) pmd_alloc_one()
69 memset(pmd, 0, PAGE_SIZE<<PMD_ORDER); pmd_alloc_one()
70 return pmd; pmd_alloc_one()
73 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
75 if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) { pmd_free()
77 * This is the permanent pmd attached to the pgd; pmd_free()
85 free_pages((unsigned long)pmd, PMD_ORDER); pmd_free()
90 /* Two Level Page Table Support for pmd's */
93 * allocating and freeing a pmd is trivial: the 1-entry pmd is
99 #define pgd_populate(mm, pmd, pte) BUG()
104 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) pmd_populate_kernel() argument
108 * the permanent pmd */ pmd_populate_kernel()
109 if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) pmd_populate_kernel()
110 __pmd_val_set(*pmd, (PxD_FLAG_PRESENT | pmd_populate_kernel()
116 __pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) pmd_populate_kernel()
120 #define pmd_populate(mm, pmd, pte_page) \
121 pmd_populate_kernel(mm, pmd, page_address(pte_page))
122 #define pmd_pgtable(pmd) pmd_page(pmd)
tlb.h 24 #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
pgtable.h 81 printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e))
91 #define PMD_ORDER 1 /* Number of pages per pmd */
92 #define PGD_ALLOC_ORDER 2 /* first pgd contains pmd */
206 /* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
297 /* The first entry of the permanent pmd is not there if it contains
305 static inline void pmd_clear(pmd_t *pmd) { pmd_clear() argument
307 if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) pmd_clear()
308 /* This is the entry pointing to the permanent pmd pmd_clear()
310 __pmd_val_set(*pmd, PxD_FLAG_ATTACHED); pmd_clear()
313 __pmd_val_set(*pmd, 0); pmd_clear()
330 /* This is the permanent pmd attached to the pgd; cannot pgd_clear()
339 * setup: the pgd is never bad, and a pmd always exists (as it's folded
396 #define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_address(pmd)))
398 #define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))
399 #define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
421 #define pte_offset_kernel(pmd, address) \
422 ((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
423 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
page.h 43 * the pmd and pgd in ZONE_DMA (i.e. under 4GB) */
44 typedef struct { __u32 pmd; } pmd_t; member in struct:__anon2202
50 #define pmd_val(x) ((x).pmd + 0)
59 #define __pmd_val_set(x,n) (x).pmd = (n)
/linux-4.1.27/arch/m68k/include/asm/
motorola_pgalloc.h 74 static inline int pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
76 return free_pointer_table(pmd); pmd_free()
79 static inline int __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, __pmd_free_tlb() argument
82 return free_pointer_table(pmd); __pmd_free_tlb()
97 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) pmd_populate_kernel() argument
99 pmd_set(pmd, pte); pmd_populate_kernel()
102 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page) pmd_populate() argument
104 pmd_set(pmd, page_address(page)); pmd_populate()
106 #define pmd_pgtable(pmd) pmd_page(pmd)
108 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) pgd_populate() argument
110 pgd_set(pgd, pmd); pgd_populate()
sun3_pgalloc.h 66 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) pmd_populate_kernel() argument
68 pmd_val(*pmd) = __pa((unsigned long)pte); pmd_populate_kernel()
71 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page) pmd_populate() argument
73 pmd_val(*pmd) = __pa((unsigned long)page_address(page)); pmd_populate()
75 #define pmd_pgtable(pmd) pmd_page(pmd)
78 * allocating and freeing a pmd is trivial: the 1-entry pmd is
99 #define pgd_populate(mm, pmd, pte) BUG()
mcf_pgalloc.h 36 #define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
39 #define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
41 #define pmd_pgtable(pmd) pmd_page(pmd)
49 #define __pmd_free_tlb(tlb, pmd, address) do { } while (0)
82 * In our implementation, each pgd entry contains 1 pmd that is never allocated
85 #define pmd_free(mm, pmd) BUG()
104 #define pgd_populate(mm, pmd, pte) BUG()
sun3_pgtable.h 119 #define __pmd_page(pmd) \
120 ((unsigned long) __va (pmd_val (pmd) & PAGE_MASK))
134 #define pmd_page(pmd) virt_to_page(__pmd_page(pmd))
137 static inline int pmd_none2 (pmd_t *pmd) { return !pmd_val (*pmd); } pmd_none2() argument
138 #define pmd_none(pmd) pmd_none2(&(pmd)) pmd_none2()
139 //static inline int pmd_bad (pmd_t pmd) { return (pmd_val (pmd) & SUN3_PMD_MASK) != SUN3_PMD_MAGIC; } pmd_bad2()
140 static inline int pmd_bad2 (pmd_t *pmd) { return 0; } pmd_bad2() argument
141 #define pmd_bad(pmd) pmd_bad2(&(pmd)) pmd_present2()
142 static inline int pmd_present2 (pmd_t *pmd) { return pmd_val (*pmd) & SUN3_PMD_VALID; } pmd_present2() argument
143 /* #define pmd_present(pmd) pmd_present2(&(pmd)) */ pmd_present2()
144 #define pmd_present(pmd) (!pmd_none2(&(pmd))) pmd_clear()
156 printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e)) pgd_clear()
204 #define pte_offset_kernel(pmd, address) ((pte_t *) __pmd_page(*pmd) + pte_index(address))
205 #define pte_offset_map(pmd, address) ((pte_t *)page_address(pmd_page(*pmd)) + pte_index(address))
mcf_pgtable.h 172 #define __pmd_page(pmd) ((unsigned long) (pmd_val(pmd)))
193 static inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); } pmd_none2() argument
194 #define pmd_none(pmd) pmd_none2(&(pmd)) pmd_bad2()
195 static inline int pmd_bad2(pmd_t *pmd) { return 0; } pmd_bad2() argument
196 #define pmd_bad(pmd) pmd_bad2(&(pmd)) pmd_bad2()
197 #define pmd_present(pmd) (!pmd_none2(&(pmd))) pmd_clear()
209 printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \ pgd_clear()
398 #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
page.h 24 typedef struct { unsigned long pmd[16]; } pmd_t; member in struct:__anon1816
30 #define pmd_val(x) ((&x)->pmd[0])
motorola_pgtable.h 111 unsigned long *ptr = pmdp->pmd; pmd_set()
125 #define __pmd_page(pmd) ((unsigned long)__va(pmd_val(pmd) & _TABLE_MASK))
137 #define pmd_none(pmd) (!pmd_val(pmd))
138 #define pmd_bad(pmd) ((pmd_val(pmd) & _DESCTYPE_MASK) != _PAGE_TABLE)
139 #define pmd_present(pmd) (pmd_val(pmd) & _PAGE_TABLE)
141 unsigned long *__ptr = pmdp->pmd; \
146 #define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))
158 printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
mmu_context.h 102 pmd_t *pmd; load_ksp_mmu() local
129 pmd = pmd_offset(pgd, mmuar); load_ksp_mmu()
130 if (pmd_none(*pmd)) load_ksp_mmu()
133 pte = (mmuar >= PAGE_OFFSET) ? pte_offset_kernel(pmd, mmuar) load_ksp_mmu()
134 : pte_offset_map(pmd, mmuar); load_ksp_mmu()
/linux-4.1.27/arch/powerpc/include/asm/
pgalloc-32.h 9 extern void __bad_pte(pmd_t *pmd);
15 * We don't have any real pmd's, and this code never triggers because
21 /* #define pgd_populate(mm, pmd, pte) BUG() */
24 #define pmd_populate_kernel(mm, pmd, pte) \
25 (pmd_val(*(pmd)) = __pa(pte) | _PMD_PRESENT)
26 #define pmd_populate(mm, pmd, pte) \
27 (pmd_val(*(pmd)) = (page_to_pfn(pte) << PAGE_SHIFT) | _PMD_PRESENT)
28 #define pmd_pgtable(pmd) pmd_page(pmd)
30 #define pmd_populate_kernel(mm, pmd, pte) \
31 (pmd_val(*(pmd)) = (unsigned long)pte | _PMD_PRESENT)
32 #define pmd_populate(mm, pmd, pte) \
33 (pmd_val(*(pmd)) = (unsigned long)lowmem_page_address(pte) | _PMD_PRESENT)
34 #define pmd_pgtable(pmd) pmd_page(pmd)
pgtable-ppc64.h 164 #define pmd_none(pmd) (!pmd_val(pmd))
165 #define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \
166 || (pmd_val(pmd) & PMD_BAD_BITS))
167 #define pmd_present(pmd) (!pmd_none(pmd))
169 #define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS)
170 extern struct page *pmd_page(pmd_t pmd);
357 pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
392 * The linux hugepage PMD now include the pmd entries followed by the address
441 extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
443 pmd_t *pmdp, pmd_t pmd);
445 pmd_t *pmd);
458 * for THP we also track the subpage details at the pmd level. We don't do
462 static inline int pmd_trans_huge(pmd_t pmd) pmd_trans_huge() argument
467 return (pmd_val(pmd) & 0x3) && (pmd_val(pmd) & _PAGE_THP_HUGE); pmd_trans_huge()
470 static inline int pmd_trans_splitting(pmd_t pmd) pmd_trans_splitting() argument
472 if (pmd_trans_huge(pmd)) pmd_trans_splitting()
473 return pmd_val(pmd) & _PAGE_SPLITTING; pmd_trans_splitting()
480 static inline int pmd_large(pmd_t pmd) pmd_large() argument
485 return ((pmd_val(pmd) & 0x3) != 0x0); pmd_large()
488 static inline pte_t pmd_pte(pmd_t pmd) pmd_pte() argument
490 return __pte(pmd_val(pmd)); pmd_pte()
498 static inline pte_t *pmdp_ptep(pmd_t *pmd) pmdp_ptep() argument
500 return (pte_t *)pmd; pmdp_ptep()
503 #define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd))
504 #define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
505 #define pmd_young(pmd) pte_young(pmd_pte(pmd))
506 #define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
507 #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
508 #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
509 #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
510 #define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
513 #define pmd_write(pmd) pte_write(pmd_pte(pmd))
515 static inline pmd_t pmd_mkhuge(pmd_t pmd) pmd_mkhuge() argument
518 return pmd; pmd_mkhuge()
521 static inline pmd_t pmd_mknotpresent(pmd_t pmd) pmd_mknotpresent() argument
523 pmd_val(pmd) &= ~_PAGE_PRESENT; pmd_mknotpresent()
524 return pmd; pmd_mknotpresent()
527 static inline pmd_t pmd_mksplitting(pmd_t pmd) pmd_mksplitting() argument
529 pmd_val(pmd) |= _PAGE_SPLITTING; pmd_mksplitting()
530 return pmd; pmd_mksplitting()
607 * Archs like ppc64 use pgtable to store per pmd pmd_move_must_withdraw()
608 * specific information. So when we switch the pmd, pmd_move_must_withdraw()
pgalloc-64.h 69 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pud_populate() argument
71 pud_set(pud, (unsigned long)pmd); pud_populate()
74 #define pmd_populate(mm, pmd, pte_page) \
75 pmd_populate_kernel(mm, pmd, page_address(pte_page))
76 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
77 #define pmd_pgtable(pmd) pmd_page(pmd)
175 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
177 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pmd_populate_kernel() argument
180 pmd_set(pmd, (unsigned long)pte); pmd_populate_kernel()
183 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument
186 pmd_set(pmd, (unsigned long)pte_page); pmd_populate()
189 static inline pgtable_t pmd_pgtable(pmd_t pmd) pmd_pgtable() argument
191 return (pgtable_t)(pmd_val(pmd) & ~PMD_MASKED_BITS); pmd_pgtable()
230 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
232 kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd); pmd_free() local
235 #define __pmd_free_tlb(tlb, pmd, addr) \
236 pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX)
pgtable-ppc32.h 25 * are an index to the second level table. The combined pgdir/pmd first
130 #define pmd_none(pmd) (!pmd_val(pmd))
131 #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
132 #define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK)
299 * Note that on Book E processors, the pmd contains the kernel virtual
302 * handler). On everything else the pmd contains the physical address
306 #define pmd_page_vaddr(pmd) \
307 ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
308 #define pmd_page(pmd) \
309 pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
311 #define pmd_page_vaddr(pmd) \
312 ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
313 #define pmd_page(pmd) \
314 pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
/linux-4.1.27/arch/alpha/include/asm/
pgalloc.h 14 pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte) pmd_populate() argument
16 pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET)); pmd_populate()
18 #define pmd_pgtable(pmd) pmd_page(pmd)
21 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) pmd_populate_kernel() argument
23 pmd_set(pmd, pte); pmd_populate_kernel()
27 pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) pgd_populate() argument
29 pgd_set(pgd, pmd); pgd_populate()
48 pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
50 free_page((unsigned long)pmd); pmd_free()
tlb.h 13 #define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd)
page.h 31 typedef struct { unsigned long pmd; } pmd_t; member in struct:__anon73
36 #define pmd_val(x) ((x).pmd)
pgtable.h 233 pmd_page_vaddr(pmd_t pmd) pmd_page_vaddr() argument
235 return ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)) + PAGE_OFFSET; pmd_page_vaddr()
239 #define pmd_page(pmd) (mem_map + ((pmd_val(pmd) & _PFN_MASK) >> 32))
253 extern inline int pmd_none(pmd_t pmd) { return !pmd_val(pmd); } pmd_bad() argument
254 extern inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; } pmd_present() argument
255 extern inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _PAGE_VALID; } pmd_clear() argument
353 printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
mmzone.h 74 #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> 32))
/linux-4.1.27/include/asm-generic/
pgtable-nopmd.h 13 * Having the pmd type consist of a pud gets the size right, and allows
14 * us to conceptually access the pud entry that this pmd is folded into
26 * setup: the pmd is never bad, and a pmd always exists (as it's folded
33 #define pmd_ERROR(pmd) (pud_ERROR((pmd).pud)) pud_clear()
35 #define pud_populate(mm, pmd, pte) do { } while (0)
55 * allocating and freeing a pmd is trivial: the 1-entry pmd is
59 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
pgtable.h 59 pmd_t pmd = *pmdp; pmdp_test_and_clear_young() local
61 if (!pmd_young(pmd)) pmdp_test_and_clear_young()
64 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd)); pmdp_test_and_clear_young()
105 pmd_t pmd = *pmdp; pmdp_get_and_clear() local
107 return pmd; pmdp_get_and_clear()
339 static inline int pmd_none_or_clear_bad(pmd_t *pmd) pmd_none_or_clear_bad() argument
341 if (pmd_none(*pmd)) pmd_none_or_clear_bad()
343 if (unlikely(pmd_bad(*pmd))) { pmd_none_or_clear_bad()
344 pmd_clear_bad(pmd); pmd_none_or_clear_bad()
450 static inline int pmd_soft_dirty(pmd_t pmd) pmd_soft_dirty() argument
460 static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) pmd_mksoft_dirty() argument
462 return pmd; pmd_mksoft_dirty()
565 static inline int pmd_trans_huge(pmd_t pmd) pmd_trans_huge() argument
569 static inline int pmd_trans_splitting(pmd_t pmd) pmd_trans_splitting() argument
574 static inline int pmd_write(pmd_t pmd) pmd_write() argument
586 * Depend on compiler for an atomic pmd read. NOTE: this is pmd_read_atomic()
599 * With split pmd lock we also need to move preallocated pmd_move_must_withdraw()
609 * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd
610 * into a null pmd and the transhuge page fault can convert a null pmd
611 * into an hugepmd or into a regular pmd (if the hugepage allocation
612 * fails). While holding the mmap_sem in read mode the pmd becomes
614 * transhuge pmd. When those races occurs and this function makes a
616 * undefined so behaving like if the pmd was none is safe (because it
621 * care of reading the pmd atomically to avoid SMP race conditions
625 * fault can populate the pmd from under us).
627 static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) pmd_none_or_trans_huge_or_clear_bad() argument
629 pmd_t pmdval = pmd_read_atomic(pmd); pmd_none_or_trans_huge_or_clear_bad()
637 * mapped in the pmd). The below checks will only care about pmd_none_or_trans_huge_or_clear_bad()
638 * the low part of the pmd with 32bit PAE x86 anyway, with the pmd_none_or_trans_huge_or_clear_bad()
640 * the low part of the pmd is found null, the high part will pmd_none_or_trans_huge_or_clear_bad()
650 pmd_clear_bad(pmd); pmd_none_or_trans_huge_or_clear_bad()
660 * places that already verified the pmd is not none and they want to
662 * need this). If THP is not enabled, the pmd can't go away under the
665 * split_huge_page_pmd returns (because it may have run when the pmd
669 static inline int pmd_trans_unstable(pmd_t *pmd) pmd_trans_unstable() argument
672 return pmd_none_or_trans_huge_or_clear_bad(pmd); pmd_trans_unstable()
692 static inline int pmd_protnone(pmd_t pmd) pmd_protnone() argument
702 int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
704 int pmd_clear_huge(pmd_t *pmd);
710 static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) pmd_set_huge() argument
718 static inline int pmd_clear_huge(pmd_t *pmd) pmd_clear_huge() argument
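
The include/asm-generic/pgtable.h comments above explain why a pte walker that holds mmap_sem only for read must re-check the pmd: a THP fault or MADV_DONTNEED can change it underneath, and pmd_none_or_trans_huge_or_clear_bad()/pmd_trans_unstable() fold all of those races into "behave as if the pmd were none". A minimal caller sketch of that rule follows; scan_ptes() is an invented name, and pte locking is omitted for brevity.

/* Sketch: scan_ptes() is hypothetical; the pmd_trans_unstable() guard before
 * pte_offset_map() is the usage the comments above describe.
 * Assumes addr < end on entry and leaves out the pte lock for brevity. */
static int scan_ptes(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	if (pmd_trans_unstable(pmd))
		return 0;			/* racing THP: treat like pmd_none() and skip */

	pte = pte_offset_map(pmd, addr);
	do {
		/* ... inspect *pte for whatever the walker needs ... */
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap(pte - 1);			/* unmap via the last pte we touched */
	return 0;
}
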
4level-fixup.h 26 #define pud_populate(mm, pud, pmd) pgd_populate(mm, pud, pmd)
page.h 43 unsigned long pmd[16]; member in struct:__anon11437
54 #define pmd_val(x) ((&x)->pmd[0])
/linux-4.1.27/arch/arm/include/asm/
pgtable-3level.h 145 #define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
147 #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
149 #define pmd_large(pmd) pmd_sect(pmd)
175 #define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
211 #define pmd_isset(pmd, val) ((u32)(val) == (val) ? pmd_val(pmd) & (val) \
212 : !!(pmd_val(pmd) & (val)))
213 #define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val)))
215 #define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF))
225 #define pmd_write(pmd) (pmd_isclear((pmd), L_PMD_SECT_RDONLY))
226 #define pmd_dirty(pmd) (pmd_isset((pmd), L_PMD_SECT_DIRTY))
230 #define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd))
231 #define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
234 #define pmd_trans_huge(pmd) (pmd_val(pmd) && !pmd_table(pmd))
235 #define pmd_trans_splitting(pmd) (pmd_isset((pmd), L_PMD_SECT_SPLITTING))
245 static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
254 #define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
256 #define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
260 /* represent a notpresent pmd by zero, this is used by pmdp_invalidate */ pmd_mknotpresent()
261 static inline pmd_t pmd_mknotpresent(pmd_t pmd) pmd_mknotpresent() argument
266 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) pmd_modify() argument
270 pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask); pmd_modify()
271 return pmd; pmd_modify()
275 pmd_t *pmdp, pmd_t pmd) set_pmd_at()
280 if (pmd_val(pmd) & L_PMD_SECT_NONE) set_pmd_at()
281 pmd_val(pmd) &= ~L_PMD_SECT_VALID; set_pmd_at()
283 if (pmd_write(pmd) && pmd_dirty(pmd)) set_pmd_at()
284 pmd_val(pmd) &= ~PMD_SECT_AP2; set_pmd_at()
286 pmd_val(pmd) |= PMD_SECT_AP2; set_pmd_at()
288 *pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG); set_pmd_at()
274 set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) set_pmd_at() argument
H A Dpgalloc.h35 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
37 BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); pmd_free()
38 free_page((unsigned long)pmd); pmd_free()
41 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pud_populate() argument
43 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE)); pud_populate()
52 #define pmd_free(mm, pmd) do { } while (0)
53 #define pud_populate(mm,pmd,pte) BUG()
143 * Populate the pmdp entry with a pointer to the pte. This pmd is part
152 * The pmd must be loaded with the physical address of the PTE table pmd_populate_kernel()
170 #define pmd_pgtable(pmd) pmd_page(pmd)
H A Dkvm_mmu.h72 static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd) kvm_set_pmd() argument
74 *pmd = new_pmd; kvm_set_pmd()
75 flush_pmd_entry(pmd); kvm_set_pmd()
93 static inline void kvm_clean_pmd(pmd_t *pmd) kvm_clean_pmd() argument
95 clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t)); kvm_clean_pmd()
98 static inline void kvm_clean_pmd_entry(pmd_t *pmd) kvm_clean_pmd_entry() argument
100 clean_pmd_entry(pmd); kvm_clean_pmd_entry()
113 static inline void kvm_set_s2pmd_writable(pmd_t *pmd) kvm_set_s2pmd_writable() argument
115 pmd_val(*pmd) |= L_PMD_S2_RDWR; kvm_set_s2pmd_writable()
128 static inline void kvm_set_s2pmd_readonly(pmd_t *pmd) kvm_set_s2pmd_readonly() argument
130 pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY; kvm_set_s2pmd_readonly()
133 static inline bool kvm_s2pmd_readonly(pmd_t *pmd) kvm_s2pmd_readonly() argument
135 return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY; kvm_s2pmd_readonly()
246 static inline void __kvm_flush_dcache_pmd(pmd_t pmd) __kvm_flush_dcache_pmd() argument
249 pfn_t pfn = pmd_pfn(pmd); __kvm_flush_dcache_pmd()
H A Dpgtable.h56 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
68 * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
184 #define pmd_none(pmd) (!pmd_val(pmd))
185 #define pmd_present(pmd) (pmd_val(pmd))
187 static inline pte_t *pmd_page_vaddr(pmd_t pmd) pmd_page_vaddr() argument
189 return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK); pmd_page_vaddr()
192 #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
195 #define __pte_map(pmd) pmd_page_vaddr(*(pmd))
198 #define __pte_map(pmd) (pte_t *)kmap_atomic(pmd_page(*(pmd)))
204 #define pte_offset_kernel(pmd,addr) (pmd_page_vaddr(*(pmd)) + pte_index(addr))
206 #define pte_offset_map(pmd,addr) (__pte_map(pmd) + pte_index(addr))
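The helpers excerpted above are the ARM pieces of the generic pgd/pud/pmd/pte walk. A minimal sketch of that walk for kernel mappings (lookup_kernel_pte() is a hypothetical name):
#include <linux/mm.h>

static pte_t *lookup_kernel_pte(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return NULL;
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud) || pud_bad(*pud))
                return NULL;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return NULL;
        /* The pte page is permanently mapped for kernel addresses. */
        return pte_offset_kernel(pmd, addr);
}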
H A Dpgtable-2level-types.h34 typedef struct { pmdval_t pmd; } pmd_t; member in struct:__anon174
39 #define pmd_val(x) ((x).pmd)
H A Dpgtable-2level.h150 * The "pud_xxx()" functions here are trivial when the pmd is folded into
165 #define pmd_large(pmd) (pmd_val(pmd) & 2)
166 #define pmd_bad(pmd) (pmd_val(pmd) & 2)
182 /* we don't need complex calculations here as the pmd is folded into the pgd */
193 #define pmd_hugewillfault(pmd) (0)
194 #define pmd_thp_or_huge(pmd) (0)
H A Dpgtable-3level-types.h37 typedef struct { pmdval_t pmd; } pmd_t; member in struct:__anon178
42 #define pmd_val(x) ((x).pmd)
/linux-4.1.27/arch/mips/include/asm/
H A Dpgalloc.h16 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pmd_populate_kernel() argument
19 set_pmd(pmd, __pmd((unsigned long)pte)); pmd_populate_kernel()
22 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument
25 set_pmd(pmd, __pmd((unsigned long)page_address(pte))); pmd_populate()
27 #define pmd_pgtable(pmd) pmd_page(pmd)
30 * Initialize a new pmd table with invalid pointers.
36 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pud_populate() argument
38 set_pud(pud, __pud((unsigned long)pmd)); pud_populate()
43 * Initialize a new pgd / pmd table with invalid pointers.
114 pmd_t *pmd; pmd_alloc_one() local
116 pmd = (pmd_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PMD_ORDER); pmd_alloc_one()
117 if (pmd) pmd_alloc_one()
118 pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table); pmd_alloc_one()
119 return pmd; pmd_alloc_one()
122 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
124 free_pages((unsigned long)pmd, PMD_ORDER); pmd_free()
H A Dpgtable.h91 #define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd))
93 #define __pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
95 #define pmd_page(pmd) __pmd_page(pmd)
98 #define pmd_page_vaddr(pmd) pmd_val(pmd)
487 static inline int pmd_trans_huge(pmd_t pmd) pmd_trans_huge() argument
489 return !!(pmd_val(pmd) & _PAGE_HUGE); pmd_trans_huge()
492 static inline pmd_t pmd_mkhuge(pmd_t pmd) pmd_mkhuge() argument
494 pmd_val(pmd) |= _PAGE_HUGE; pmd_mkhuge()
496 return pmd; pmd_mkhuge()
499 static inline int pmd_trans_splitting(pmd_t pmd) pmd_trans_splitting() argument
501 return !!(pmd_val(pmd) & _PAGE_SPLITTING); pmd_trans_splitting()
504 static inline pmd_t pmd_mksplitting(pmd_t pmd) pmd_mksplitting() argument
506 pmd_val(pmd) |= _PAGE_SPLITTING; pmd_mksplitting()
508 return pmd; pmd_mksplitting()
512 pmd_t *pmdp, pmd_t pmd);
521 static inline int pmd_write(pmd_t pmd) pmd_write() argument
523 return !!(pmd_val(pmd) & _PAGE_WRITE); pmd_write()
526 static inline pmd_t pmd_wrprotect(pmd_t pmd) pmd_wrprotect() argument
528 pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE); pmd_wrprotect()
529 return pmd; pmd_wrprotect()
532 static inline pmd_t pmd_mkwrite(pmd_t pmd) pmd_mkwrite() argument
534 pmd_val(pmd) |= _PAGE_WRITE; pmd_mkwrite()
535 if (pmd_val(pmd) & _PAGE_MODIFIED) pmd_mkwrite()
536 pmd_val(pmd) |= _PAGE_SILENT_WRITE; pmd_mkwrite()
538 return pmd; pmd_mkwrite()
541 static inline int pmd_dirty(pmd_t pmd) pmd_dirty() argument
543 return !!(pmd_val(pmd) & _PAGE_MODIFIED); pmd_dirty()
546 static inline pmd_t pmd_mkclean(pmd_t pmd) pmd_mkclean() argument
548 pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE); pmd_mkclean()
549 return pmd; pmd_mkclean()
552 static inline pmd_t pmd_mkdirty(pmd_t pmd) pmd_mkdirty() argument
554 pmd_val(pmd) |= _PAGE_MODIFIED; pmd_mkdirty()
555 if (pmd_val(pmd) & _PAGE_WRITE) pmd_mkdirty()
556 pmd_val(pmd) |= _PAGE_SILENT_WRITE; pmd_mkdirty()
558 return pmd; pmd_mkdirty()
561 static inline int pmd_young(pmd_t pmd) pmd_young() argument
563 return !!(pmd_val(pmd) & _PAGE_ACCESSED); pmd_young()
566 static inline pmd_t pmd_mkold(pmd_t pmd) pmd_mkold() argument
568 pmd_val(pmd) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ); pmd_mkold()
570 return pmd; pmd_mkold()
573 static inline pmd_t pmd_mkyoung(pmd_t pmd) pmd_mkyoung() argument
575 pmd_val(pmd) |= _PAGE_ACCESSED; pmd_mkyoung()
578 if (!(pmd_val(pmd) & _PAGE_NO_READ)) pmd_mkyoung()
579 pmd_val(pmd) |= _PAGE_SILENT_READ; pmd_mkyoung()
582 if (pmd_val(pmd) & _PAGE_READ) pmd_mkyoung()
583 pmd_val(pmd) |= _PAGE_SILENT_READ; pmd_mkyoung()
585 return pmd; pmd_mkyoung()
591 static inline unsigned long pmd_pfn(pmd_t pmd) pmd_pfn() argument
593 return pmd_val(pmd) >> _PFN_SHIFT; pmd_pfn()
596 static inline struct page *pmd_page(pmd_t pmd) pmd_page() argument
598 if (pmd_trans_huge(pmd)) pmd_page()
599 return pfn_to_page(pmd_pfn(pmd)); pmd_page()
601 return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT); pmd_page()
604 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) pmd_modify() argument
606 pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot); pmd_modify()
607 return pmd; pmd_modify()
610 static inline pmd_t pmd_mknotpresent(pmd_t pmd) pmd_mknotpresent() argument
612 pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY); pmd_mknotpresent()
614 return pmd; pmd_mknotpresent()
H A Dpgtable-64.h28 * (== PTRS_PER_PGD) 8 byte pointers to pmd tables. Each pmd table is a
32 * invalid_pmd_table, each pmd entry is initialized to point to
34 * and a pmd table or a page table allocation fails, empty_bad_pmd_table
66 * We used to implement 41 bits by having an order 1 pmd level but that seemed
146 printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
160 typedef struct { unsigned long pmd; } pmd_t; member in struct:__anon2060
161 #define pmd_val(x) ((x).pmd)
169 * Empty pgd/pmd entries point to the invalid_pte_table.
171 static inline int pmd_none(pmd_t pmd) pmd_none() argument
173 return pmd_val(pmd) == (unsigned long) invalid_pte_table; pmd_none()
176 static inline int pmd_bad(pmd_t pmd) pmd_bad() argument
179 /* pmd_huge(pmd) but inline */ pmd_bad()
180 if (unlikely(pmd_val(pmd) & _PAGE_HUGE)) pmd_bad()
184 if (unlikely(pmd_val(pmd) & ~PAGE_MASK)) pmd_bad()
190 static inline int pmd_present(pmd_t pmd) pmd_present() argument
192 return pmd_val(pmd) != (unsigned long) invalid_pte_table; pmd_present()
276 * Initialize a new pgd / pmd table with invalid pointers.
H A Dpgtable-32.h87 * Empty pgd/pmd entries point to the invalid_pte_table.
89 static inline int pmd_none(pmd_t pmd) pmd_none() argument
91 return pmd_val(pmd) == (unsigned long) invalid_pte_table; pmd_none()
94 #define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
96 static inline int pmd_present(pmd_t pmd) pmd_present() argument
98 return pmd_val(pmd) != (unsigned long) invalid_pte_table; pmd_present()
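On MIPS an empty pmd entry points at invalid_pte_table rather than holding zero, which is why the pmd_none()/pmd_present() definitions above compare against that address. A sketch of filling a fresh pmd table accordingly (example_pmd_init() is a hypothetical stand-in for the arch pmd_init(); it would be called with the new page and invalid_pte_table):
static void example_pmd_init(unsigned long page, unsigned long pagetable)
{
        unsigned long *entry = (unsigned long *)page;
        int i;

        /* Every slot starts out pointing at the shared invalid pte table. */
        for (i = 0; i < PTRS_PER_PMD; i++)
                entry[i] = pagetable;
}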
/linux-4.1.27/arch/cris/include/asm/
H A Dpgalloc.h7 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, pte)
8 #define pmd_populate(mm, pmd, pte) pmd_set(pmd, page_address(pte))
9 #define pmd_pgtable(pmd) pmd_page(pmd)
/linux-4.1.27/arch/unicore32/mm/
H A Dpgd.c81 pmd_t *pmd; free_pgd_slow() local
88 pmd = pmd_off(pgd, 0); free_pgd_slow()
89 if (pmd_none(*pmd)) free_pgd_slow()
91 if (pmd_bad(*pmd)) { free_pgd_slow()
92 pmd_ERROR(*pmd); free_pgd_slow()
93 pmd_clear(pmd); free_pgd_slow()
97 pte = pmd_pgtable(*pmd); free_pgd_slow()
98 pmd_clear(pmd); free_pgd_slow()
101 pmd_free(mm, pmd); free_pgd_slow()
H A Dmmu.c42 * The pmd table for the upper-most set of pages.
152 static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, early_pte_alloc() argument
155 if (pmd_none(*pmd)) { early_pte_alloc()
157 __pmd_populate(pmd, __pa(pte) | prot); early_pte_alloc()
159 BUG_ON(pmd_bad(*pmd)); early_pte_alloc()
160 return pte_offset_kernel(pmd, addr); early_pte_alloc()
163 static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, alloc_init_pte() argument
167 pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1); alloc_init_pte()
178 pmd_t *pmd = pmd_offset((pud_t *)pgd, addr); alloc_init_section() local
185 pmd_t *p = pmd; alloc_init_section()
188 set_pmd(pmd, __pmd(phys | type->prot_sect)); alloc_init_section()
190 } while (pmd++, addr += SECTION_SIZE, addr != end); alloc_init_section()
198 alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type); alloc_init_section()
465 pmd_t *pmd; setup_mm_for_reboot() local
467 pmd = pmd_off(pgd, i << PGDIR_SHIFT); setup_mm_for_reboot()
468 set_pmd(pmd, __pmd(pmdval)); setup_mm_for_reboot()
469 flush_pmd_entry(pmd); setup_mm_for_reboot()
H A Dioremap.c74 pmd_t pmd, *pmdp = pmd_offset((pud_t *)pgd, addr); unmap_area_sections() local
76 pmd = *pmdp; unmap_area_sections()
77 if (!pmd_none(pmd)) { unmap_area_sections()
90 if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE) unmap_area_sections()
91 pte_free_kernel(&init_mm, pmd_page_vaddr(pmd)); unmap_area_sections()
116 pmd_t *pmd = pmd_offset((pud_t *)pgd, addr); remap_area_sections() local
118 set_pmd(pmd, __pmd(__pfn_to_phys(pfn) | type->prot_sect)); remap_area_sections()
120 flush_pmd_entry(pmd); remap_area_sections()
H A Dfault.c53 pmd_t *pmd; show_pte() local
64 pmd = pmd_offset((pud_t *) pgd, addr); show_pte()
66 printk(", *pmd=%08lx", pmd_val(*pmd)); show_pte()
68 if (pmd_none(*pmd)) show_pte()
71 if (pmd_bad(*pmd)) { show_pte()
77 if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT))) show_pte()
80 pte = pte_offset_map(pmd, addr); show_pte()
347 pmd_t *pmd, *pmd_k; do_ifault() local
364 pmd = pmd_offset((pud_t *) pgd, addr); do_ifault()
369 set_pmd(pmd, *pmd_k); do_ifault()
370 flush_pmd_entry(pmd); do_ifault()
/linux-4.1.27/arch/m32r/include/asm/
H A Dpgalloc.h8 #define pmd_populate_kernel(mm, pmd, pte) \
9 set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
11 static __inline__ void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument
14 set_pmd(pmd, __pmd(_PAGE_TABLE + page_to_phys(pte))); pmd_populate()
16 #define pmd_pgtable(pmd) pmd_page(pmd)
69 * allocating and freeing a pmd is trivial: the 1-entry pmd is
77 #define pgd_populate(mm, pmd, pte) BUG()
H A Dmmzone.h18 #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
H A Dpage.h27 typedef struct { unsigned long pmd; } pmd_t; member in struct:__anon1751
35 #define pmd_val(x) ((x).pmd)
H A Dpgtable-2level.h25 printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
31 * setup: the pgd is never bad, and a pmd always exists (as it's folded
/linux-4.1.27/arch/arm/mm/
H A Didmap.c24 pmd_t *pmd; idmap_add_pmd() local
28 pmd = pmd_alloc_one(&init_mm, addr); idmap_add_pmd()
29 if (!pmd) { idmap_add_pmd()
30 pr_warn("Failed to allocate identity pmd.\n"); idmap_add_pmd()
38 memcpy(pmd, pmd_offset(pud, 0), idmap_add_pmd()
40 pud_populate(&init_mm, pud, pmd); idmap_add_pmd()
41 pmd += pmd_index(addr); idmap_add_pmd()
43 pmd = pmd_offset(pud, addr); idmap_add_pmd()
47 *pmd = __pmd((addr & PMD_MASK) | prot); idmap_add_pmd()
48 flush_pmd_entry(pmd); idmap_add_pmd()
49 } while (pmd++, addr = next, addr != end); idmap_add_pmd()
55 pmd_t *pmd = pmd_offset(pud, addr); idmap_add_pmd() local
58 pmd[0] = __pmd(addr); idmap_add_pmd()
60 pmd[1] = __pmd(addr); idmap_add_pmd()
61 flush_pmd_entry(pmd); idmap_add_pmd()
H A Dpgd.c113 pmd_t *pmd; pgd_free() local
127 pmd = pmd_offset(pud, 0); pgd_free()
128 if (pmd_none_or_clear_bad(pmd)) pgd_free()
131 pte = pmd_pgtable(*pmd); pgd_free()
132 pmd_clear(pmd); pgd_free()
137 pmd_free(mm, pmd); pgd_free()
145 * Free modules/pkmap or identity pmd tables. pgd_free()
155 pmd = pmd_offset(pud, 0); pgd_free()
157 pmd_free(mm, pmd); pgd_free()
H A Dhugetlbpage.c35 * On ARM, huge pages are backed by pmd's rather than pte's, so we do a lot
49 int pmd_huge(pmd_t pmd) pmd_huge() argument
51 return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT); pmd_huge()
H A Dioremap.c151 pmd_t pmd = *pmdp; unmap_area_sections() local
153 if (!pmd_none(pmd)) { unmap_area_sections()
167 if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE) unmap_area_sections()
168 pte_free_kernel(&init_mm, pmd_page_vaddr(pmd)); unmap_area_sections()
192 pmd_t *pmd; remap_area_sections() local
202 pmd = pmd_offset(pud, addr); remap_area_sections()
204 pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect); remap_area_sections()
206 pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect); remap_area_sections()
208 flush_pmd_entry(pmd); remap_area_sections()
211 pmd += 2; remap_area_sections()
224 pmd_t *pmd; remap_area_supersections() local
234 pmd = pmd_offset(pud, addr); remap_area_supersections()
243 pmd[0] = __pmd(super_pmd_val); remap_area_supersections()
244 pmd[1] = __pmd(super_pmd_val); remap_area_supersections()
245 flush_pmd_entry(pmd); remap_area_supersections()
248 pmd += 2; remap_area_supersections()
H A Ddump.c192 }, { /* pmd */
257 static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start) walk_pte() argument
259 pte_t *pte = pte_offset_kernel(pmd, 0); walk_pte()
271 pmd_t *pmd = pmd_offset(pud, 0); walk_pmd() local
275 for (i = 0; i < PTRS_PER_PMD; i++, pmd++) { walk_pmd()
277 if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd)) walk_pmd()
278 note_page(st, addr, 3, pmd_val(*pmd)); walk_pmd()
280 walk_pte(st, pmd, addr); walk_pmd()
282 if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1])) walk_pmd()
283 note_page(st, addr + SECTION_SIZE, 3, pmd_val(pmd[1])); walk_pmd()
H A Dmmu.c52 * The pmd table for the upper-most set of pages.
78 pmdval_t pmd; member in struct:cachepolicy
93 .pmd = PMD_SECT_UNCACHED,
99 .pmd = PMD_SECT_BUFFERED,
105 .pmd = PMD_SECT_WT,
111 .pmd = PMD_SECT_WB,
117 .pmd = PMD_SECT_WBWA,
128 * via the "pmd" value. This is used to ensure that on ARMv6 and later,
133 void __init init_default_cache_policy(unsigned long pmd) init_default_cache_policy() argument
137 initial_pmd_value = pmd; init_default_cache_policy()
139 pmd &= PMD_SECT_TEX(1) | PMD_SECT_BUFFERABLE | PMD_SECT_CACHEABLE; init_default_cache_policy()
142 if (cache_policies[i].pmd == pmd) { init_default_cache_policy()
629 mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd; build_mem_type_table()
631 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd; build_mem_type_table()
635 mem_types[MT_ROM].prot_sect |= cp->pmd; build_mem_type_table()
637 switch (cp->pmd) { build_mem_type_table()
685 static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot) early_pte_alloc() argument
687 if (pmd_none(*pmd)) { early_pte_alloc()
689 __pmd_populate(pmd, __pa(pte), prot); early_pte_alloc()
691 BUG_ON(pmd_bad(*pmd)); early_pte_alloc()
692 return pte_offset_kernel(pmd, addr); early_pte_alloc()
695 static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, alloc_init_pte() argument
699 pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1); alloc_init_pte()
706 static void __init __map_init_section(pmd_t *pmd, unsigned long addr, __map_init_section() argument
710 pmd_t *p = pmd; __map_init_section()
723 pmd++; __map_init_section()
726 *pmd = __pmd(phys | type->prot_sect); __map_init_section()
728 } while (pmd++, addr += SECTION_SIZE, addr != end); __map_init_section()
737 pmd_t *pmd = pmd_offset(pud, addr); alloc_init_pmd() local
753 __map_init_section(pmd, addr, next, phys, type); alloc_init_pmd()
755 alloc_init_pte(pmd, addr, next, alloc_init_pmd()
761 } while (pmd++, addr = next, addr != end); alloc_init_pmd()
824 pmd_t *pmd = pmd_offset(pud, addr); create_36bit_mapping() local
828 *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER); create_36bit_mapping()
967 pmd_t *pmd; fill_pmd_gaps() local
981 pmd = pmd_off_k(addr); fill_pmd_gaps()
982 if (pmd_none(*pmd)) fill_pmd_gaps()
993 pmd = pmd_off_k(addr) + 1; fill_pmd_gaps()
994 if (pmd_none(*pmd)) fill_pmd_gaps()
1115 * Find the first non-pmd-aligned page, and point for_each_memblock()
1117 * limit down to be pmd-aligned, which happens at the for_each_memblock()
1121 * bank can be non-pmd-aligned. The only exception is for_each_memblock()
1140 * Round the memblock limit down to a pmd size. This
1142 * last full pmd, which should be mapped.
H A Dfault.c73 pmd_t *pmd; show_pte() local
96 pmd = pmd_offset(pud, addr); show_pte()
98 pr_cont(", *pmd=%08llx", (long long)pmd_val(*pmd)); show_pte()
100 if (pmd_none(*pmd)) show_pte()
103 if (pmd_bad(*pmd)) { show_pte()
109 if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT))) show_pte()
112 pte = pte_offset_map(pmd, addr); show_pte()
428 pmd_t *pmd, *pmd_k; do_translation_fault() local
454 pmd = pmd_offset(pud, addr); do_translation_fault()
476 copy_pmd(pmd, pmd_k); do_translation_fault()
H A Dfault-armv.c98 pmd_t *pmd; adjust_pte() local
110 pmd = pmd_offset(pud, address); adjust_pte()
111 if (pmd_none_or_clear_bad(pmd)) adjust_pte()
119 ptl = pte_lockptr(vma->vm_mm, pmd); adjust_pte()
120 pte = pte_offset_map(pmd, address); adjust_pte()
/linux-4.1.27/arch/sparc/include/asm/
H A Dpgalloc_32.h46 static inline void free_pmd_fast(pmd_t * pmd) free_pmd_fast() argument
48 srmmu_free_nocache(pmd, SRMMU_PMD_TABLE_SIZE); free_pmd_fast()
51 #define pmd_free(mm, pmd) free_pmd_fast(pmd)
52 #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
55 #define pmd_pgtable(pmd) pmd_page(pmd)
H A Dpgalloc_64.h35 static inline void __pud_populate(pud_t *pud, pmd_t *pmd) __pud_populate() argument
37 pud_set(pud, pmd); __pud_populate()
59 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
61 kmem_cache_free(pgtable_cache, pmd); pmd_free()
114 #define __pmd_free_tlb(tlb, pmd, addr) \
115 pgtable_free_tlb(tlb, pmd, false)
H A Dpgtable_64.h99 pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n", \
337 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) pmd_modify() argument
339 pte_t pte = __pte(pmd_val(pmd)); pmd_modify()
396 static inline pmd_t pmd_mkhuge(pmd_t pmd) pmd_mkhuge() argument
398 pte_t pte = __pte(pmd_val(pmd)); pmd_mkhuge()
641 static inline unsigned long pmd_large(pmd_t pmd) pmd_large() argument
643 pte_t pte = __pte(pmd_val(pmd)); pmd_large()
648 static inline unsigned long pmd_pfn(pmd_t pmd) pmd_pfn() argument
650 pte_t pte = __pte(pmd_val(pmd)); pmd_pfn()
656 static inline unsigned long pmd_dirty(pmd_t pmd) pmd_dirty() argument
658 pte_t pte = __pte(pmd_val(pmd)); pmd_dirty()
663 static inline unsigned long pmd_young(pmd_t pmd) pmd_young() argument
665 pte_t pte = __pte(pmd_val(pmd)); pmd_young()
670 static inline unsigned long pmd_write(pmd_t pmd) pmd_write() argument
672 pte_t pte = __pte(pmd_val(pmd)); pmd_write()
677 static inline unsigned long pmd_trans_huge(pmd_t pmd) pmd_trans_huge() argument
679 pte_t pte = __pte(pmd_val(pmd)); pmd_trans_huge()
684 static inline unsigned long pmd_trans_splitting(pmd_t pmd) pmd_trans_splitting() argument
686 pte_t pte = __pte(pmd_val(pmd)); pmd_trans_splitting()
688 return pmd_trans_huge(pmd) && pte_special(pte); pmd_trans_splitting()
693 static inline pmd_t pmd_mkold(pmd_t pmd) pmd_mkold() argument
695 pte_t pte = __pte(pmd_val(pmd)); pmd_mkold()
702 static inline pmd_t pmd_wrprotect(pmd_t pmd) pmd_wrprotect() argument
704 pte_t pte = __pte(pmd_val(pmd)); pmd_wrprotect()
711 static inline pmd_t pmd_mkdirty(pmd_t pmd) pmd_mkdirty() argument
713 pte_t pte = __pte(pmd_val(pmd)); pmd_mkdirty()
720 static inline pmd_t pmd_mkyoung(pmd_t pmd) pmd_mkyoung() argument
722 pte_t pte = __pte(pmd_val(pmd)); pmd_mkyoung()
729 static inline pmd_t pmd_mkwrite(pmd_t pmd) pmd_mkwrite() argument
731 pte_t pte = __pte(pmd_val(pmd)); pmd_mkwrite()
738 static inline pmd_t pmd_mksplitting(pmd_t pmd) pmd_mksplitting() argument
740 pte_t pte = __pte(pmd_val(pmd)); pmd_mksplitting()
755 static inline int pmd_present(pmd_t pmd) pmd_present() argument
757 return pmd_val(pmd) != 0UL; pmd_present()
760 #define pmd_none(pmd) (!pmd_val(pmd))
768 #define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
780 pmd_t *pmdp, pmd_t pmd);
783 pmd_t *pmdp, pmd_t pmd) set_pmd_at()
785 *pmdp = pmd; set_pmd_at()
798 static inline unsigned long __pmd_page(pmd_t pmd) __pmd_page() argument
800 pte_t pte = __pte(pmd_val(pmd)); __pmd_page()
807 #define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
873 pmd_t pmd = *pmdp; pmdp_get_and_clear() local
875 return pmd; pmdp_get_and_clear()
934 pmd_t *pmd);
782 set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) set_pmd_at() argument
H A Dpgtable_32.h127 static inline struct page *pmd_page(pmd_t pmd) pmd_page() argument
129 if (srmmu_device_memory(pmd_val(pmd))) pmd_page()
131 return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4)); pmd_page()
164 static inline int pmd_bad(pmd_t pmd) pmd_bad() argument
166 return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; pmd_bad()
169 static inline int pmd_present(pmd_t pmd) pmd_present() argument
171 return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); pmd_present()
174 static inline int pmd_none(pmd_t pmd) pmd_none() argument
176 return !pmd_val(pmd); pmd_none()
H A Dpage_64.h59 typedef struct { unsigned long pmd; } pmd_t; member in struct:__anon2702
66 #define pmd_val(x) ((x).pmd)
/linux-4.1.27/arch/um/include/asm/
H A Dpgalloc.h13 #define pmd_populate_kernel(mm, pmd, pte) \
14 set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte)))
16 #define pmd_populate(mm, pmd, pte) \
17 set_pmd(pmd, __pmd(_PAGE_TABLE + \
20 #define pmd_pgtable(pmd) pmd_page(pmd)
50 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
52 free_page((unsigned long)pmd); pmd_free()
H A Dpgtable-3level.h50 printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
59 #define pud_populate(mm, pud, pmd) \
60 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
H A Dpage.h37 typedef struct { unsigned long pmd; } pmd_t; member in struct:__anon2931
52 #define pmd_val(x) ((x).pmd)
64 typedef struct { unsigned long pmd; } pmd_t; member in struct:__anon2935
65 #define pmd_val(x) ((x).pmd)
H A Dpgtable.h111 #define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
312 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
314 * this macro returns the index of the entry in the pmd page which would
317 #define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
320 #define pmd_page_vaddr(pmd) \
321 ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
/linux-4.1.27/arch/sh/include/asm/
H A Dpgalloc.h13 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
15 extern void pmd_free(struct mm_struct *mm, pmd_t *pmd);
18 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pmd_populate_kernel() argument
21 set_pmd(pmd, __pmd((unsigned long)pte)); pmd_populate_kernel()
24 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument
27 set_pmd(pmd, __pmd((unsigned long)page_address(pte))); pmd_populate()
29 #define pmd_pgtable(pmd) pmd_page(pmd)
H A Dpgtable-3level.h28 printk("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
30 typedef struct { unsigned long long pmd; } pmd_t; member in struct:__anon2578
31 #define pmd_val(x) ((x).pmd)
/linux-4.1.27/arch/arm64/include/asm/
H A Dpgtable.h258 static inline pte_t pmd_pte(pmd_t pmd) pmd_pte() argument
260 return __pte(pmd_val(pmd)); pmd_pte()
278 #define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
279 #define pmd_trans_splitting(pmd) pte_special(pmd_pte(pmd))
288 #define pmd_present(pmd) pte_present(pmd_pte(pmd))
289 #define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
290 #define pmd_young(pmd) pte_young(pmd_pte(pmd))
291 #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
292 #define pmd_mksplitting(pmd) pte_pmd(pte_mkspecial(pmd_pte(pmd)))
293 #define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
294 #define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
295 #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
296 #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
297 #define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))
300 #define pmd_write(pmd) pte_write(pmd_pte(pmd))
302 #define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
304 #define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
311 #define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
335 #define pmd_none(pmd) (!pmd_val(pmd))
337 #define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
339 #define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
341 #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
354 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) set_pmd() argument
356 *pmdp = pmd; set_pmd()
366 static inline pte_t *pmd_page_vaddr(pmd_t pmd) pmd_page_vaddr() argument
368 return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK); pmd_page_vaddr()
371 #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
381 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd))
470 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) pmd_modify() argument
472 return pte_pmd(pte_modify(pmd_pte(pmd), newprot)); pmd_modify()
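arm64 builds nearly all of the pmd_*() helpers above by converting to a pte, reusing the pte helper, and converting back with pte_pmd(). A sketch of that pattern for one more helper in the same header (pmd_mkexample() is hypothetical):
static inline pmd_t pmd_mkexample(pmd_t pmd)
{
        /* Round-trip through the pte representation, as pmd_mkyoung() etc. do. */
        return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
}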
H A Dpgalloc.h38 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
40 BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); pmd_free()
41 free_page((unsigned long)pmd); pmd_free()
44 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pud_populate() argument
46 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE)); pud_populate()
117 * Populate the pmdp entry with a pointer to the pte. This pmd is part
124 * The pmd must be loaded with the physical address of the PTE table pmd_populate_kernel()
134 #define pmd_pgtable(pmd) pmd_page(pmd)
H A Dkvm_mmu.h105 #define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd)
108 static inline void kvm_clean_pmd(pmd_t *pmd) {}
109 static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
118 static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
120 pmd_val(*pmd) |= PMD_S2_RDWR;
133 static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
135 pmd_val(*pmd) = (pmd_val(*pmd) & ~PMD_S2_RDWR) | PMD_S2_RDONLY;
138 static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
140 return (pmd_val(*pmd) & PMD_S2_RDWR) == PMD_S2_RDONLY;
257 static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
259 struct page *page = pmd_page(pmd);
H A Dpgtable-types.h42 typedef struct { pmdval_t pmd; } pmd_t; member in struct:__anon308
43 #define pmd_val(x) ((x).pmd)
/linux-4.1.27/include/trace/events/
H A Dthp.h30 TP_PROTO(unsigned long addr, unsigned long pmd),
31 TP_ARGS(addr, pmd),
34 __field(unsigned long, pmd)
39 __entry->pmd = pmd;
42 TP_printk("Set pmd with 0x%lx with 0x%lx", __entry->addr, __entry->pmd)
/linux-4.1.27/arch/mips/mm/
H A Dpgtable-64.c72 pmd_t pmd = pmd_mksplitting(*pmdp); pmdp_splitting_flush() local
73 set_pmd_at(vma->vm_mm, address, pmdp, pmd); pmdp_splitting_flush()
81 pmd_t pmd; mk_pmd() local
83 pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot); mk_pmd()
85 return pmd; mk_pmd()
89 pmd_t *pmdp, pmd_t pmd) set_pmd_at()
91 *pmdp = pmd; set_pmd_at()
88 set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) set_pmd_at() argument
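The mk_pmd()/set_pmd_at() pair above is how MIPS materialises a transparent huge mapping. A condensed sketch of the combined sequence (example_install_huge() is a hypothetical helper, assuming the usual <linux/mm.h> context):
static void example_install_huge(struct mm_struct *mm, unsigned long addr,
                                 pmd_t *pmdp, struct page *page, pgprot_t prot)
{
        /* Build the entry from the head page, mark it huge, then install it. */
        pmd_t entry = pmd_mkhuge(mk_pmd(page, prot));

        set_pmd_at(mm, addr, pmdp, entry);
}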
H A Dhugetlbpage.c43 pmd_t *pmd = NULL; huge_pte_offset() local
49 pmd = pmd_offset(pud, addr); huge_pte_offset()
51 return (pte_t *) pmd; huge_pte_offset()
71 int pmd_huge(pmd_t pmd) pmd_huge() argument
73 return (pmd_val(pmd) & _PAGE_HUGE) != 0; pmd_huge()
H A Dgup.c37 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, gup_pte_range() argument
40 pte_t *ptep = pte_offset_map(&pmd, addr); gup_pte_range()
71 static int gup_huge_pmd(pmd_t pmd, unsigned long addr, unsigned long end, gup_huge_pmd() argument
74 pte_t pte = *(pte_t *)&pmd; gup_huge_pmd()
109 pmd_t pmd = *pmdp; gup_pmd_range() local
116 * splitting bit in the pmd. Returning zero will take gup_pmd_range()
118 * if the pmd is still in splitting state. gup-fast gup_pmd_range()
123 if (pmd_none(pmd) || pmd_trans_splitting(pmd)) gup_pmd_range()
125 if (unlikely(pmd_huge(pmd))) { gup_pmd_range()
126 if (!gup_huge_pmd(pmd, addr, next, write, pages,nr)) gup_pmd_range()
129 if (!gup_pte_range(pmd, addr, next, write, pages,nr)) gup_pmd_range()
H A Dpgtable-32.c40 pmd_t *pmd; pagetable_init() local
66 pmd = pmd_offset(pud, vaddr); pagetable_init()
67 pte = pte_offset_kernel(pmd, vaddr); pagetable_init()
H A Dioremap.c45 static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, remap_area_pmd() argument
57 pte_t * pte = pte_alloc_kernel(pmd, address); remap_area_pmd()
62 pmd++; remap_area_pmd()
80 pmd_t *pmd; remap_area_pages() local
86 pmd = pmd_alloc(&init_mm, pud, address); remap_area_pages()
87 if (!pmd) remap_area_pages()
89 if (remap_area_pmd(pmd, address, end - address, remap_area_pages()
H A Dinit.c222 pmd_t *pmd; fixrange_init() local
236 pmd = (pmd_t *)pud; fixrange_init()
237 for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) { fixrange_init()
238 if (pmd_none(*pmd)) { fixrange_init()
240 set_pmd(pmd, __pmd((unsigned long)pte)); fixrange_init()
241 BUG_ON(pte != pte_offset_kernel(pmd, 0)); fixrange_init()
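fixrange_init() above allocates the pte page lazily when it finds an empty pmd. A condensed sketch of that step (example_pte_alloc() is a hypothetical name, assuming the <linux/mm.h> and <asm/pgalloc.h> context of the excerpts):
static pte_t *example_pte_alloc(pmd_t *pmd, unsigned long addr)
{
        if (pmd_none(*pmd)) {
                pte_t *pte = pte_alloc_one_kernel(&init_mm, addr);

                if (!pte)
                        return NULL;
                /* On MIPS the pmd holds the kernel virtual address of the pte page. */
                pmd_populate_kernel(&init_mm, pmd, pte);
        }
        return pte_offset_kernel(pmd, addr);
}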
/linux-4.1.27/arch/nios2/include/asm/
H A Dpgalloc.h15 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pmd_populate_kernel() argument
18 set_pmd(pmd, __pmd((unsigned long)pte)); pmd_populate_kernel()
21 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument
24 set_pmd(pmd, __pmd((unsigned long)page_address(pte))); pmd_populate()
26 #define pmd_pgtable(pmd) pmd_page(pmd)
29 * Initialize a new pmd table with invalid pointers.
H A Dpgtable.h186 static inline int pmd_present(pmd_t pmd) pmd_present() argument
188 return (pmd_val(pmd) != (unsigned long) invalid_pte_table) pmd_present()
189 && (pmd_val(pmd) != 0UL); pmd_present()
218 static inline int pmd_none(pmd_t pmd) pmd_none() argument
220 return (pmd_val(pmd) == pmd_none()
221 (unsigned long) invalid_pte_table) || (pmd_val(pmd) == 0UL); pmd_none()
224 #define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
249 #define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd))
250 #define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
251 #define pmd_page_vaddr(pmd) pmd_val(pmd)
/linux-4.1.27/arch/x86/include/asm/
H A Dpgalloc.h63 pmd_t *pmd, pte_t *pte) pmd_populate_kernel()
66 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE)); pmd_populate_kernel()
69 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument
75 set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE)); pmd_populate()
78 #define pmd_pgtable(pmd) pmd_page(pmd)
94 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
96 BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); pmd_free()
97 pgtable_pmd_page_dtor(virt_to_page(pmd)); pmd_free()
98 free_page((unsigned long)pmd); pmd_free()
101 extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
103 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, __pmd_free_tlb() argument
106 ___pmd_free_tlb(tlb, pmd); __pmd_free_tlb()
110 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
112 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pud_populate() argument
114 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); pud_populate()
115 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd))); pud_populate()
62 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) pmd_populate_kernel() argument
H A Dpgtable.h41 #define set_pmd_at(mm, addr, pmdp, pmd) native_set_pmd_at(mm, addr, pmdp, pmd)
46 #define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd)
62 #define pmd_clear(pmd) native_pmd_clear(pmd)
103 static inline int pmd_dirty(pmd_t pmd) pmd_dirty() argument
105 return pmd_flags(pmd) & _PAGE_DIRTY; pmd_dirty()
108 static inline int pmd_young(pmd_t pmd) pmd_young() argument
110 return pmd_flags(pmd) & _PAGE_ACCESSED; pmd_young()
143 static inline unsigned long pmd_pfn(pmd_t pmd) pmd_pfn() argument
145 return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT; pmd_pfn()
161 static inline int pmd_trans_splitting(pmd_t pmd) pmd_trans_splitting() argument
163 return pmd_val(pmd) & _PAGE_SPLITTING; pmd_trans_splitting()
166 static inline int pmd_trans_huge(pmd_t pmd) pmd_trans_huge() argument
168 return pmd_val(pmd) & _PAGE_PSE; pmd_trans_huge()
251 static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set) pmd_set_flags() argument
253 pmdval_t v = native_pmd_val(pmd); pmd_set_flags()
258 static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear) pmd_clear_flags() argument
260 pmdval_t v = native_pmd_val(pmd); pmd_clear_flags()
265 static inline pmd_t pmd_mkold(pmd_t pmd) pmd_mkold() argument
267 return pmd_clear_flags(pmd, _PAGE_ACCESSED); pmd_mkold()
270 static inline pmd_t pmd_wrprotect(pmd_t pmd) pmd_wrprotect() argument
272 return pmd_clear_flags(pmd, _PAGE_RW); pmd_wrprotect()
275 static inline pmd_t pmd_mkdirty(pmd_t pmd) pmd_mkdirty() argument
277 return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY); pmd_mkdirty()
280 static inline pmd_t pmd_mkhuge(pmd_t pmd) pmd_mkhuge() argument
282 return pmd_set_flags(pmd, _PAGE_PSE); pmd_mkhuge()
285 static inline pmd_t pmd_mkyoung(pmd_t pmd) pmd_mkyoung() argument
287 return pmd_set_flags(pmd, _PAGE_ACCESSED); pmd_mkyoung()
290 static inline pmd_t pmd_mkwrite(pmd_t pmd) pmd_mkwrite() argument
292 return pmd_set_flags(pmd, _PAGE_RW); pmd_mkwrite()
295 static inline pmd_t pmd_mknotpresent(pmd_t pmd) pmd_mknotpresent() argument
297 return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE); pmd_mknotpresent()
306 static inline int pmd_soft_dirty(pmd_t pmd) pmd_soft_dirty() argument
308 return pmd_flags(pmd) & _PAGE_SOFT_DIRTY; pmd_soft_dirty()
316 static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) pmd_mksoft_dirty() argument
318 return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY); pmd_mksoft_dirty()
363 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) pmd_modify() argument
365 pmdval_t val = pmd_val(pmd); pmd_modify()
461 static inline int pmd_present(pmd_t pmd) pmd_present() argument
469 return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE); pmd_present()
483 static inline int pmd_protnone(pmd_t pmd) pmd_protnone() argument
485 return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT)) pmd_protnone()
490 static inline int pmd_none(pmd_t pmd) pmd_none() argument
494 return (unsigned long)native_pmd_val(pmd) == 0; pmd_none()
497 static inline unsigned long pmd_page_vaddr(pmd_t pmd) pmd_page_vaddr() argument
499 return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK); pmd_page_vaddr()
506 #define pmd_page(pmd) pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)
509 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
511 * this macro returns the index of the entry in the pmd page which would
539 static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) pte_offset_kernel() argument
541 return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address); pte_offset_kernel()
544 static inline int pmd_bad(pmd_t pmd) pmd_bad() argument
546 return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE; pmd_bad()
694 pmd_t *pmdp , pmd_t pmd) native_set_pmd_at()
696 native_set_pmd(pmdp, pmd); native_set_pmd_at()
797 static inline int pmd_write(pmd_t pmd) pmd_write() argument
799 return pmd_flags(pmd) & _PAGE_RW; pmd_write()
806 pmd_t pmd = native_pmdp_get_and_clear(pmdp); pmdp_get_and_clear() local
808 return pmd; pmdp_get_and_clear()
857 unsigned long addr, pmd_t *pmd) update_mmu_cache_pmd()
693 native_set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp , pmd_t pmd) native_set_pmd_at() argument
856 update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd) update_mmu_cache_pmd() argument
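On x86 every pmd_mk*() helper above is a thin wrapper around pmd_set_flags()/pmd_clear_flags(). A sketch of adding one more in the same style within this header (pmd_mkexample() is hypothetical):
static inline pmd_t pmd_mkexample(pmd_t pmd)
{
        /* Set the accessed and dirty bits in one go, mirroring pmd_mkdirty() above. */
        return pmd_set_flags(pmd, _PAGE_ACCESSED | _PAGE_DIRTY);
}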
H A Dpgtable-3level.h15 pr_err("%s:%d: bad pmd %p(%016Lx)\n", \
43 * because gcc will not read the 64-bit pmd atomically. To fix
46 * function to know if the pmd is null or not, and in turn to know if
47 * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
50 * Without THP, if the mmap_sem is held for reading, the pmd can only
52 * we can always return atomic pmd values with this function.
54 * With THP, if the mmap_sem is held for reading, the pmd can become
61 * pmdval if the low part of the pmd is none. In some cases the high
66 * of the pmd to be read atomically to decide if the pmd is unstable
67 * or not, with the only exception of when the low part of the pmd is
68 * zero in which case we return a none pmd.
79 * or we can end up with a partial pmd. pmd_read_atomic()
93 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) native_set_pmd() argument
95 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd)); native_set_pmd()
116 static inline void native_pmd_clear(pmd_t *pmd) native_pmd_clear() argument
118 u32 *tmp = (u32 *)pmd; native_pmd_clear()
162 pmd_t pmd; member in union:split_pmd
173 return res.pmd; native_pmdp_get_and_clear()
H A Dpgtable_64.h33 pr_err("%s:%d: bad pmd %p(%016lx)\n", \
63 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) native_set_pmd() argument
65 *pmdp = pmd; native_set_pmd()
68 static inline void native_pmd_clear(pmd_t *pmd) native_pmd_clear() argument
70 native_set_pmd(pmd, native_make_pmd(0)); native_pmd_clear()
89 return native_make_pmd(xchg(&xp->pmd, 0)); native_pmdp_get_and_clear()
H A Dpgtable_types.h25 #define _PAGE_BIT_SPLITTING _PAGE_BIT_SOFTW2 /* only valid on a PSE pmd */
212 /* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
215 /* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
259 typedef struct { pmdval_t pmd; } pmd_t; member in struct:__anon3075
266 static inline pmdval_t native_pmd_val(pmd_t pmd) native_pmd_val() argument
268 return pmd.pmd; native_pmd_val()
273 static inline pmdval_t native_pmd_val(pmd_t pmd) native_pmd_val() argument
275 return native_pgd_val(pmd.pud.pgd); native_pmd_val()
284 static inline pmdval_t pmd_flags(pmd_t pmd) pmd_flags() argument
286 return native_pmd_val(pmd) & PTE_FLAGS_MASK; pmd_flags()
407 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
H A Dpgtable-2level.h19 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) native_set_pmd() argument
21 *pmdp = pmd; native_set_pmd()
/linux-4.1.27/arch/arm64/mm/
H A Dmmu.c74 static void split_pmd(pmd_t *pmd, pte_t *pte) split_pmd() argument
76 unsigned long pfn = pmd_pfn(*pmd); split_pmd()
89 static void alloc_init_pte(pmd_t *pmd, unsigned long addr, alloc_init_pte() argument
96 if (pmd_none(*pmd) || pmd_sect(*pmd)) { alloc_init_pte()
98 if (pmd_sect(*pmd)) alloc_init_pte()
99 split_pmd(pmd, pte); alloc_init_pte()
100 __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE); alloc_init_pte()
103 BUG_ON(pmd_bad(*pmd)); alloc_init_pte()
105 pte = pte_offset_kernel(pmd, addr); alloc_init_pte()
112 void split_pud(pud_t *old_pud, pmd_t *pmd) split_pud() argument
119 set_pmd(pmd, __pmd(addr | prot)); split_pud()
121 } while (pmd++, i++, i < PTRS_PER_PMD); split_pud()
129 pmd_t *pmd; alloc_init_pmd() local
136 pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t)); alloc_init_pmd()
142 split_pud(pud, pmd); alloc_init_pmd()
144 pud_populate(mm, pud, pmd); alloc_init_pmd()
149 pmd = pmd_offset(pud, addr); alloc_init_pmd()
154 pmd_t old_pmd =*pmd; alloc_init_pmd()
155 set_pmd(pmd, __pmd(phys | alloc_init_pmd()
170 alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys), alloc_init_pmd()
174 } while (pmd++, addr = next, addr != end); alloc_init_pmd()
217 * be pointing to a pmd table that we no longer alloc_init_pud()
220 * Look up the old pmd table and free it. alloc_init_pud()
483 pmd_t *pmd; kern_addr_valid() local
500 pmd = pmd_offset(pud, addr); kern_addr_valid()
501 if (pmd_none(*pmd)) kern_addr_valid()
504 if (pmd_sect(*pmd)) kern_addr_valid()
505 return pfn_valid(pmd_pfn(*pmd)); kern_addr_valid()
507 pte = pte_offset_kernel(pmd, addr); kern_addr_valid()
526 pmd_t *pmd; vmemmap_populate() local
539 pmd = pmd_offset(pud, addr); vmemmap_populate()
540 if (pmd_none(*pmd)) { vmemmap_populate()
547 set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL)); vmemmap_populate()
549 vmemmap_verify((pte_t *)pmd, node, addr, next); vmemmap_populate()
588 pmd_t *pmd = fixmap_pmd(addr); fixmap_pte() local
590 BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd)); fixmap_pte()
592 return pte_offset_kernel(pmd, addr); fixmap_pte()
599 pmd_t *pmd; early_fixmap_init() local
606 pmd = pmd_offset(pud, addr); early_fixmap_init()
607 pmd_populate_kernel(&init_mm, pmd, bm_pte); early_fixmap_init()
616 if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN))) early_fixmap_init()
617 || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) { early_fixmap_init()
619 pr_warn("pmd %p != %p, %p\n", early_fixmap_init()
620 pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)), early_fixmap_init()
H A Dhugetlbpage.c41 int pmd_huge(pmd_t pmd) pmd_huge() argument
43 return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT); pmd_huge()
H A Ddump.c157 }, { /* pmd */
233 static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start) walk_pte() argument
235 pte_t *pte = pte_offset_kernel(pmd, 0); walk_pte()
247 pmd_t *pmd = pmd_offset(pud, 0); walk_pmd() local
251 for (i = 0; i < PTRS_PER_PMD; i++, pmd++) { walk_pmd()
253 if (pmd_none(*pmd) || pmd_sect(*pmd)) { walk_pmd()
254 note_page(st, addr, 3, pmd_val(*pmd)); walk_pmd()
256 BUG_ON(pmd_bad(*pmd)); walk_pmd()
257 walk_pte(st, pmd, addr); walk_pmd()
H A Dflush.c113 pmd_t pmd = pmd_mksplitting(*pmdp); pmdp_splitting_flush() local
116 set_pmd_at(vma->vm_mm, address, pmdp, pmd); pmdp_splitting_flush()
/linux-4.1.27/include/linux/
H A Dhuge_mm.h6 unsigned long address, pmd_t *pmd,
13 unsigned long address, pmd_t *pmd,
16 unsigned long address, pmd_t *pmd,
20 pmd_t *pmd,
24 pmd_t *pmd, unsigned long addr);
25 extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
33 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
102 unsigned long address, pmd_t *pmd);
119 pmd_t *pmd);
129 extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
132 static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, pmd_trans_huge_lock() argument
136 if (pmd_trans_huge(*pmd)) pmd_trans_huge_lock()
137 return __pmd_trans_huge_lock(pmd, vma, ptl); pmd_trans_huge_lock()
158 unsigned long addr, pmd_t pmd, pmd_t *pmdp);
204 static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, pmd_trans_huge_lock() argument
211 unsigned long addr, pmd_t pmd, pmd_t *pmdp) do_huge_pmd_numa_page()
210 do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pmd_t pmd, pmd_t *pmdp) do_huge_pmd_numa_page() argument
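pmd_trans_huge_lock() above returns 1 with the page-table lock held when the pmd maps a stable transparent huge page. A sketch of the expected calling pattern (example_probe_huge() is hypothetical):
#include <linux/mm.h>
#include <linux/huge_mm.h>

static int example_probe_huge(struct vm_area_struct *vma, pmd_t *pmd)
{
        spinlock_t *ptl;

        if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
                /* *pmd is a stable huge pmd here, protected by ptl. */
                spin_unlock(ptl);
                return 1;
        }
        return 0;
}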
H A Dmigrate.h69 extern bool pmd_trans_migrating(pmd_t pmd);
73 static inline bool pmd_trans_migrating(pmd_t pmd) pmd_trans_migrating() argument
87 pmd_t *pmd, pmd_t entry,
93 pmd_t *pmd, pmd_t entry, migrate_misplaced_transhuge_page()
91 migrate_misplaced_transhuge_page(struct mm_struct *mm, struct vm_area_struct *vma, pmd_t *pmd, pmd_t entry, unsigned long address, struct page *page, int node) migrate_misplaced_transhuge_page() argument
/linux-4.1.27/arch/x86/power/
H A Dhibernate_32.c59 static pte_t *resume_one_page_table_init(pmd_t *pmd) resume_one_page_table_init() argument
61 if (pmd_none(*pmd)) { resume_one_page_table_init()
66 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); resume_one_page_table_init()
68 BUG_ON(page_table != pte_offset_kernel(pmd, 0)); resume_one_page_table_init()
73 return pte_offset_kernel(pmd, 0); resume_one_page_table_init()
85 pmd_t *pmd; resume_physical_mapping_init() local
94 pmd = resume_one_md_table_init(pgd); resume_physical_mapping_init()
95 if (!pmd) resume_physical_mapping_init()
101 for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) { resume_physical_mapping_init()
110 set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC)); resume_physical_mapping_init()
115 pte = resume_one_page_table_init(pmd); resume_physical_mapping_init()
/linux-4.1.27/arch/s390/include/asm/
H A Dpgalloc.h85 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
87 pgtable_pmd_page_dtor(virt_to_page(pmd)); pmd_free()
88 crst_table_free(mm, (unsigned long *) pmd); pmd_free()
96 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pud_populate() argument
98 pud_val(*pud) = _REGION3_ENTRY | __pa(pmd); pud_populate()
125 pmd_t *pmd, pgtable_t pte) pmd_populate()
127 pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte); pmd_populate()
130 #define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
132 #define pmd_pgtable(pmd) \
133 (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)
124 pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte) pmd_populate() argument
H A Dpgtable.h20 * setup: the pgd is never bad, and a pmd always exists (as it's folded
93 printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
419 * pgd/pmd/pte query functions
482 static inline int pmd_present(pmd_t pmd) pmd_present() argument
484 return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID; pmd_present()
487 static inline int pmd_none(pmd_t pmd) pmd_none() argument
489 return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID; pmd_none()
492 static inline int pmd_large(pmd_t pmd) pmd_large() argument
494 return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0; pmd_large()
497 static inline unsigned long pmd_pfn(pmd_t pmd) pmd_pfn() argument
502 if (pmd_large(pmd)) pmd_pfn()
504 return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT; pmd_pfn()
507 static inline int pmd_bad(pmd_t pmd) pmd_bad() argument
509 if (pmd_large(pmd)) pmd_bad()
510 return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0; pmd_bad()
511 return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0; pmd_bad()
528 static inline int pmd_write(pmd_t pmd) pmd_write() argument
530 return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0; pmd_write()
533 static inline int pmd_dirty(pmd_t pmd) pmd_dirty() argument
536 if (pmd_large(pmd)) pmd_dirty()
537 dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0; pmd_dirty()
541 static inline int pmd_young(pmd_t pmd) pmd_young() argument
544 if (pmd_large(pmd)) pmd_young()
545 young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0; pmd_young()
807 * pgd/pmd/pte modification functions
1256 #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
1270 pmd_t *pmd = (pmd_t *) pud; pmd_offset() local
1272 pmd = (pmd_t *) pud_deref(*pud); pmd_offset()
1273 return pmd + pmd_index(address); pmd_offset()
1280 #define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
1283 #define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
1284 #define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
1285 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
1302 static inline pmd_t pmd_wrprotect(pmd_t pmd) pmd_wrprotect() argument
1304 pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE; pmd_wrprotect()
1305 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; pmd_wrprotect()
1306 return pmd; pmd_wrprotect()
1309 static inline pmd_t pmd_mkwrite(pmd_t pmd) pmd_mkwrite() argument
1311 pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE; pmd_mkwrite()
1312 if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)) pmd_mkwrite()
1313 return pmd; pmd_mkwrite()
1314 pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; pmd_mkwrite()
1315 return pmd; pmd_mkwrite()
1318 static inline pmd_t pmd_mkclean(pmd_t pmd) pmd_mkclean() argument
1320 if (pmd_large(pmd)) { pmd_mkclean()
1321 pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY; pmd_mkclean()
1322 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; pmd_mkclean()
1324 return pmd; pmd_mkclean()
1327 static inline pmd_t pmd_mkdirty(pmd_t pmd) pmd_mkdirty() argument
1329 if (pmd_large(pmd)) { pmd_mkdirty()
1330 pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY; pmd_mkdirty()
1331 if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) pmd_mkdirty()
1332 pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; pmd_mkdirty()
1334 return pmd; pmd_mkdirty()
1337 static inline pmd_t pmd_mkyoung(pmd_t pmd) pmd_mkyoung() argument
1339 if (pmd_large(pmd)) { pmd_mkyoung()
1340 pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG; pmd_mkyoung()
1341 if (pmd_val(pmd) & _SEGMENT_ENTRY_READ) pmd_mkyoung()
1342 pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID; pmd_mkyoung()
1344 return pmd; pmd_mkyoung()
1347 static inline pmd_t pmd_mkold(pmd_t pmd) pmd_mkold() argument
1349 if (pmd_large(pmd)) { pmd_mkold()
1350 pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG; pmd_mkold()
1351 pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; pmd_mkold()
1353 return pmd; pmd_mkold()
1356 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) pmd_modify() argument
1358 if (pmd_large(pmd)) { pmd_modify()
1359 pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE | pmd_modify()
1362 pmd_val(pmd) |= massage_pgprot_pmd(newprot); pmd_modify()
1363 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)) pmd_modify()
1364 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; pmd_modify()
1365 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)) pmd_modify()
1366 pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; pmd_modify()
1367 return pmd; pmd_modify()
1369 pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN; pmd_modify()
1370 pmd_val(pmd) |= massage_pgprot_pmd(newprot); pmd_modify()
1371 return pmd; pmd_modify()
1469 static inline int pmd_trans_splitting(pmd_t pmd) pmd_trans_splitting() argument
1471 return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) && pmd_trans_splitting()
1472 (pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT); pmd_trans_splitting()
1481 static inline pmd_t pmd_mkhuge(pmd_t pmd) pmd_mkhuge() argument
1483 pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE; pmd_mkhuge()
1484 pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG; pmd_mkhuge()
1485 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; pmd_mkhuge()
1486 return pmd; pmd_mkhuge()
1493 pmd_t pmd; pmdp_test_and_clear_young() local
1495 pmd = *pmdp; pmdp_test_and_clear_young()
1497 *pmdp = pmd_mkold(pmd); pmdp_test_and_clear_young()
1498 return pmd_young(pmd); pmdp_test_and_clear_young()
1505 pmd_t pmd = *pmdp; pmdp_get_and_clear() local
1509 return pmd; pmdp_get_and_clear()
1517 pmd_t pmd = *pmdp; pmdp_get_and_clear_full() local
1522 return pmd; pmdp_get_and_clear_full()
1543 pmd_t pmd = *pmdp; pmdp_set_wrprotect() local
1545 if (pmd_write(pmd)) { pmdp_set_wrprotect()
1547 set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd)); pmdp_set_wrprotect()
1554 static inline int pmd_trans_huge(pmd_t pmd) pmd_trans_huge() argument
1556 return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE; pmd_trans_huge()
H A Dtlb.h112 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
114 * If the mm uses a two level page table the single pmd is freed
116 * to avoid the double free of the pmd in this case.
118 static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, pmd_free_tlb() argument
123 pgtable_pmd_page_dtor(virt_to_page(pmd)); pmd_free_tlb()
124 tlb_remove_table(tlb, pmd); pmd_free_tlb()
H A Dpage.h73 typedef struct { unsigned long pmd; } pmd_t; member in struct:__anon2460
81 #define pmd_val(x) ((x).pmd)
/linux-4.1.27/arch/ia64/include/asm/
H A Dpgalloc.h55 pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd) pud_populate() argument
57 pud_val(*pud_entry) = __pa(pmd); pud_populate()
65 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd_free() argument
67 quicklist_free(0, NULL, pmd); pmd_free()
70 #define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd)
77 #define pmd_pgtable(pmd) pmd_page(pmd)
/linux-4.1.27/arch/xtensa/include/asm/
H A Dpgalloc.h20 * Allocating and freeing a pmd is trivial: the 1-entry pmd is
28 #define pmd_pgtable(pmd) pmd_page(pmd)
82 #define pmd_pgtable(pmd) pmd_page(pmd)
H A Dpgtable.h232 * The pmd contains the kernel virtual address of the pte page. pgtable_cache_init()
234 #define pmd_page_vaddr(pmd) ((unsigned long)(pmd_val(pmd) & PAGE_MASK)) pgtable_cache_init()
235 #define pmd_page(pmd) virt_to_page(pmd_val(pmd)) pgtable_cache_init()
251 #define pmd_none(pmd) (!pmd_val(pmd))
252 #define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK)
253 #define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
401 * ((pte_t*) ((unsigned long)(pmd_val(*pmd) & PAGE_MASK)) + pte_index(addr))
411 #define _PTE_OFFSET(pmd,adr,tmp) _PTE_INDEX(tmp, adr); \
412 srli pmd, pmd, PAGE_SHIFT; \
413 slli pmd, pmd, PAGE_SHIFT; \
414 addx4 pmd, tmp, pmd
/linux-4.1.27/arch/score/include/asm/
H A Dpgalloc.h6 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pmd_populate_kernel() argument
9 set_pmd(pmd, __pmd((unsigned long)pte)); pmd_populate_kernel()
12 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument
15 set_pmd(pmd, __pmd((unsigned long)page_address(pte))); pmd_populate()
18 #define pmd_pgtable(pmd) pmd_page(pmd)
H A Dpgtable.h46 * Empty pgd/pmd entries point to the invalid_pte_table.
48 static inline int pmd_none(pmd_t pmd) pmd_none() argument
50 return pmd_val(pmd) == (unsigned long) invalid_pte_table; pmd_none()
53 #define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
55 static inline int pmd_present(pmd_t pmd) pmd_present() argument
57 return pmd_val(pmd) != (unsigned long) invalid_pte_table; pmd_present()
97 #define pmd_phys(pmd) __pa((void *)pmd_val(pmd))
98 #define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
109 * setup: the pgd is never bad, and a pmd always exists (as it's folded
118 #define pmd_page_vaddr(pmd) pmd_val(pmd)
/linux-4.1.27/arch/mn10300/include/asm/
H A Dpgalloc.h22 #define pmd_populate_kernel(mm, pmd, pte) \
23 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE))
26 void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte) pmd_populate() argument
28 set_pmd(pmd, __pmd((page_to_pfn(pte) << PAGE_SHIFT) | _PAGE_TABLE)); pmd_populate()
30 #define pmd_pgtable(pmd) pmd_page(pmd)
H A Dpgtable.h291 * setup: the pgd is never bad, and a pmd always exists (as it's folded pte_mkspecial()
404 #define pmd_page_kernel(pmd) \
405 ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
407 #define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
409 #define pmd_large(pmd) \
410 ((pmd_val(pmd) & (_PAGE_PSE | _PAGE_PRESENT)) == \
434 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
436 * this macro returns the index of the entry in the pmd page which would
/linux-4.1.27/arch/avr32/include/asm/
H A Dpgalloc.h20 pmd_t *pmd, pte_t *pte) pmd_populate_kernel()
22 set_pmd(pmd, __pmd((unsigned long)pte)); pmd_populate_kernel()
25 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument
28 set_pmd(pmd, __pmd((unsigned long)page_address(pte))); pmd_populate()
30 #define pmd_pgtable(pmd) pmd_page(pmd)
19 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) pmd_populate_kernel() argument
/linux-4.1.27/arch/frv/include/asm/
H A Dpgalloc.h23 #define pmd_populate_kernel(mm, pmd, pte) __set_pmd(pmd, __pa(pte) | _PAGE_TABLE)
28 #define pmd_pgtable(pmd) pmd_page(pmd)
59 * allocating and freeing a pmd is trivial: the 1-entry pmd is
/linux-4.1.27/arch/metag/include/asm/
H A Dpgalloc.h7 #define pmd_populate_kernel(mm, pmd, pte) \
8 set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
10 #define pmd_populate(mm, pmd, pte) \
11 set_pmd(pmd, __pmd(_PAGE_TABLE | page_to_phys(pte)))
13 #define pmd_pgtable(pmd) pmd_page(pmd)
H A Dpgtable.h162 static inline unsigned long pmd_page_vaddr(pmd_t pmd) pmd_page_vaddr() argument
164 unsigned long paddr = pmd_val(pmd) & PAGE_MASK; pmd_page_vaddr()
170 #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
171 #define pmd_page_shift(pmd) (12 + ((pmd_val(pmd) & _PAGE_SZ_MASK) \
173 #define pmd_num_ptrs(pmd) (PGDIR_SIZE >> pmd_page_shift(pmd))
195 # define pte_index(pmd, address) \
199 # define pte_index(pmd, address) \
200 (((address) >> pmd_page_shift(pmd)) & (pmd_num_ptrs(pmd) - 1))
/linux-4.1.27/arch/um/kernel/
H A Dmem.c66 static void __init one_page_table_init(pmd_t *pmd) one_page_table_init() argument
68 if (pmd_none(*pmd)) { one_page_table_init()
70 set_pmd(pmd, __pmd(_KERNPG_TABLE + one_page_table_init()
72 if (pte != pte_offset_kernel(pmd, 0)) one_page_table_init()
92 pmd_t *pmd; fixrange_init() local
105 pmd = pmd_offset(pud, vaddr); fixrange_init()
106 for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) { fixrange_init()
107 one_page_table_init(pmd); fixrange_init()
120 pmd_t *pmd; fixaddr_user_init() local
136 pmd = pmd_offset(pud, vaddr); fixaddr_user_init()
137 pte = pte_offset_kernel(pmd, vaddr); fixaddr_user_init()
228 pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL); pmd_alloc_one() local
230 if (pmd) pmd_alloc_one()
231 memset(pmd, 0, PAGE_SIZE); pmd_alloc_one()
233 return pmd; pmd_alloc_one()
H A Dtlb.c183 static inline int update_pte_range(pmd_t *pmd, unsigned long addr, update_pte_range() argument
190 pte = pte_offset_kernel(pmd, addr); update_pte_range()
223 pmd_t *pmd; update_pmd_range() local
227 pmd = pmd_offset(pud, addr); update_pmd_range()
230 if (!pmd_present(*pmd)) { update_pmd_range()
231 if (hvc->force || pmd_newpage(*pmd)) { update_pmd_range()
233 pmd_mkuptodate(*pmd); update_pmd_range()
236 else ret = update_pte_range(pmd, addr, next, hvc); update_pmd_range()
237 } while (pmd++, addr = next, ((addr < end) && !ret)); update_pmd_range()
303 pmd_t *pmd; flush_tlb_kernel_range_common() local
344 pmd = pmd_offset(pud, addr); flush_tlb_kernel_range_common()
345 if (!pmd_present(*pmd)) { flush_tlb_kernel_range_common()
349 if (pmd_newpage(*pmd)) { flush_tlb_kernel_range_common()
361 pte = pte_offset_kernel(pmd, addr); flush_tlb_kernel_range_common()
387 pmd_t *pmd; flush_tlb_page() local
403 pmd = pmd_offset(pud, address); flush_tlb_page()
404 if (!pmd_present(*pmd)) flush_tlb_page()
407 pte = pte_offset_kernel(pmd, address); flush_tlb_page()
463 pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address) pte_offset_proc() argument
465 return pte_offset_kernel(pmd, address); pte_offset_proc()
472 pmd_t *pmd = pmd_offset(pud, addr); addr_pte() local
474 return pte_offset_map(pmd, addr); addr_pte()
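update_pmd_range() above follows the loop shape used for pmd walks throughout this listing (compare free_pmd_range() and ioremap_pmd_range() further down): step through the range with pmd_addr_end() and skip empty slots. A generic sketch of that loop; example_walk_pmds is a hypothetical name:

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Sketch: visit every populated pmd entry covering [addr, end). */
static void example_walk_pmds(pud_t *pud, unsigned long addr, unsigned long end)
{
        pmd_t *pmd = pmd_offset(pud, addr);
        unsigned long next;

        do {
                next = pmd_addr_end(addr, end);         /* clamp to the pmd boundary */
                if (pmd_none_or_clear_bad(pmd))
                        continue;                       /* nothing mapped at this slot */
                /* ... operate on the pte page referenced by *pmd ... */
        } while (pmd++, addr = next, addr != end);
}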
/linux-4.1.27/arch/unicore32/kernel/
H A Dhibernate.c51 static pte_t *resume_one_page_table_init(pmd_t *pmd) resume_one_page_table_init() argument
53 if (pmd_none(*pmd)) { resume_one_page_table_init()
58 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_KERNEL_TABLE)); resume_one_page_table_init()
60 BUG_ON(page_table != pte_offset_kernel(pmd, 0)); resume_one_page_table_init()
65 return pte_offset_kernel(pmd, 0); resume_one_page_table_init()
77 pmd_t *pmd; resume_physical_mapping_init() local
86 pmd = resume_one_md_table_init(pgd); resume_physical_mapping_init()
87 if (!pmd) resume_physical_mapping_init()
93 for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) { resume_physical_mapping_init()
102 pte = resume_one_page_table_init(pmd); resume_physical_mapping_init()
/linux-4.1.27/arch/x86/mm/
H A Dinit_64.c64 pmd_t *pmd = pmd_page + pmd_index(addr); ident_pmd_init() local
66 if (!pmd_present(*pmd)) ident_pmd_init()
67 set_pmd(pmd, __pmd(addr | pmd_flag)); ident_pmd_init()
77 pmd_t *pmd; ident_pud_init() local
84 pmd = pmd_offset(pud, 0); ident_pud_init()
85 ident_pmd_init(info->pmd_flag, pmd, addr, next); ident_pud_init()
88 pmd = (pmd_t *)info->alloc_pgt_page(info->context); ident_pud_init()
89 if (!pmd) ident_pud_init()
91 ident_pmd_init(info->pmd_flag, pmd, addr, next); ident_pud_init()
92 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); ident_pud_init()
248 pmd_t *pmd = (pmd_t *) spp_getpage(); fill_pmd() local
249 pud_populate(&init_mm, pud, pmd); fill_pmd()
250 if (pmd != pmd_offset(pud, 0)) fill_pmd()
252 pmd, pmd_offset(pud, 0)); fill_pmd()
257 static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr) fill_pte() argument
259 if (pmd_none(*pmd)) { fill_pte()
261 pmd_populate_kernel(&init_mm, pmd, pte); fill_pte()
262 if (pte != pte_offset_kernel(pmd, 0)) fill_pte()
265 return pte_offset_kernel(pmd, vaddr); fill_pte()
271 pmd_t *pmd; set_pte_vaddr_pud() local
275 pmd = fill_pmd(pud, vaddr); set_pte_vaddr_pud()
276 pte = fill_pte(pmd, vaddr); set_pte_vaddr_pud()
316 pmd_t *pmd; populate_extra_pte() local
318 pmd = populate_extra_pmd(vaddr); populate_extra_pte()
319 return fill_pte(pmd, vaddr); populate_extra_pte()
330 pmd_t *pmd; __init_extra_mapping() local
345 pmd = (pmd_t *) spp_getpage(); __init_extra_mapping()
346 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | __init_extra_mapping()
349 pmd = pmd_offset(pud, phys); __init_extra_mapping()
350 BUG_ON(!pmd_none(*pmd)); __init_extra_mapping()
351 set_pmd(pmd, __pmd(phys | pgprot_val(prot))); __init_extra_mapping()
383 pmd_t *pmd = level2_kernel_pgt; cleanup_highmap() local
393 for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) { cleanup_highmap()
394 if (pmd_none(*pmd)) cleanup_highmap()
397 set_pmd(pmd, __pmd(0)); cleanup_highmap()
456 pmd_t *pmd = pmd_page + pmd_index(address); phys_pmd_init() local
465 set_pmd(pmd, __pmd(0)); phys_pmd_init()
469 if (pmd_val(*pmd)) { phys_pmd_init()
470 if (!pmd_large(*pmd)) { phys_pmd_init()
472 pte = (pte_t *)pmd_page_vaddr(*pmd); phys_pmd_init()
496 new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd)); phys_pmd_init()
502 set_pte((pte_t *)pmd, phys_pmd_init()
514 pmd_populate_kernel(&init_mm, pmd, pte); phys_pmd_init()
531 pmd_t *pmd; phys_pud_init() local
545 pmd = pmd_offset(pud, 0); phys_pud_init()
546 last_map_addr = phys_pmd_init(pmd, addr, end, phys_pud_init()
583 pmd = alloc_low_page(); phys_pud_init()
584 last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask, phys_pud_init()
588 pud_populate(&init_mm, pud, pmd); phys_pud_init()
733 static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd) free_pte_table() argument
745 free_pagetable(pmd_page(*pmd), 0); free_pte_table()
747 pmd_clear(pmd); free_pte_table()
753 pmd_t *pmd; free_pmd_table() local
757 pmd = pmd_start + i; free_pmd_table()
758 if (pmd_val(*pmd)) free_pmd_table()
762 /* free a pmd table */ free_pmd_table()
868 pmd_t *pmd; remove_pmd_table() local
871 pmd = pmd_start + pmd_index(addr); remove_pmd_table()
872 for (; addr < end; addr = next, pmd++) { remove_pmd_table()
875 if (!pmd_present(*pmd)) remove_pmd_table()
878 if (pmd_large(*pmd)) { remove_pmd_table()
882 free_pagetable(pmd_page(*pmd), remove_pmd_table()
886 pmd_clear(pmd); remove_pmd_table()
893 page_addr = page_address(pmd_page(*pmd)); remove_pmd_table()
896 free_pagetable(pmd_page(*pmd), remove_pmd_table()
900 pmd_clear(pmd); remove_pmd_table()
908 pte_base = (pte_t *)pmd_page_vaddr(*pmd); remove_pmd_table()
910 free_pte_table(pte_base, pmd); remove_pmd_table()
1162 pmd_t *pmd; kern_addr_valid() local
1179 pmd = pmd_offset(pud, addr); kern_addr_valid()
1180 if (pmd_none(*pmd)) kern_addr_valid()
1183 if (pmd_large(*pmd)) kern_addr_valid()
1184 return pfn_valid(pmd_pfn(*pmd)); kern_addr_valid()
1186 pte = pte_offset_kernel(pmd, addr); kern_addr_valid()
1243 pmd_t *pmd; vmemmap_populate_hugepages() local
1256 pmd = pmd_offset(pud, addr); vmemmap_populate_hugepages()
1257 if (pmd_none(*pmd)) { vmemmap_populate_hugepages()
1266 set_pmd(pmd, __pmd(pte_val(entry))); vmemmap_populate_hugepages()
1282 } else if (pmd_large(*pmd)) { vmemmap_populate_hugepages()
1283 vmemmap_verify((pte_t *)pmd, node, addr, next); vmemmap_populate_hugepages()
1315 pmd_t *pmd; register_page_bootmem_memmap() local
1338 pmd = pmd_offset(pud, addr); register_page_bootmem_memmap()
1339 if (pmd_none(*pmd)) register_page_bootmem_memmap()
1341 get_page_bootmem(section_nr, pmd_page(*pmd), register_page_bootmem_memmap()
1344 pte = pte_offset_kernel(pmd, addr); register_page_bootmem_memmap()
1352 pmd = pmd_offset(pud, addr); register_page_bootmem_memmap()
1353 if (pmd_none(*pmd)) register_page_bootmem_memmap()
1357 page = pmd_page(*pmd); register_page_bootmem_memmap()
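kern_addr_valid() above shows the other recurring shape: a top-down descent that stops early when the pmd itself is a large (2 MB) mapping. A condensed sketch of that descent using the x86 helpers shown above; example_kaddr_mapped is a hypothetical name, and 1 GB pud mappings are ignored for brevity:

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Sketch: is this kernel virtual address backed by some mapping? */
static bool example_kaddr_mapped(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd))
                return false;
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return false;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return false;
        if (pmd_large(*pmd))                    /* 2 MB entry: no pte level below */
                return pfn_valid(pmd_pfn(*pmd));
        pte = pte_offset_kernel(pmd, addr);
        return pte_present(*pte);
}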
H A Dpgtable.c63 void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) ___pmd_free_tlb() argument
65 struct page *page = virt_to_page(pmd); ___pmd_free_tlb()
66 paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); ___pmd_free_tlb()
149 * kernel pmd is shared. If PAE were not to share the pmd a similar
165 * Also, if we're in a paravirt environment where the kernel pmd is
171 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) pud_populate() argument
173 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); pud_populate()
176 reserved at the pmd (PDPT) level. */ pud_populate()
177 set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT)); pud_populate()
212 pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP); preallocate_pmds() local
213 if (!pmd) preallocate_pmds()
215 if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) { preallocate_pmds()
216 free_page((unsigned long)pmd); preallocate_pmds()
217 pmd = NULL; preallocate_pmds()
220 if (pmd) preallocate_pmds()
222 pmds[i] = pmd; preallocate_pmds()
234 * Mop up any pmd pages which may still be attached to the pgd.
235 * Normally they will be freed by munmap/exit_mmap, but any pmd we
247 pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd); pgd_mop_up_pmds() local
252 pmd_free(mm, pmd); pgd_mop_up_pmds()
269 pmd_t *pmd = pmds[i]; pgd_prepopulate_pmd() local
272 memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]), pgd_prepopulate_pmd()
275 pud_populate(mm, pud, pmd); pgd_prepopulate_pmd()
299 * shared kernel pmd. And this requires a whole page for pgd. pgd_cache_init()
306 * shared kernel pmd. Shared kernel pmd does not require a whole pgd_cache_init()
436 * We had a write-protection fault here and changed the pmd pmdp_set_access_flags()
587 int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) pmd_set_huge() argument
601 set_pte((pte_t *)pmd, pfn_pte( pmd_set_huge()
618 int pmd_clear_huge(pmd_t *pmd) pmd_clear_huge() argument
620 if (pmd_large(*pmd)) { pmd_clear_huge()
621 pmd_clear(pmd); pmd_clear_huge()
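preallocate_pmds() above pairs the raw page allocation with pgtable_pmd_page_ctor(), and the matching dtor appears in pmd_free_tlb() at the top of this page. A hedged sketch of one such allocate/release pair; example_alloc_pmd_page and example_free_pmd_page are hypothetical names:

#include <linux/mm.h>
#include <asm/pgalloc.h>

/* Sketch: allocate a pmd page with its split-ptlock state initialised. */
static pmd_t *example_alloc_pmd_page(void)
{
        pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

        if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
                free_page((unsigned long)pmd);  /* ctor failed: give the page back */
                pmd = NULL;
        }
        return pmd;
}

/* Release must run the dtor before the page goes back to the allocator. */
static void example_free_pmd_page(pmd_t *pmd)
{
        pgtable_pmd_page_dtor(virt_to_page(pmd));
        free_page((unsigned long)pmd);
}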
H A Dkasan_init_64.c64 static int __init zero_pte_populate(pmd_t *pmd, unsigned long addr, zero_pte_populate() argument
67 pte_t *pte = pte_offset_kernel(pmd, addr); zero_pte_populate()
74 pte = pte_offset_kernel(pmd, addr); zero_pte_populate()
83 pmd_t *pmd = pmd_offset(pud, addr); zero_pmd_populate() local
86 WARN_ON(!pmd_none(*pmd)); zero_pmd_populate()
87 set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte) zero_pmd_populate()
90 pmd = pmd_offset(pud, addr); zero_pmd_populate()
93 if (pmd_none(*pmd)) { zero_pmd_populate()
97 set_pmd(pmd, __pmd(__pa_nodebug(p) | _KERNPG_TABLE)); zero_pmd_populate()
99 ret = zero_pte_populate(pmd, addr, end); zero_pmd_populate()
H A Dpgtable_32.c31 pmd_t *pmd; set_pte_vaddr() local
44 pmd = pmd_offset(pud, vaddr); set_pte_vaddr()
45 if (pmd_none(*pmd)) { set_pte_vaddr()
49 pte = pte_offset_kernel(pmd, vaddr); set_pte_vaddr()
H A Dinit_32.c95 static pte_t * __init one_page_table_init(pmd_t *pmd) one_page_table_init() argument
97 if (!(pmd_val(*pmd) & _PAGE_PRESENT)) { one_page_table_init()
101 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); one_page_table_init()
102 BUG_ON(page_table != pte_offset_kernel(pmd, 0)); one_page_table_init()
105 return pte_offset_kernel(pmd, 0); one_page_table_init()
119 pmd_t *pmd; populate_extra_pte() local
121 pmd = populate_extra_pmd(vaddr); populate_extra_pte()
122 return one_page_table_init(pmd) + pte_idx; populate_extra_pte()
156 static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd, page_table_kmap_check() argument
183 set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE)); page_table_kmap_check()
184 BUG_ON(newpte != pte_offset_kernel(pmd, 0)); page_table_kmap_check()
212 pmd_t *pmd; page_table_range_init() local
226 pmd = one_md_table_init(pgd); page_table_range_init()
227 pmd = pmd + pmd_index(vaddr); page_table_range_init()
229 pmd++, pmd_idx++) { page_table_range_init()
230 pte = page_table_kmap_check(one_page_table_init(pmd), page_table_range_init()
231 pmd, vaddr, pte, &adr); page_table_range_init()
263 pmd_t *pmd; kernel_physical_mapping_init() local
296 pmd = one_md_table_init(pgd); kernel_physical_mapping_init()
302 pmd += pmd_idx; kernel_physical_mapping_init()
307 pmd++, pmd_idx++) { kernel_physical_mapping_init()
335 set_pmd(pmd, pfn_pmd(pfn, init_prot)); kernel_physical_mapping_init()
337 set_pmd(pmd, pfn_pmd(pfn, prot)); kernel_physical_mapping_init()
342 pte = one_page_table_init(pmd); kernel_physical_mapping_init()
418 pmd_t *pmd; permanent_kmaps_init() local
426 pmd = pmd_offset(pud, vaddr); permanent_kmaps_init()
427 pte = pte_offset_kernel(pmd, vaddr); permanent_kmaps_init()
458 pmd_t *pmd; native_pagetable_init() local
468 * should have pte used near max_low_pfn or one pmd is not present. native_pagetable_init()
477 pmd = pmd_offset(pud, va); native_pagetable_init()
478 if (!pmd_present(*pmd)) native_pagetable_init()
482 if (pmd_large(*pmd)) { native_pagetable_init()
483 pr_warn("try to clear pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx, but pmd is big page and is not using pte !\n", native_pagetable_init()
484 pfn, pmd, __pa(pmd)); native_pagetable_init()
488 pte = pte_offset_kernel(pmd, va); native_pagetable_init()
492 printk(KERN_DEBUG "clearing pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx pte: %p pte phys: %lx\n", native_pagetable_init()
493 pfn, pmd, __pa(pmd), pte, __pa(pte)); native_pagetable_init()
H A Dpageattr.c332 pmd_t *pmd; lookup_address_in_pgd() local
347 pmd = pmd_offset(pud, address); lookup_address_in_pgd()
348 if (pmd_none(*pmd)) lookup_address_in_pgd()
352 if (pmd_large(*pmd) || !pmd_present(*pmd)) lookup_address_in_pgd()
353 return (pte_t *)pmd; lookup_address_in_pgd()
357 return pte_offset_kernel(pmd, address); lookup_address_in_pgd()
364 * Note: We return pud and pmd either when the entry is marked large
436 * Set the new pmd in all the pgds we know about:
449 pmd_t *pmd; __set_pmd_pte() local
453 pmd = pmd_offset(pud, address); __set_pmd_pte()
454 set_pte_atomic((pte_t *)pmd, pte); __set_pmd_pte()
525 * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL try_preserve_large_page()
626 * even on a non present pmd. __split_large_page()
637 * otherwise pmd/pte_present will return true even on a non __split_large_page()
638 * present pmd/pte. The canon_pgprot will clear _PAGE_GLOBAL __split_large_page()
711 static bool try_to_free_pmd_page(pmd_t *pmd) try_to_free_pmd_page() argument
716 if (!pmd_none(pmd[i])) try_to_free_pmd_page()
719 free_page((unsigned long)pmd); try_to_free_pmd_page()
735 static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end) unmap_pte_range() argument
737 pte_t *pte = pte_offset_kernel(pmd, start); unmap_pte_range()
746 if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) { unmap_pte_range()
747 pmd_clear(pmd); unmap_pte_range()
753 static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd, __unmap_pmd_range() argument
756 if (unmap_pte_range(pmd, start, end)) __unmap_pmd_range()
763 pmd_t *pmd = pmd_offset(pud, start); unmap_pmd_range() local
772 __unmap_pmd_range(pud, pmd, start, pre_end); unmap_pmd_range()
775 pmd++; unmap_pmd_range()
782 if (pmd_large(*pmd)) unmap_pmd_range()
783 pmd_clear(pmd); unmap_pmd_range()
785 __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE); unmap_pmd_range()
788 pmd++; unmap_pmd_range()
795 return __unmap_pmd_range(pud, pmd, start, end); unmap_pmd_range()
858 static int alloc_pte_page(pmd_t *pmd) alloc_pte_page() argument
864 set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE)); alloc_pte_page()
870 pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); alloc_pmd_page() local
871 if (!pmd) alloc_pmd_page()
874 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); alloc_pmd_page()
880 unsigned num_pages, pmd_t *pmd, pgprot_t pgprot) populate_pte()
884 pte = pte_offset_kernel(pmd, start); populate_pte()
905 pmd_t *pmd; populate_pmd() local
922 pmd = pmd_offset(pud, start); populate_pmd()
923 if (pmd_none(*pmd)) populate_pmd()
924 if (alloc_pte_page(pmd)) populate_pmd()
927 populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot); populate_pmd()
949 pmd = pmd_offset(pud, start); populate_pmd()
951 set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE | populate_pmd()
963 pmd = pmd_offset(pud, start); populate_pmd()
964 if (pmd_none(*pmd)) populate_pmd()
965 if (alloc_pte_page(pmd)) populate_pmd()
969 pmd, pgprot); populate_pmd()
878 populate_pte(struct cpa_data *cpa, unsigned long start, unsigned long end, unsigned num_pages, pmd_t *pmd, pgprot_t pgprot) populate_pte() argument
H A Dfault.c197 pmd_t *pmd, *pmd_k; vmalloc_sync_one() local
215 pmd = pmd_offset(pud, address); vmalloc_sync_one()
220 if (!pmd_present(*pmd)) vmalloc_sync_one()
221 set_pmd(pmd, *pmd_k); vmalloc_sync_one()
223 BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); vmalloc_sync_one()
325 pmd_t *pmd; dump_pagetable() local
333 pmd = pmd_offset(pud_offset(pgd, address), address); dump_pagetable()
334 printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd)); dump_pagetable()
342 if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd)) dump_pagetable()
345 pte = pte_offset_kernel(pmd, address); dump_pagetable()
367 pmd_t *pmd, *pmd_ref; vmalloc_fault() local
409 pmd = pmd_offset(pud, address); vmalloc_fault()
414 if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref)) vmalloc_fault()
417 if (pmd_huge(*pmd)) vmalloc_fault()
424 pte = pte_offset_kernel(pmd, address); vmalloc_fault()
468 pmd_t *pmd; dump_pagetable() local
487 pmd = pmd_offset(pud, address); dump_pagetable()
488 if (bad_address(pmd)) dump_pagetable()
491 printk("PMD %lx ", pmd_val(*pmd)); dump_pagetable()
492 if (!pmd_present(*pmd) || pmd_large(*pmd)) dump_pagetable()
495 pte = pte_offset_kernel(pmd, address); dump_pagetable()
952 pmd_t *pmd; spurious_fault() local
980 pmd = pmd_offset(pud, address); spurious_fault()
981 if (!pmd_present(*pmd)) spurious_fault()
984 if (pmd_large(*pmd)) spurious_fault()
985 return spurious_fault_check(error_code, (pte_t *) pmd); spurious_fault()
987 pte = pte_offset_kernel(pmd, address); spurious_fault()
999 ret = spurious_fault_check(error_code, (pte_t *) pmd); spurious_fault()
H A Dgup.c71 static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, gup_pte_range() argument
81 ptep = pte_offset_map(&pmd, addr); gup_pte_range()
117 static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr, gup_huge_pmd() argument
121 pte_t pte = *(pte_t *)&pmd; gup_huge_pmd()
159 pmd_t pmd = *pmdp; gup_pmd_range() local
166 * splitting bit in the pmd. Returning zero will take gup_pmd_range()
168 * if the pmd is still in splitting state. gup-fast gup_pmd_range()
173 if (pmd_none(pmd) || pmd_trans_splitting(pmd)) gup_pmd_range()
175 if (unlikely(pmd_large(pmd) || !pmd_present(pmd))) { gup_pmd_range()
181 if (pmd_protnone(pmd)) gup_pmd_range()
183 if (!gup_huge_pmd(pmd, addr, next, write, pages, nr)) gup_pmd_range()
186 if (!gup_pte_range(pmd, addr, next, write, pages, nr)) gup_pmd_range()
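gup_pmd_range() above is the lockless fast path: each pmd is read once, and any entry that is absent or mid-THP-split makes the walk bail out to the slow path. A simplified sketch of that dispatch under the 4.1-era splitting scheme, reusing the gup_huge_pmd()/gup_pte_range() helpers from this file as if they were visible; example_gup_pmd is a hypothetical name:

#include <linux/mm.h>

/* Sketch: decide how the fast path handles one pmd entry it just read. */
static int example_gup_pmd(pmd_t *pmdp, unsigned long addr, unsigned long next,
                           int write, struct page **pages, int *nr)
{
        pmd_t pmd = READ_ONCE(*pmdp);           /* snapshot; no locks are held */

        if (pmd_none(pmd) || pmd_trans_splitting(pmd))
                return 0;                       /* fall back to the slow path */
        if (pmd_large(pmd))                     /* huge entry: grab all pages at once */
                return gup_huge_pmd(pmd, addr, next, write, pages, nr);
        return gup_pte_range(pmd, addr, next, write, pages, nr);
}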
H A Dhugetlbpage.c45 int pmd_huge(pmd_t pmd)
58 * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal
62 int pmd_huge(pmd_t pmd) pmd_huge() argument
64 return !pmd_none(pmd) && pmd_huge()
65 (pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT; pmd_huge()
H A Dioremap.c387 pmd_t *pmd = pmd_offset(pud, addr); early_ioremap_pmd() local
389 return pmd; early_ioremap_pmd()
404 pmd_t *pmd; early_ioremap_init() local
414 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); early_ioremap_init()
416 pmd_populate_kernel(&init_mm, pmd, bm_pte); early_ioremap_init()
426 if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) { early_ioremap_init()
428 printk(KERN_WARNING "pmd %p != %p\n", early_ioremap_init()
429 pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))); early_ioremap_init()
/linux-4.1.27/arch/xtensa/mm/
H A Dmmu.c24 pmd_t *pmd = pmd_offset(pgd, vaddr); init_pmd() local
38 for (i = 0; i < n_pages; i += PTRS_PER_PTE, ++pmd) { init_pmd()
41 BUG_ON(!pmd_none(*pmd)); init_pmd()
42 set_pmd(pmd, __pmd(((unsigned long)cur_pte) & PAGE_MASK)); init_pmd()
43 BUG_ON(cur_pte != pte_offset_kernel(pmd, 0)); init_pmd()
44 pr_debug("%s: pmd: 0x%p, pte: 0x%p\n", init_pmd()
45 __func__, pmd, cur_pte); init_pmd()
/linux-4.1.27/arch/tile/include/asm/
H A Dpgalloc.h41 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) set_pmd() argument
44 set_pte(pmdp, pmd); set_pmd()
46 set_pte(&pmdp->pud.pgd, pmd.pud.pgd); set_pmd()
51 pmd_t *pmd, pte_t *ptep) pmd_populate_kernel()
53 set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(__pa(ptep)), pmd_populate_kernel()
57 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument
60 set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(PFN_PHYS(page_to_pfn(page))), pmd_populate()
86 #define pmd_pgtable(pmd) pmd_page(pmd)
118 void shatter_pmd(pmd_t *pmd);
125 #define pud_populate(mm, pud, pmd) \
126 pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd))
50 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *ptep) pmd_populate_kernel() argument
H A Dpgtable.h366 static inline int pmd_none(pmd_t pmd) pmd_none() argument
372 return (unsigned long)pmd_val(pmd) == 0; pmd_none()
375 static inline int pmd_present(pmd_t pmd) pmd_present() argument
377 return pmd_val(pmd) & _PAGE_PRESENT; pmd_present()
380 static inline int pmd_bad(pmd_t pmd) pmd_bad() argument
382 return ((pmd_val(pmd) & _PAGE_ALL) != _PAGE_TABLE); pmd_bad()
391 * The pmd can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
393 * This function returns the index of the entry in the pmd which would
432 /* Create a pmd from a PTFN. */ ptfn_pmd()
439 #define pmd_ptfn(pmd) hv_pte_get_ptfn(pmd_pte(pmd))
447 static inline unsigned long pmd_page_vaddr(pmd_t pmd) pmd_page_vaddr() argument
450 (phys_addr_t)pmd_ptfn(pmd) << HV_LOG2_PAGE_TABLE_ALIGN; pmd_page_vaddr()
461 #define pmd_page(pmd) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pmd_ptfn(pmd))))
468 #define pmd_mknotpresent(pmd) pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
469 #define pmd_young(pmd) pte_young(pmd_pte(pmd))
470 #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
471 #define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
472 #define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
473 #define pmd_write(pmd) pte_write(pmd_pte(pmd))
474 #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
475 #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
476 #define pmd_huge_page(pmd) pte_huge(pmd_pte(pmd))
477 #define pmd_mkhuge(pmd) pte_pmd(pte_mkhuge(pmd_pte(pmd)))
481 #define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd))
484 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) pmd_modify() argument
486 return pfn_pmd(pmd_pfn(pmd), newprot); pmd_modify()
493 static inline pmd_t pmd_mksplitting(pmd_t pmd) pmd_mksplitting() argument
495 return pte_pmd(hv_pte_set_client2(pmd_pte(pmd))); pmd_mksplitting()
498 static inline int pmd_trans_splitting(pmd_t pmd) pmd_trans_splitting() argument
500 return hv_pte_get_client2(pmd_pte(pmd)); pmd_trans_splitting()
515 static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) pte_offset_kernel() argument
517 return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address); pte_offset_kernel()
H A Dpgtable_32.h76 /* We have no pmd or pud since we are strictly a two-level page table */
115 #define pmd_pte(pmd) ((pmd).pud.pgd)
H A Dpgtable_64.h67 #define pmd_pte(pmd) (pmd)
89 pr_err("%s:%d: bad pmd 0x%016llx\n", __FILE__, __LINE__, pmd_val(e))
/linux-4.1.27/arch/openrisc/include/asm/
H A Dpgalloc.h30 #define pmd_populate_kernel(mm, pmd, pte) \
31 set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)))
33 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument
36 set_pmd(pmd, __pmd(_KERNPG_TABLE + pmd_populate()
104 #define pmd_pgtable(pmd) pmd_page(pmd)
/linux-4.1.27/lib/
H A Dioremap.c53 static int ioremap_pte_range(pmd_t *pmd, unsigned long addr, ioremap_pte_range() argument
60 pte = pte_alloc_kernel(pmd, addr); ioremap_pte_range()
74 pmd_t *pmd; ioremap_pmd_range() local
78 pmd = pmd_alloc(&init_mm, pud, addr); ioremap_pmd_range()
79 if (!pmd) ioremap_pmd_range()
87 if (pmd_set_huge(pmd, phys_addr + addr, prot)) ioremap_pmd_range()
91 if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot)) ioremap_pmd_range()
93 } while (pmd++, addr = next, addr != end); ioremap_pmd_range()
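ioremap_pmd_range() above first tries to install a huge pmd-level mapping via pmd_set_huge() and only builds a pte page when that is not possible. A hedged sketch of that decision for a single chunk, again treating the static ioremap_pte_range() as if it were visible; example_map_chunk is a hypothetical name and the pmd_capable test stands in for the real alignment checks:

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Sketch: map one chunk at the pmd level if we can, else via ptes. */
static int example_map_chunk(pmd_t *pmd, unsigned long addr, unsigned long next,
                             phys_addr_t phys_addr, pgprot_t prot)
{
        bool pmd_capable = ((addr | next | phys_addr) & ~PMD_MASK) == 0 &&
                           (next - addr) == PMD_SIZE;

        if (pmd_capable && pmd_set_huge(pmd, phys_addr, prot))
                return 0;                       /* one huge entry, no pte page needed */
        return ioremap_pte_range(pmd, addr, next, phys_addr, prot);
}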
/linux-4.1.27/mm/
H A Dhuge_memory.c174 static inline bool is_huge_zero_pmd(pmd_t pmd) is_huge_zero_pmd() argument
176 return is_huge_zero_page(pmd_page(pmd)); is_huge_zero_pmd()
702 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) maybe_pmd_mkwrite() argument
705 pmd = pmd_mkwrite(pmd); maybe_pmd_mkwrite()
706 return pmd; maybe_pmd_mkwrite()
719 unsigned long haddr, pmd_t *pmd, __do_huge_pmd_anonymous_page()
745 ptl = pmd_lock(mm, pmd); __do_huge_pmd_anonymous_page()
746 if (unlikely(!pmd_none(*pmd))) { __do_huge_pmd_anonymous_page()
758 pgtable_trans_huge_deposit(mm, pmd, pgtable); __do_huge_pmd_anonymous_page()
759 set_pmd_at(mm, haddr, pmd, entry); __do_huge_pmd_anonymous_page()
775 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, set_huge_zero_page()
779 if (!pmd_none(*pmd)) set_huge_zero_page()
783 pgtable_trans_huge_deposit(mm, pmd, pgtable); set_huge_zero_page()
784 set_pmd_at(mm, haddr, pmd, entry); set_huge_zero_page()
790 unsigned long address, pmd_t *pmd, do_huge_pmd_anonymous_page()
818 ptl = pmd_lock(mm, pmd); do_huge_pmd_anonymous_page()
819 set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd, do_huge_pmd_anonymous_page()
834 if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page, gfp))) { do_huge_pmd_anonymous_page()
850 pmd_t pmd; copy_huge_pmd() local
864 pmd = *src_pmd; copy_huge_pmd()
865 if (unlikely(!pmd_trans_huge(pmd))) { copy_huge_pmd()
870 * When page table lock is held, the huge zero pmd should not be copy_huge_pmd()
871 * under splitting since we don't split the page itself, only pmd to copy_huge_pmd()
874 if (is_huge_zero_pmd(pmd)) { copy_huge_pmd()
890 if (unlikely(pmd_trans_splitting(pmd))) { copy_huge_pmd()
899 src_page = pmd_page(pmd); copy_huge_pmd()
906 pmd = pmd_mkold(pmd_wrprotect(pmd)); copy_huge_pmd()
908 set_pmd_at(dst_mm, addr, dst_pmd, pmd); copy_huge_pmd()
922 pmd_t *pmd, pmd_t orig_pmd, huge_pmd_set_accessed()
929 ptl = pmd_lock(mm, pmd); huge_pmd_set_accessed()
930 if (unlikely(!pmd_same(*pmd, orig_pmd))) huge_pmd_set_accessed()
935 if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty)) huge_pmd_set_accessed()
936 update_mmu_cache_pmd(vma, address, pmd); huge_pmd_set_accessed()
946 * Called under pmd_lock of checked pmd, so safe from splitting itself.
976 pmd_t *pmd, pmd_t orig_pmd, do_huge_pmd_wp_page_fallback()
1029 ptl = pmd_lock(mm, pmd); do_huge_pmd_wp_page_fallback()
1030 if (unlikely(!pmd_same(*pmd, orig_pmd))) do_huge_pmd_wp_page_fallback()
1034 pmdp_clear_flush_notify(vma, haddr, pmd); do_huge_pmd_wp_page_fallback()
1035 /* leave pmd empty until pte is filled */ do_huge_pmd_wp_page_fallback()
1037 pgtable = pgtable_trans_huge_withdraw(mm, pmd); do_huge_pmd_wp_page_fallback()
1056 smp_wmb(); /* make pte visible before pmd */ do_huge_pmd_wp_page_fallback()
1057 pmd_populate(mm, pmd, pgtable); do_huge_pmd_wp_page_fallback()
1083 unsigned long address, pmd_t *pmd, pmd_t orig_pmd) do_huge_pmd_wp_page()
1094 ptl = pmd_lockptr(mm, pmd); do_huge_pmd_wp_page()
1100 if (unlikely(!pmd_same(*pmd, orig_pmd))) do_huge_pmd_wp_page()
1109 if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1)) do_huge_pmd_wp_page()
1110 update_mmu_cache_pmd(vma, address, pmd); do_huge_pmd_wp_page()
1126 split_huge_page_pmd(vma, address, pmd); do_huge_pmd_wp_page()
1130 pmd, orig_pmd, page, haddr); do_huge_pmd_wp_page()
1147 split_huge_page_pmd(vma, address, pmd); do_huge_pmd_wp_page()
1168 if (unlikely(!pmd_same(*pmd, orig_pmd))) { do_huge_pmd_wp_page()
1177 pmdp_clear_flush_notify(vma, haddr, pmd); do_huge_pmd_wp_page()
1181 set_pmd_at(mm, haddr, pmd, entry); do_huge_pmd_wp_page()
1182 update_mmu_cache_pmd(vma, address, pmd); do_huge_pmd_wp_page()
1205 pmd_t *pmd, follow_trans_huge_pmd()
1211 assert_spin_locked(pmd_lockptr(mm, pmd)); follow_trans_huge_pmd()
1213 if (flags & FOLL_WRITE && !pmd_write(*pmd)) follow_trans_huge_pmd()
1217 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) follow_trans_huge_pmd()
1221 if ((flags & FOLL_NUMA) && pmd_protnone(*pmd)) follow_trans_huge_pmd()
1224 page = pmd_page(*pmd); follow_trans_huge_pmd()
1230 * for now the dirty bit in the pmd is meaningless. follow_trans_huge_pmd()
1233 * set_bit will be required on the pmd to set the follow_trans_huge_pmd()
1236 _pmd = pmd_mkyoung(pmd_mkdirty(*pmd)); follow_trans_huge_pmd()
1238 pmd, _pmd, 1)) follow_trans_huge_pmd()
1239 update_mmu_cache_pmd(vma, addr, pmd); follow_trans_huge_pmd()
1260 unsigned long addr, pmd_t pmd, pmd_t *pmdp) do_huge_pmd_numa_page()
1277 if (unlikely(!pmd_same(pmd, *pmdp))) do_huge_pmd_numa_page()
1292 page = pmd_page(pmd); do_huge_pmd_numa_page()
1336 if (unlikely(!pmd_same(pmd, *pmdp))) { do_huge_pmd_numa_page()
1356 pmdp, pmd, addr, page, target_nid); do_huge_pmd_numa_page()
1366 was_writable = pmd_write(pmd); do_huge_pmd_numa_page()
1367 pmd = pmd_modify(pmd, vma->vm_page_prot); do_huge_pmd_numa_page()
1368 pmd = pmd_mkyoung(pmd); do_huge_pmd_numa_page()
1370 pmd = pmd_mkwrite(pmd); do_huge_pmd_numa_page()
1371 set_pmd_at(mm, haddr, pmdp, pmd); do_huge_pmd_numa_page()
1388 pmd_t *pmd, unsigned long addr) zap_huge_pmd()
1393 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { zap_huge_pmd()
1403 orig_pmd = pmdp_get_and_clear_full(tlb->mm, addr, pmd, zap_huge_pmd()
1405 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); zap_huge_pmd()
1406 pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd); zap_huge_pmd()
1434 pmd_t pmd; move_huge_pmd() local
1445 * The destination pmd shouldn't be established, free_pgtables() move_huge_pmd()
1462 pmd = pmdp_get_and_clear(mm, old_addr, old_pmd); move_huge_pmd()
1470 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); move_huge_pmd()
1485 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, change_huge_pmd() argument
1492 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { change_huge_pmd()
1494 bool preserve_write = prot_numa && pmd_write(*pmd); change_huge_pmd()
1502 if (prot_numa && is_huge_zero_pmd(*pmd)) { change_huge_pmd()
1507 if (!prot_numa || !pmd_protnone(*pmd)) { change_huge_pmd()
1508 entry = pmdp_get_and_clear_notify(mm, addr, pmd); change_huge_pmd()
1513 set_pmd_at(mm, addr, pmd, entry); change_huge_pmd()
1523 * Returns 1 if a given pmd maps a stable (not under splitting) thp.
1529 int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, __pmd_trans_huge_lock() argument
1532 *ptl = pmd_lock(vma->vm_mm, pmd); __pmd_trans_huge_lock()
1533 if (likely(pmd_trans_huge(*pmd))) { __pmd_trans_huge_lock()
1534 if (unlikely(pmd_trans_splitting(*pmd))) { __pmd_trans_huge_lock()
1536 wait_split_huge_page(vma->anon_vma, pmd); __pmd_trans_huge_lock()
1539 /* Thp mapped by 'pmd' is stable, so we can __pmd_trans_huge_lock()
1552 * When it's true, this function returns *pmd with holding the page table lock
1564 pmd_t *pmd; page_check_address_pmd() local
1575 pmd = pmd_offset(pud, address); page_check_address_pmd()
1577 *ptl = pmd_lock(mm, pmd); page_check_address_pmd()
1578 if (!pmd_present(*pmd)) page_check_address_pmd()
1580 if (pmd_page(*pmd) != page) page_check_address_pmd()
1584 * no risk as long as all huge pmd are found and have their page_check_address_pmd()
1586 * runs. Finding the same huge pmd more than once during the page_check_address_pmd()
1590 pmd_trans_splitting(*pmd)) page_check_address_pmd()
1592 if (pmd_trans_huge(*pmd)) { page_check_address_pmd()
1594 !pmd_trans_splitting(*pmd)); page_check_address_pmd()
1595 return pmd; page_check_address_pmd()
1608 pmd_t *pmd; __split_huge_page_splitting() local
1615 pmd = page_check_address_pmd(page, mm, address, __split_huge_page_splitting()
1617 if (pmd) { __split_huge_page_splitting()
1619 * We can't temporarily set the pmd to null in order __split_huge_page_splitting()
1620 * to split it, the pmd must remain marked huge at all __split_huge_page_splitting()
1625 pmdp_splitting_flush(vma, address, pmd); __split_huge_page_splitting()
1694 * splitting bit in all pmd that could map this __split_huge_page_refcount()
1702 * we transfer the mapcount, so the pmd splitting __split_huge_page_refcount()
1704 * pmd, not by clearing the present bit. __split_huge_page_refcount()
1756 pmd_t *pmd, _pmd; __split_huge_page_map() local
1761 pmd = page_check_address_pmd(page, mm, address, __split_huge_page_map()
1763 if (pmd) { __split_huge_page_map()
1764 pgtable = pgtable_trans_huge_withdraw(mm, pmd); __split_huge_page_map()
1766 if (pmd_write(*pmd)) __split_huge_page_map()
1780 if (!pmd_write(*pmd)) __split_huge_page_map()
1782 if (!pmd_young(*pmd)) __split_huge_page_map()
1790 smp_wmb(); /* make pte visible before pmd */ __split_huge_page_map()
1792 * Up to this point the pmd is present and huge and __split_huge_page_map()
1795 * overwrite the pmd with the not-huge version __split_huge_page_map()
1810 * mark the current pmd notpresent (atomically because __split_huge_page_map()
1812 * must remain set at all times on the pmd until the __split_huge_page_map()
1813 * split is complete for this pmd), then we flush the __split_huge_page_map()
1815 * of the pmd entry with pmd_populate. __split_huge_page_map()
1817 pmdp_invalidate(vma, address, pmd); __split_huge_page_map()
1818 pmd_populate(mm, pmd, pgtable); __split_huge_page_map()
1848 * and establishes a child pmd before __split_huge_page()
1849 * __split_huge_page_splitting() freezes the parent pmd (so if __split_huge_page()
1852 * the newly established pmd of the child later during the __split_huge_page()
2432 pmd_t *pmd, _pmd; collapse_huge_page() local
2477 pmd = mm_find_pmd(mm, address); collapse_huge_page()
2478 if (!pmd) collapse_huge_page()
2483 pte = pte_offset_map(pmd, address); collapse_huge_page()
2484 pte_ptl = pte_lockptr(mm, pmd); collapse_huge_page()
2489 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ collapse_huge_page()
2496 _pmd = pmdp_clear_flush(vma, address, pmd); collapse_huge_page()
2507 BUG_ON(!pmd_none(*pmd)); collapse_huge_page()
2513 pmd_populate(mm, pmd, pmd_pgtable(_pmd)); collapse_huge_page()
2541 BUG_ON(!pmd_none(*pmd)); collapse_huge_page()
2545 pgtable_trans_huge_deposit(mm, pmd, pgtable); collapse_huge_page()
2546 set_pmd_at(mm, address, pmd, _pmd); collapse_huge_page()
2547 update_mmu_cache_pmd(vma, address, pmd); collapse_huge_page()
2567 pmd_t *pmd; khugepaged_scan_pmd() local
2578 pmd = mm_find_pmd(mm, address); khugepaged_scan_pmd()
2579 if (!pmd) khugepaged_scan_pmd()
2583 pte = pte_offset_map_lock(mm, pmd, address, &ptl); khugepaged_scan_pmd()
2855 unsigned long haddr, pmd_t *pmd) __split_huge_zero_page_pmd()
2862 pmdp_clear_flush_notify(vma, haddr, pmd); __split_huge_zero_page_pmd()
2863 /* leave pmd empty until pte is filled */ __split_huge_zero_page_pmd()
2865 pgtable = pgtable_trans_huge_withdraw(mm, pmd); __split_huge_zero_page_pmd()
2877 smp_wmb(); /* make pte visible before pmd */ __split_huge_zero_page_pmd()
2878 pmd_populate(mm, pmd, pgtable); __split_huge_zero_page_pmd()
2883 pmd_t *pmd) __split_huge_page_pmd()
2898 ptl = pmd_lock(mm, pmd); __split_huge_page_pmd()
2899 if (unlikely(!pmd_trans_huge(*pmd))) { __split_huge_page_pmd()
2904 if (is_huge_zero_pmd(*pmd)) { __split_huge_page_pmd()
2905 __split_huge_zero_page_pmd(vma, haddr, pmd); __split_huge_page_pmd()
2910 page = pmd_page(*pmd); __split_huge_page_pmd()
2925 if (unlikely(pmd_trans_huge(*pmd))) __split_huge_page_pmd()
2930 pmd_t *pmd) split_huge_page_pmd_mm()
2936 split_huge_page_pmd(vma, address, pmd); split_huge_page_pmd_mm()
2944 pmd_t *pmd; split_huge_page_address() local
2956 pmd = pmd_offset(pud, address); split_huge_page_address()
2957 if (!pmd_present(*pmd)) split_huge_page_address()
2960 * Caller holds the mmap_sem write mode, so a huge pmd cannot split_huge_page_address()
2963 split_huge_page_pmd_mm(mm, address, pmd); split_huge_page_address()
2974 * a huge pmd. __vma_adjust_trans_huge()
2984 * a huge pmd. __vma_adjust_trans_huge()
2994 * contain a hugepage: check if we need to split a huge pmd. __vma_adjust_trans_huge()
717 __do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, struct page *page, gfp_t gfp) __do_huge_pmd_anonymous_page() argument
774 set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, struct page *zero_page) set_huge_zero_page() argument
789 do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, unsigned int flags) do_huge_pmd_anonymous_page() argument
919 huge_pmd_set_accessed(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pmd_t orig_pmd, int dirty) huge_pmd_set_accessed() argument
973 do_huge_pmd_wp_page_fallback(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pmd_t orig_pmd, struct page *page, unsigned long haddr) do_huge_pmd_wp_page_fallback() argument
1082 do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pmd_t orig_pmd) do_huge_pmd_wp_page() argument
1203 follow_trans_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, unsigned int flags) follow_trans_huge_pmd() argument
1259 do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pmd_t pmd, pmd_t *pmdp) do_huge_pmd_numa_page() argument
1387 zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr) zap_huge_pmd() argument
2854 __split_huge_zero_page_pmd(struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd) __split_huge_zero_page_pmd() argument
2882 __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd) __split_huge_page_pmd() argument
2929 split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address, pmd_t *pmd) split_huge_page_pmd_mm() argument
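Many of the huge_memory.c paths above funnel through __pmd_trans_huge_lock(): take the pmd lock and either operate on a stable huge pmd, wait out a concurrent split, or find the entry is not huge at all. A condensed sketch of a caller following that protocol, as zap_huge_pmd() and change_huge_pmd() do; example_touch_huge_pmd is a hypothetical name:

#include <linux/mm.h>
#include <linux/huge_mm.h>

/* Sketch: act on *pmd only while it is a stable transparent huge page. */
static int example_touch_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                                  unsigned long addr)
{
        spinlock_t *ptl;

        if (__pmd_trans_huge_lock(pmd, vma, &ptl) != 1)
                return 0;               /* not huge, or a split was waited out */
        /* ... the huge pmd is stable here; modify it under ptl ... */
        spin_unlock(ptl);
        return 1;
}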
H A Dmprotect.c34 * potential race with faulting where a pmd was temporarily none. This
35 * function checks for a transhuge pmd under the appropriate lock. It
39 static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd, lock_pte_protection() argument
47 return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); lock_pte_protection()
49 pmdl = pmd_lock(vma->vm_mm, pmd); lock_pte_protection()
50 if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) { lock_pte_protection()
55 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); lock_pte_protection()
60 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, change_pte_range() argument
69 pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl); change_pte_range()
138 pmd_t *pmd; change_pmd_range() local
145 pmd = pmd_offset(pud, addr); change_pmd_range()
150 if (!pmd_trans_huge(*pmd) && pmd_none_or_clear_bad(pmd)) change_pmd_range()
153 /* invoke the mmu notifier if the pmd is populated */ change_pmd_range()
159 if (pmd_trans_huge(*pmd)) { change_pmd_range()
161 split_huge_page_pmd(vma, addr, pmd); change_pmd_range()
163 int nr_ptes = change_huge_pmd(vma, pmd, addr, change_pmd_range()
172 /* huge pmd was handled */ change_pmd_range()
176 /* fall through, the trans huge pmd just split */ change_pmd_range()
178 this_pages = change_pte_range(vma, pmd, addr, next, newprot, change_pmd_range()
181 } while (pmd++, addr = next, addr != end); change_pmd_range()
H A Dpgtable-generic.c31 void pmd_clear_bad(pmd_t *pmd) pmd_clear_bad() argument
33 pmd_ERROR(*pmd); pmd_clear_bad()
34 pmd_clear(pmd); pmd_clear_bad()
127 pmd_t pmd; pmdp_clear_flush() local
129 pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp); pmdp_clear_flush()
131 return pmd; pmdp_clear_flush()
141 pmd_t pmd = pmd_mksplitting(*pmdp); pmdp_splitting_flush() local
143 set_pmd_at(vma->vm_mm, address, pmdp, pmd); pmdp_splitting_flush()
H A Dpagewalk.c6 static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, walk_pte_range() argument
12 pte = pte_offset_map(pmd, addr); walk_pte_range()
30 pmd_t *pmd; walk_pmd_range() local
34 pmd = pmd_offset(pud, addr); walk_pmd_range()
38 if (pmd_none(*pmd) || !walk->vma) { walk_pmd_range()
50 err = walk->pmd_entry(pmd, addr, next, walk); walk_pmd_range()
61 split_huge_page_pmd_mm(walk->mm, addr, pmd); walk_pmd_range()
62 if (pmd_trans_unstable(pmd)) walk_pmd_range()
64 err = walk_pte_range(pmd, addr, next, walk); walk_pmd_range()
67 } while (pmd++, addr = next, addr != end); walk_pmd_range()
230 * struct mm_walk keeps current values of some common data like vma and pmd,
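walk_pmd_range() above is driven by the struct mm_walk callbacks: a ->pmd_entry hook sees each pmd before the generic code descends to ptes. A hedged usage sketch against the 4.1-era walk_page_range() API; example_pmd_cb and example_count_pmds are hypothetical names and the callback merely counts populated entries:

#include <linux/mm.h>

static int example_pmd_cb(pmd_t *pmd, unsigned long addr,
                          unsigned long next, struct mm_walk *walk)
{
        unsigned long *count = walk->private;

        if (!pmd_none(*pmd))
                (*count)++;             /* one populated pmd entry */
        return 0;                       /* non-zero would abort the walk */
}

static unsigned long example_count_pmds(struct mm_struct *mm,
                                        unsigned long start, unsigned long end)
{
        unsigned long count = 0;
        struct mm_walk walk = {
                .pmd_entry = example_pmd_cb,
                .mm        = mm,
                .private   = &count,
        };

        down_read(&mm->mmap_sem);       /* walk_page_range() expects mmap_sem held */
        walk_page_range(start, end, &walk);
        up_read(&mm->mmap_sem);
        return count;
}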
H A Dsparse-vmemmap.c101 pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node) vmemmap_pte_populate() argument
103 pte_t *pte = pte_offset_kernel(pmd, addr); vmemmap_pte_populate()
117 pmd_t *pmd = pmd_offset(pud, addr); vmemmap_pmd_populate() local
118 if (pmd_none(*pmd)) { vmemmap_pmd_populate()
122 pmd_populate_kernel(&init_mm, pmd, p); vmemmap_pmd_populate()
124 return pmd; vmemmap_pmd_populate()
157 pmd_t *pmd; vmemmap_populate_basepages() local
167 pmd = vmemmap_pmd_populate(pud, addr, node); vmemmap_populate_basepages()
168 if (!pmd) vmemmap_populate_basepages()
170 pte = vmemmap_pte_populate(pmd, addr, node); vmemmap_populate_basepages()
H A Dmemory.c391 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, free_pte_range() argument
394 pgtable_t token = pmd_pgtable(*pmd); free_pte_range()
395 pmd_clear(pmd); free_pte_range()
404 pmd_t *pmd; free_pmd_range() local
409 pmd = pmd_offset(pud, addr); free_pmd_range()
412 if (pmd_none_or_clear_bad(pmd)) free_pmd_range()
414 free_pte_range(tlb, pmd, addr); free_pmd_range()
415 } while (pmd++, addr = next, addr != end); free_pmd_range()
428 pmd = pmd_offset(pud, start); free_pmd_range()
430 pmd_free_tlb(tlb, pmd, start); free_pmd_range()
564 pmd_t *pmd, unsigned long address) __pte_alloc()
587 ptl = pmd_lock(mm, pmd); __pte_alloc()
589 if (likely(pmd_none(*pmd))) { /* Has another populated it ? */ __pte_alloc()
591 pmd_populate(mm, pmd, new); __pte_alloc()
593 } else if (unlikely(pmd_trans_splitting(*pmd))) __pte_alloc()
599 wait_split_huge_page(vma->anon_vma, pmd); __pte_alloc()
603 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address) __pte_alloc_kernel() argument
612 if (likely(pmd_none(*pmd))) { /* Has another populated it ? */ __pte_alloc_kernel()
613 pmd_populate_kernel(&init_mm, pmd, new); __pte_alloc_kernel()
616 VM_BUG_ON(pmd_trans_splitting(*pmd)); __pte_alloc_kernel()
651 pmd_t *pmd = pmd_offset(pud, addr); print_bad_pte() local
682 "BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n", print_bad_pte()
684 (long long)pte_val(pte), (long long)pmd_val(*pmd)); print_bad_pte()
1073 struct vm_area_struct *vma, pmd_t *pmd, zap_pte_range()
1087 start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); zap_pte_range()
1189 pmd_t *pmd; zap_pmd_range() local
1192 pmd = pmd_offset(pud, addr); zap_pmd_range()
1195 if (pmd_trans_huge(*pmd)) { zap_pmd_range()
1206 split_huge_page_pmd(vma, addr, pmd); zap_pmd_range()
1207 } else if (zap_huge_pmd(tlb, vma, pmd, addr)) zap_pmd_range()
1213 * trans huge page faults running, and if the pmd is zap_pmd_range()
1218 if (pmd_none_or_trans_huge_or_clear_bad(pmd)) zap_pmd_range()
1220 next = zap_pte_range(tlb, vma, pmd, addr, next, details); zap_pmd_range()
1223 } while (pmd++, addr = next, addr != end); zap_pmd_range()
1424 pmd_t * pmd = pmd_alloc(mm, pud, addr); __get_locked_pte() local
1425 if (pmd) { __get_locked_pte()
1426 VM_BUG_ON(pmd_trans_huge(*pmd)); __get_locked_pte()
1427 return pte_alloc_map_lock(mm, pmd, addr, ptl); __get_locked_pte()
1621 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, remap_pte_range() argument
1628 pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); remap_pte_range()
1646 pmd_t *pmd; remap_pmd_range() local
1650 pmd = pmd_alloc(mm, pud, addr); remap_pmd_range()
1651 if (!pmd) remap_pmd_range()
1653 VM_BUG_ON(pmd_trans_huge(*pmd)); remap_pmd_range()
1656 if (remap_pte_range(mm, pmd, addr, next, remap_pmd_range()
1659 } while (pmd++, addr = next, addr != end); remap_pmd_range()
1798 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, apply_to_pte_range() argument
1808 pte_alloc_kernel(pmd, addr) : apply_to_pte_range()
1809 pte_alloc_map_lock(mm, pmd, addr, &ptl); apply_to_pte_range()
1813 BUG_ON(pmd_huge(*pmd)); apply_to_pte_range()
1817 token = pmd_pgtable(*pmd); apply_to_pte_range()
1836 pmd_t *pmd; apply_to_pmd_range() local
1842 pmd = pmd_alloc(mm, pud, addr); apply_to_pmd_range()
1843 if (!pmd) apply_to_pmd_range()
1847 err = apply_to_pte_range(mm, pmd, addr, next, fn, data); apply_to_pmd_range()
1850 } while (pmd++, addr = next, addr != end); apply_to_pmd_range()
1907 static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, pte_unmap_same() argument
1913 spinlock_t *ptl = pte_lockptr(mm, pmd); pte_unmap_same()
2060 unsigned long address, pte_t *page_table, pmd_t *pmd, wp_page_copy()
2094 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); wp_page_copy()
2190 pmd_t *pmd) wp_pfn_shared()
2205 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); wp_pfn_shared()
2221 pmd_t *pmd, spinlock_t *ptl, pte_t orig_pte,
2250 page_table = pte_offset_map_lock(mm, pmd, address, __releases()
2284 unsigned long address, pte_t *page_table, pmd_t *pmd,
2302 orig_pte, pmd); __releases()
2305 return wp_page_copy(mm, vma, address, page_table, pmd, __releases()
2318 page_table = pte_offset_map_lock(mm, pmd, address, __releases()
2342 return wp_page_shared(mm, vma, address, page_table, pmd, __releases()
2352 return wp_page_copy(mm, vma, address, page_table, pmd, __releases()
2445 unsigned long address, pte_t *page_table, pmd_t *pmd, do_swap_page()
2457 if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) do_swap_page()
2463 migration_entry_wait(mm, pmd, address); do_swap_page()
2482 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); do_swap_page()
2537 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); do_swap_page()
2596 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); do_swap_page()
2662 unsigned long address, pte_t *page_table, pmd_t *pmd, do_anonymous_page()
2684 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); do_anonymous_page()
2710 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); do_anonymous_page()
2922 unsigned long address, pmd_t *pmd, do_read_fault()
2936 pte = pte_offset_map_lock(mm, pmd, address, &ptl); do_read_fault()
2947 pte = pte_offset_map_lock(mm, pmd, address, &ptl); do_read_fault()
2962 unsigned long address, pmd_t *pmd, do_cow_fault()
2991 pte = pte_offset_map_lock(mm, pmd, address, &ptl); do_cow_fault()
3028 unsigned long address, pmd_t *pmd, do_shared_fault()
3056 pte = pte_offset_map_lock(mm, pmd, address, &ptl); do_shared_fault()
3097 unsigned long address, pte_t *page_table, pmd_t *pmd, do_fault()
3108 return do_read_fault(mm, vma, address, pmd, pgoff, flags, do_fault()
3111 return do_cow_fault(mm, vma, address, pmd, pgoff, flags, do_fault()
3113 return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); do_fault()
3132 unsigned long addr, pte_t pte, pte_t *ptep, pmd_t *pmd) do_numa_page()
3155 ptl = pte_lockptr(mm, pmd); do_numa_page()
3235 pte_t *pte, pmd_t *pmd, unsigned int flags) handle_pte_fault()
3253 return do_fault(mm, vma, address, pte, pmd, handle_pte_fault()
3256 return do_anonymous_page(mm, vma, address, pte, pmd, handle_pte_fault()
3260 pte, pmd, flags, entry); handle_pte_fault()
3264 return do_numa_page(mm, vma, address, entry, pte, pmd); handle_pte_fault()
3266 ptl = pte_lockptr(mm, pmd); handle_pte_fault()
3273 pte, pmd, ptl, entry); handle_pte_fault()
3305 pmd_t *pmd; __handle_mm_fault() local
3315 pmd = pmd_alloc(mm, pud, address); __handle_mm_fault()
3316 if (!pmd) __handle_mm_fault()
3318 if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) { __handle_mm_fault()
3322 pmd, flags); __handle_mm_fault()
3326 pmd_t orig_pmd = *pmd; __handle_mm_fault()
3334 * If the pmd is splitting, return and retry the __handle_mm_fault()
3343 orig_pmd, pmd); __handle_mm_fault()
3346 ret = do_huge_pmd_wp_page(mm, vma, address, pmd, __handle_mm_fault()
3351 huge_pmd_set_accessed(mm, vma, address, pmd, __handle_mm_fault()
3360 * run pte_offset_map on the pmd, if a huge pmd could __handle_mm_fault()
3363 if (unlikely(pmd_none(*pmd)) && __handle_mm_fault()
3364 unlikely(__pte_alloc(mm, vma, pmd, address))) __handle_mm_fault()
3367 * If a huge pmd materialized under us just retry later. Use __handle_mm_fault()
3368 * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd __handle_mm_fault()
3370 * a result of MADV_DONTNEED running immediately after a huge pmd fault __handle_mm_fault()
3373 * regular pmd that we can walk with pte_offset_map() and we can do that __handle_mm_fault()
3377 if (unlikely(pmd_trans_unstable(pmd))) __handle_mm_fault()
3380 * A regular pmd is established and it can't morph into a huge pmd __handle_mm_fault()
3385 pte = pte_offset_map(pmd, address); __handle_mm_fault()
3387 return handle_pte_fault(mm, vma, address, pte, pmd, flags); __handle_mm_fault()
3494 pmd_t *pmd; __follow_pte() local
3505 pmd = pmd_offset(pud, address); __follow_pte()
3506 VM_BUG_ON(pmd_trans_huge(*pmd)); __follow_pte()
3507 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) __follow_pte()
3511 if (pmd_huge(*pmd)) __follow_pte()
3514 ptep = pte_offset_map_lock(mm, pmd, address, ptlp); __follow_pte()
563 __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, pmd_t *pmd, unsigned long address) __pte_alloc() argument
1072 zap_pte_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, struct zap_details *details) zap_pte_range() argument
2059 wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, pte_t orig_pte, struct page *old_page) wp_page_copy() argument
2187 wp_pfn_shared(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, spinlock_t *ptl, pte_t orig_pte, pmd_t *pmd) wp_pfn_shared() argument
2444 do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, unsigned int flags, pte_t orig_pte) do_swap_page() argument
2661 do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, unsigned int flags) do_anonymous_page() argument
2921 do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pgoff_t pgoff, unsigned int flags, pte_t orig_pte) do_read_fault() argument
2961 do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pgoff_t pgoff, unsigned int flags, pte_t orig_pte) do_cow_fault() argument
3027 do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pgoff_t pgoff, unsigned int flags, pte_t orig_pte) do_shared_fault() argument
3096 do_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, unsigned int flags, pte_t orig_pte) do_fault() argument
3131 do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pte_t pte, pte_t *ptep, pmd_t *pmd) do_numa_page() argument
3233 handle_pte_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, unsigned int flags) handle_pte_fault() argument
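__pte_alloc() above is the standard race-tolerant way to instantiate a pte page: allocate outside the lock, then re-check the pmd under pmd_lock() because another thread may have populated it first. A hedged sketch of that double-checked pattern (the real code also orders the stores with a barrier and handles THP splitting); example_install_pte_page is a hypothetical name:

#include <linux/mm.h>
#include <asm/pgalloc.h>

/* Sketch: install a new pte page below *pmd, tolerating a racing winner. */
static int example_install_pte_page(struct mm_struct *mm, pmd_t *pmd,
                                    unsigned long addr)
{
        spinlock_t *ptl;
        pgtable_t new = pte_alloc_one(mm, addr);        /* may sleep; no locks held */

        if (!new)
                return -ENOMEM;

        ptl = pmd_lock(mm, pmd);
        if (pmd_none(*pmd)) {           /* still empty: we get to install it */
                pmd_populate(mm, pmd, new);
                new = NULL;
        }
        spin_unlock(ptl);
        if (new)                        /* somebody else won the race */
                pte_free(mm, new);
        return 0;
}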
H A Dgup.c36 unsigned long address, pmd_t *pmd, unsigned int flags) follow_page_pte()
44 if (unlikely(pmd_bad(*pmd))) follow_page_pte()
47 ptep = pte_offset_map_lock(mm, pmd, address, &ptl); follow_page_pte()
64 migration_entry_wait(mm, pmd, address); follow_page_pte()
149 pmd_t *pmd; follow_page_mask() local
178 pmd = pmd_offset(pud, address); follow_page_mask()
179 if (pmd_none(*pmd)) follow_page_mask()
181 if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) { follow_page_mask()
182 page = follow_huge_pmd(mm, address, pmd, flags); follow_page_mask()
187 if ((flags & FOLL_NUMA) && pmd_protnone(*pmd)) follow_page_mask()
189 if (pmd_trans_huge(*pmd)) { follow_page_mask()
191 split_huge_page_pmd(vma, address, pmd); follow_page_mask()
192 return follow_page_pte(vma, address, pmd, flags); follow_page_mask()
194 ptl = pmd_lock(mm, pmd); follow_page_mask()
195 if (likely(pmd_trans_huge(*pmd))) { follow_page_mask()
196 if (unlikely(pmd_trans_splitting(*pmd))) { follow_page_mask()
198 wait_split_huge_page(vma->anon_vma, pmd); follow_page_mask()
201 pmd, flags); follow_page_mask()
209 return follow_page_pte(vma, address, pmd, flags); follow_page_mask()
218 pmd_t *pmd; get_gate_page() local
232 pmd = pmd_offset(pud, address); get_gate_page()
233 if (pmd_none(*pmd)) get_gate_page()
235 VM_BUG_ON(pmd_trans_huge(*pmd)); get_gate_page()
236 pte = pte_offset_map(pmd, address); get_gate_page()
1007 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, gup_pte_range() argument
1013 ptem = ptep = pte_offset_map(&pmd, addr); gup_pte_range()
1066 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, gup_pte_range() argument
1213 pmd_t pmd = READ_ONCE(*pmdp); gup_pmd_range() local
1216 if (pmd_none(pmd) || pmd_trans_splitting(pmd)) gup_pmd_range()
1219 if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) { gup_pmd_range()
1225 if (pmd_protnone(pmd)) gup_pmd_range()
1228 if (!gup_huge_pmd(pmd, pmdp, addr, next, write, gup_pmd_range()
1232 } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) { gup_pmd_range()
1235 * pmd format and THP pmd format gup_pmd_range()
1237 if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr, gup_pmd_range()
1240 } else if (!gup_pte_range(pmd, addr, next, write, pages, nr)) gup_pmd_range()
35 follow_page_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, unsigned int flags) follow_page_pte() argument
H A Dmremap.c35 pmd_t *pmd; get_old_pmd() local
45 pmd = pmd_offset(pud, addr); get_old_pmd()
46 if (pmd_none(*pmd)) get_old_pmd()
49 return pmd; get_old_pmd()
57 pmd_t *pmd; alloc_new_pmd() local
64 pmd = pmd_alloc(mm, pud, addr); alloc_new_pmd()
65 if (!pmd) alloc_new_pmd()
68 VM_BUG_ON(pmd_trans_huge(*pmd)); alloc_new_pmd()
70 return pmd; alloc_new_pmd()
/linux-4.1.27/arch/microblaze/include/asm/
H A Dpgalloc.h61 extern void __bad_pte(pmd_t *pmd);
102 #define pmd_pgtable(pmd) pmd_page(pmd)
105 * We don't have any real pmd's, and this code never triggers because
174 #define pmd_populate(mm, pmd, pte) \
175 (pmd_val(*(pmd)) = (unsigned long)page_address(pte))
177 #define pmd_populate_kernel(mm, pmd, pte) \
178 (pmd_val(*(pmd)) = (unsigned long) (pte))
181 * We don't have any real pmd's, and this code never triggers because
187 #define pgd_populate(mm, pmd, pte) BUG()
H A Dpgtable.h169 printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \ pte_mkspecial()
301 #define pmd_none(pmd) (!pmd_val(pmd))
302 #define pmd_bad(pmd) ((pmd_val(pmd) & _PMD_PRESENT) == 0)
303 #define pmd_present(pmd) ((pmd_val(pmd) & _PMD_PRESENT) != 0)
318 * setup: the pgd is never bad, and a pmd always exists (as it's folded
467 /* Convert pmd entry to page */
468 /* our pmd entry is an effective address of pte table*/
469 /* returns effective address of the pmd entry*/
470 #define pmd_page_kernel(pmd) ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
472 /* returns struct *page of the pmd entry*/
473 #define pmd_page(pmd) (pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT))
/linux-4.1.27/arch/arm/kvm/
H A Dmmu.c91 static void kvm_flush_dcache_pmd(pmd_t pmd) kvm_flush_dcache_pmd() argument
93 __kvm_flush_dcache_pmd(pmd); kvm_flush_dcache_pmd()
110 * @pmd: pmd pointer for IPA
115 static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd) stage2_dissolve_pmd() argument
117 if (!kvm_pmd_huge(*pmd)) stage2_dissolve_pmd()
120 pmd_clear(pmd); stage2_dissolve_pmd()
122 put_page(virt_to_page(pmd)); stage2_dissolve_pmd()
176 static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr) clear_pmd_entry() argument
178 pte_t *pte_table = pte_offset_kernel(pmd, 0); clear_pmd_entry()
179 VM_BUG_ON(kvm_pmd_huge(*pmd)); clear_pmd_entry()
180 pmd_clear(pmd); clear_pmd_entry()
183 put_page(virt_to_page(pmd)); clear_pmd_entry()
206 static void unmap_ptes(struct kvm *kvm, pmd_t *pmd, unmap_ptes() argument
212 start_pte = pte = pte_offset_kernel(pmd, addr); unmap_ptes()
229 clear_pmd_entry(kvm, pmd, start_addr); unmap_ptes()
236 pmd_t *pmd, *start_pmd; unmap_pmds() local
238 start_pmd = pmd = pmd_offset(pud, addr); unmap_pmds()
241 if (!pmd_none(*pmd)) { unmap_pmds()
242 if (kvm_pmd_huge(*pmd)) { unmap_pmds()
243 pmd_t old_pmd = *pmd; unmap_pmds()
245 pmd_clear(pmd); unmap_pmds()
250 put_page(virt_to_page(pmd)); unmap_pmds()
252 unmap_ptes(kvm, pmd, addr, next); unmap_pmds()
255 } while (pmd++, addr = next, addr != end); unmap_pmds()
306 static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, stage2_flush_ptes() argument
311 pte = pte_offset_kernel(pmd, addr); stage2_flush_ptes()
321 pmd_t *pmd; stage2_flush_pmds() local
324 pmd = pmd_offset(pud, addr); stage2_flush_pmds()
327 if (!pmd_none(*pmd)) { stage2_flush_pmds()
328 if (kvm_pmd_huge(*pmd)) stage2_flush_pmds()
329 kvm_flush_dcache_pmd(*pmd); stage2_flush_pmds()
331 stage2_flush_ptes(kvm, pmd, addr, next); stage2_flush_pmds()
333 } while (pmd++, addr = next, addr != end); stage2_flush_pmds()
451 static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start, create_hyp_pte_mappings() argument
460 pte = pte_offset_kernel(pmd, addr); create_hyp_pte_mappings()
472 pmd_t *pmd; create_hyp_pmd_mappings() local
478 pmd = pmd_offset(pud, addr); create_hyp_pmd_mappings()
480 BUG_ON(pmd_sect(*pmd)); create_hyp_pmd_mappings()
482 if (pmd_none(*pmd)) { create_hyp_pmd_mappings()
488 pmd_populate_kernel(NULL, pmd, pte); create_hyp_pmd_mappings()
489 get_page(virt_to_page(pmd)); create_hyp_pmd_mappings()
490 kvm_flush_dcache_to_poc(pmd, sizeof(*pmd)); create_hyp_pmd_mappings()
495 create_hyp_pte_mappings(pmd, addr, next, pfn, prot); create_hyp_pmd_mappings()
507 pmd_t *pmd; create_hyp_pud_mappings() local
516 pmd = pmd_alloc_one(NULL, addr); create_hyp_pud_mappings()
517 if (!pmd) { create_hyp_pud_mappings()
518 kvm_err("Cannot allocate Hyp pmd\n"); create_hyp_pud_mappings()
521 pud_populate(NULL, pud, pmd); create_hyp_pud_mappings()
855 pmd_t *pmd; stage2_get_pmd() local
861 pmd = mmu_memory_cache_alloc(cache); stage2_get_pmd()
862 pud_populate(NULL, pud, pmd); stage2_get_pmd()
872 pmd_t *pmd, old_pmd; stage2_set_pmd_huge() local
874 pmd = stage2_get_pmd(kvm, cache, addr); stage2_set_pmd_huge()
875 VM_BUG_ON(!pmd); stage2_set_pmd_huge()
886 VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd)); stage2_set_pmd_huge()
888 old_pmd = *pmd; stage2_set_pmd_huge()
890 pmd_clear(pmd); stage2_set_pmd_huge()
893 get_page(virt_to_page(pmd)); stage2_set_pmd_huge()
896 kvm_set_pmd(pmd, *new_pmd); stage2_set_pmd_huge()
904 pmd_t *pmd; stage2_set_pte() local
912 pmd = stage2_get_pmd(kvm, cache, addr); stage2_set_pte()
913 if (!pmd) { stage2_set_pte()
926 stage2_dissolve_pmd(kvm, addr, pmd); stage2_set_pte()
929 if (pmd_none(*pmd)) { stage2_set_pte()
934 pmd_populate_kernel(NULL, pmd, pte); stage2_set_pte()
935 get_page(virt_to_page(pmd)); stage2_set_pte()
938 pte = pte_offset_kernel(pmd, addr); stage2_set_pte()
1051 * @pmd: pointer to pmd entry
1055 static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end) stage2_wp_ptes() argument
1059 pte = pte_offset_kernel(pmd, addr); stage2_wp_ptes()
1076 pmd_t *pmd; stage2_wp_pmds() local
1079 pmd = pmd_offset(pud, addr); stage2_wp_pmds()
1083 if (!pmd_none(*pmd)) { stage2_wp_pmds()
1084 if (kvm_pmd_huge(*pmd)) { stage2_wp_pmds()
1085 if (!kvm_s2pmd_readonly(pmd)) stage2_wp_pmds()
1086 kvm_set_s2pmd_readonly(pmd); stage2_wp_pmds()
1088 stage2_wp_ptes(pmd, addr, next); stage2_wp_pmds()
1091 } while (pmd++, addr = next, addr != end); stage2_wp_pmds()
1351 pmd_t *pmd; handle_access_fault() local
1360 pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa); handle_access_fault()
1361 if (!pmd || pmd_none(*pmd)) /* Nothing there */ handle_access_fault()
1364 if (kvm_pmd_huge(*pmd)) { /* THP, HugeTLB */ handle_access_fault()
1365 *pmd = pmd_mkyoung(*pmd); handle_access_fault()
1366 pfn = pmd_pfn(*pmd); handle_access_fault()
1371 pte = pte_offset_kernel(pmd, fault_ipa); handle_access_fault()
1565 pmd_t *pmd; kvm_age_hva_handler() local
1568 pmd = stage2_get_pmd(kvm, NULL, gpa); kvm_age_hva_handler()
1569 if (!pmd || pmd_none(*pmd)) /* Nothing there */ kvm_age_hva_handler()
1572 if (kvm_pmd_huge(*pmd)) { /* THP, HugeTLB */ kvm_age_hva_handler()
1573 if (pmd_young(*pmd)) { kvm_age_hva_handler()
1574 *pmd = pmd_mkold(*pmd); kvm_age_hva_handler()
1581 pte = pte_offset_kernel(pmd, gpa); kvm_age_hva_handler()
1595 pmd_t *pmd; kvm_test_age_hva_handler() local
1598 pmd = stage2_get_pmd(kvm, NULL, gpa); kvm_test_age_hva_handler()
1599 if (!pmd || pmd_none(*pmd)) /* Nothing there */ kvm_test_age_hva_handler()
1602 if (kvm_pmd_huge(*pmd)) /* THP, HugeTLB */ kvm_test_age_hva_handler()
1603 return pmd_young(*pmd); kvm_test_age_hva_handler()
1605 pte = pte_offset_kernel(pmd, gpa); kvm_test_age_hva_handler()
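
Editorial note: a recurring shape in the arm/kvm/mmu.c hits: when the pmd slot is empty, allocate a pte table, point the pmd at it, and take a reference on the page holding the pmd so teardown knows when the table becomes unused (compare stage2_set_pte() with clear_pmd_entry()). A small userspace sketch of that populate/teardown pairing with toy types follows; it models the bookkeeping only and is not the KVM API.

/* Toy populate/clear pairing modelled on stage2_set_pte()/clear_pmd_entry():
 * the pmd points at a pte table, and a refcount tracks live pmd entries. */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define PTRS_PER_PTE 512

typedef struct { uint64_t val; } pte_t;
typedef struct { pte_t *table; int refcount; } pmd_t;   /* toy layout */

static pte_t *pmd_populate(pmd_t *pmd)
{
    if (!pmd->table) {                       /* pmd_none() case */
        pmd->table = calloc(PTRS_PER_PTE, sizeof(pte_t));
        if (!pmd->table)
            return NULL;
        pmd->refcount++;                     /* get_page(virt_to_page(pmd)) analogue */
    }
    return pmd->table;
}

static void pmd_clear_entry(pmd_t *pmd)
{
    assert(pmd->table);
    free(pmd->table);                        /* pte table freed */
    pmd->table = NULL;
    pmd->refcount--;                         /* put_page() analogue */
}

int main(void)
{
    pmd_t pmd = { 0 };
    pte_t *ptes = pmd_populate(&pmd);
    assert(ptes && pmd.refcount == 1);
    ptes[42].val = 0xdeadbeef;               /* stand-in for set_pte() */
    pmd_clear_entry(&pmd);
    assert(pmd.refcount == 0 && pmd.table == NULL);
    return 0;
}
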
/linux-4.1.27/arch/sparc/mm/
H A Dhugetlbpage.c134 pmd_t *pmd; huge_pte_alloc() local
147 pmd = pmd_alloc(mm, pud, addr); huge_pte_alloc()
148 if (pmd) huge_pte_alloc()
149 pte = pte_alloc_map(mm, NULL, pmd, addr); huge_pte_alloc()
158 pmd_t *pmd; huge_pte_offset() local
167 pmd = pmd_offset(pud, addr); huge_pte_offset()
168 if (!pmd_none(*pmd)) huge_pte_offset()
169 pte = pte_offset_map(pmd, addr); huge_pte_offset()
218 int pmd_huge(pmd_t pmd) pmd_huge() argument
H A Dgup.c20 static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, gup_pte_range() argument
37 ptep = pte_offset_kernel(&pmd, addr); gup_pte_range()
69 static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, gup_huge_pmd() argument
76 if (!(pmd_val(pmd) & _PAGE_VALID)) gup_huge_pmd()
79 if (write && !pmd_write(pmd)) gup_huge_pmd()
83 head = pmd_page(pmd); gup_huge_pmd()
99 if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) { gup_huge_pmd()
126 pmd_t pmd = *pmdp; gup_pmd_range() local
129 if (pmd_none(pmd) || pmd_trans_splitting(pmd)) gup_pmd_range()
131 if (unlikely(pmd_large(pmd))) { gup_pmd_range()
132 if (!gup_huge_pmd(pmdp, pmd, addr, next, gup_pmd_range()
135 } else if (!gup_pte_range(pmd, addr, next, write, gup_pmd_range()
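
Editorial note: the sparc gup.c hits show the lockless fast-GUP idiom: read the pmd once into a local copy, do the speculative work against the copy, then re-read *pmdp and give up if it changed underneath (the `pmd_val(pmd) != pmd_val(*pmdp)` recheck above). A minimal sketch of that snapshot-and-recheck pattern using C11 atomics in place of the kernel's primitives:

/* Snapshot-then-recheck, as in sparc's gup_huge_pmd(): operate on a local
 * copy of the entry and discard the result if the live entry changed. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef _Atomic uint64_t pmd_slot_t;

static bool fast_walk(pmd_slot_t *pmdp)
{
    uint64_t pmd = atomic_load_explicit(pmdp, memory_order_acquire); /* READ_ONCE analogue */
    if (pmd == 0)
        return false;                 /* pmd_none(): nothing mapped */

    /* ... speculative work against the snapshot 'pmd' would go here ... */

    /* Recheck: if another thread rewrote the entry meanwhile, undo/give up. */
    if (atomic_load_explicit(pmdp, memory_order_acquire) != pmd)
        return false;
    return true;
}

int main(void)
{
    pmd_slot_t slot = 0x1000 | 1;     /* toy "present" entry */
    printf("stable walk: %s\n", fast_walk(&slot) ? "ok" : "retry");
    return 0;
}
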
H A Dleon_mm.c39 unsigned int pgd, pmd, ped; leon_swprobe() local
92 pmd = LEON_BYPASS_LOAD_PA(ptr); leon_swprobe()
93 if (((pmd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) { leon_swprobe()
95 printk(KERN_INFO "swprobe: pmd is entry level 2\n"); leon_swprobe()
97 pte = pmd; leon_swprobe()
98 paddrbase = pmd & _SRMMU_PTE_PMASK_LEON; leon_swprobe()
101 if (((pmd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) { leon_swprobe()
103 printk(KERN_INFO "swprobe: pmd is invalid => 0\n"); leon_swprobe()
108 printk(KERN_INFO "swprobe: --- pmd (%x) ---\n", pmd); leon_swprobe()
110 ptr = (pmd & SRMMU_PTD_PMASK) << 4; leon_swprobe()
H A Dtlb.c137 pmd_t pmd) tlb_batch_pmd_scan()
142 pte = pte_offset_map(&pmd, vaddr); tlb_batch_pmd_scan()
157 pmd_t *pmdp, pmd_t pmd) set_pmd_at()
161 *pmdp = pmd; set_pmd_at()
166 if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) { set_pmd_at()
167 if (pmd_val(pmd) & _PAGE_PMD_HUGE) set_pmd_at()
136 tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr, pmd_t pmd) tlb_batch_pmd_scan() argument
156 set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) set_pmd_at() argument
/linux-4.1.27/arch/powerpc/mm/
H A Dtlb_hash32.c93 pmd_t *pmd; flush_range() local
106 pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start); flush_range()
111 if (!pmd_none(*pmd)) { flush_range()
113 flush_hash_pages(ctx, start, pmd_val(*pmd), count); flush_range()
118 ++pmd; flush_range()
157 pmd_t *pmd; flush_tlb_page() local
164 pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr); flush_tlb_page()
165 if (!pmd_none(*pmd)) flush_tlb_page()
166 flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1); flush_tlb_page()
H A Dpgtable_64.c366 * For hugepage we have pfn in the pmd, we use PTE_RPN_SHIFT bits for flags
369 struct page *pmd_page(pmd_t pmd) pmd_page() argument
371 if (pmd_trans_huge(pmd) || pmd_huge(pmd)) pmd_page()
372 return pfn_to_page(pmd_pfn(pmd)); pmd_page()
373 return virt_to_page(pmd_page_vaddr(pmd)); pmd_page()
560 pmd_t pmd; pmdp_clear_flush() local
564 pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp); pmdp_clear_flush()
567 * khugepaged calls this for normal pmd pmdp_clear_flush()
569 pmd = *pmdp; pmdp_clear_flush()
574 * to hugepage, we first clear the pmd, then invalidate all pmdp_clear_flush()
576 * page fault will see a none pmd and take the slow path that pmdp_clear_flush()
589 * covered by pmd. This make sure we take a pmdp_clear_flush()
590 * fault and will find the pmd as none, which will pmdp_clear_flush()
596 flush_tlb_pmd_range(vma->vm_mm, &pmd, address); pmdp_clear_flush()
598 return pmd; pmdp_clear_flush()
622 * We mark the pmd splitting and invalidate all the hpte
671 * We want to put the pgtable in pmd and use pgtable for tracking
686 * before we set the hugepage PTE at pmd level pgtable_trans_huge_deposit()
714 * set a new huge pmd. We should not be called for updating
715 * an existing pmd entry. That should go via pmd_hugepage_update.
718 pmd_t *pmdp, pmd_t pmd) set_pmd_at()
724 WARN_ON(!pmd_trans_huge(pmd)); set_pmd_at()
726 trace_hugepage_set_pmd(addr, pmd_val(pmd)); set_pmd_at()
727 return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd)); set_pmd_at()
775 static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot) pmd_set_protbits() argument
777 pmd_val(pmd) |= pgprot_val(pgprot); pmd_set_protbits()
778 return pmd; pmd_set_protbits()
783 pmd_t pmd; pfn_pmd() local
786 * set. We use this to check THP page at pmd level. pfn_pmd()
789 pmd_val(pmd) = pfn << PTE_RPN_SHIFT; pfn_pmd()
790 pmd_val(pmd) |= _PAGE_THP_HUGE; pfn_pmd()
791 pmd = pmd_set_protbits(pmd, pgprot); pfn_pmd()
792 return pmd; pfn_pmd()
800 pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) pmd_modify() argument
803 pmd_val(pmd) &= _HPAGE_CHG_MASK; pmd_modify()
804 pmd = pmd_set_protbits(pmd, newprot); pmd_modify()
805 return pmd; pmd_modify()
815 pmd_t *pmd) update_mmu_cache_pmd()
831 * We have pmd == none and we are holding page_table_lock. pmdp_get_and_clear()
846 * pmd_t we want to prevent transit from pmd pointing to page table pmdp_get_and_clear()
847 * to pmd pointing to huge page (and back) while interrupts are disabled. pmdp_get_and_clear()
848 * We clear pmd to possibly replace it with page table pointer in pmdp_get_and_clear()
717 set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) set_pmd_at() argument
814 update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd) update_mmu_cache_pmd() argument
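
Editorial note: the powerpc pgtable_64.c hits (pfn_pmd(), pmd_set_protbits(), pmd_modify()) build a huge-pmd value by placing the pfn above the flag bits and OR-ing protection bits into the low bits; pmd_modify() keeps the pfn and huge marker and swaps the protection. A self-contained sketch with invented shift and mask values (not powerpc's real PTE_RPN_SHIFT/_HPAGE_CHG_MASK):

/* Building and re-protecting a huge pmd value; the bit layout is invented. */
#include <assert.h>
#include <stdint.h>

typedef struct { uint64_t val; } pmd_t;
typedef struct { uint64_t prot; } pgprot_t;

#define PFN_SHIFT        12ULL                          /* pfn lives above the flag bits */
#define _PAGE_THP_HUGE   (1ULL << 1)
#define _PAGE_RW         (1ULL << 2)
#define _CHG_MASK        (~0xfffULL | _PAGE_THP_HUGE)   /* bits preserved by modify */

static pmd_t pfn_pmd(uint64_t pfn, pgprot_t prot)
{
    pmd_t pmd = { (pfn << PFN_SHIFT) | _PAGE_THP_HUGE };
    pmd.val |= prot.prot;
    return pmd;
}

static pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
    pmd.val &= _CHG_MASK;          /* keep pfn + huge marker, drop old prot */
    pmd.val |= newprot.prot;
    return pmd;
}

int main(void)
{
    pgprot_t rw = { _PAGE_RW }, ro = { 0 };
    pmd_t pmd = pfn_pmd(0xabcde, rw);
    assert(pmd.val >> PFN_SHIFT == 0xabcde);
    assert(pmd.val & _PAGE_RW);
    pmd = pmd_modify(pmd, ro);
    assert(!(pmd.val & _PAGE_RW) && (pmd.val & _PAGE_THP_HUGE));
    return 0;
}
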
H A Dpgtable.c220 pmd_t *pmd; assert_pte_locked() local
228 pmd = pmd_offset(pud, addr); assert_pte_locked()
231 * pmd to none to force page fault/gup to take mmap_sem. After assert_pte_locked()
232 * pmd is set to none, we do a pte_clear which does this assertion assert_pte_locked()
233 * so if we find pmd none, return. assert_pte_locked()
235 if (pmd_none(*pmd)) assert_pte_locked()
237 BUG_ON(!pmd_present(*pmd)); assert_pte_locked()
238 assert_spin_locked(pte_lockptr(mm, pmd)); assert_pte_locked()
H A Dhugepage-hash64.c36 pmd_t pmd = READ_ONCE(*pmdp); __hash_page_thp() local
38 old_pmd = pmd_val(pmd); __hash_page_thp()
166 * Hypervisor failure. Restore old pmd and return -1 __hash_page_thp()
191 * huge pmd. __hash_page_thp()
H A Dsubpage-prot.c65 pmd_t *pmd; hpte_flush_range() local
75 pmd = pmd_offset(pud, addr); hpte_flush_range()
76 if (pmd_none(*pmd)) hpte_flush_range()
78 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); hpte_flush_range()
134 static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr, subpage_walk_pmd_entry() argument
138 split_huge_page_pmd(vma, addr, pmd); subpage_walk_pmd_entry()
H A Dhugetlbpage.c69 int pmd_huge(pmd_t pmd) pmd_huge() argument
74 return ((pmd_val(pmd) & 0x3) != 0x0); pmd_huge()
93 int pmd_huge(pmd_t pmd) pmd_huge() argument
541 pmd_t *pmd; hugetlb_free_pmd_range() local
547 pmd = pmd_offset(pud, addr); hugetlb_free_pmd_range()
549 if (!is_hugepd(__hugepd(pmd_val(*pmd)))) { hugetlb_free_pmd_range()
554 WARN_ON(!pmd_none_or_clear_bad(pmd)); hugetlb_free_pmd_range()
564 next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd)); hugetlb_free_pmd_range()
566 free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT, hugetlb_free_pmd_range()
581 pmd = pmd_offset(pud, start); hugetlb_free_pmd_range()
583 pmd_free_tlb(tlb, pmd, start); hugetlb_free_pmd_range()
726 pmd_t *pmd, int write) follow_huge_pmd()
976 pmd_t pmd, *pmdp; __find_linux_pte_or_hugepte() local
1019 pmd = READ_ONCE(*pmdp); __find_linux_pte_or_hugepte()
1022 * it mark the pmd none and do a hpte invalidate. __find_linux_pte_or_hugepte()
1028 if (pmd_none(pmd)) __find_linux_pte_or_hugepte()
1031 if (pmd_huge(pmd) || pmd_large(pmd)) { __find_linux_pte_or_hugepte()
1034 } else if (is_hugepd(__hugepd(pmd_val(pmd)))) __find_linux_pte_or_hugepte()
1035 hpdp = (hugepd_t *)&pmd; __find_linux_pte_or_hugepte()
1037 return pte_offset_kernel(&pmd, ea); __find_linux_pte_or_hugepte()
725 follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) follow_huge_pmd() argument
/linux-4.1.27/arch/ia64/mm/
H A Dhugetlbpage.c33 pmd_t *pmd; huge_pte_alloc() local
39 pmd = pmd_alloc(mm, pud, taddr); huge_pte_alloc()
40 if (pmd) huge_pte_alloc()
41 pte = pte_alloc_map(mm, NULL, pmd, taddr); huge_pte_alloc()
52 pmd_t *pmd; huge_pte_offset() local
59 pmd = pmd_offset(pud, taddr); huge_pte_offset()
60 if (pmd_present(*pmd)) huge_pte_offset()
61 pte = pte_offset_map(pmd, taddr); huge_pte_offset()
107 int pmd_huge(pmd_t pmd) pmd_huge() argument
H A Dinit.c216 pmd_t *pmd; put_kernel_page() local
229 pmd = pmd_alloc(&init_mm, pud, address); put_kernel_page()
230 if (!pmd) put_kernel_page()
232 pte = pte_alloc_kernel(pmd, address); put_kernel_page()
396 pmd_t *pmd; vmemmap_find_next_valid_pfn() local
411 pmd = pmd_offset(pud, end_address); vmemmap_find_next_valid_pfn()
412 if (pmd_none(*pmd)) { vmemmap_find_next_valid_pfn()
417 pte = pte_offset_kernel(pmd, end_address); vmemmap_find_next_valid_pfn()
444 pmd_t *pmd; create_mem_map_page_table() local
462 pmd = pmd_offset(pud, address); create_mem_map_page_table()
464 if (pmd_none(*pmd)) create_mem_map_page_table()
465 pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)); create_mem_map_page_table()
466 pte = pte_offset_kernel(pmd, address); create_mem_map_page_table()
H A Dfault.c52 pmd_t *pmd; mapped_kernel_page_is_present() local
63 pmd = pmd_offset(pud, address); mapped_kernel_page_is_present()
64 if (pmd_none(*pmd) || pmd_bad(*pmd)) mapped_kernel_page_is_present()
67 ptep = pte_offset_kernel(pmd, address); mapped_kernel_page_is_present()
/linux-4.1.27/arch/metag/mm/
H A Dinit.c47 pmd_t *pmd; insert_gateway_page() local
55 pmd = pmd_offset(pud, address); insert_gateway_page()
56 if (!pmd_present(*pmd)) { insert_gateway_page()
58 set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte))); insert_gateway_page()
61 pte = pte_offset_kernel(pmd, address); insert_gateway_page()
267 pmd_t *pmd; allocate_pgtables() local
278 pmd = (pmd_t *)pgd; allocate_pgtables()
279 for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) { allocate_pgtables()
282 if (!pmd_none(*pmd)) allocate_pgtables()
286 pmd_populate_kernel(&init_mm, pmd, pte); allocate_pgtables()
297 pmd_t *pmd; fixedrange_init() local
315 pmd = pmd_offset(pud, vaddr); fixedrange_init()
316 pte = pte_offset_kernel(pmd, vaddr); fixedrange_init()
H A Dhugetlbpage.c64 pmd_t *pmd; huge_pte_alloc() local
69 pmd = pmd_offset(pud, addr); huge_pte_alloc()
70 pte = pte_alloc_map(mm, NULL, pmd, addr); huge_pte_alloc()
81 pmd_t *pmd; huge_pte_offset() local
86 pmd = pmd_offset(pud, addr); huge_pte_offset()
87 pte = pte_offset_kernel(pmd, addr); huge_pte_offset()
97 int pmd_huge(pmd_t pmd) pmd_huge() argument
99 return pmd_page_shift(pmd) > PAGE_SHIFT; pmd_huge()
108 pmd_t *pmd, int write) follow_huge_pmd()
107 follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) follow_huge_pmd() argument
H A Dfault.c71 pmd_t *pmd, *pmd_k; do_page_fault() local
91 pmd = pmd_offset(pud, address); do_page_fault()
95 set_pmd(pmd, *pmd_k); do_page_fault()
/linux-4.1.27/arch/unicore32/include/asm/
H A Dpgalloc.h89 * Populate the pmdp entry with a pointer to the pte. This pmd is part
98 * The pmd must be loaded with the physical pmd_populate_kernel()
110 #define pmd_pgtable(pmd) pmd_page(pmd)
H A Dpgtable.h206 #define pmd_none(pmd) (!pmd_val(pmd)) pte_mkspecial()
207 #define pmd_present(pmd) (pmd_val(pmd) & PMD_PRESENT) pte_mkspecial()
208 #define pmd_bad(pmd) (((pmd_val(pmd) & \ pte_mkspecial()
223 #define pmd_page_vaddr(pmd) ((pte_t *)__va(pmd_val(pmd) & PAGE_MASK))
224 #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd)))
H A Dtlbflush.h135 static inline void flush_pmd_entry(pmd_t *pmd) flush_pmd_entry() argument
146 : : "r" (pmd) : "r1", "r2"); flush_pmd_entry()
150 : : "r" (pmd) : "cc"); flush_pmd_entry()
154 static inline void clean_pmd_entry(pmd_t *pmd) clean_pmd_entry() argument
159 : : "r" (__pa(pmd) & ~(L1_CACHE_BYTES - 1)) : "cc"); clean_pmd_entry()
163 : : "r" (pmd) : "cc"); clean_pmd_entry()
/linux-4.1.27/arch/cris/arch-v10/mm/
H A Dfault.c49 pmd_t *pmd; handle_mmu_bus_fault() local
84 pmd = (pmd_t *)(pgd + pgd_index(address)); handle_mmu_bus_fault()
85 if (pmd_none(*pmd)) handle_mmu_bus_fault()
87 pte = *pte_offset_kernel(pmd, address); handle_mmu_bus_fault()
/linux-4.1.27/arch/um/kernel/skas/
H A Dmmu.c22 pmd_t *pmd; init_stub_pte() local
30 pmd = pmd_alloc(mm, pud, proc); init_stub_pte()
31 if (!pmd) init_stub_pte()
34 pte = pte_alloc_map(mm, NULL, pmd, proc); init_stub_pte()
43 pmd_free(mm, pmd); init_stub_pte()
H A Duaccess.c21 pmd_t *pmd; virt_to_pte() local
34 pmd = pmd_offset(pud, addr); virt_to_pte()
35 if (!pmd_present(*pmd)) virt_to_pte()
38 return pte_offset_kernel(pmd, addr); virt_to_pte()
/linux-4.1.27/arch/x86/kernel/
H A Dmachine_kexec_32.c103 pgd_t *pgd, pmd_t *pmd, pte_t *pte, machine_kexec_page_table_set_one()
111 set_pgd(pgd, __pgd(__pa(pmd) | _PAGE_PRESENT)); machine_kexec_page_table_set_one()
114 pmd = pmd_offset(pud, vaddr); machine_kexec_page_table_set_one()
115 if (!(pmd_val(*pmd) & _PAGE_PRESENT)) machine_kexec_page_table_set_one()
116 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE)); machine_kexec_page_table_set_one()
117 pte = pte_offset_kernel(pmd, vaddr); machine_kexec_page_table_set_one()
124 pmd_t *pmd = NULL; machine_kexec_prepare_page_tables() local
128 pmd = image->arch.pmd0; machine_kexec_prepare_page_tables()
131 image->arch.pgd, pmd, image->arch.pte0, machine_kexec_prepare_page_tables()
134 pmd = image->arch.pmd1; machine_kexec_prepare_page_tables()
137 image->arch.pgd, pmd, image->arch.pte1, machine_kexec_prepare_page_tables()
102 machine_kexec_page_table_set_one( pgd_t *pgd, pmd_t *pmd, pte_t *pte, unsigned long vaddr, unsigned long paddr) machine_kexec_page_table_set_one() argument
H A Dmachine_kexec_64.c38 free_page((unsigned long)image->arch.pmd); free_transition_pgtable()
45 pmd_t *pmd; init_transition_pgtable() local
62 pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL); init_transition_pgtable()
63 if (!pmd) init_transition_pgtable()
65 image->arch.pmd = pmd; init_transition_pgtable()
66 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); init_transition_pgtable()
68 pmd = pmd_offset(pud, vaddr); init_transition_pgtable()
69 if (!pmd_present(*pmd)) { init_transition_pgtable()
74 set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE)); init_transition_pgtable()
76 pte = pte_offset_kernel(pmd, vaddr); init_transition_pgtable()
H A Despfix_64.c142 pmd_t pmd, *pmd_p; init_espfix_ap() local
181 pmd = *pmd_p; init_espfix_ap()
182 if (!pmd_present(pmd)) { init_espfix_ap()
184 pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask)); init_espfix_ap()
187 set_pmd(&pmd_p[n], pmd); init_espfix_ap()
190 pte_p = pte_offset_kernel(&pmd, addr); init_espfix_ap()
H A Dhead64.c60 pmdval_t pmd, *pmd_p; early_make_pgtable() local
104 pmd = (physaddr & PMD_MASK) + early_pmd_flags; early_make_pgtable()
105 pmd_p[pmd_index(address)] = pmd; early_make_pgtable()
/linux-4.1.27/arch/arm/lib/
H A Duaccess_with_memcpy.c30 pmd_t *pmd; pin_page_for_write() local
43 pmd = pmd_offset(pud, addr); pin_page_for_write()
44 if (unlikely(pmd_none(*pmd))) pin_page_for_write()
48 * A pmd can be bad if it refers to a HugeTLB or THP page. pin_page_for_write()
50 * Both THP and HugeTLB pages have the same pmd layout pin_page_for_write()
57 if (unlikely(pmd_thp_or_huge(*pmd))) { pin_page_for_write()
60 if (unlikely(!pmd_thp_or_huge(*pmd) pin_page_for_write()
61 || pmd_hugewillfault(*pmd) pin_page_for_write()
62 || pmd_trans_splitting(*pmd))) { pin_page_for_write()
72 if (unlikely(pmd_bad(*pmd))) pin_page_for_write()
75 pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl); pin_page_for_write()
/linux-4.1.27/arch/nios2/mm/
H A Dioremap.c50 static inline int remap_area_pmd(pmd_t *pmd, unsigned long address, remap_area_pmd() argument
64 pte_t *pte = pte_alloc_kernel(pmd, address); remap_area_pmd()
71 pmd++; remap_area_pmd()
90 pmd_t *pmd; remap_area_pages() local
96 pmd = pmd_alloc(&init_mm, pud, address); remap_area_pages()
97 if (!pmd) remap_area_pages()
99 if (remap_area_pmd(pmd, address, end - address, remap_area_pages()
/linux-4.1.27/arch/frv/mm/
H A Dpgalloc.c52 void __set_pmd(pmd_t *pmdptr, unsigned long pmd) __set_pmd() argument
57 if (!pmd) { __set_pmd()
61 BUG_ON(pmd & (0x3f00 | xAMPRx_SS | 0xe)); __set_pmd()
64 *__ste_p++ = pmd; __set_pmd()
65 pmd += __frv_PT_SIZE; __set_pmd()
75 * kernel pmd is shared. If PAE were not to share the pmd a similar
/linux-4.1.27/arch/parisc/kernel/
H A Dpci-dma.c110 static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr, map_pmd_uncached() argument
121 pte_t * pte = pte_alloc_kernel(pmd, vaddr); map_pmd_uncached()
128 pmd++; map_pmd_uncached()
141 pmd_t *pmd; map_uncached_pages() local
143 pmd = pmd_alloc(NULL, dir, vaddr); map_uncached_pages()
144 if (!pmd) map_uncached_pages()
146 if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr)) map_uncached_pages()
154 static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr, unmap_uncached_pte() argument
161 if (pmd_none(*pmd)) unmap_uncached_pte()
163 if (pmd_bad(*pmd)) { unmap_uncached_pte()
164 pmd_ERROR(*pmd); unmap_uncached_pte()
165 pmd_clear(pmd); unmap_uncached_pte()
168 pte = pte_offset_map(pmd, vaddr); unmap_uncached_pte()
193 pmd_t * pmd; unmap_uncached_pmd() local
204 pmd = pmd_offset(dir, vaddr); unmap_uncached_pmd()
210 unmap_uncached_pte(pmd, orig_vaddr, end - vaddr); unmap_uncached_pmd()
213 pmd++; unmap_uncached_pmd()
/linux-4.1.27/arch/mn10300/mm/
H A Dpgtable.c33 * The pmd must already be instantiated. Assumes PAE mode.
39 pmd_t *pmd; set_pmd_pfn() local
55 pmd = pmd_offset(pud, vaddr); set_pmd_pfn()
56 set_pmd(pmd, pfn_pmd(pfn, flags)); set_pmd_pfn()
94 * kernel pmd is shared. If PAE were not to share the pmd a similar
H A Dfault.c71 pmd_t *pmd;
82 pmd = pmd_offset(pgd, address);
83 printk(KERN_DEBUG "pmd entry %p: %016Lx\n",
84 pmd, (long long)pmd_val(*pmd));
86 if (!pmd_present(*pmd)) {
87 printk(KERN_DEBUG "... pmd not present!\n");
90 pte = pte_offset(pmd, address);
388 pmd_t *pmd, *pmd_k; do_page_fault() local
406 pmd = pmd_offset(pud, address); do_page_fault()
407 set_pmd(pmd, *pmd_k); do_page_fault()
H A Dcache-flush-icache.c56 pmd_t *pmd; flush_icache_page_range() local
73 pmd = pmd_offset(pud, start); flush_icache_page_range()
74 if (!pmd || !pmd_val(*pmd)) flush_icache_page_range()
77 ppte = pte_offset_map(pmd, start); flush_icache_page_range()
H A Dcache-inv-icache.c34 pmd_t *pmd; flush_icache_page_range() local
51 pmd = pmd_offset(pud, start); flush_icache_page_range()
52 if (!pmd || !pmd_val(*pmd)) flush_icache_page_range()
55 ppte = pte_offset_map(pmd, start); flush_icache_page_range()
/linux-4.1.27/arch/hexagon/include/asm/
H A Dpgalloc.h96 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd_populate() argument
103 set_pmd(pmd, __pmd(((unsigned long)page_to_pfn(pte) << PAGE_SHIFT) | pmd_populate()
109 * share the same pmd's for their kernel mappings, but the v0.3
116 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pmd_populate_kernel() argument
129 set_pmd(pmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE)); pmd_populate_kernel()
135 pmdindex = (pgd_t *)pmd - mm->pgd; pmd_populate_kernel()
H A Dpgtable.h255 * @pmd_entry: pmd entry
259 static inline int pmd_none(pmd_t pmd) pmd_none() argument
261 return pmd_val(pmd) == _NULL_PMD; pmd_none()
270 static inline int pmd_present(pmd_t pmd) pmd_present() argument
272 return pmd_val(pmd) != (unsigned long)_NULL_PMD; pmd_present()
280 static inline int pmd_bad(pmd_t pmd) pmd_bad() argument
288 #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
289 #define pmd_pgtable(pmd) pmd_page(pmd)
436 #define pte_offset_map_nested(pmd, addr) pte_offset_map(pmd, addr)
/linux-4.1.27/arch/arc/include/asm/
H A Dpgalloc.h39 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) pmd_populate_kernel() argument
41 pmd_set(pmd, pte); pmd_populate_kernel()
45 pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep) pmd_populate() argument
47 pmd_set(pmd, (pte_t *) ptep); pmd_populate()
137 #define pmd_pgtable(pmd) pmd_page_vaddr(pmd)
/linux-4.1.27/arch/tile/mm/
H A Dhugetlbpage.c71 pmd_t *pmd = pmd_alloc(mm, pud, addr); huge_pte_alloc() local
75 return (pte_t *)pmd; huge_pte_alloc()
80 return pte_alloc_map(mm, NULL, pmd, addr); huge_pte_alloc()
108 pmd_t *pmd; huge_pte_offset() local
129 pmd = (pmd_t *)get_pte((pte_t *)pud_page_vaddr(*pud), huge_pte_offset()
131 if (!pmd_present(*pmd)) huge_pte_offset()
134 pmd = pmd_offset(pud, addr); huge_pte_offset()
138 if (pmd_huge(*pmd)) huge_pte_offset()
139 return (pte_t *)pmd; huge_pte_offset()
143 pte = get_pte((pte_t *)pmd_page_vaddr(*pmd), pte_index(addr), 2); huge_pte_offset()
153 int pmd_huge(pmd_t pmd) pmd_huge() argument
155 return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE); pmd_huge()
H A Dpgtable.c102 pmd_t *pmd; shatter_huge_page() local
108 /* Get a pointer to the pmd entry that we need to change. */ shatter_huge_page()
115 pmd = pmd_offset(pud, addr); shatter_huge_page()
116 BUG_ON(!pmd_present(*pmd)); shatter_huge_page()
117 if (!pmd_huge_page(*pmd)) shatter_huge_page()
121 if (!pmd_huge_page(*pmd)) { shatter_huge_page()
128 pmd_populate_kernel(&init_mm, pmd, get_prealloc_pte(pmd_pfn(*pmd))); shatter_huge_page()
131 /* Walk every pgd on the system and update the pmd there. */ shatter_huge_page()
138 __set_pmd(copy_pmd, *pmd); shatter_huge_page()
344 pmd_t *pmd; virt_to_pte() local
355 pmd = pmd_offset(pud, addr); virt_to_pte()
356 if (!pmd_present(*pmd)) virt_to_pte()
358 if (pmd_huge_page(*pmd)) virt_to_pte()
359 return (pte_t *)pmd; virt_to_pte()
360 return pte_offset_kernel(pmd, addr); virt_to_pte()
H A Dinit.c118 static void __init assign_pte(pmd_t *pmd, pte_t *page_table) assign_pte() argument
125 *(pte_t *)pmd = pteval; assign_pte()
126 if (page_table != (pte_t *)pmd_page_vaddr(*pmd)) assign_pte()
137 static inline void assign_pmd(pud_t *pud, pmd_t *pmd) assign_pmd() argument
139 assign_pte((pmd_t *)pud, (pte_t *)pmd); assign_pmd()
144 /* Replace the given pmd with a full PTE table. */ shatter_pmd()
145 void __init shatter_pmd(pmd_t *pmd) shatter_pmd() argument
147 pte_t *pte = get_prealloc_pte(pte_pfn(*(pte_t *)pmd)); shatter_pmd()
148 assign_pte(pmd, pte); shatter_pmd()
184 pmd_t *pmd = get_pmd(pgd, vaddr); page_table_range_init() local
185 if (pmd_none(*pmd)) page_table_range_init()
186 assign_pte(pmd, alloc_pte()); page_table_range_init()
409 pmd_t *pmd; kernel_physical_mapping_init() local
461 pmd = get_pmd(pgtables, address); kernel_physical_mapping_init()
465 *(pte_t *)pmd = pte_mkhuge(pfn_pte(pfn, prot)); kernel_physical_mapping_init()
478 assign_pte(pmd, pte); kernel_physical_mapping_init()
508 pmd = get_pmd(pgtables, address); kernel_physical_mapping_init()
538 assign_pte(pmd++, pte); kernel_physical_mapping_init()
550 assign_pte(pmd, pte); kernel_physical_mapping_init()
573 *(pte_t *)(pmd++) = pfn_pte(pfn, pteval); kernel_physical_mapping_init()
641 pmd_t *pmd; permanent_kmaps_init() local
650 pmd = pmd_offset(pud, vaddr); permanent_kmaps_init()
651 pte = pte_offset_kernel(pmd, vaddr); permanent_kmaps_init()
/linux-4.1.27/arch/m32r/mm/
H A Dmmu.S149 or r3, r2 ; r3: pmd addr
151 ; pmd = pmd_offset(pgd, address);
152 ld r3, @r3 ; r3: pmd data
153 beqz r3, 3f ; pmd_none(*pmd) ?
157 bnez r2, 3f ; pmd_bad(*pmd) ?
160 ; pte = pte_offset(pmd, address);
266 ; pmd = pmd_offset(pgd, address);
267 ld r1, @r3 ; r1: pmd
268 beqz r1, 3f ; pmd_none(*pmd) ?
272 bne r1, r4, 3f ; pmd_bad(*pmd) ?
276 ; pte = pte_offset(pmd, address);
/linux-4.1.27/arch/m68k/mm/
H A Dkmap.c200 pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr; __ioremap()
265 int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK; __iounmap()
268 pmd_dir->pmd[pmd_off] = 0; __iounmap()
277 printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir)); __iounmap()
344 if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) { kernel_set_cachemode()
345 pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] & kernel_set_cachemode()
354 printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir)); kernel_set_cachemode()
H A Dmcfmmu.c89 pmd_t *pmd; cf_tlb_miss() local
110 pmd = pmd_offset(pgd, mmuar); cf_tlb_miss()
111 if (pmd_none(*pmd)) { cf_tlb_miss()
116 pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar) cf_tlb_miss()
117 : pte_offset_map(pmd, mmuar); cf_tlb_miss()
H A Dmotorola.c72 unsigned long pmd, last; kernel_ptr_table() local
83 pmd = __pgd_page(kernel_pg_dir[i]); kernel_ptr_table()
84 if (pmd > last) kernel_ptr_table()
85 last = pmd; kernel_ptr_table()
158 pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr; map_node()
167 pmd_dir->pmd[0] = virt_to_phys(pte_dir) | map_node()
/linux-4.1.27/fs/proc/
H A Dtask_mmu.c507 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, smaps_pmd_entry() argument
515 page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP); smaps_pmd_entry()
520 pmd_young(*pmd), pmd_dirty(*pmd)); smaps_pmd_entry()
523 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, smaps_pmd_entry() argument
529 static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, smaps_pte_range() argument
536 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { smaps_pte_range()
537 smaps_pmd_entry(pmd, addr, walk); smaps_pte_range()
542 if (pmd_trans_unstable(pmd)) smaps_pte_range()
549 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); smaps_pte_range()
768 pmd_t pmd = *pmdp; clear_soft_dirty_pmd() local
770 pmd = pmd_wrprotect(pmd); clear_soft_dirty_pmd()
771 pmd = pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY); clear_soft_dirty_pmd()
776 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); clear_soft_dirty_pmd()
792 static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, clear_refs_pte_range() argument
801 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { clear_refs_pte_range()
803 clear_soft_dirty_pmd(vma, addr, pmd); clear_refs_pte_range()
807 page = pmd_page(*pmd); clear_refs_pte_range()
810 pmdp_test_and_clear_young(vma, addr, pmd); clear_refs_pte_range()
817 if (pmd_trans_unstable(pmd)) clear_refs_pte_range()
820 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); clear_refs_pte_range()
1085 pmd_t pmd, int offset, int pmd_flags2) thp_pmd_to_pagemap_entry()
1088 * Currently pmd for thp is always present because thp can not be thp_pmd_to_pagemap_entry()
1092 if (pmd_present(pmd)) thp_pmd_to_pagemap_entry()
1093 *pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset) thp_pmd_to_pagemap_entry()
1100 pmd_t pmd, int offset, int pmd_flags2) thp_pmd_to_pagemap_entry()
1105 static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pagemap_pte_range() argument
1114 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { pagemap_pte_range()
1117 if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd)) pagemap_pte_range()
1128 thp_pmd_to_pagemap_entry(&pme, pm, *pmd, offset, pmd_flags2); pagemap_pte_range()
1137 if (pmd_trans_unstable(pmd)) pagemap_pte_range()
1144 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); pagemap_pte_range()
1412 static int gather_pte_stats(pmd_t *pmd, unsigned long addr, gather_pte_stats() argument
1421 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { gather_pte_stats()
1422 pte_t huge_pte = *(pte_t *)pmd; gather_pte_stats()
1433 if (pmd_trans_unstable(pmd)) gather_pte_stats()
1435 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); gather_pte_stats()
1084 thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, pmd_t pmd, int offset, int pmd_flags2) thp_pmd_to_pagemap_entry() argument
1099 thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, pmd_t pmd, int offset, int pmd_flags2) thp_pmd_to_pagemap_entry() argument
/linux-4.1.27/arch/alpha/mm/
H A Dinit.c148 pmd_t *pmd; callback_init() local
187 pmd = pmd_offset(pgd, VMALLOC_START); callback_init()
188 pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE)); callback_init()
216 if (pmd != pmd_offset(pgd, vaddr)) { callback_init()
218 pmd = pmd_offset(pgd, vaddr); callback_init()
219 pmd_set(pmd, (pte_t *)kernel_end); callback_init()
222 set_pte(pte_offset_kernel(pmd, vaddr), callback_init()
/linux-4.1.27/arch/parisc/mm/
H A Dinit.c39 * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
43 pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
399 pmd_t *pmd; map_pages() local
435 pmd = (pmd_t *)__pa(pg_dir); map_pages()
437 pmd = (pmd_t *)pgd_address(*pg_dir); map_pages()
440 * pmd is physical at this point map_pages()
443 if (!pmd) { map_pages()
444 pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE << PMD_ORDER); map_pages()
445 pmd = (pmd_t *) __pa(pmd); map_pages()
448 pgd_populate(NULL, pg_dir, __va(pmd)); map_pages()
452 /* now change pmd to kernel virtual addresses */ map_pages()
454 pmd = (pmd_t *)__va(pmd) + start_pmd; map_pages()
455 for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) { map_pages()
461 pg_table = (pte_t *)pmd_address(*pmd); map_pages()
468 pmd_populate_kernel(NULL, pmd, __va(pg_table)); map_pages()
/linux-4.1.27/arch/arm/mach-shmobile/
H A Dpm-rmobile.c395 struct device_node *np, *pmd; rmobile_init_pm_domains() local
407 pmd = of_get_child_by_name(np, "pm-domains"); rmobile_init_pm_domains()
408 if (!pmd) { rmobile_init_pm_domains()
419 ret = rmobile_add_pm_domains(base, pmd, NULL); rmobile_init_pm_domains()
420 of_node_put(pmd); rmobile_init_pm_domains()
/linux-4.1.27/arch/m68k/sun3x/
H A Ddvma.c101 pmd_t *pmd; dvma_map_cpu() local
104 if((pmd = pmd_alloc(&init_mm, pgd, vaddr)) == NULL) { dvma_map_cpu()
118 if((pte = pte_alloc_kernel(pmd, vaddr)) == NULL) { dvma_map_cpu()
/linux-4.1.27/arch/arc/mm/
H A Dfault.c29 pmd_t *pmd, *pmd_k; handle_vmalloc_fault() local
42 pmd = pmd_offset(pud, address); handle_vmalloc_fault()
47 set_pmd(pmd, *pmd_k); handle_vmalloc_fault()
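
Editorial note: the arc fault.c hit (and the earlier metag and mn10300 do_page_fault() hits) handles a fault in kernel/vmalloc space by walking to the pmd in both the faulting task's page table and the master kernel table, then copying the kernel entry across with set_pmd(pmd, *pmd_k). A toy model of that one-entry sync, with locally defined tables rather than the kernel's:

/* Toy "vmalloc fault" fixup: copy the master kernel pmd entry into the
 * current task's table, as the fault handlers above do. */
#include <assert.h>
#include <stdint.h>

#define PTRS_PER_PMD 512

typedef struct { uint64_t val; } pmd_t;

/* Returns 0 on success, -1 if the kernel table has nothing there either. */
static int sync_kernel_pmd(pmd_t *task_pmds, const pmd_t *kernel_pmds,
                           unsigned long index)
{
    if (kernel_pmds[index].val == 0)       /* pmd_none() in the master table */
        return -1;                         /* genuine bad access */
    task_pmds[index] = kernel_pmds[index]; /* set_pmd(pmd, *pmd_k) analogue */
    return 0;
}

int main(void)
{
    pmd_t kernel[PTRS_PER_PMD] = { [3].val = 0xc0000003 };
    pmd_t task[PTRS_PER_PMD]   = { 0 };

    assert(sync_kernel_pmd(task, kernel, 3) == 0);
    assert(task[3].val == kernel[3].val);
    assert(sync_kernel_pmd(task, kernel, 4) == -1);
    return 0;
}
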
