newpage          1766 drivers/misc/vmw_balloon.c 				 struct page *newpage, struct page *page,
newpage          1817 drivers/misc/vmw_balloon.c 	vmballoon_add_page(b, 0, newpage);
newpage          1822 drivers/misc/vmw_balloon.c 		status = vmballoon_status_page(b, 0, &newpage);
newpage          1840 drivers/misc/vmw_balloon.c 		get_page(newpage);
newpage          1853 drivers/misc/vmw_balloon.c 		balloon_page_insert(&b->b_dev_info, newpage);
newpage          3558 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	struct page *newpage;
newpage          3613 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL |
newpage          3615 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		if (!newpage) {
newpage          3621 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		sg_set_page(iter, newpage, page_size << page_order, 0);
newpage          3640 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		newpage = sg_page(iter);
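
The cxgb4 hits above allocate fresh pages on a NUMA node, attach each one to a scatterlist entry, and later recover them with sg_page() for cleanup. A minimal sketch of that pattern, assuming a made-up helper name (fill_sgl) and PAGE_SIZE-based sizing in place of cxgb4's local page_size variable:

#include <linux/gfp.h>
#include <linux/scatterlist.h>

/* Hypothetical: back every entry of an initialized scatterlist with a
 * freshly allocated compound page on the given node.
 */
static int fill_sgl(struct scatterlist *sgl, unsigned int nents,
		    int node, unsigned int page_order)
{
	struct scatterlist *iter;
	struct page *newpage;
	int i;

	for_each_sg(sgl, iter, nents, i) {
		newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL,
					   page_order);
		if (!newpage)
			return -ENOMEM;	/* caller unwinds via sg_page(), as cxgb4 does */
		sg_set_page(iter, newpage, PAGE_SIZE << page_order, 0);
	}
	return 0;
}
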
newpage           709 drivers/virtio/virtio_balloon.c 		struct page *newpage, struct page *page, enum migrate_mode mode)
newpage           726 drivers/virtio/virtio_balloon.c 	get_page(newpage); /* balloon reference */
newpage           734 drivers/virtio/virtio_balloon.c 	    page_zone(page) != page_zone(newpage)) {
newpage           736 drivers/virtio/virtio_balloon.c 		adjust_managed_page_count(newpage, -1);
newpage           741 drivers/virtio/virtio_balloon.c 	balloon_page_insert(vb_dev_info, newpage);
newpage           746 drivers/virtio/virtio_balloon.c 	set_page_pfns(vb, vb->pfns, newpage);
newpage           922 fs/btrfs/disk-io.c 			struct page *newpage, struct page *page,
newpage           938 fs/btrfs/disk-io.c 	return migrate_page(mapping, newpage, page, mode);
newpage           235 fs/cachefiles/rdwr.c 	struct page *newpage, *backpage;
newpage           254 fs/cachefiles/rdwr.c 	newpage = NULL;
newpage           261 fs/cachefiles/rdwr.c 		if (!newpage) {
newpage           262 fs/cachefiles/rdwr.c 			newpage = __page_cache_alloc(cachefiles_gfp);
newpage           263 fs/cachefiles/rdwr.c 			if (!newpage)
newpage           267 fs/cachefiles/rdwr.c 		ret = add_to_page_cache_lru(newpage, bmapping,
newpage           278 fs/cachefiles/rdwr.c 	_debug("- new %p", newpage);
newpage           280 fs/cachefiles/rdwr.c 	backpage = newpage;
newpage           281 fs/cachefiles/rdwr.c 	newpage = NULL;
newpage           314 fs/cachefiles/rdwr.c 	if (newpage) {
newpage           315 fs/cachefiles/rdwr.c 		put_page(newpage);
newpage           316 fs/cachefiles/rdwr.c 		newpage = NULL;
newpage           368 fs/cachefiles/rdwr.c 	put_page(newpage);
newpage           471 fs/cachefiles/rdwr.c 	struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
newpage           497 fs/cachefiles/rdwr.c 			if (!newpage) {
newpage           498 fs/cachefiles/rdwr.c 				newpage = __page_cache_alloc(cachefiles_gfp);
newpage           499 fs/cachefiles/rdwr.c 				if (!newpage)
newpage           503 fs/cachefiles/rdwr.c 			ret = add_to_page_cache_lru(newpage, bmapping,
newpage           515 fs/cachefiles/rdwr.c 		_debug("- new %p", newpage);
newpage           517 fs/cachefiles/rdwr.c 		backpage = newpage;
newpage           518 fs/cachefiles/rdwr.c 		newpage = NULL;
newpage           641 fs/cachefiles/rdwr.c 	if (newpage)
newpage           642 fs/cachefiles/rdwr.c 		put_page(newpage);
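
Both cachefiles functions above use the same allocate-then-insert idiom: keep one spare page in newpage, try to add it to the backing mapping, and if add_to_page_cache_lru() returns -EEXIST retry the lookup because another thread won the race; any unused spare is released at the end. Condensed into a sketch (bmapping and index stand in for the real locals):

	struct page *newpage = NULL, *backpage;
	int ret;

	for (;;) {
		backpage = find_get_page(bmapping, index);
		if (backpage)
			break;			/* already present */

		if (!newpage) {
			newpage = __page_cache_alloc(cachefiles_gfp);
			if (!newpage)
				return -ENOMEM;
		}

		ret = add_to_page_cache_lru(newpage, bmapping, index,
					    cachefiles_gfp);
		if (ret == 0) {
			backpage = newpage;	/* our page went in */
			newpage = NULL;
			break;
		}
		if (ret != -EEXIST)
			return ret;
		/* -EEXIST: lost the race, look the winner up again */
	}

	if (newpage)
		put_page(newpage);		/* spare was never used */
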
newpage           660 fs/erofs/zdata.c 		struct page *const newpage =
newpage           663 fs/erofs/zdata.c 		err = z_erofs_attach_page(clt, newpage,
newpage          2979 fs/f2fs/data.c 		struct page *newpage, struct page *page, enum migrate_mode mode)
newpage          2997 fs/f2fs/data.c 	rc = migrate_page_move_mapping(mapping, newpage,
newpage          3009 fs/f2fs/data.c 				cur->page = newpage;
newpage          3014 fs/f2fs/data.c 		get_page(newpage);
newpage          3018 fs/f2fs/data.c 		f2fs_set_page_private(newpage, page_private(page));
newpage          3023 fs/f2fs/data.c 		migrate_page_copy(newpage, page);
newpage          3025 fs/f2fs/data.c 		migrate_page_states(newpage, page);
newpage          3237 fs/f2fs/f2fs.h int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
newpage           786 fs/fuse/dev.c  	struct page *newpage;
newpage           811 fs/fuse/dev.c  	newpage = buf->page;
newpage           813 fs/fuse/dev.c  	if (!PageUptodate(newpage))
newpage           814 fs/fuse/dev.c  		SetPageUptodate(newpage);
newpage           816 fs/fuse/dev.c  	ClearPageMappedToDisk(newpage);
newpage           818 fs/fuse/dev.c  	if (fuse_check_page(newpage) != 0)
newpage           834 fs/fuse/dev.c  	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
newpage           836 fs/fuse/dev.c  		unlock_page(newpage);
newpage           840 fs/fuse/dev.c  	get_page(newpage);
newpage           843 fs/fuse/dev.c  		lru_cache_add_file(newpage);
newpage           850 fs/fuse/dev.c  		*pagep = newpage;
newpage           854 fs/fuse/dev.c  		unlock_page(newpage);
newpage           855 fs/fuse/dev.c  		put_page(newpage);
newpage           866 fs/fuse/dev.c  	unlock_page(newpage);
newpage           880 fs/hugetlbfs/inode.c 				struct page *newpage, struct page *page,
newpage           885 fs/hugetlbfs/inode.c 	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
newpage           896 fs/hugetlbfs/inode.c 		set_page_private(newpage, page_private(page));
newpage           901 fs/hugetlbfs/inode.c 		migrate_page_copy(newpage, page);
newpage           903 fs/hugetlbfs/inode.c 		migrate_page_states(newpage, page);
newpage           487 fs/iomap/buffered-io.c iomap_migrate_page(struct address_space *mapping, struct page *newpage,
newpage           492 fs/iomap/buffered-io.c 	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
newpage           498 fs/iomap/buffered-io.c 		get_page(newpage);
newpage           499 fs/iomap/buffered-io.c 		set_page_private(newpage, page_private(page));
newpage           502 fs/iomap/buffered-io.c 		SetPagePrivate(newpage);
newpage           506 fs/iomap/buffered-io.c 		migrate_page_copy(newpage, page);
newpage           508 fs/iomap/buffered-io.c 		migrate_page_states(newpage, page);
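
The iomap lines above trace the canonical shape of a filesystem ->migratepage implementation: move the page-cache entry first, hand any private state (with its page reference) to newpage, then copy the data and flags, or only the flags under MIGRATE_SYNC_NO_COPY, where the caller copies later. Filling in the gaps between the listed lines with the same pattern:

int iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (page_has_private(page)) {
		/* transfer page_private() and its reference to newpage */
		ClearPagePrivate(page);
		get_page(newpage);
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
		put_page(page);
		SetPagePrivate(newpage);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
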
newpage           329 fs/jfs/jfs_logmgr.h 		} newpage;
newpage          1709 fs/jfs/jfs_xtree.c 	int newpage = 0;
newpage          1962 fs/jfs/jfs_xtree.c 				newpage = 1;
newpage          1992 fs/jfs/jfs_xtree.c 	if (newpage) {
newpage          5495 fs/nfs/nfs4proc.c 	struct page *newpage, **spages;
newpage          5502 fs/nfs/nfs4proc.c 		newpage = alloc_page(GFP_KERNEL);
newpage          5504 fs/nfs/nfs4proc.c 		if (newpage == NULL)
newpage          5506 fs/nfs/nfs4proc.c 		memcpy(page_address(newpage), buf, len);
newpage          5509 fs/nfs/nfs4proc.c 		*pages++ = newpage;
newpage          2106 fs/nfs/write.c int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
newpage          2123 fs/nfs/write.c 	return migrate_page(mapping, newpage, page, mode);
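
btrfs (fs/btrfs/disk-io.c above) and NFS both end by delegating to the generic migrate_page() once they have ruled out pages carrying state they cannot move. A filesystem with no private per-page state can use the same shape; this minimal callback is hypothetical:

static int example_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	/* private data means in-flight I/O or attached buffers */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page, mode);
}
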
newpage          1464 fs/ubifs/file.c 		struct page *newpage, struct page *page, enum migrate_mode mode)
newpage          1468 fs/ubifs/file.c 	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
newpage          1474 fs/ubifs/file.c 		SetPagePrivate(newpage);
newpage          1478 fs/ubifs/file.c 		migrate_page_copy(newpage, page);
newpage          1480 fs/ubifs/file.c 		migrate_page_states(newpage, page);
newpage            58 include/linux/balloon_compaction.h 	int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
newpage            87 include/linux/balloon_compaction.h 				struct page *newpage,
newpage           168 include/linux/balloon_compaction.h static inline int balloon_page_migrate(struct page *newpage,
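
The balloon_compaction.h hits above declare the hook a memory balloon fills in to take part in compaction; vmw_balloon and virtio_balloon (earlier in this listing) are the in-tree users. A hypothetical driver-side sketch loosely following virtio_balloon's reference handling (the hypervisor calls are stand-ins):

static int my_balloon_migrate(struct balloon_dev_info *b_dev_info,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	unsigned long flags;

	/* hypervisor-specific: inflate newpage, deflate page (elided) */

	get_page(newpage);			/* balloon reference */
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	balloon_page_insert(b_dev_info, newpage);
	balloon_page_delete(page);
	b_dev_info->isolated_pages--;
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

	put_page(page);				/* drop the old balloon reference */
	return MIGRATEPAGE_SUCCESS;
}

	/* registration, e.g. at probe time */
	balloon_devinfo_init(&b_dev_info);
	b_dev_info.migratepage = my_balloon_migrate;
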
newpage           104 include/linux/hugetlb.h void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
newpage           164 include/linux/iomap.h int iomap_migrate_page(struct address_space *mapping, struct page *newpage,
newpage            55 include/linux/ksm.h void ksm_migrate_page(struct page *newpage, struct page *oldpage);
newpage            88 include/linux/ksm.h static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
newpage           388 include/linux/memcontrol.h void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
newpage            66 include/linux/migrate.h 			struct page *newpage, struct page *page,
newpage            75 include/linux/migrate.h extern void migrate_page_states(struct page *newpage, struct page *page);
newpage            76 include/linux/migrate.h extern void migrate_page_copy(struct page *newpage, struct page *page);
newpage            78 include/linux/migrate.h 				  struct page *newpage, struct page *page);
newpage            80 include/linux/migrate.h 		struct page *newpage, struct page *page, int extra_count);
newpage            94 include/linux/migrate.h static inline void migrate_page_states(struct page *newpage, struct page *page)
newpage            98 include/linux/migrate.h static inline void migrate_page_copy(struct page *newpage,
newpage           102 include/linux/migrate.h 				  struct page *newpage, struct page *page)
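
These migrate.h prototypes are what an address_space_operations entry points at; wiring a filesystem in is a single initializer, and iomap users such as xfs reuse the exported helper directly:

static const struct address_space_operations example_aops = {
	/* ... readpage, writepage, and friends ... */
	.migratepage	= iomap_migrate_page,
};
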
newpage            15 include/linux/page_owner.h extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
newpage            39 include/linux/page_owner.h static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
newpage            42 include/linux/page_owner.h 		__copy_page_owner(oldpage, newpage);
newpage            66 include/linux/page_owner.h static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
newpage           234 mm/balloon_compaction.c 		struct page *newpage, struct page *page,
newpage           248 mm/balloon_compaction.c 	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
newpage           250 mm/balloon_compaction.c 	return balloon->migratepage(balloon, newpage, page, mode);
newpage          5152 mm/hugetlb.c   void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
newpage          5156 mm/hugetlb.c   	hugetlb_cgroup_migrate(oldpage, newpage);
newpage          5157 mm/hugetlb.c   	set_page_owner_migrate_reason(newpage, reason);
newpage          5169 mm/hugetlb.c   	if (PageHugeTemporary(newpage)) {
newpage          5171 mm/hugetlb.c   		int new_nid = page_to_nid(newpage);
newpage          5174 mm/hugetlb.c   		ClearPageHugeTemporary(newpage);
newpage           327 mm/internal.h  static inline void mlock_migrate_page(struct page *newpage, struct page *page)
newpage           334 mm/internal.h  		SetPageMlocked(newpage);
newpage           335 mm/internal.h  		__mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
newpage          2688 mm/ksm.c       void ksm_migrate_page(struct page *newpage, struct page *oldpage)
newpage          2693 mm/ksm.c       	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
newpage          2694 mm/ksm.c       	VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage);
newpage          2696 mm/ksm.c       	stable_node = page_stable_node(newpage);
newpage          2699 mm/ksm.c       		stable_node->kpfn = page_to_pfn(newpage);
newpage          6816 mm/memcontrol.c void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
newpage          6824 mm/memcontrol.c 	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
newpage          6825 mm/memcontrol.c 	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
newpage          6826 mm/memcontrol.c 	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
newpage          6827 mm/memcontrol.c 		       newpage);
newpage          6833 mm/memcontrol.c 	if (newpage->mem_cgroup)
newpage          6842 mm/memcontrol.c 	compound = PageTransHuge(newpage);
newpage          6843 mm/memcontrol.c 	nr_pages = compound ? hpage_nr_pages(newpage) : 1;
newpage          6850 mm/memcontrol.c 	commit_charge(newpage, memcg, false);
newpage          6853 mm/memcontrol.c 	mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
newpage          6854 mm/memcontrol.c 	memcg_check_events(memcg, newpage);
newpage           398 mm/migrate.c   		struct page *newpage, struct page *page, int extra_count)
newpage           411 mm/migrate.c   		newpage->index = page->index;
newpage           412 mm/migrate.c   		newpage->mapping = page->mapping;
newpage           414 mm/migrate.c   			__SetPageSwapBacked(newpage);
newpage           420 mm/migrate.c   	newzone = page_zone(newpage);
newpage           437 mm/migrate.c   	newpage->index = page->index;
newpage           438 mm/migrate.c   	newpage->mapping = page->mapping;
newpage           439 mm/migrate.c   	page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
newpage           441 mm/migrate.c   		__SetPageSwapBacked(newpage);
newpage           443 mm/migrate.c   			SetPageSwapCache(newpage);
newpage           444 mm/migrate.c   			set_page_private(newpage, page_private(page));
newpage           454 mm/migrate.c   		SetPageDirty(newpage);
newpage           457 mm/migrate.c   	xas_store(&xas, newpage);
newpage           463 mm/migrate.c   			xas_store(&xas, newpage);
newpage           512 mm/migrate.c   				   struct page *newpage, struct page *page)
newpage           529 mm/migrate.c   	newpage->index = page->index;
newpage           530 mm/migrate.c   	newpage->mapping = page->mapping;
newpage           532 mm/migrate.c   	get_page(newpage);
newpage           534 mm/migrate.c   	xas_store(&xas, newpage);
newpage           594 mm/migrate.c   void migrate_page_states(struct page *newpage, struct page *page)
newpage           599 mm/migrate.c   		SetPageError(newpage);
newpage           601 mm/migrate.c   		SetPageReferenced(newpage);
newpage           603 mm/migrate.c   		SetPageUptodate(newpage);
newpage           606 mm/migrate.c   		SetPageActive(newpage);
newpage           608 mm/migrate.c   		SetPageUnevictable(newpage);
newpage           610 mm/migrate.c   		SetPageWorkingset(newpage);
newpage           612 mm/migrate.c   		SetPageChecked(newpage);
newpage           614 mm/migrate.c   		SetPageMappedToDisk(newpage);
newpage           618 mm/migrate.c   		SetPageDirty(newpage);
newpage           621 mm/migrate.c   		set_page_young(newpage);
newpage           623 mm/migrate.c   		set_page_idle(newpage);
newpage           630 mm/migrate.c   	page_cpupid_xchg_last(newpage, cpupid);
newpage           632 mm/migrate.c   	ksm_migrate_page(newpage, page);
newpage           646 mm/migrate.c   	if (PageWriteback(newpage))
newpage           647 mm/migrate.c   		end_page_writeback(newpage);
newpage           649 mm/migrate.c   	copy_page_owner(page, newpage);
newpage           651 mm/migrate.c   	mem_cgroup_migrate(page, newpage);
newpage           655 mm/migrate.c   void migrate_page_copy(struct page *newpage, struct page *page)
newpage           658 mm/migrate.c   		copy_huge_page(newpage, page);
newpage           660 mm/migrate.c   		copy_highpage(newpage, page);
newpage           662 mm/migrate.c   	migrate_page_states(newpage, page);
newpage           677 mm/migrate.c   		struct page *newpage, struct page *page,
newpage           684 mm/migrate.c   	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
newpage           690 mm/migrate.c   		migrate_page_copy(newpage, page);
newpage           692 mm/migrate.c   		migrate_page_states(newpage, page);
newpage           737 mm/migrate.c   		struct page *newpage, struct page *page, enum migrate_mode mode,
newpage           745 mm/migrate.c   		return migrate_page(mapping, newpage, page, mode);
newpage           783 mm/migrate.c   	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
newpage           788 mm/migrate.c   	set_page_private(newpage, page_private(page));
newpage           791 mm/migrate.c   	get_page(newpage);
newpage           795 mm/migrate.c   		set_bh_page(bh, newpage, bh_offset(bh));
newpage           800 mm/migrate.c   	SetPagePrivate(newpage);
newpage           803 mm/migrate.c   		migrate_page_copy(newpage, page);
newpage           805 mm/migrate.c   		migrate_page_states(newpage, page);
newpage           827 mm/migrate.c   		struct page *newpage, struct page *page, enum migrate_mode mode)
newpage           829 mm/migrate.c   	return __buffer_migrate_page(mapping, newpage, page, mode, false);
newpage           840 mm/migrate.c   		struct page *newpage, struct page *page, enum migrate_mode mode)
newpage           842 mm/migrate.c   	return __buffer_migrate_page(mapping, newpage, page, mode, true);
newpage           891 mm/migrate.c   	struct page *newpage, struct page *page, enum migrate_mode mode)
newpage           913 mm/migrate.c   	return migrate_page(mapping, newpage, page, mode);
newpage           927 mm/migrate.c   static int move_to_new_page(struct page *newpage, struct page *page,
newpage           935 mm/migrate.c   	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
newpage           941 mm/migrate.c   			rc = migrate_page(mapping, newpage, page, mode);
newpage           950 mm/migrate.c   			rc = mapping->a_ops->migratepage(mapping, newpage,
newpage           953 mm/migrate.c   			rc = fallback_migrate_page(mapping, newpage,
newpage           967 mm/migrate.c   		rc = mapping->a_ops->migratepage(mapping, newpage,
newpage           996 mm/migrate.c   		if (likely(!is_zone_device_page(newpage)))
newpage           997 mm/migrate.c   			flush_dcache_page(newpage);
newpage          1004 mm/migrate.c   static int __unmap_and_move(struct page *page, struct page *newpage,
newpage          1080 mm/migrate.c   	if (unlikely(!trylock_page(newpage)))
newpage          1084 mm/migrate.c   		rc = move_to_new_page(newpage, page, mode);
newpage          1116 mm/migrate.c   		rc = move_to_new_page(newpage, page, mode);
newpage          1120 mm/migrate.c   			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
newpage          1123 mm/migrate.c   	unlock_page(newpage);
newpage          1141 mm/migrate.c   			put_page(newpage);
newpage          1143 mm/migrate.c   			putback_lru_page(newpage);
newpage          1171 mm/migrate.c   	struct page *newpage;
newpage          1176 mm/migrate.c   	newpage = get_new_page(page, private);
newpage          1177 mm/migrate.c   	if (!newpage)
newpage          1191 mm/migrate.c   			put_new_page(newpage, private);
newpage          1193 mm/migrate.c   			put_page(newpage);
newpage          1197 mm/migrate.c   	rc = __unmap_and_move(page, newpage, force, mode);
newpage          1199 mm/migrate.c   		set_page_owner_migrate_reason(newpage, reason);
newpage          1254 mm/migrate.c   			put_new_page(newpage, private);
newpage          1256 mm/migrate.c   			put_page(newpage);
newpage          1909 mm/migrate.c   	struct page *newpage;
newpage          1911 mm/migrate.c   	newpage = __alloc_pages_node(nid,
newpage          1917 mm/migrate.c   	return newpage;
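
The mm/migrate.c allocation helper just above is the caller-supplied half of the migrate_pages() contract: a new_page_t callback conjures a destination page for each source page, with 'private' as an opaque cookie. A sketch with a made-up callback name:

static struct page *alloc_target_page(struct page *page, unsigned long private)
{
	int nid = (int)private;		/* target node passed via the cookie */

	return __alloc_pages_node(nid,
			GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
}

	/* usage: move every page on 'pagelist' to node 'nid' */
	err = migrate_pages(&pagelist, alloc_target_page, NULL,
			    (unsigned long)nid, MIGRATE_SYNC, MR_SYSCALL);
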
newpage          2851 mm/migrate.c   		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
newpage          2856 mm/migrate.c   		if (!newpage) {
newpage          2875 mm/migrate.c   			migrate_vma_insert_page(migrate, addr, newpage,
newpage          2883 mm/migrate.c   		if (is_zone_device_page(newpage)) {
newpage          2884 mm/migrate.c   			if (is_device_private_page(newpage)) {
newpage          2903 mm/migrate.c   		r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
newpage          2935 mm/migrate.c   		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
newpage          2939 mm/migrate.c   			if (newpage) {
newpage          2940 mm/migrate.c   				unlock_page(newpage);
newpage          2941 mm/migrate.c   				put_page(newpage);
newpage          2946 mm/migrate.c   		if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
newpage          2947 mm/migrate.c   			if (newpage) {
newpage          2948 mm/migrate.c   				unlock_page(newpage);
newpage          2949 mm/migrate.c   				put_page(newpage);
newpage          2951 mm/migrate.c   			newpage = page;
newpage          2954 mm/migrate.c   		remove_migration_ptes(page, newpage, false);
newpage          2963 mm/migrate.c   		if (newpage != page) {
newpage          2964 mm/migrate.c   			unlock_page(newpage);
newpage          2965 mm/migrate.c   			if (is_zone_device_page(newpage))
newpage          2966 mm/migrate.c   				put_page(newpage);
newpage          2968 mm/migrate.c   				putback_lru_page(newpage);
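
The migrate_vma_pages()/migrate_vma_finalize() bodies above are the kernel half of the device-memory migration flow; a driver drives them roughly like this (a sketch of the 5.x migrate_vma API, with the device-page allocation step elided):

	struct migrate_vma args = {
		.vma	= vma,
		.start	= start,
		.end	= end,
		.src	= src_pfns,
		.dst	= dst_pfns,
	};

	if (migrate_vma_setup(&args))
		return -EINVAL;

	/* for each args.src[i] with MIGRATE_PFN_MIGRATE set: allocate and
	 * lock a destination page, then publish it as
	 *	args.dst[i] = migrate_pfn(page_to_pfn(newpage))
	 *		    | MIGRATE_PFN_LOCKED;
	 */

	migrate_vma_pages(&args);
	/* note the MIGRATE_SYNC_NO_COPY in the listing: the kernel moved the
	 * mapping but not the bytes, so the driver copies src to dst here
	 * (typically by DMA) before finalizing
	 */
	migrate_vma_finalize(&args);
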
newpage           223 mm/page_owner.c void __copy_page_owner(struct page *oldpage, struct page *newpage)
newpage           226 mm/page_owner.c 	struct page_ext *new_ext = lookup_page_ext(newpage);
newpage          1556 mm/shmem.c     	struct page *oldpage, *newpage;
newpage          1572 mm/shmem.c     	newpage = shmem_alloc_page(gfp, info, index);
newpage          1573 mm/shmem.c     	if (!newpage)
newpage          1576 mm/shmem.c     	get_page(newpage);
newpage          1577 mm/shmem.c     	copy_highpage(newpage, oldpage);
newpage          1578 mm/shmem.c     	flush_dcache_page(newpage);
newpage          1580 mm/shmem.c     	__SetPageLocked(newpage);
newpage          1581 mm/shmem.c     	__SetPageSwapBacked(newpage);
newpage          1582 mm/shmem.c     	SetPageUptodate(newpage);
newpage          1583 mm/shmem.c     	set_page_private(newpage, entry.val);
newpage          1584 mm/shmem.c     	SetPageSwapCache(newpage);
newpage          1591 mm/shmem.c     	error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
newpage          1593 mm/shmem.c     		__inc_node_page_state(newpage, NR_FILE_PAGES);
newpage          1604 mm/shmem.c     		oldpage = newpage;
newpage          1606 mm/shmem.c     		mem_cgroup_migrate(oldpage, newpage);
newpage          1607 mm/shmem.c     		lru_cache_add_anon(newpage);
newpage          1608 mm/shmem.c     		*pagep = newpage;
newpage          1373 mm/z3fold.c    static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
newpage          1382 mm/z3fold.c    	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
newpage          1398 mm/z3fold.c    	new_zhdr = page_address(newpage);
newpage          1400 mm/z3fold.c    	newpage->private = page->private;
newpage          1414 mm/z3fold.c    	get_page(newpage);
newpage          1422 mm/z3fold.c    	set_bit(NEEDS_COMPACTING, &newpage->private);
newpage          1425 mm/z3fold.c    	list_add(&newpage->lru, &pool->lru);
newpage          1427 mm/z3fold.c    	__SetPageMovable(newpage, new_mapping);
newpage          1893 mm/zsmalloc.c  				struct page *newpage, struct page *oldpage)
newpage          1902 mm/zsmalloc.c  			pages[idx] = newpage;
newpage          1909 mm/zsmalloc.c  	set_first_obj_offset(newpage, get_first_obj_offset(oldpage));
newpage          1911 mm/zsmalloc.c  		newpage->index = oldpage->index;
newpage          1912 mm/zsmalloc.c  	__SetPageMovable(newpage, page_mapping(oldpage));
newpage          1971 mm/zsmalloc.c  static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
newpage          2031 mm/zsmalloc.c  	d_addr = kmap_atomic(newpage);
newpage          2045 mm/zsmalloc.c  			new_obj = (unsigned long)location_to_obj(newpage,
newpage          2052 mm/zsmalloc.c  	replace_sub_page(class, zspage, newpage, page);
newpage          2053 mm/zsmalloc.c  	get_page(newpage);
newpage          2072 mm/zsmalloc.c  	if (page_zone(newpage) != page_zone(page)) {
newpage          2074 mm/zsmalloc.c  		inc_zone_page_state(newpage, NR_ZSPAGES);
newpage          2079 mm/zsmalloc.c  	page = newpage;
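
z3fold and zsmalloc hold no page-cache pages at all; they join compaction through the non-LRU movable page protocol instead. Each page is marked movable against a mapping whose aops supply isolate/migrate/putback hooks, and the migrate hook re-marks newpage movable when done, as the __SetPageMovable() calls above show. Skeleton with hypothetical names:

static const struct address_space_operations my_movable_aops = {
	.isolate_page	= my_isolate_page,	/* detach from internal lists */
	.migratepage	= my_migrate_page,	/* move payload into newpage */
	.putback_page	= my_putback_page,	/* migration failed, reattach */
};

	/* when a backing page is allocated (mapping comes from the
	 * driver's own inode):
	 */
	__SetPageMovable(page, mapping);

	/* and inside my_migrate_page(), after moving the payload: */
	__SetPageMovable(newpage, mapping);
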