cur_page 157 drivers/crypto/qat/qat_common/icp_qat_uclo.h struct icp_qat_uclo_page *cur_page[ICP_QAT_UCLO_MAX_CTX];
cur_page 1616 drivers/crypto/qat/qat_common/qat_uclo.c obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] =
cur_page 151 drivers/gpu/drm/ttm/ttm_tt.c struct page *cur_page;
cur_page 167 drivers/gpu/drm/ttm/ttm_tt.c cur_page = ttm->pages[i];
cur_page 168 drivers/gpu/drm/ttm/ttm_tt.c if (likely(cur_page != NULL)) {
cur_page 169 drivers/gpu/drm/ttm/ttm_tt.c ret = ttm_tt_set_page_caching(cur_page,
cur_page 183 drivers/gpu/drm/ttm/ttm_tt.c cur_page = ttm->pages[j];
cur_page 184 drivers/gpu/drm/ttm/ttm_tt.c if (likely(cur_page != NULL)) {
cur_page 185 drivers/gpu/drm/ttm/ttm_tt.c (void)ttm_tt_set_page_caching(cur_page, c_state,
cur_page 260 drivers/xen/xen-front-pgdir-shbuf.c int ret, cur_gref, cur_dir_page, cur_page, grefs_left;
cur_page 281 drivers/xen/xen-front-pgdir-shbuf.c cur_page = 0;
cur_page 293 drivers/xen/xen-front-pgdir-shbuf.c addr = xen_page_to_vaddr(buf->pages[cur_page]);
cur_page 294 drivers/xen/xen-front-pgdir-shbuf.c gnttab_set_map_op(&map_ops[cur_page], addr,
cur_page 298 drivers/xen/xen-front-pgdir-shbuf.c cur_page++;
cur_page 307 drivers/xen/xen-front-pgdir-shbuf.c for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
cur_page 308 drivers/xen/xen-front-pgdir-shbuf.c buf->backend_map_handles[cur_page] = map_ops[cur_page].handle;
cur_page 309 drivers/xen/xen-front-pgdir-shbuf.c if (unlikely(map_ops[cur_page].status != GNTST_okay))
cur_page 312 drivers/xen/xen-front-pgdir-shbuf.c cur_page, map_ops[cur_page].status);
cur_page 99 fs/direct-io.c struct page *cur_page; /* The page */
cur_page 753 fs/direct-io.c ret = bio_add_page(sdio->bio, sdio->cur_page,
cur_page 761 fs/direct-io.c get_page(sdio->cur_page);
cur_page 862 fs/direct-io.c if (sdio->cur_page == page &&
cur_page 873 fs/direct-io.c if (sdio->cur_page) {
cur_page 875 fs/direct-io.c put_page(sdio->cur_page);
cur_page 876 fs/direct-io.c sdio->cur_page = NULL;
cur_page 882 fs/direct-io.c sdio->cur_page = page;
cur_page 896 fs/direct-io.c put_page(sdio->cur_page);
cur_page 897 fs/direct-io.c sdio->cur_page = NULL;
cur_page 1343 fs/direct-io.c if (sdio.cur_page) {
cur_page 1349 fs/direct-io.c put_page(sdio.cur_page);
cur_page 1350 fs/direct-io.c sdio.cur_page = NULL;
cur_page 886 fs/f2fs/checkpoint.c struct page *cp1, *cp2, *cur_page;
cur_page 913 fs/f2fs/checkpoint.c cur_page = cp2;
cur_page 915 fs/f2fs/checkpoint.c cur_page = cp1;
cur_page 917 fs/f2fs/checkpoint.c cur_page = cp1;
cur_page 919 fs/f2fs/checkpoint.c cur_page = cp2;
cur_page 925 fs/f2fs/checkpoint.c cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
cur_page 928 fs/f2fs/checkpoint.c if (cur_page == cp1)
cur_page 943 fs/f2fs/checkpoint.c if (cur_page == cp2)
cur_page 950 fs/f2fs/checkpoint.c cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i);
cur_page 951 fs/f2fs/checkpoint.c if (IS_ERR(cur_page)) {
cur_page 952 fs/f2fs/checkpoint.c err = PTR_ERR(cur_page);
cur_page 955 fs/f2fs/checkpoint.c sit_bitmap_ptr = page_address(cur_page);
cur_page 957 fs/f2fs/checkpoint.c f2fs_put_page(cur_page, 1);
cur_page 179 fs/iomap/buffered-io.c struct page *cur_page;
cur_page 211 fs/iomap/buffered-io.c struct page *page = ctx->cur_page;
cur_page 291 fs/iomap/buffered-io.c struct iomap_readpage_ctx ctx = { .cur_page = page };
cur_page 360 fs/iomap/buffered-io.c if (ctx->cur_page && offset_in_page(pos + done) == 0) {
cur_page 362 fs/iomap/buffered-io.c unlock_page(ctx->cur_page);
cur_page 363 fs/iomap/buffered-io.c put_page(ctx->cur_page);
cur_page 364 fs/iomap/buffered-io.c ctx->cur_page = NULL;
cur_page 366 fs/iomap/buffered-io.c if (!ctx->cur_page) {
cur_page 367 fs/iomap/buffered-io.c ctx->cur_page = iomap_next_page(inode, ctx->pages,
cur_page 369 fs/iomap/buffered-io.c if (!ctx->cur_page)
cur_page 406 fs/iomap/buffered-io.c if (ctx.cur_page) {
cur_page 408 fs/iomap/buffered-io.c unlock_page(ctx.cur_page);
cur_page 409 fs/iomap/buffered-io.c put_page(ctx.cur_page);
cur_page 499 fs/ntfs/compress.c unsigned int xpage, max_page, cur_page, cur_ofs, i;
cur_page 587 fs/ntfs/compress.c cur_page = 0;
cur_page 722 fs/ntfs/compress.c cb_max_page = (cur_page << PAGE_SHIFT) + cur_ofs + cb_size;
cur_page 737 fs/ntfs/compress.c for (; cur_page < cb_max_page; cur_page++) {
cur_page 738 fs/ntfs/compress.c page = pages[cur_page];
cur_page 750 fs/ntfs/compress.c if (cur_page == xpage)
cur_page 754 fs/ntfs/compress.c pages[cur_page] = NULL;
cur_page 763 fs/ntfs/compress.c page = pages[cur_page];
cur_page 775 fs/ntfs/compress.c unsigned int cur2_page = cur_page;
cur_page 794 fs/ntfs/compress.c for (; cur_page < cb_max_page; cur_page++) {
cur_page 795 fs/ntfs/compress.c page = pages[cur_page];
cur_page 806 fs/ntfs/compress.c page = pages[cur_page];
cur_page 842 fs/ntfs/compress.c unsigned int prev_cur_page = cur_page;
cur_page 845 fs/ntfs/compress.c err = ntfs_decompress(pages, completed_pages, &cur_page,
cur_page 859 fs/ntfs/compress.c for (; prev_cur_page < cur_page; prev_cur_page++) {
cur_page 885 fs/ntfs/compress.c for (cur_page = 0; cur_page < max_page; cur_page++) {
cur_page 886 fs/ntfs/compress.c page = pages[cur_page];
cur_page 895 fs/ntfs/compress.c if (cur_page != xpage)
cur_page 897 fs/ntfs/compress.c pages[cur_page] = NULL;
cur_page 937 fs/ntfs/compress.c for (i = cur_page; i < max_page; i++) {
cur_page 394 lib/scatterlist.c unsigned int chunks, cur_page, seg_len, i;
cur_page 418 lib/scatterlist.c cur_page = 0;
cur_page 424 lib/scatterlist.c for (j = cur_page + 1; j < n_pages; j++) {
cur_page 432 lib/scatterlist.c chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
cur_page 433 lib/scatterlist.c sg_set_page(s, pages[cur_page],
cur_page 437 lib/scatterlist.c cur_page = j;
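The lib/scatterlist.c excerpts at the end of the listing show the common pattern behind this use of cur_page: an outer cursor marks the first page of a run, an inner loop advances while the following pages are physically contiguous, and the run is then emitted as one chunk whose byte size is derived from the page count. Below is a minimal stand-alone sketch of that arithmetic, not the kernel code itself; it models pages as plain page frame numbers and assumes PAGE_SHIFT = 12 and the sample pfn array purely for illustration.

```c
/*
 * Illustrative sketch of the cur_page chunk-coalescing pattern seen in
 * lib/scatterlist.c above. User-space demo only: struct page is replaced
 * by page frame numbers, and the pfn array is an assumed example input.
 */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* Pretend these are the page frame numbers of the caller's pages. */
	unsigned long pfns[] = { 100, 101, 102, 200, 201, 300 };
	unsigned int n_pages = sizeof(pfns) / sizeof(pfns[0]);
	unsigned int cur_page = 0, offset = 0;

	while (cur_page < n_pages) {
		unsigned int j;
		unsigned long chunk_size;

		/* Extend the run while the next page is physically contiguous. */
		for (j = cur_page + 1; j < n_pages; j++)
			if (pfns[j] != pfns[j - 1] + 1)
				break;

		/* Same arithmetic as the excerpt: bytes covered by the run. */
		chunk_size = ((unsigned long)(j - cur_page) << PAGE_SHIFT) - offset;
		printf("chunk: first pfn %lu, %u pages, %lu bytes\n",
		       pfns[cur_page], j - cur_page, chunk_size);

		cur_page = j;	/* next chunk starts where this one ended */
		offset = 0;	/* only the first chunk may start mid-page */
	}
	return 0;
}
```

Run on the sample input, this prints three chunks (3 pages, 2 pages, 1 page), mirroring how sg_set_page() is called once per contiguous run rather than once per page.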