Lines Matching refs:pages

1118 struct page **pages; in __iommu_alloc_buffer() local
1124 pages = kzalloc(array_size, GFP_KERNEL); in __iommu_alloc_buffer()
1126 pages = vzalloc(array_size); in __iommu_alloc_buffer()
1127 if (!pages) in __iommu_alloc_buffer()
1142 pages[i] = page + i; in __iommu_alloc_buffer()
1144 return pages; in __iommu_alloc_buffer()
1161 pages[i] = alloc_pages(gfp | __GFP_NORETRY, order); in __iommu_alloc_buffer()
1162 if (pages[i]) in __iommu_alloc_buffer()
1166 if (!pages[i]) { in __iommu_alloc_buffer()
1171 pages[i] = alloc_pages(gfp, 0); in __iommu_alloc_buffer()
1172 if (!pages[i]) in __iommu_alloc_buffer()
1177 split_page(pages[i], order); in __iommu_alloc_buffer()
1180 pages[i + j] = pages[i] + j; in __iommu_alloc_buffer()
1183 __dma_clear_buffer(pages[i], PAGE_SIZE << order); in __iommu_alloc_buffer()
1188 return pages; in __iommu_alloc_buffer()
1191 if (pages[i]) in __iommu_alloc_buffer()
1192 __free_pages(pages[i], 0); in __iommu_alloc_buffer()
1194 kfree(pages); in __iommu_alloc_buffer()
1196 vfree(pages); in __iommu_alloc_buffer()
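The __iommu_alloc_buffer() matches above show a two-step pattern: the page-pointer array is allocated with kzalloc() when it is small enough and vzalloc() otherwise, and the buffer pages are then gathered by trying higher-order allocations, splitting them with split_page(), and falling back to order-0 pages, with the failure path freeing whatever was already gathered. What follows is a minimal userspace sketch of that try-large-then-fall-back shape only; the chunk sizes and helper names are illustrative and not taken from the kernel source.

#include <stdio.h>
#include <stdlib.h>

/*
 * Userspace sketch of the allocation shape in the __iommu_alloc_buffer()
 * matches: build an array of chunk pointers, try a large chunk first,
 * fall back to a small one, and unwind everything on failure. Sizes and
 * names are made-up stand-ins, not kernel APIs.
 */
#define CHUNK_SMALL	4096		/* stand-in for a single page */
#define CHUNK_LARGE	(16 * 4096)	/* stand-in for a high-order block */

static void free_chunks(void **chunks, size_t count)
{
	for (size_t i = 0; i < count; i++)
		free(chunks[i]);	/* free(NULL) is a no-op */
	free(chunks);			/* the pointer array itself */
}

static void **alloc_chunks(size_t count)
{
	/* The kernel picks kzalloc() or vzalloc() for this array based on
	 * its size; calloc() stands in for both here. */
	void **chunks = calloc(count, sizeof(*chunks));

	if (!chunks)
		return NULL;

	for (size_t i = 0; i < count; i++) {
		chunks[i] = malloc(CHUNK_LARGE);	/* try the big allocation */
		if (!chunks[i])
			chunks[i] = malloc(CHUNK_SMALL);	/* fall back to a small one */
		if (!chunks[i]) {
			free_chunks(chunks, count);	/* unwind, like the error path */
			return NULL;
		}
	}
	return chunks;
}

int main(void)
{
	void **chunks = alloc_chunks(8);

	if (!chunks) {
		fprintf(stderr, "allocation failed\n");
		return 1;
	}
	puts("allocated 8 chunks");
	free_chunks(chunks, 8);
	return 0;
}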
1200 static int __iommu_free_buffer(struct device *dev, struct page **pages, in __iommu_free_buffer() argument
1208 dma_release_from_contiguous(dev, pages[0], count); in __iommu_free_buffer()
1211 if (pages[i]) in __iommu_free_buffer()
1212 __free_pages(pages[i], 0); in __iommu_free_buffer()
1216 kfree(pages); in __iommu_free_buffer()
1218 vfree(pages); in __iommu_free_buffer()
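__iommu_free_buffer() walks back along whichever path the allocation took: a buffer that came from the contiguous (CMA) allocator is released in one call through its first page, otherwise every page is freed individually, and the pointer array is then returned with kfree() or vfree() to match how it was allocated. Below is a small sketch of that "free the way you allocated" rule, again with made-up userspace stand-ins rather than kernel calls.

#include <stdbool.h>
#include <stdlib.h>

#define CHUNK_SIZE	4096	/* stand-in for PAGE_SIZE */

/*
 * Sketch of the two release paths in the __iommu_free_buffer() matches:
 * a buffer carved out of one contiguous allocation is released through
 * its first chunk only (dma_release_from_contiguous() in the kernel),
 * while a buffer assembled from independent chunks is freed piece by
 * piece. The pointer array is then freed with whatever allocated it
 * (kfree() vs. vfree() in the kernel; plain free() here).
 */
static void release_buffer(void **chunks, size_t count, bool contiguous)
{
	if (contiguous) {
		free(chunks[0]);	/* the whole carved block at once */
	} else {
		for (size_t i = 0; i < count; i++)
			free(chunks[i]);
	}
	free(chunks);			/* the pointer array itself */
}

int main(void)
{
	size_t count = 4;
	void **chunks = calloc(count, sizeof(*chunks));
	char *block;

	if (!chunks)
		return 1;

	/* Contiguous case: one allocation carved into equal chunks. */
	block = malloc(count * CHUNK_SIZE);
	if (!block) {
		free(chunks);
		return 1;
	}
	for (size_t i = 0; i < count; i++)
		chunks[i] = block + i * CHUNK_SIZE;

	release_buffer(chunks, count, true);
	return 0;
}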
1226 __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot, in __iommu_alloc_remap() argument
1229 return dma_common_pages_remap(pages, size, in __iommu_alloc_remap()
1237 __iommu_create_mapping(struct device *dev, struct page **pages, size_t size) in __iommu_create_mapping() argument
1250 unsigned int next_pfn = page_to_pfn(pages[i]) + 1; in __iommu_create_mapping()
1251 phys_addr_t phys = page_to_phys(pages[i]); in __iommu_create_mapping()
1255 if (page_to_pfn(pages[j]) != next_pfn) in __iommu_create_mapping()
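The __iommu_create_mapping() matches show the coalescing step that runs before the actual mapping call: the loop scans pages[] for runs of physically consecutive pages (adjacent page frame numbers) so that each run can be mapped with one operation instead of one per page. The sketch below performs the same scan over plain integers standing in for page frame numbers; the values and output are illustrative only.

#include <stdio.h>
#include <stddef.h>

/*
 * Sketch of the run-coalescing scan in the __iommu_create_mapping()
 * matches: walk an array of "page frame numbers" and emit one
 * (start, length) pair per run of consecutive values, the way the
 * kernel loop keeps extending a mapping while page_to_pfn(pages[j])
 * matches next_pfn. The pfn values here are made up.
 */
int main(void)
{
	unsigned long pfns[] = { 100, 101, 102, 200, 300, 301 };
	size_t count = sizeof(pfns) / sizeof(pfns[0]);

	for (size_t i = 0; i < count; ) {
		unsigned long start = pfns[i];
		unsigned long next = start + 1;
		size_t j = i + 1;

		/* Extend the run while the next entry is physically adjacent. */
		while (j < count && pfns[j] == next) {
			next++;
			j++;
		}

		/* One "map" operation per contiguous run, not one per page. */
		printf("map run: pfn %lu, %zu pages\n", start, j - i);
		i = j;
	}
	return 0;
}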
1312 return area->pages; in __iommu_get_pages()
1348 struct page **pages; in arm_iommu_alloc_attrs() local
1366 pages = __iommu_alloc_buffer(dev, size, gfp, attrs); in arm_iommu_alloc_attrs()
1367 if (!pages) in arm_iommu_alloc_attrs()
1370 *handle = __iommu_create_mapping(dev, pages, size); in arm_iommu_alloc_attrs()
1375 return pages; in arm_iommu_alloc_attrs()
1377 addr = __iommu_alloc_remap(pages, size, gfp, prot, in arm_iommu_alloc_attrs()
1387 __iommu_free_buffer(dev, pages, size, attrs); in arm_iommu_alloc_attrs()
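The arm_iommu_alloc_attrs() matches stack these helpers: allocate the backing pages, create the IOMMU mapping for them, return the bare page array when no kernel mapping is requested, otherwise remap the pages into a kernel virtual address, and unwind the earlier stages when a later one fails, which is what the trailing __iommu_free_buffer() call is for. Here is a compact sketch of that goto-based layered unwinding; step_map() and step_unmap() are hypothetical stand-ins, not kernel functions.

#include <stdio.h>
#include <stdlib.h>

/*
 * Sketch of the layered setup/teardown shape in the
 * arm_iommu_alloc_attrs() matches: each stage that can fail jumps to a
 * label that undoes only the stages already completed.
 */
static int step_map(void *buf)
{
	return buf ? 0 : -1;	/* pretend mapping succeeds for a valid buffer */
}

static void step_unmap(void *buf)
{
	(void)buf;		/* nothing to undo in this sketch */
}

static void *setup_buffer(void)
{
	void *buf = malloc(64);	/* "__iommu_alloc_buffer()" stage */

	if (!buf)
		goto err;

	if (step_map(buf))	/* "__iommu_create_mapping()" stage */
		goto err_free;

	return buf;		/* both stages completed */

err_free:
	free(buf);		/* "__iommu_free_buffer()" stage: undo stage one */
err:
	return NULL;
}

int main(void)
{
	void *buf = setup_buffer();

	if (!buf) {
		fprintf(stderr, "setup failed\n");
		return 1;
	}
	step_unmap(buf);
	free(buf);
	return 0;
}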
1397 struct page **pages = __iommu_get_pages(cpu_addr, attrs); in arm_iommu_mmap_attrs() local
1403 if (!pages) in arm_iommu_mmap_attrs()
1409 pages += off; in arm_iommu_mmap_attrs()
1412 int ret = vm_insert_page(vma, uaddr, *pages++); in arm_iommu_mmap_attrs()
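In the arm_iommu_mmap_attrs() matches, the pages pointer is first advanced by the offset the caller requested (derived from vma->vm_pgoff), and the remaining pages are then handed to vm_insert_page() one user page per iteration. The loop below mimics only that offset-and-walk bookkeeping over an ordinary array; the bounds and names are made up for the example.

#include <stdio.h>
#include <stddef.h>

/*
 * Sketch of the offset-then-walk loop in the arm_iommu_mmap_attrs()
 * matches: skip 'off' leading entries, then hand out one entry per
 * iteration until either the buffer or the requested window runs out.
 */
int main(void)
{
	const char *pages[] = { "p0", "p1", "p2", "p3", "p4", "p5" };
	size_t nr_pages = sizeof(pages) / sizeof(pages[0]);
	size_t off = 2;		/* stand-in for vma->vm_pgoff */
	size_t want = 3;	/* stand-in for the mapping length in pages */
	const char **p = pages + off;

	for (size_t i = 0; i < want && off + i < nr_pages; i++)
		printf("insert %s\n", *p++);	/* one vm_insert_page() per page */

	return 0;
}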
1431 struct page **pages; in arm_iommu_free_attrs() local
1439 pages = __iommu_get_pages(cpu_addr, attrs); in arm_iommu_free_attrs()
1440 if (!pages) { in arm_iommu_free_attrs()
1451 __iommu_free_buffer(dev, pages, size, attrs); in arm_iommu_free_attrs()
1459 struct page **pages = __iommu_get_pages(cpu_addr, attrs); in arm_iommu_get_sgtable() local
1461 if (!pages) in arm_iommu_get_sgtable()
1464 return sg_alloc_table_from_pages(sgt, pages, count, 0, size, in arm_iommu_get_sgtable()