pools             107 arch/powerpc/include/asm/iommu.h 	struct iommu_pool pools[IOMMU_NR_POOLS];
pools             203 arch/powerpc/kernel/iommu.c 		pool = &(tbl->pools[pool_nr]);
pools             231 arch/powerpc/kernel/iommu.c 			pool = &(tbl->pools[0]);
pools             259 arch/powerpc/kernel/iommu.c 			pool = &tbl->pools[pool_nr];
pools             377 arch/powerpc/kernel/iommu.c 		p = &tbl->pools[pool_nr];
pools             714 arch/powerpc/kernel/iommu.c 		p = &tbl->pools[i];
pools            1060 arch/powerpc/kernel/iommu.c 		spin_lock(&tbl->pools[i].lock);
pools            1075 arch/powerpc/kernel/iommu.c 		spin_unlock(&tbl->pools[i].lock);
pools            1088 arch/powerpc/kernel/iommu.c 		spin_lock(&tbl->pools[i].lock);
pools            1096 arch/powerpc/kernel/iommu.c 		spin_unlock(&tbl->pools[i].lock);
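
The powerpc hits above are the multi-pool IOMMU allocator: the table's bitmap is split into IOMMU_NR_POOLS regions, each guarded by its own spinlock so concurrent mappings from different CPUs rarely contend, and the iommu.c:1060-1096 hits take every pool lock when the whole table must be quiesced. A minimal sketch of that shape, with ex_* names standing in for the kernel's:

#include <linux/spinlock.h>

#define EX_NR_POOLS 4

struct ex_iommu_pool {
	spinlock_t	lock;
	unsigned long	start;
	unsigned long	hint;	/* where the next search begins */
	unsigned long	end;
};

struct ex_iommu_table {
	struct ex_iommu_pool pools[EX_NR_POOLS];
};

/* Quiesce the table by taking every per-pool lock, always in
 * index order so two freezers cannot deadlock against each other. */
static void ex_table_freeze(struct ex_iommu_table *tbl)
{
	int i;

	for (i = 0; i < EX_NR_POOLS; i++)
		spin_lock(&tbl->pools[i].lock);
}

static void ex_table_thaw(struct ex_iommu_table *tbl)
{
	int i;

	for (i = 0; i < EX_NR_POOLS; i++)
		spin_unlock(&tbl->pools[i].lock);
}
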
pools              26 arch/sparc/include/asm/iommu-common.h 	struct iommu_pool	pools[IOMMU_NR_POOLS];
pools              82 arch/sparc/kernel/iommu-common.c 		spin_lock_init(&(iommu->pools[i].lock));
pools              83 arch/sparc/kernel/iommu-common.c 		iommu->pools[i].start = start;
pools              84 arch/sparc/kernel/iommu-common.c 		iommu->pools[i].hint = start;
pools              86 arch/sparc/kernel/iommu-common.c 		iommu->pools[i].end = start - 1;
pools             131 arch/sparc/kernel/iommu-common.c 		pool = &(iommu->pools[pool_nr]);
pools             161 arch/sparc/kernel/iommu-common.c 			pool = &(iommu->pools[0]);
pools             197 arch/sparc/kernel/iommu-common.c 			pool = &(iommu->pools[pool_nr]);
pools             241 arch/sparc/kernel/iommu-common.c 		p = &tbl->pools[pool_nr];
pools             719 arch/sparc/kernel/pci_sun4v.c 		pool = &(iommu->pools[pool_nr]);
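
sparc's iommu-common.c carries the same design, and its init loop (lines 82-86 above) shows how the regions are carved out: start and hint begin at the region base, start then advances by one pool's worth, and end lands one slot before the next region. Reusing the ex_* types from the previous sketch; note that line 85 does not match 'pools' and is absent above, so the start advance is inferred:

static void ex_table_init(struct ex_iommu_table *tbl,
			  unsigned long num_entries)
{
	unsigned long start = 0;
	unsigned long pool_size = num_entries / EX_NR_POOLS;
	int i;

	for (i = 0; i < EX_NR_POOLS; i++) {
		spin_lock_init(&tbl->pools[i].lock);
		tbl->pools[i].start = start;
		tbl->pools[i].hint = start;
		start += pool_size;	/* inferred: the unlisted line 85 */
		tbl->pools[i].end = start - 1;
	}
}
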
pools            1038 drivers/atm/zatm.c 			unsigned long pools;
pools            1041 drivers/atm/zatm.c 			pools = zin(RQA);
pools            1042 drivers/atm/zatm.c 			EVENT("RQA (0x%08x)\n",pools,0);
pools            1043 drivers/atm/zatm.c 			for (i = 0; pools; i++) {
pools            1044 drivers/atm/zatm.c 				if (pools & 1) {
pools            1048 drivers/atm/zatm.c 				pools >>= 1;
pools            1052 drivers/atm/zatm.c 			unsigned long pools;
pools            1054 drivers/atm/zatm.c 			pools = zin(RQU);
pools            1056 drivers/atm/zatm.c 			    dev->number,pools);
pools            1058 drivers/atm/zatm.c 			for (i = 0; pools; i++) {
pools            1059 drivers/atm/zatm.c 				if (pools & 1) {
pools            1063 drivers/atm/zatm.c 				pools >>= 1;
pools            1268 drivers/atm/zatm.c 	int pools,vccs,rx;
pools            1282 drivers/atm/zatm.c 	pools = NR_POOLS;
pools            1283 drivers/atm/zatm.c 	if (NR_SHAPERS*SHAPER_SIZE > pools*POOL_SIZE)
pools            1284 drivers/atm/zatm.c 		pools = NR_SHAPERS*SHAPER_SIZE/POOL_SIZE;
pools            1285 drivers/atm/zatm.c 	vccs = (zatm_dev->mem-NR_SHAPERS*SHAPER_SIZE-pools*POOL_SIZE)/
pools            1297 drivers/atm/zatm.c 	curr += pools*POOL_SIZE/4;
pools            1304 drivers/atm/zatm.c 	    "%ld VCs\n",dev->number,NR_SHAPERS,pools,rx,
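
In zatm.c, pools is not a structure but a bitmask read back from the RQA/RQU status registers: bit i set means receive pool i needs service, and the interrupt handler walks the word one bit at a time (lines 1043-1048 and 1058-1063). A standalone sketch of that walk, with ex_refill_pool a hypothetical per-pool service routine:

static void ex_refill_pool(int pool)
{
	/* service receive pool 'pool' here (illustrative stub) */
}

/* Dispatch once per set bit, lowest pool first, the same loop
 * shape the RQA/RQU handlers above use. */
static void ex_service_pools(unsigned long pools)
{
	int i;

	for (i = 0; pools; i++) {
		if (pools & 1)
			ex_refill_pool(i);
		pools >>= 1;
	}
}

The loop exits as soon as the remaining mask is zero, so a sparse mask costs only as many iterations as its highest set bit.
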
pools             114 drivers/gpu/drm/ttm/ttm_page_alloc.c 		struct ttm_page_pool	pools[NUM_POOLS];
pools             243 drivers/gpu/drm/ttm/ttm_page_alloc.c 	return &_manager->pools[pool_index];
pools             399 drivers/gpu/drm/ttm/ttm_page_alloc.c 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
pools             422 drivers/gpu/drm/ttm/ttm_page_alloc.c 		pool = &_manager->pools[i];
pools            1022 drivers/gpu/drm/ttm/ttm_page_alloc.c 		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);
pools            1181 drivers/gpu/drm/ttm/ttm_page_alloc.c 		p = &_manager->pools[i];
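
TTM's non-DMA allocator keeps a fixed pools[NUM_POOLS] array in a global _manager, and its shrink path at line 399 rotates a starting offset so repeated shrinker calls spread the pressure rather than always draining pool 0 first. A sketch of the rotation, with ex_* names as stand-ins and ex_page_pool_free a hypothetical per-pool release helper:

#define EX_NUM_POOLS 4

struct ex_page_pool {
	unsigned long npages;
};

struct ex_pool_manager {
	struct ex_page_pool pools[EX_NUM_POOLS];
};

static void ex_page_pool_free(struct ex_page_pool *pool, unsigned nr)
{
	/* release up to nr pages from 'pool' (illustrative stub) */
}

static unsigned ex_pool_offset;	/* rotates across shrink calls */

/* Visit every pool once, starting one further along each call,
 * mirroring the (i + pool_offset) % NUM_POOLS indexing above. */
static void ex_shrink_all(struct ex_pool_manager *m, unsigned nr_free)
{
	unsigned i;

	ex_pool_offset = (ex_pool_offset + 1) % EX_NUM_POOLS;
	for (i = 0; i < EX_NUM_POOLS; i++) {
		struct ex_page_pool *pool =
			&m->pools[(i + ex_pool_offset) % EX_NUM_POOLS];

		ex_page_pool_free(pool, nr_free);
	}
}
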
pools              98 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct list_head pools; /* The 'struct device->dma_pools' link */
pools             149 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct list_head pools;
pools             165 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct list_head	pools;
pools             507 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	list_for_each_entry_reverse(p, &_manager->pools, pools) {
pools             514 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		list_del(&p->pools);
pools             519 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
pools             530 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		list_del(&pool->pools);
pools             584 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	INIT_LIST_HEAD(&sec_pool->pools);
pools             589 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	INIT_LIST_HEAD(&pool->pools);
pools             619 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	list_add(&sec_pool->pools, &_manager->pools);
pools             622 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	list_add(&pool->pools, &dev->dma_pools);
pools             655 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools)
pools            1102 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	if (list_empty(&_manager->pools))
pools            1110 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	list_for_each_entry(p, &_manager->pools, pools) {
pools            1142 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	list_for_each_entry(p, &_manager->pools, pools)
pools            1174 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	INIT_LIST_HEAD(&_manager->pools);
pools            1204 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
pools            1226 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	list_for_each_entry(p, &_manager->pools, pools) {
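
The DMA variant drops the fixed array for linked lists and overloads the name: pools is the list head in the manager (and dma_pools in struct device), and also the link member inside each entry, which is why the iterators above read list_for_each_entry(p, &_manager->pools, pools). A minimal sketch of that intrusive-list layout (ex_* names illustrative):

#include <linux/list.h>

struct ex_dma_manager {
	struct list_head pools;		/* head of all known pools */
};

struct ex_dma_pool {
	struct list_head pools;		/* this pool's link in the list */
	unsigned long npages;
};

static void ex_dma_pool_register(struct ex_dma_manager *m,
				 struct ex_dma_pool *p)
{
	INIT_LIST_HEAD(&p->pools);	/* mirrors lines 584/589 above */
	list_add(&p->pools, &m->pools);
}

static unsigned long ex_dma_total_pages(struct ex_dma_manager *m)
{
	struct ex_dma_pool *p;
	unsigned long total = 0;

	list_for_each_entry(p, &m->pools, pools)
		total += p->npages;
	return total;
}

Teardown is the matching list_del(&p->pools), as at lines 514 and 530.
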
pools             525 drivers/md/dm-thin.c 	struct list_head pools;
pools             531 drivers/md/dm-thin.c 	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
pools             542 drivers/md/dm-thin.c 	list_add(&pool->list, &dm_thin_pool_table.pools);
pools             557 drivers/md/dm-thin.c 	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
pools             573 drivers/md/dm-thin.c 	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
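
dm-thin keeps one global dm_thin_pool_table whose pools list registers every active pool; the two iterations at lines 557 and 573 are lookups matching a pool by its metadata or pool device before deciding whether an existing pool can be shared. A lookup sketch under that assumption (ex_* names illustrative; the real table is serialized by a mutex):

#include <linux/list.h>
#include <linux/types.h>

struct ex_thin_pool {
	struct list_head list;
	dev_t metadata_dev;
};

static LIST_HEAD(ex_thin_pools);	/* stand-in for dm_thin_pool_table.pools */

static struct ex_thin_pool *ex_find_thin_pool(dev_t metadata_dev)
{
	struct ex_thin_pool *tmp;

	list_for_each_entry(tmp, &ex_thin_pools, list)
		if (tmp->metadata_dev == metadata_dev)
			return tmp;
	return NULL;
}
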
pools            2991 drivers/md/dm.c 	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
pools            2996 drivers/md/dm.c 	if (!pools)
pools            3006 drivers/md/dm.c 		ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
pools            3009 drivers/md/dm.c 		if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
pools            3021 drivers/md/dm.c 	ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
pools            3025 drivers/md/dm.c 	if (integrity && bioset_integrity_create(&pools->bs, pool_size))
pools            3028 drivers/md/dm.c 	return pools;
pools            3031 drivers/md/dm.c 	dm_free_md_mempools(pools);
pools            3036 drivers/md/dm.c void dm_free_md_mempools(struct dm_md_mempools *pools)
pools            3038 drivers/md/dm.c 	if (!pools)
pools            3041 drivers/md/dm.c 	bioset_exit(&pools->bs);
pools            3042 drivers/md/dm.c 	bioset_exit(&pools->io_bs);
pools            3044 drivers/md/dm.c 	kfree(pools);
pools             208 drivers/md/dm.h void dm_free_md_mempools(struct dm_md_mempools *pools);
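
The dm.c hits show an alloc-all-or-free-all container: kzalloc the struct, bioset_init() each member, and funnel every failure through the single free routine, which tolerates NULL and relies on bioset_exit() being safe on a zeroed, never-initialized bio_set. A sketch of the same pairing, omitting the integrity setup shown at lines 3009/3025 (ex_* names illustrative):

#include <linux/bio.h>
#include <linux/slab.h>

struct ex_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};

static void ex_free_mempools(struct ex_mempools *pools)
{
	if (!pools)
		return;
	bioset_exit(&pools->bs);
	bioset_exit(&pools->io_bs);
	kfree(pools);
}

static struct ex_mempools *ex_alloc_mempools(unsigned int pool_size)
{
	struct ex_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);

	if (!pools)
		return NULL;
	if (bioset_init(&pools->io_bs, pool_size, 0, 0))
		goto out_free;
	if (bioset_init(&pools->bs, pool_size, 0, 0))
		goto out_free;
	return pools;

out_free:
	ex_free_mempools(pools);	/* safe on partially built pools */
	return NULL;
}
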
pools             349 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 	struct cxgbi_ppm_pool *pools;
pools             351 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 	unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3;
pools             368 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 	alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
pools             369 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 	pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool));
pools             371 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 	if (!pools)
pools             375 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 		struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu);
pools             385 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 	return pools;
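
libcxgb sizes one cxgbi_ppm_pool per CPU at runtime: the line-368 arithmetic appends a variable-length bitmap to the fixed header, __alloc_percpu() hands back one such object per possible CPU, and per_cpu_ptr() reaches each copy for initialization. A sketch of that per-cpu layout (ex_* names illustrative):

#include <linux/percpu.h>

struct ex_ppm_pool {
	unsigned int	next;		/* next slot to try on this CPU */
	unsigned long	bmap[];		/* trailing bitmap, sized at runtime */
};

static struct ex_ppm_pool __percpu *ex_alloc_cpu_pool(unsigned int bmap_words)
{
	size_t sz = sizeof(struct ex_ppm_pool) +
		    sizeof(unsigned long) * bmap_words;
	struct ex_ppm_pool __percpu *pools;
	int cpu;

	pools = __alloc_percpu(sz, __alignof__(struct ex_ppm_pool));
	if (!pools)
		return NULL;

	for_each_possible_cpu(cpu) {
		struct ex_ppm_pool *ppool = per_cpu_ptr(pools, cpu);

		ppool->next = 0;	/* each CPU starts at slot 0 */
	}
	return pools;
}
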
pools            3102 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
pools            3103 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	pools_params.pools[0].backup_pool = 0;
pools            3104 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	pools_params.pools[0].buffer_size = priv->rx_buf_size;
pools             174 drivers/net/ethernet/freescale/dpaa2/dpni.c 		cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
pools             176 drivers/net/ethernet/freescale/dpaa2/dpni.c 			cpu_to_le16(cfg->pools[i].buffer_size);
pools             178 drivers/net/ethernet/freescale/dpaa2/dpni.c 			DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i);
pools             102 drivers/net/ethernet/freescale/dpaa2/dpni.h 	} pools[DPNI_MAX_DPBP];
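
The dpaa2 hits are configuration plumbing: the driver fills a host-order pools[] array (dpaa2-eth.c:3102-3104), and dpni_set_pools() then serializes each entry into little-endian firmware command fields (dpni.c:174-178). A sketch of that fill-then-serialize split, with ex_* layouts standing in for the dpni structs:

#include <linux/types.h>
#include <asm/byteorder.h>

#define EX_MAX_POOLS 8		/* stand-in for DPNI_MAX_DPBP */

struct ex_pools_cfg {
	u8 num_pools;
	struct {
		int dpbp_id;
		u16 buffer_size;
	} pools[EX_MAX_POOLS];
};

struct ex_cmd_params {
	__le32 dpbp_id[EX_MAX_POOLS];
	__le16 buffer_size[EX_MAX_POOLS];
};

/* Convert each host-order entry to the wire format expected by
 * the management firmware, as dpni.c:174-176 does. */
static void ex_fill_cmd(struct ex_cmd_params *cmd,
			const struct ex_pools_cfg *cfg)
{
	int i;

	for (i = 0; i < cfg->num_pools; i++) {
		cmd->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
		cmd->buffer_size[i] =
			cpu_to_le16(cfg->pools[i].buffer_size);
	}
}
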
pools            1758 drivers/soc/fsl/qbman/qman.c void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
pools            1763 drivers/soc/fsl/qbman/qman.c 	pools &= p->config->pools;
pools            1764 drivers/soc/fsl/qbman/qman.c 	p->sdqcr |= pools;
pools             297 drivers/soc/fsl/qbman/qman_portal.c 	pcfg->pools = qm_get_pools_sdqcr();
pools             177 drivers/soc/fsl/qbman/qman_priv.h 	u32 pools;
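
In qman, pools is again a bitmask, this time of dequeue channel pools: the portal records which pools it owns at probe time (qman_portal.c:297), and qman_p_static_dequeue_add() clamps the caller's request to that set before OR-ing it into the SDQCR shadow (qman.c:1763-1764). A sketch of the capability mask (ex_* names illustrative):

#include <linux/types.h>

struct ex_portal {
	u32 owned_pools;	/* captured from config at probe time */
	u32 sdqcr;		/* shadow of the dequeue command register */
};

static void ex_static_dequeue_add(struct ex_portal *p, u32 pools)
{
	pools &= p->owned_pools;	/* never enable an unowned pool */
	p->sdqcr |= pools;
}
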
pools             203 drivers/soc/ti/knav_qmss.h 	struct list_head	pools;
pools             304 drivers/soc/ti/knav_qmss.h 	struct list_head			pools;
pools             363 drivers/soc/ti/knav_qmss.h 	list_for_each_entry(pool, &kdev->pools, list)
pools             827 drivers/soc/ti/knav_qmss_queue.c 	node = &region->pools;
pools             828 drivers/soc/ti/knav_qmss_queue.c 	list_for_each_entry(pi, &region->pools, region_inst) {
pools             842 drivers/soc/ti/knav_qmss_queue.c 		list_add_tail(&pool->list, &kdev->pools);
pools            1043 drivers/soc/ti/knav_qmss_queue.c 	list_add(&pool->region_inst, &region->pools);
pools            1126 drivers/soc/ti/knav_qmss_queue.c 		INIT_LIST_HEAD(&region->pools);
pools            1365 drivers/soc/ti/knav_qmss_queue.c 		list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
pools            1787 drivers/soc/ti/knav_qmss_queue.c 	INIT_LIST_HEAD(&kdev->pools);
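
knav_qmss threads each pool onto two lists at once: pool->list joins the device-wide kdev->pools (the real code at lines 827-842 keeps that list sorted), while pool->region_inst joins the owning region's region->pools. Two list_head members, two memberships (ex_* names illustrative; the sorted insert is omitted):

#include <linux/list.h>

struct ex_region {
	struct list_head pools;		/* pools carved from this region */
};

struct ex_qdev {
	struct list_head pools;		/* all pools on the device */
};

struct ex_knav_pool {
	struct list_head list;		/* link on ex_qdev.pools */
	struct list_head region_inst;	/* link on ex_region.pools */
};

static void ex_pool_attach(struct ex_qdev *kdev,
			   struct ex_region *region,
			   struct ex_knav_pool *pool)
{
	list_add_tail(&pool->list, &kdev->pools);
	list_add(&pool->region_inst, &region->pools);
}
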
pools              44 drivers/staging/android/ion/ion_system_heap.c 	struct ion_page_pool *pools[NUM_ORDERS];
pools              51 drivers/staging/android/ion/ion_system_heap.c 	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
pools              68 drivers/staging/android/ion/ion_system_heap.c 	pool = heap->pools[order_to_index(order)];
pools             186 drivers/staging/android/ion/ion_system_heap.c 		pool = sys_heap->pools[i];
pools             215 drivers/staging/android/ion/ion_system_heap.c static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
pools             220 drivers/staging/android/ion/ion_system_heap.c 		if (pools[i])
pools             221 drivers/staging/android/ion/ion_system_heap.c 			ion_page_pool_destroy(pools[i]);
pools             224 drivers/staging/android/ion/ion_system_heap.c static int ion_system_heap_create_pools(struct ion_page_pool **pools)
pools             238 drivers/staging/android/ion/ion_system_heap.c 		pools[i] = pool;
pools             244 drivers/staging/android/ion/ion_system_heap.c 	ion_system_heap_destroy_pools(pools);
pools             259 drivers/staging/android/ion/ion_system_heap.c 	if (ion_system_heap_create_pools(heap->pools))
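
ion's system heap keeps one page pool per supported order and unwinds partial failure through the same destroy path: the create loop bails to a cleanup that frees only the entries that exist, which works because the array arrives zeroed (it lives inside a kzalloc'd heap). A sketch with trivial ex_* stand-ins for ion_page_pool_create/destroy:

#include <linux/slab.h>
#include <linux/errno.h>

#define EX_NUM_ORDERS 3

struct ex_ion_pool {
	unsigned int order;
};

static struct ex_ion_pool *ex_ion_pool_create(unsigned int order)
{
	struct ex_ion_pool *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (p)
		p->order = order;
	return p;
}

static void ex_ion_pool_destroy(struct ex_ion_pool *p)
{
	kfree(p);
}

/* Free whatever exists; safe after partial creation because the
 * caller's array starts out zeroed. */
static void ex_destroy_pools(struct ex_ion_pool **pools)
{
	int i;

	for (i = 0; i < EX_NUM_ORDERS; i++)
		if (pools[i])
			ex_ion_pool_destroy(pools[i]);
}

static int ex_create_pools(struct ex_ion_pool **pools)
{
	int i;

	for (i = 0; i < EX_NUM_ORDERS; i++) {
		pools[i] = ex_ion_pool_create(i);
		if (!pools[i]) {
			ex_destroy_pools(pools);
			return -ENOMEM;
		}
	}
	return 0;
}
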
pools              50 mm/dmapool.c   	struct list_head pools;
pools              81 mm/dmapool.c   	list_for_each_entry(pool, &dev->dma_pools, pools) {
pools             105 mm/dmapool.c   static DEVICE_ATTR(pools, 0444, show_pools, NULL);
pools             171 mm/dmapool.c   	INIT_LIST_HEAD(&retval->pools);
pools             185 mm/dmapool.c   	list_add(&retval->pools, &dev->dma_pools);
pools             193 mm/dmapool.c   			list_del(&retval->pools);
pools             278 mm/dmapool.c   	list_del(&pool->pools);
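
Finally, mm/dmapool.c registers each DMA pool on its owning device's dma_pools list: dma_pool_create() links the pool in (line 185) and unlinks it again on setup failure (line 193) or in dma_pool_destroy() (line 278), while readers such as the 'pools' sysfs attribute (line 105) just walk the list. A registry sketch of that lifecycle (ex_* names illustrative; the real list head is dev->dma_pools):

#include <linux/list.h>

struct ex_owner {
	struct list_head dma_pools;	/* all pools created for this owner */
};

struct ex_dmapool {
	struct list_head pools;		/* link on ex_owner.dma_pools */
	size_t size;
};

static void ex_pool_register(struct ex_owner *dev, struct ex_dmapool *p)
{
	list_add(&p->pools, &dev->dma_pools);
}

static void ex_pool_unregister(struct ex_dmapool *p)
{
	list_del(&p->pools);
}
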