pool               54 arch/arm/common/dmabounce.c 	struct dmabounce_pool *pool;
pool               61 arch/arm/common/dmabounce.c 	struct dma_pool	*pool;
pool              109 arch/arm/common/dmabounce.c 	struct dmabounce_pool *pool;
pool              117 arch/arm/common/dmabounce.c 		pool = &device_info->small;
pool              119 arch/arm/common/dmabounce.c 		pool = &device_info->large;
pool              121 arch/arm/common/dmabounce.c 		pool = NULL;
pool              133 arch/arm/common/dmabounce.c 	buf->pool = pool;
pool              135 arch/arm/common/dmabounce.c 	if (pool) {
pool              136 arch/arm/common/dmabounce.c 		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
pool              152 arch/arm/common/dmabounce.c 	if (pool)
pool              153 arch/arm/common/dmabounce.c 		pool->allocs++;
pool              197 arch/arm/common/dmabounce.c 	if (buf->pool)
pool              198 arch/arm/common/dmabounce.c 		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
pool              469 arch/arm/common/dmabounce.c static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
pool              472 arch/arm/common/dmabounce.c 	pool->size = size;
pool              473 arch/arm/common/dmabounce.c 	DO_STATS(pool->allocs = 0);
pool              474 arch/arm/common/dmabounce.c 	pool->pool = dma_pool_create(name, dev, size,
pool              478 arch/arm/common/dmabounce.c 	return pool->pool ? 0 : -ENOMEM;
pool              536 arch/arm/common/dmabounce.c 	dma_pool_destroy(device_info->small.pool);
pool              563 arch/arm/common/dmabounce.c 	if (device_info->small.pool)
pool              564 arch/arm/common/dmabounce.c 		dma_pool_destroy(device_info->small.pool);
pool              565 arch/arm/common/dmabounce.c 	if (device_info->large.pool)
pool              566 arch/arm/common/dmabounce.c 		dma_pool_destroy(device_info->large.pool);
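
The dmabounce.c lines above follow the standard struct dma_pool lifecycle (dma_pool_create, dma_pool_alloc, dma_pool_free, dma_pool_destroy). A minimal sketch of that pattern, not taken from dmabounce.c itself; names such as my_dev_priv are illustrative:

#include <linux/device.h>
#include <linux/dmapool.h>

struct my_dev_priv {				/* hypothetical driver state */
	struct dma_pool *small;
	void *buf;
	dma_addr_t buf_dma;
};

static int my_pools_init(struct device *dev, struct my_dev_priv *p)
{
	/* One pool of fixed-size DMA-coherent blocks; 0 for alignment and
	 * boundary means "no special requirement". */
	p->small = dma_pool_create("my-small", dev, 512, 0, 0);
	if (!p->small)
		return -ENOMEM;

	/* Take one block; buf_dma receives the bus address for the device. */
	p->buf = dma_pool_alloc(p->small, GFP_ATOMIC, &p->buf_dma);
	if (!p->buf) {
		dma_pool_destroy(p->small);
		return -ENOMEM;
	}
	return 0;
}

static void my_pools_exit(struct my_dev_priv *p)
{
	dma_pool_free(p->small, p->buf, p->buf_dma);	/* return the block */
	dma_pool_destroy(p->small);			/* then tear down the pool */
}
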
pool               32 arch/ia64/kernel/uncached.c 	struct gen_pool *pool;
pool              150 arch/ia64/kernel/uncached.c 	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
pool              196 arch/ia64/kernel/uncached.c 		if (uc_pool->pool == NULL)
pool              199 arch/ia64/kernel/uncached.c 			uc_addr = gen_pool_alloc(uc_pool->pool,
pool              223 arch/ia64/kernel/uncached.c 	struct gen_pool *pool = uncached_pools[nid].pool;
pool              225 arch/ia64/kernel/uncached.c 	if (unlikely(pool == NULL))
pool              231 arch/ia64/kernel/uncached.c 	gen_pool_free(pool, uc_addr, n_pages * PAGE_SIZE);
pool              249 arch/ia64/kernel/uncached.c 	struct gen_pool *pool = uncached_pools[nid].pool;
pool              254 arch/ia64/kernel/uncached.c 	if (pool != NULL) {
pool              256 arch/ia64/kernel/uncached.c 		(void) gen_pool_add(pool, uc_start, size, nid);
pool              267 arch/ia64/kernel/uncached.c 		uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid);
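
The ia64 uncached allocator above is built on the generic genalloc API. A minimal sketch of that API's lifecycle (gen_pool_create, gen_pool_add, gen_pool_alloc, gen_pool_free), with illustrative sizes; this is not the uncached.c code itself:

#include <linux/genalloc.h>

static struct gen_pool *uc_example_pool;	/* hypothetical */

static int uc_example_init(unsigned long chunk_start, size_t chunk_size, int nid)
{
	unsigned long addr;

	/* Smallest allocation unit is one page (order = PAGE_SHIFT). */
	uc_example_pool = gen_pool_create(PAGE_SHIFT, nid);
	if (!uc_example_pool)
		return -ENOMEM;

	/* Hand a region of address space to the pool to manage. */
	if (gen_pool_add(uc_example_pool, chunk_start, chunk_size, nid))
		return -ENOMEM;

	/* Carve out, then return, a page-sized piece. */
	addr = gen_pool_alloc(uc_example_pool, PAGE_SIZE);
	if (!addr)
		return -ENOMEM;
	gen_pool_free(uc_example_pool, addr, PAGE_SIZE);
	return 0;
}
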
pool              851 arch/mips/cavium-octeon/executive/cvmx-helper.c 		g_buffer.s.pool = CVMX_FPA_WQE_POOL;
pool              863 arch/mips/cavium-octeon/executive/cvmx-helper.c 		pkt_buffer.s.pool = CVMX_FPA_PACKET_POOL;
pool              195 arch/mips/cavium-octeon/executive/cvmx-pko.c 	config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
pool               76 arch/mips/include/asm/octeon/cvmx-fpa-defs.h 		uint64_t pool:5;
pool               80 arch/mips/include/asm/octeon/cvmx-fpa-defs.h 		uint64_t pool:5;
pool              106 arch/mips/include/asm/octeon/cvmx-fpa.h static inline const char *cvmx_fpa_get_name(uint64_t pool)
pool              108 arch/mips/include/asm/octeon/cvmx-fpa.h 	return cvmx_fpa_pool_info[pool].name;
pool              117 arch/mips/include/asm/octeon/cvmx-fpa.h static inline void *cvmx_fpa_get_base(uint64_t pool)
pool              119 arch/mips/include/asm/octeon/cvmx-fpa.h 	return cvmx_fpa_pool_info[pool].base;
pool              131 arch/mips/include/asm/octeon/cvmx-fpa.h static inline int cvmx_fpa_is_member(uint64_t pool, void *ptr)
pool              133 arch/mips/include/asm/octeon/cvmx-fpa.h 	return ((ptr >= cvmx_fpa_pool_info[pool].base) &&
pool              135 arch/mips/include/asm/octeon/cvmx-fpa.h 		 ((char *)(cvmx_fpa_pool_info[pool].base)) +
pool              136 arch/mips/include/asm/octeon/cvmx-fpa.h 		 cvmx_fpa_pool_info[pool].size *
pool              137 arch/mips/include/asm/octeon/cvmx-fpa.h 		 cvmx_fpa_pool_info[pool].starting_element_count));
pool              185 arch/mips/include/asm/octeon/cvmx-fpa.h static inline void *cvmx_fpa_alloc(uint64_t pool)
pool              188 arch/mips/include/asm/octeon/cvmx-fpa.h 	    cvmx_read_csr(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool)));
pool              202 arch/mips/include/asm/octeon/cvmx-fpa.h static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
pool              212 arch/mips/include/asm/octeon/cvmx-fpa.h 	data.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool);
pool              226 arch/mips/include/asm/octeon/cvmx-fpa.h static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool,
pool              232 arch/mips/include/asm/octeon/cvmx-fpa.h 	    CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
pool              248 arch/mips/include/asm/octeon/cvmx-fpa.h static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
pool              254 arch/mips/include/asm/octeon/cvmx-fpa.h 	    CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
pool              283 arch/mips/include/asm/octeon/cvmx-fpa.h extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
pool              297 arch/mips/include/asm/octeon/cvmx-fpa.h extern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);
pool              306 arch/mips/include/asm/octeon/cvmx-fpa.h uint64_t cvmx_fpa_get_block_size(uint64_t pool);
pool              168 arch/mips/include/asm/octeon/cvmx-helper-util.h 			      buffer_ptr.s.pool, 0);
pool               54 arch/mips/include/asm/octeon/cvmx-packet.h 		uint64_t pool:3;
pool               62 arch/mips/include/asm/octeon/cvmx-packet.h 	        uint64_t pool:3;
pool              138 arch/mips/include/asm/octeon/cvmx-pko-defs.h 		uint64_t pool:3;
pool              144 arch/mips/include/asm/octeon/cvmx-pko-defs.h 		uint64_t pool:3;
pool              194 arch/mips/include/asm/octeon/cvmx-pko-defs.h 		uint64_t pool:3;
pool              200 arch/mips/include/asm/octeon/cvmx-pko-defs.h 		uint64_t pool:3;
pool              209 arch/mips/include/asm/octeon/cvmx-pko-defs.h 		uint64_t pool:3;
pool              215 arch/mips/include/asm/octeon/cvmx-pko-defs.h 		uint64_t pool:3;
pool              307 arch/mips/include/asm/octeon/cvmx-pko-defs.h 		uint64_t pool:3;
pool              313 arch/mips/include/asm/octeon/cvmx-pko-defs.h 		uint64_t pool:3;
pool              360 arch/mips/include/asm/octeon/cvmx-pko-defs.h 		uint64_t pool:3;
pool              366 arch/mips/include/asm/octeon/cvmx-pko-defs.h 		uint64_t pool:3;
pool              386 arch/mips/include/asm/octeon/cvmx-pko-defs.h 		uint64_t pool:3;
pool              392 arch/mips/include/asm/octeon/cvmx-pko-defs.h 		uint64_t pool:3;
pool             1459 arch/mips/include/asm/octeon/cvmx-pko-defs.h 		uint64_t pool:3;
pool             1465 arch/mips/include/asm/octeon/cvmx-pko-defs.h 		uint64_t pool:3;
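
The cvmx-fpa.h declarations above define the Octeon free pool allocator interface used by the cvmx-helper and pko code. A rough usage sketch based only on the signatures visible in this listing; the trailing 0 to cvmx_fpa_free mirrors the cvmx-helper-util.h call shown above, and the details should be treated as illustrative:

#include <linux/kernel.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-fpa.h>

static int fpa_example(uint64_t pool)
{
	void *buf;

	pr_info("pool %llu: \"%s\", block size %llu\n",
		(unsigned long long)pool,
		cvmx_fpa_get_name(pool),
		(unsigned long long)cvmx_fpa_get_block_size(pool));

	buf = cvmx_fpa_alloc(pool);	/* pop one hardware-managed buffer */
	if (!buf)
		return -ENOMEM;

	cvmx_fpa_free(buf, pool, 0);	/* push it back, as in the helper code above */
	return 0;
}
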
pool              178 arch/powerpc/kernel/iommu.c 	struct iommu_pool *pool;
pool              201 arch/powerpc/kernel/iommu.c 		pool = &(tbl->large_pool);
pool              203 arch/powerpc/kernel/iommu.c 		pool = &(tbl->pools[pool_nr]);
pool              205 arch/powerpc/kernel/iommu.c 	spin_lock_irqsave(&(pool->lock), flags);
pool              209 arch/powerpc/kernel/iommu.c 	    (*handle >= pool->start) && (*handle < pool->end))
pool              212 arch/powerpc/kernel/iommu.c 		start = pool->hint;
pool              214 arch/powerpc/kernel/iommu.c 	limit = pool->end;
pool              221 arch/powerpc/kernel/iommu.c 		start = pool->start;
pool              230 arch/powerpc/kernel/iommu.c 			spin_unlock(&(pool->lock));
pool              231 arch/powerpc/kernel/iommu.c 			pool = &(tbl->pools[0]);
pool              232 arch/powerpc/kernel/iommu.c 			spin_lock(&(pool->lock));
pool              233 arch/powerpc/kernel/iommu.c 			start = pool->start;
pool              251 arch/powerpc/kernel/iommu.c 			pool->hint = pool->start;
pool              257 arch/powerpc/kernel/iommu.c 			spin_unlock(&(pool->lock));
pool              259 arch/powerpc/kernel/iommu.c 			pool = &tbl->pools[pool_nr];
pool              260 arch/powerpc/kernel/iommu.c 			spin_lock(&(pool->lock));
pool              261 arch/powerpc/kernel/iommu.c 			pool->hint = pool->start;
pool              267 arch/powerpc/kernel/iommu.c 			spin_unlock_irqrestore(&(pool->lock), flags);
pool              277 arch/powerpc/kernel/iommu.c 		pool->hint = end;
pool              280 arch/powerpc/kernel/iommu.c 		pool->hint = (end + tbl->it_blocksize - 1) &
pool              288 arch/powerpc/kernel/iommu.c 	spin_unlock_irqrestore(&(pool->lock), flags);
pool              388 arch/powerpc/kernel/iommu.c 	struct iommu_pool *pool;
pool              393 arch/powerpc/kernel/iommu.c 	pool = get_pool(tbl, free_entry);
pool              400 arch/powerpc/kernel/iommu.c 	spin_lock_irqsave(&(pool->lock), flags);
pool              402 arch/powerpc/kernel/iommu.c 	spin_unlock_irqrestore(&(pool->lock), flags);
pool              106 arch/sparc/kernel/iommu-common.c 	struct iommu_pool *pool;
pool              126 arch/sparc/kernel/iommu-common.c 		pool = &(iommu->large_pool);
pool              131 arch/sparc/kernel/iommu-common.c 		pool = &(iommu->pools[pool_nr]);
pool              133 arch/sparc/kernel/iommu-common.c 	spin_lock_irqsave(&pool->lock, flags);
pool              137 arch/sparc/kernel/iommu-common.c 	    (*handle >= pool->start) && (*handle < pool->end))
pool              140 arch/sparc/kernel/iommu-common.c 		start = pool->hint;
pool              142 arch/sparc/kernel/iommu-common.c 	limit = pool->end;
pool              151 arch/sparc/kernel/iommu-common.c 		start = pool->start;
pool              160 arch/sparc/kernel/iommu-common.c 			spin_unlock(&(pool->lock));
pool              161 arch/sparc/kernel/iommu-common.c 			pool = &(iommu->pools[0]);
pool              162 arch/sparc/kernel/iommu-common.c 			spin_lock(&(pool->lock));
pool              163 arch/sparc/kernel/iommu-common.c 			start = pool->start;
pool              190 arch/sparc/kernel/iommu-common.c 			pool->hint = pool->start;
pool              195 arch/sparc/kernel/iommu-common.c 			spin_unlock(&(pool->lock));
pool              197 arch/sparc/kernel/iommu-common.c 			pool = &(iommu->pools[pool_nr]);
pool              198 arch/sparc/kernel/iommu-common.c 			spin_lock(&(pool->lock));
pool              199 arch/sparc/kernel/iommu-common.c 			pool->hint = pool->start;
pool              210 arch/sparc/kernel/iommu-common.c 	    (n < pool->hint || need_flush(iommu))) {
pool              216 arch/sparc/kernel/iommu-common.c 	pool->hint = end;
pool              222 arch/sparc/kernel/iommu-common.c 	spin_unlock_irqrestore(&(pool->lock), flags);
pool              253 arch/sparc/kernel/iommu-common.c 	struct iommu_pool *pool;
pool              259 arch/sparc/kernel/iommu-common.c 	pool = get_pool(iommu, entry);
pool              261 arch/sparc/kernel/iommu-common.c 	spin_lock_irqsave(&(pool->lock), flags);
pool              263 arch/sparc/kernel/iommu-common.c 	spin_unlock_irqrestore(&(pool->lock), flags);
pool              713 arch/sparc/kernel/pci_sun4v.c 	struct iommu_pool *pool;
pool              719 arch/sparc/kernel/pci_sun4v.c 		pool = &(iommu->pools[pool_nr]);
pool              720 arch/sparc/kernel/pci_sun4v.c 		for (i = pool->start; i <= pool->end; i++) {
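
The powerpc and sparc IOMMU code above shares one design: the DMA window is split into several pools, each with its own spinlock and allocation hint, plus a separate large pool, so concurrent mappings rarely contend on a single lock. A much-simplified, hypothetical sketch of that pattern (structure names and sizes are illustrative, not the kernel's):

#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define EX_NR_POOLS	4

struct ex_pool {			/* hypothetical, modeled on struct iommu_pool */
	unsigned long start, end, hint;
	spinlock_t lock;
};

struct ex_table {
	struct ex_pool pools[EX_NR_POOLS];
	struct ex_pool large_pool;
};

/* Pick a pool: large requests go to the dedicated large pool, everything
 * else is spread across the small pools (here simply by CPU number). */
static struct ex_pool *ex_pick_pool(struct ex_table *tbl, bool largealloc)
{
	if (largealloc)
		return &tbl->large_pool;
	return &tbl->pools[raw_smp_processor_id() % EX_NR_POOLS];
}

/* Allocation then proceeds under pool->lock, searching from pool->hint,
 * wrapping back to pool->start once, and falling back to another pool,
 * which is the flow visible in the iommu.c / iommu-common.c lines above. */
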
pool              152 block/bio.c    void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
pool              161 block/bio.c    		mempool_free(bv, pool);
pool              170 block/bio.c    			   mempool_t *pool)
pool              206 block/bio.c    		bvl = mempool_alloc(pool, gfp_mask);
pool             1945 block/bio.c    int biovec_init_pool(mempool_t *pool, int pool_entries)
pool             1949 block/bio.c    	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
pool              162 block/bounce.c static void bounce_end_io(struct bio *bio, mempool_t *pool)
pool              176 block/bounce.c 			mempool_free(bvec->bv_page, pool);
pool              197 block/bounce.c static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
pool              204 block/bounce.c 	bounce_end_io(bio, pool);
pool              287 block/bounce.c 			       mempool_t *pool)
pool              327 block/bounce.c 		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
pool              346 block/bounce.c 	if (pool == &page_pool) {
pool              362 block/bounce.c 	mempool_t *pool;
pool              378 block/bounce.c 		pool = &page_pool;
pool              381 block/bounce.c 		pool = &isa_page_pool;
pool              387 block/bounce.c 	__blk_queue_bounce(q, bio_orig, pool);
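
block/bio.c and block/bounce.c above use mempool_t, which keeps a minimum number of pre-reserved elements so allocations can make forward progress under memory pressure. A minimal sketch of the slab-backed variant seen in biovec_init_pool above; the cache name and sizes are illustrative:

#include <linux/mempool.h>
#include <linux/slab.h>

static struct kmem_cache *ex_cache;
static mempool_t ex_pool;

static int ex_pool_setup(void)
{
	ex_cache = kmem_cache_create("ex-objs", 256, 0, 0, NULL);
	if (!ex_cache)
		return -ENOMEM;

	/* Keep at least 2 objects in reserve for when the slab allocator fails. */
	return mempool_init_slab_pool(&ex_pool, 2, ex_cache);
}

static void ex_pool_use(void)
{
	void *obj = mempool_alloc(&ex_pool, GFP_NOIO);	/* may dip into the reserve */

	/* ... use obj ... */

	mempool_free(obj, &ex_pool);			/* refills the reserve first */
}
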
pool              672 drivers/atm/ambassador.c static int rx_give (amb_dev * dev, rx_in * rx, unsigned char pool) {
pool              673 drivers/atm/ambassador.c   amb_rxq * rxq = &dev->rxq[pool];
pool              676 drivers/atm/ambassador.c   PRINTD (DBG_FLOW|DBG_RX, "rx_give %p[%hu]", dev, pool);
pool              687 drivers/atm/ambassador.c     wr_mem (dev, offsetof(amb_mem, mb.adapter.rx_address[pool]), virt_to_bus (rxq->in.ptr));
pool              697 drivers/atm/ambassador.c static int rx_take (amb_dev * dev, unsigned char pool) {
pool              698 drivers/atm/ambassador.c   amb_rxq * rxq = &dev->rxq[pool];
pool              701 drivers/atm/ambassador.c   PRINTD (DBG_FLOW|DBG_RX, "rx_take %p[%hu]", dev, pool);
pool              730 drivers/atm/ambassador.c static void drain_rx_pool (amb_dev * dev, unsigned char pool) {
pool              731 drivers/atm/ambassador.c   amb_rxq * rxq = &dev->rxq[pool];
pool              733 drivers/atm/ambassador.c   PRINTD (DBG_FLOW|DBG_POOL, "drain_rx_pool %p %hu", dev, pool);
pool              744 drivers/atm/ambassador.c     cmd.args.flush.flags = cpu_to_be32 (pool << SRB_POOL_SHIFT);
pool              749 drivers/atm/ambassador.c       if (rx_take (dev, pool))
pool              757 drivers/atm/ambassador.c   unsigned char pool;
pool              761 drivers/atm/ambassador.c   for (pool = 0; pool < NUM_RX_POOLS; ++pool)
pool              762 drivers/atm/ambassador.c     drain_rx_pool (dev, pool);
pool              765 drivers/atm/ambassador.c static void fill_rx_pool (amb_dev * dev, unsigned char pool,
pool              771 drivers/atm/ambassador.c   PRINTD (DBG_FLOW|DBG_POOL, "fill_rx_pool %p %hu %x", dev, pool, priority);
pool              776 drivers/atm/ambassador.c   rxq = &dev->rxq[pool];
pool              781 drivers/atm/ambassador.c       PRINTD (DBG_SKB|DBG_POOL, "failed to allocate skb for RX pool %hu", pool);
pool              793 drivers/atm/ambassador.c     if (rx_give (dev, &rx, pool))
pool              803 drivers/atm/ambassador.c   unsigned char pool;
pool              807 drivers/atm/ambassador.c   for (pool = 0; pool < NUM_RX_POOLS; ++pool)
pool              808 drivers/atm/ambassador.c     fill_rx_pool (dev, pool, GFP_ATOMIC);
pool              852 drivers/atm/ambassador.c     unsigned char pool;
pool              853 drivers/atm/ambassador.c     for (pool = 0; pool < NUM_RX_POOLS; ++pool)
pool              854 drivers/atm/ambassador.c       while (!rx_take (dev, pool))
pool             1011 drivers/atm/ambassador.c   unsigned char pool = -1; // hush gcc
pool             1091 drivers/atm/ambassador.c     for (pool = 0; pool < NUM_RX_POOLS; ++pool)
pool             1092 drivers/atm/ambassador.c       if ((unsigned int) rxtp->max_sdu <= dev->rxq[pool].buffer_size) {
pool             1094 drivers/atm/ambassador.c 		pool, rxtp->max_sdu, dev->rxq[pool].buffer_size);
pool             1097 drivers/atm/ambassador.c     if (pool == NUM_RX_POOLS) {
pool             1154 drivers/atm/ambassador.c 	( (AMB_VCC(dev->rxer[vci])->rx_info.pool << SRB_POOL_SHIFT)
pool             1174 drivers/atm/ambassador.c     vcc->rx_info.pool = pool;
pool             1178 drivers/atm/ambassador.c     if (!dev->rxq[pool].buffers_wanted)
pool             1179 drivers/atm/ambassador.c       dev->rxq[pool].buffers_wanted = rx_lats;
pool             1180 drivers/atm/ambassador.c     dev->rxq[pool].buffers_wanted += 1;
pool             1181 drivers/atm/ambassador.c     fill_rx_pool (dev, pool, GFP_KERNEL);
pool             1189 drivers/atm/ambassador.c 	( (pool << SRB_POOL_SHIFT)
pool             1195 drivers/atm/ambassador.c       cmd.args.open.flags = cpu_to_be32 (pool << SRB_POOL_SHIFT);
pool             1250 drivers/atm/ambassador.c     unsigned char pool = vcc->rx_info.pool;
pool             1274 drivers/atm/ambassador.c     dev->rxq[pool].buffers_wanted -= 1;
pool             1275 drivers/atm/ambassador.c     if (dev->rxq[pool].buffers_wanted == rx_lats) {
pool             1276 drivers/atm/ambassador.c       dev->rxq[pool].buffers_wanted = 0;
pool             1277 drivers/atm/ambassador.c       drain_rx_pool (dev, pool);
pool             1376 drivers/atm/ambassador.c   unsigned char pool = vcc->rx_info.pool;
pool             1394 drivers/atm/ambassador.c   if (!rx_give (dev, &rx, pool)) {
pool             1396 drivers/atm/ambassador.c     PRINTD (DBG_SKB|DBG_POOL, "recycled skb for pool %hu", pool);
pool             1412 drivers/atm/ambassador.c   unsigned char pool;
pool             1442 drivers/atm/ambassador.c     for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
pool             1443 drivers/atm/ambassador.c       amb_rxq * r = &dev->rxq[pool];
pool             1453 drivers/atm/ambassador.c     for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
pool             1454 drivers/atm/ambassador.c       amb_rxq * r = &dev->rxq[pool];
pool             1498 drivers/atm/ambassador.c   unsigned char pool;
pool             1509 drivers/atm/ambassador.c   for (pool = 0; pool < NUM_RX_POOLS; ++pool)
pool             1510 drivers/atm/ambassador.c     total += rxs[pool] * (sizeof(rx_in) + sizeof(rx_out));
pool             1572 drivers/atm/ambassador.c   for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
pool             1575 drivers/atm/ambassador.c     amb_rxq * rxq = &dev->rxq[pool];
pool             1577 drivers/atm/ambassador.c     rxq->buffer_size = rx_buffer_sizes[pool];
pool             1581 drivers/atm/ambassador.c     rxq->low = rxs[pool] - 1;
pool             1583 drivers/atm/ambassador.c     rxq->maximum = rxs[pool] - 1;
pool             1587 drivers/atm/ambassador.c     rxq->in.limit = in + rxs[pool];
pool             1594 drivers/atm/ambassador.c     rxq->out.limit = out + rxs[pool];
pool             1974 drivers/atm/ambassador.c   unsigned char pool;
pool             1986 drivers/atm/ambassador.c   for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
pool             1988 drivers/atm/ambassador.c     a.rec_struct[pool].buffer_start = bus_addr (dev->rxq[pool].in.start);
pool             1989 drivers/atm/ambassador.c     a.rec_struct[pool].buffer_end   = bus_addr (dev->rxq[pool].in.limit);
pool             1990 drivers/atm/ambassador.c     a.rec_struct[pool].rx_start     = bus_addr (dev->rxq[pool].out.start);
pool             1991 drivers/atm/ambassador.c     a.rec_struct[pool].rx_end       = bus_addr (dev->rxq[pool].out.limit);
pool             1992 drivers/atm/ambassador.c     a.rec_struct[pool].buffer_size = cpu_to_be32 (dev->rxq[pool].buffer_size);
pool             2126 drivers/atm/ambassador.c       unsigned char pool;
pool             2154 drivers/atm/ambassador.c       for (pool = 0; pool < NUM_RX_POOLS; ++pool)
pool             2155 drivers/atm/ambassador.c 	spin_lock_init (&dev->rxq[pool].lock);
pool             2299 drivers/atm/ambassador.c   unsigned char pool;
pool             2317 drivers/atm/ambassador.c   for (pool = 0; pool < NUM_RX_POOLS; ++pool)
pool             2318 drivers/atm/ambassador.c     if (rxs[pool] < MIN_QUEUE_SIZE)
pool             2320 drivers/atm/ambassador.c 	      pool, rxs[pool] = MIN_QUEUE_SIZE);
pool             2324 drivers/atm/ambassador.c   for (pool = 0; pool < NUM_RX_POOLS; ++pool)
pool             2325 drivers/atm/ambassador.c     if (rxs_bs[pool] <= max_rx_size)
pool             2327 drivers/atm/ambassador.c 	      pool, rxs_bs[pool]);
pool             2329 drivers/atm/ambassador.c       max_rx_size = rxs_bs[pool];
pool              602 drivers/atm/ambassador.h   unsigned char    pool;
pool              584 drivers/atm/idt77252.c 	struct sb_pool *pool = &card->sbpool[queue];
pool              587 drivers/atm/idt77252.c 	index = pool->index;
pool              588 drivers/atm/idt77252.c 	while (pool->skb[index]) {
pool              590 drivers/atm/idt77252.c 		if (index == pool->index)
pool              594 drivers/atm/idt77252.c 	pool->skb[index] = skb;
pool              597 drivers/atm/idt77252.c 	pool->index = (index + 1) & FBQ_MASK;
pool              791 drivers/atm/idt77252.h 	u32		pool;	/* sb_pool handle */
pool              799 drivers/atm/idt77252.h 	(((struct idt77252_skb_prv *)(ATM_SKB(skb)+1))->pool)
pool              180 drivers/atm/zatm.c static void refill_pool(struct atm_dev *dev,int pool)
pool              190 drivers/atm/zatm.c 	size = (64 << (pool <= ZATM_AAL5_POOL_BASE ? 0 :
pool              191 drivers/atm/zatm.c 	    pool-ZATM_AAL5_POOL_BASE))+sizeof(struct rx_buffer_head);
pool              198 drivers/atm/zatm.c 		offset = zatm_dev->pool_info[pool].offset+
pool              203 drivers/atm/zatm.c 	free = zpeekl(zatm_dev,zatm_dev->pool_base+2*pool) &
pool              206 drivers/atm/zatm.c 	if (free >= zatm_dev->pool_info[pool].low_water) return;
pool              208 drivers/atm/zatm.c 	    zpeekl(zatm_dev,zatm_dev->pool_base+2*pool),
pool              209 drivers/atm/zatm.c 	    zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1));
pool              213 drivers/atm/zatm.c 	while (free < zatm_dev->pool_info[pool].high_water) {
pool              235 drivers/atm/zatm.c 		if (zatm_dev->last_free[pool])
pool              236 drivers/atm/zatm.c 			((struct rx_buffer_head *) (zatm_dev->last_free[pool]->
pool              238 drivers/atm/zatm.c 		zatm_dev->last_free[pool] = skb;
pool              239 drivers/atm/zatm.c 		skb_queue_tail(&zatm_dev->pool[pool],skb);
pool              247 drivers/atm/zatm.c 		zout(uPD98401_ADD_BAT | (pool << uPD98401_POOL_SHIFT) | count,
pool              251 drivers/atm/zatm.c 		    zpeekl(zatm_dev,zatm_dev->pool_base+2*pool),
pool              252 drivers/atm/zatm.c 		    zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1));
pool              258 drivers/atm/zatm.c static void drain_free(struct atm_dev *dev,int pool)
pool              260 drivers/atm/zatm.c 	skb_queue_purge(&ZATM_DEV(dev)->pool[pool]);
pool              280 drivers/atm/zatm.c static void use_pool(struct atm_dev *dev,int pool)
pool              287 drivers/atm/zatm.c 	if (!(zatm_dev->pool_info[pool].ref_count++)) {
pool              288 drivers/atm/zatm.c 		skb_queue_head_init(&zatm_dev->pool[pool]);
pool              289 drivers/atm/zatm.c 		size = pool-ZATM_AAL5_POOL_BASE;
pool              293 drivers/atm/zatm.c 		zpokel(zatm_dev,((zatm_dev->pool_info[pool].low_water/4) <<
pool              297 drivers/atm/zatm.c 		    zatm_dev->pool_base+pool*2);
pool              299 drivers/atm/zatm.c 		    pool*2+1);
pool              301 drivers/atm/zatm.c 		zatm_dev->last_free[pool] = NULL;
pool              302 drivers/atm/zatm.c 		refill_pool(dev,pool);
pool              304 drivers/atm/zatm.c 	DPRINTK("pool %d: %d\n",pool,zatm_dev->pool_info[pool].ref_count);
pool              308 drivers/atm/zatm.c static void unuse_pool(struct atm_dev *dev,int pool)
pool              310 drivers/atm/zatm.c 	if (!(--ZATM_DEV(dev)->pool_info[pool].ref_count))
pool              311 drivers/atm/zatm.c 		drain_free(dev,pool);
pool              423 drivers/atm/zatm.c 			pos = ZATM_VCC(vcc)->pool;
pool              426 drivers/atm/zatm.c 			skb_unlink(skb, zatm_dev->pool + pos);
pool              478 drivers/atm/zatm.c 	refill_pool(dev,zatm_vcc->pool);
pool              504 drivers/atm/zatm.c 		zatm_vcc->pool = pool_index(cells*ATM_CELL_PAYLOAD);
pool              508 drivers/atm/zatm.c 		zatm_vcc->pool = ZATM_AAL0_POOL;
pool              510 drivers/atm/zatm.c 	if (zatm_vcc->pool < 0) return -EMSGSIZE;
pool              520 drivers/atm/zatm.c 	use_pool(vcc->dev,zatm_vcc->pool);
pool              521 drivers/atm/zatm.c 	DPRINTK("pool %d\n",zatm_vcc->pool);
pool              524 drivers/atm/zatm.c 	zpokel(zatm_dev,zatm_vcc->pool << uPD98401_RXVC_POOL_SHIFT,
pool              596 drivers/atm/zatm.c 	unuse_pool(vcc->dev,zatm_vcc->pool);
pool             1454 drivers/atm/zatm.c 				int pool;
pool             1456 drivers/atm/zatm.c 				if (get_user(pool,
pool             1459 drivers/atm/zatm.c 				if (pool < 0 || pool > ZATM_LAST_POOL)
pool             1461 drivers/atm/zatm.c 				pool = array_index_nospec(pool,
pool             1464 drivers/atm/zatm.c 				info = zatm_dev->pool_info[pool];
pool             1466 drivers/atm/zatm.c 					zatm_dev->pool_info[pool].rqa_count = 0;
pool             1467 drivers/atm/zatm.c 					zatm_dev->pool_info[pool].rqu_count = 0;
pool             1477 drivers/atm/zatm.c 				int pool;
pool             1480 drivers/atm/zatm.c 				if (get_user(pool,
pool             1483 drivers/atm/zatm.c 				if (pool < 0 || pool > ZATM_LAST_POOL)
pool             1485 drivers/atm/zatm.c 				pool = array_index_nospec(pool,
pool             1492 drivers/atm/zatm.c 					    pool_info[pool].low_water;
pool             1495 drivers/atm/zatm.c 					    pool_info[pool].high_water;
pool             1498 drivers/atm/zatm.c 					    pool_info[pool].next_thres;
pool             1503 drivers/atm/zatm.c 				zatm_dev->pool_info[pool].low_water =
pool             1505 drivers/atm/zatm.c 				zatm_dev->pool_info[pool].high_water =
pool             1507 drivers/atm/zatm.c 				zatm_dev->pool_info[pool].next_thres =
pool               46 drivers/atm/zatm.h 	int pool;			/* free buffer pool */
pool               68 drivers/atm/zatm.h 	struct sk_buff_head pool[NR_POOLS];/* free buffer pools */
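
The ambassador, idt77252 and zatm lines above all maintain per-size receive buffer pools of sk_buffs, refilled up to a high-water mark and handed to the adapter. A hypothetical, driver-neutral sketch of that refill loop; the step where real drivers pass the buffer address to the hardware is only noted in a comment:

#include <linux/skbuff.h>

struct ex_rx_pool {				/* hypothetical */
	struct sk_buff_head queue;
	unsigned int buffer_size;
	unsigned int count, high_water;
};

static void ex_rx_pool_refill(struct ex_rx_pool *pool, gfp_t gfp)
{
	while (pool->count < pool->high_water) {
		struct sk_buff *skb = __dev_alloc_skb(pool->buffer_size, gfp);

		if (!skb)
			break;				/* try again later */

		/* Real drivers also give the buffer address to the adapter here. */
		skb_queue_tail(&pool->queue, skb);
		pool->count++;
	}
}

static void ex_rx_pool_drain(struct ex_rx_pool *pool)
{
	skb_queue_purge(&pool->queue);		/* free everything still queued */
	pool->count = 0;
}
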
pool             1588 drivers/block/rbd.c 	req->r_base_oloc.pool = rbd_dev->layout.pool_id;
pool             5409 drivers/block/rbd.c static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
pool             5549 drivers/block/rbd.c 	rbd_dev->header_oloc.pool = spec->pool_id;
pool               54 drivers/char/agp/frontend.c 	curr = agp_fe.current_controller->pool;
pool               88 drivers/char/agp/frontend.c 			agp_fe.current_controller->pool = next;
pool              203 drivers/char/agp/frontend.c 	prev = agp_fe.current_controller->pool;
pool              209 drivers/char/agp/frontend.c 	agp_fe.current_controller->pool = temp;
pool              363 drivers/char/agp/frontend.c 	memory = controller->pool;
pool              531 drivers/char/random.c 	__u32 *pool;
pool              561 drivers/char/random.c 	.pool = input_pool_data
pool              569 drivers/char/random.c 	.pool = blocking_pool_data,
pool              612 drivers/char/random.c 		w ^= r->pool[i];
pool              613 drivers/char/random.c 		w ^= r->pool[(i + tap1) & wordmask];
pool              614 drivers/char/random.c 		w ^= r->pool[(i + tap2) & wordmask];
pool              615 drivers/char/random.c 		w ^= r->pool[(i + tap3) & wordmask];
pool              616 drivers/char/random.c 		w ^= r->pool[(i + tap4) & wordmask];
pool              617 drivers/char/random.c 		w ^= r->pool[(i + tap5) & wordmask];
pool              620 drivers/char/random.c 		r->pool[i] = (w >> 3) ^ twist_table[w & 7];
pool              654 drivers/char/random.c 	__u32		pool[4];
pool              667 drivers/char/random.c 	__u32 a = f->pool[0],	b = f->pool[1];
pool              668 drivers/char/random.c 	__u32 c = f->pool[2],	d = f->pool[3];
pool              686 drivers/char/random.c 	f->pool[0] = a;  f->pool[1] = b;
pool              687 drivers/char/random.c 	f->pool[2] = c;  f->pool[3] = d;
pool              897 drivers/char/random.c 	struct crng_state **pool;
pool              899 drivers/char/random.c 	pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
pool              905 drivers/char/random.c 		pool[i] = crng;
pool              908 drivers/char/random.c 	if (cmpxchg(&crng_node_pool, NULL, pool)) {
pool              910 drivers/char/random.c 			kfree(pool[i]);
pool              911 drivers/char/random.c 		kfree(pool);
pool             1324 drivers/char/random.c 	fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
pool             1325 drivers/char/random.c 	fast_pool->pool[1] ^= now ^ c_high;
pool             1327 drivers/char/random.c 	fast_pool->pool[2] ^= ip;
pool             1328 drivers/char/random.c 	fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
pool             1336 drivers/char/random.c 		    crng_fast_load((char *) fast_pool->pool,
pool             1337 drivers/char/random.c 				   sizeof(fast_pool->pool))) {
pool             1353 drivers/char/random.c 	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
pool             1524 drivers/char/random.c 		sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
pool              234 drivers/crypto/cavium/nitrox/nitrox_lib.c 	ctx->pool = ndev->ctx_pool;
pool              238 drivers/crypto/cavium/nitrox/nitrox_lib.c 	chdr->pool = ndev->ctx_pool;
pool              257 drivers/crypto/cavium/nitrox/nitrox_lib.c 	dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma);
pool              194 drivers/crypto/cavium/nitrox/nitrox_req.h 	struct dma_pool *pool;
pool              439 drivers/crypto/cavium/nitrox/nitrox_req.h 	struct dma_pool *pool;
pool               68 drivers/crypto/hisilicon/sgl.c 			     struct hisi_acc_sgl_pool *pool, u32 count)
pool               73 drivers/crypto/hisilicon/sgl.c 	if (!dev || !pool || !count)
pool               80 drivers/crypto/hisilicon/sgl.c 	pool->sgl = dma_alloc_coherent(dev, size, &pool->sgl_dma, GFP_KERNEL);
pool               81 drivers/crypto/hisilicon/sgl.c 	if (!pool->sgl)
pool               84 drivers/crypto/hisilicon/sgl.c 	pool->size = size;
pool               85 drivers/crypto/hisilicon/sgl.c 	pool->count = count;
pool               86 drivers/crypto/hisilicon/sgl.c 	pool->sgl_size = sgl_size;
pool               99 drivers/crypto/hisilicon/sgl.c void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool)
pool              101 drivers/crypto/hisilicon/sgl.c 	dma_free_coherent(dev, pool->size, pool->sgl, pool->sgl_dma);
pool              102 drivers/crypto/hisilicon/sgl.c 	memset(pool, 0, sizeof(struct hisi_acc_sgl_pool));
pool              106 drivers/crypto/hisilicon/sgl.c struct hisi_acc_hw_sgl *acc_get_sgl(struct hisi_acc_sgl_pool *pool, u32 index,
pool              109 drivers/crypto/hisilicon/sgl.c 	if (!pool || !hw_sgl_dma || index >= pool->count || !pool->sgl)
pool              112 drivers/crypto/hisilicon/sgl.c 	*hw_sgl_dma = pool->sgl_dma + pool->sgl_size * index;
pool              113 drivers/crypto/hisilicon/sgl.c 	return (void *)pool->sgl + pool->sgl_size * index;
pool              116 drivers/crypto/hisilicon/sgl.c void acc_put_sgl(struct hisi_acc_sgl_pool *pool, u32 index) {}
pool              149 drivers/crypto/hisilicon/sgl.c 			      struct hisi_acc_sgl_pool *pool,
pool              159 drivers/crypto/hisilicon/sgl.c 	if (!dev || !sgl || !pool || !hw_sgl_dma || sg_n > acc_sgl_sge_nr)
pool              166 drivers/crypto/hisilicon/sgl.c 	curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
pool               17 drivers/crypto/hisilicon/sgl.h 			      struct hisi_acc_sgl_pool *pool,
pool               21 drivers/crypto/hisilicon/sgl.h int hisi_acc_create_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool,
pool               23 drivers/crypto/hisilicon/sgl.h void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool);
pool              469 drivers/crypto/hisilicon/zip/zip_crypto.c 	struct hisi_acc_sgl_pool *pool = &qp_ctx->sgl_pool;
pool              477 drivers/crypto/hisilicon/zip/zip_crypto.c 	req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
pool              483 drivers/crypto/hisilicon/zip/zip_crypto.c 	req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
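
The hisilicon sgl.c code above implements its pool as a single dma_alloc_coherent() buffer sliced into fixed-size hardware SGL entries addressed by index. A simplified, hypothetical sketch of that layout; structure and field names are illustrative:

#include <linux/dma-mapping.h>

struct ex_sgl_pool {				/* hypothetical */
	void *base;
	dma_addr_t base_dma;
	size_t entry_size;
	u32 count;
};

static int ex_sgl_pool_create(struct device *dev, struct ex_sgl_pool *p,
			      size_t entry_size, u32 count)
{
	p->entry_size = entry_size;
	p->count = count;
	p->base = dma_alloc_coherent(dev, entry_size * count, &p->base_dma,
				     GFP_KERNEL);
	return p->base ? 0 : -ENOMEM;
}

/* Entry i lives at base + i * entry_size; its DMA address is computed the
 * same way, mirroring acc_get_sgl() above. */
static void *ex_sgl_get(struct ex_sgl_pool *p, u32 index, dma_addr_t *dma)
{
	if (index >= p->count)
		return NULL;
	*dma = p->base_dma + p->entry_size * index;
	return p->base + p->entry_size * index;
}
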
pool              381 drivers/crypto/marvell/cesa.c 	engine->pool = of_gen_pool_get(cesa->dev->of_node,
pool              383 drivers/crypto/marvell/cesa.c 	if (engine->pool) {
pool              384 drivers/crypto/marvell/cesa.c 		engine->sram = gen_pool_dma_alloc(engine->pool,
pool              390 drivers/crypto/marvell/cesa.c 		engine->pool = NULL;
pool              424 drivers/crypto/marvell/cesa.c 	if (engine->pool)
pool              425 drivers/crypto/marvell/cesa.c 		gen_pool_free(engine->pool, (unsigned long)engine->sram,
pool              455 drivers/crypto/marvell/cesa.h 	struct gen_pool *pool;
pool              287 drivers/dma/amba-pl08x.c 	struct dma_pool *pool;
pool             1268 drivers/dma/amba-pl08x.c 	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
pool             1498 drivers/dma/amba-pl08x.c 		dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
pool             2842 drivers/dma/amba-pl08x.c 	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
pool             2844 drivers/dma/amba-pl08x.c 	if (!pl08x->pool) {
pool             2981 drivers/dma/amba-pl08x.c 	dma_pool_destroy(pl08x->pool);
pool             1284 drivers/dma/coh901318.c 	struct coh901318_pool pool;
pool             1343 drivers/dma/coh901318.c 	int pool_count = debugfs_dma_base->pool.debugfs_pool_counter;
pool             1897 drivers/dma/coh901318.c 	coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli);
pool             2123 drivers/dma/coh901318.c 		coh901318_lli_free(&cohc->base->pool, &cohd->lli);
pool             2132 drivers/dma/coh901318.c 		coh901318_lli_free(&cohc->base->pool, &cohd->lli);
pool             2239 drivers/dma/coh901318.c 	lli = coh901318_lli_alloc(&cohc->base->pool, lli_len);
pool             2245 drivers/dma/coh901318.c 		&cohc->base->pool, lli, src, size, dest,
pool             2352 drivers/dma/coh901318.c 	lli = coh901318_lli_alloc(&cohc->base->pool, len);
pool             2360 drivers/dma/coh901318.c 	ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len,
pool             2676 drivers/dma/coh901318.c 	err = coh901318_pool_create(&base->pool, &pdev->dev,
pool             2748 drivers/dma/coh901318.c 	coh901318_pool_destroy(&base->pool);
pool             2779 drivers/dma/coh901318.c 	coh901318_pool_destroy(&base->pool);
pool               53 drivers/dma/coh901318.h int coh901318_pool_create(struct coh901318_pool *pool,
pool               62 drivers/dma/coh901318.h int coh901318_pool_destroy(struct coh901318_pool *pool);
pool               72 drivers/dma/coh901318.h coh901318_lli_alloc(struct coh901318_pool *pool,
pool               80 drivers/dma/coh901318.h void coh901318_lli_free(struct coh901318_pool *pool,
pool               95 drivers/dma/coh901318.h coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
pool              113 drivers/dma/coh901318.h coh901318_lli_fill_single(struct coh901318_pool *pool,
pool              134 drivers/dma/coh901318.h coh901318_lli_fill_sg(struct coh901318_pool *pool,
pool               19 drivers/dma/coh901318_lli.c #define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0)
pool               20 drivers/dma/coh901318_lli.c #define DEBUGFS_POOL_COUNTER_ADD(pool, add) (pool->debugfs_pool_counter += add)
pool               22 drivers/dma/coh901318_lli.c #define DEBUGFS_POOL_COUNTER_RESET(pool)
pool               23 drivers/dma/coh901318_lli.c #define DEBUGFS_POOL_COUNTER_ADD(pool, add)
pool               35 drivers/dma/coh901318_lli.c int coh901318_pool_create(struct coh901318_pool *pool,
pool               39 drivers/dma/coh901318_lli.c 	spin_lock_init(&pool->lock);
pool               40 drivers/dma/coh901318_lli.c 	pool->dev = dev;
pool               41 drivers/dma/coh901318_lli.c 	pool->dmapool = dma_pool_create("lli_pool", dev, size, align, 0);
pool               43 drivers/dma/coh901318_lli.c 	DEBUGFS_POOL_COUNTER_RESET(pool);
pool               47 drivers/dma/coh901318_lli.c int coh901318_pool_destroy(struct coh901318_pool *pool)
pool               50 drivers/dma/coh901318_lli.c 	dma_pool_destroy(pool->dmapool);
pool               55 drivers/dma/coh901318_lli.c coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
pool               66 drivers/dma/coh901318_lli.c 	spin_lock(&pool->lock);
pool               68 drivers/dma/coh901318_lli.c 	head = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);
pool               73 drivers/dma/coh901318_lli.c 	DEBUGFS_POOL_COUNTER_ADD(pool, 1);
pool               83 drivers/dma/coh901318_lli.c 		lli = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);
pool               88 drivers/dma/coh901318_lli.c 		DEBUGFS_POOL_COUNTER_ADD(pool, 1);
pool               97 drivers/dma/coh901318_lli.c 	spin_unlock(&pool->lock);
pool              102 drivers/dma/coh901318_lli.c 	spin_unlock(&pool->lock);
pool              107 drivers/dma/coh901318_lli.c 	spin_unlock(&pool->lock);
pool              108 drivers/dma/coh901318_lli.c 	coh901318_lli_free(pool, &head);
pool              112 drivers/dma/coh901318_lli.c void coh901318_lli_free(struct coh901318_pool *pool,
pool              126 drivers/dma/coh901318_lli.c 	spin_lock(&pool->lock);
pool              130 drivers/dma/coh901318_lli.c 		dma_pool_free(pool->dmapool, l, l->phy_this);
pool              131 drivers/dma/coh901318_lli.c 		DEBUGFS_POOL_COUNTER_ADD(pool, -1);
pool              134 drivers/dma/coh901318_lli.c 	dma_pool_free(pool->dmapool, l, l->phy_this);
pool              135 drivers/dma/coh901318_lli.c 	DEBUGFS_POOL_COUNTER_ADD(pool, -1);
pool              137 drivers/dma/coh901318_lli.c 	spin_unlock(&pool->lock);
pool              142 drivers/dma/coh901318_lli.c coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
pool              175 drivers/dma/coh901318_lli.c coh901318_lli_fill_single(struct coh901318_pool *pool,
pool              231 drivers/dma/coh901318_lli.c coh901318_lli_fill_sg(struct coh901318_pool *pool,
pool              249 drivers/dma/coh901318_lli.c 	spin_lock(&pool->lock);
pool              307 drivers/dma/coh901318_lli.c 	spin_unlock(&pool->lock);
pool              311 drivers/dma/coh901318_lli.c 	spin_unlock(&pool->lock);
pool             1171 drivers/dma/dmaengine.c 	mempool_t *pool;
pool             1228 drivers/dma/dmaengine.c 	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
pool             1245 drivers/dma/dmaengine.c 		mempool_destroy(p->pool);
pool             1246 drivers/dma/dmaengine.c 		p->pool = NULL;
pool             1267 drivers/dma/dmaengine.c 		p->pool = mempool_create_slab_pool(1, p->cache);
pool             1268 drivers/dma/dmaengine.c 		if (!p->pool)
pool             1284 drivers/dma/dmaengine.c 	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
pool              214 drivers/dma/idma64.c 			dma_pool_free(idma64c->pool, hw->lli, hw->llp);
pool              310 drivers/dma/idma64.c 		hw->lli = dma_pool_alloc(idma64c->pool, GFP_NOWAIT, &hw->llp);
pool              502 drivers/dma/idma64.c 	idma64c->pool = dma_pool_create(dev_name(chan2dev(chan)),
pool              505 drivers/dma/idma64.c 	if (!idma64c->pool) {
pool              518 drivers/dma/idma64.c 	dma_pool_destroy(idma64c->pool);
pool              519 drivers/dma/idma64.c 	idma64c->pool = NULL;
pool              137 drivers/dma/idma64.h 	void *pool;
pool              108 drivers/dma/k3dma.c 	struct dma_pool		*pool;
pool              480 drivers/dma/k3dma.c 	ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
pool              717 drivers/dma/k3dma.c 	dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
pool              895 drivers/dma/k3dma.c 	d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
pool              897 drivers/dma/k3dma.c 	if (!d->pool)
pool              126 drivers/dma/mmp_tdma.c 	struct gen_pool			*pool;
pool              361 drivers/dma/mmp_tdma.c 	gpool = tdmac->pool;
pool              413 drivers/dma/mmp_tdma.c 	gpool = tdmac->pool;
pool              555 drivers/dma/mmp_tdma.c 					int type, struct gen_pool *pool)
pool              576 drivers/dma/mmp_tdma.c 	tdmac->pool	   = pool;
pool              636 drivers/dma/mmp_tdma.c 	struct gen_pool *pool = NULL;
pool              664 drivers/dma/mmp_tdma.c 		pool = of_gen_pool_get(pdev->dev.of_node, "asram", 0);
pool              666 drivers/dma/mmp_tdma.c 		pool = sram_get_gpool("asram");
pool              667 drivers/dma/mmp_tdma.c 	if (!pool) {
pool              683 drivers/dma/mmp_tdma.c 		ret = mmp_tdma_chan_init(tdev, i, irq, type, pool);
pool              118 drivers/dma/pch_dma.c 	struct dma_pool		*pool;
pool              432 drivers/dma/pch_dma.c 	desc = dma_pool_zalloc(pd->pool, flags, &addr);
pool              544 drivers/dma/pch_dma.c 		dma_pool_free(pd->pool, desc, desc->txd.phys);
pool              876 drivers/dma/pch_dma.c 	pd->pool = dma_pool_create("pch_dma_desc_pool", &pdev->dev,
pool              878 drivers/dma/pch_dma.c 	if (!pd->pool) {
pool              926 drivers/dma/pch_dma.c 	dma_pool_destroy(pd->pool);
pool              958 drivers/dma/pch_dma.c 		dma_pool_destroy(pd->pool);
pool             2531 drivers/dma/pl330.c static int add_desc(struct list_head *pool, spinlock_t *lock,
pool             2546 drivers/dma/pl330.c 		list_add_tail(&desc[i].node, pool);
pool             2554 drivers/dma/pl330.c static struct dma_pl330_desc *pluck_desc(struct list_head *pool,
pool             2562 drivers/dma/pl330.c 	if (!list_empty(pool)) {
pool             2563 drivers/dma/pl330.c 		desc = list_entry(pool->next,
pool             2589 drivers/dma/pl330.c 		LIST_HEAD(pool);
pool             2591 drivers/dma/pl330.c 		if (!add_desc(&pool, &lock, GFP_ATOMIC, 1))
pool             2594 drivers/dma/pl330.c 		desc = pluck_desc(&pool, &lock);
pool             2595 drivers/dma/pl330.c 		WARN_ON(!desc || !list_empty(&pool));
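
pl330.c above keeps its software descriptor pool as a plain list_head guarded by a spinlock: add_desc() pushes freshly allocated descriptors, pluck_desc() pops one. A compact, hypothetical sketch of that idiom:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct ex_desc {				/* hypothetical descriptor */
	struct list_head node;
	/* ... hardware fields ... */
};

static int ex_add_descs(struct list_head *pool, spinlock_t *lock,
			gfp_t flg, int count)
{
	struct ex_desc *desc = kcalloc(count, sizeof(*desc), flg);
	unsigned long flags;
	int i;

	if (!desc)
		return 0;

	spin_lock_irqsave(lock, flags);
	for (i = 0; i < count; i++)
		list_add_tail(&desc[i].node, pool);
	spin_unlock_irqrestore(lock, flags);

	return count;
}

static struct ex_desc *ex_pluck_desc(struct list_head *pool, spinlock_t *lock)
{
	struct ex_desc *desc = NULL;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (!list_empty(pool)) {
		desc = list_first_entry(pool, struct ex_desc, node);
		list_del_init(&desc->node);
	}
	spin_unlock_irqrestore(lock, flags);

	return desc;
}
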
pool              830 drivers/dma/ste_dma40.c 	struct d40_lcla_pool *pool = &chan->base->lcla_pool;
pool              890 drivers/dma/ste_dma40.c 		struct d40_log_lli *lcla = pool->base + lcla_offset;
pool              925 drivers/dma/ste_dma40.c 						pool->dma_addr, lcla_offset,
pool             3375 drivers/dma/ste_dma40.c 	struct d40_lcla_pool *pool = &base->lcla_pool;
pool             3439 drivers/dma/ste_dma40.c 	pool->dma_addr = dma_map_single(base->dev, pool->base,
pool             3442 drivers/dma/ste_dma40.c 	if (dma_mapping_error(base->dev, pool->dma_addr)) {
pool             3443 drivers/dma/ste_dma40.c 		pool->dma_addr = 0;
pool              196 drivers/dma/sun6i-dma.c 	struct dma_pool		*pool;
pool              418 drivers/dma/sun6i-dma.c 		dma_pool_free(sdev->pool, v_lli, p_lli);
pool              651 drivers/dma/sun6i-dma.c 	v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
pool              711 drivers/dma/sun6i-dma.c 		v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
pool              756 drivers/dma/sun6i-dma.c 		dma_pool_free(sdev->pool, prev, virt_to_phys(prev));
pool              790 drivers/dma/sun6i-dma.c 		v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
pool              824 drivers/dma/sun6i-dma.c 		dma_pool_free(sdev->pool, prev, virt_to_phys(prev));
pool             1277 drivers/dma/sun6i-dma.c 	sdc->pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
pool             1279 drivers/dma/sun6i-dma.c 	if (!sdc->pool) {
pool              125 drivers/dma/zx_dma.c 	struct dma_pool		*pool;
pool              436 drivers/dma/zx_dma.c 	ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
pool              721 drivers/dma/zx_dma.c 	dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
pool              792 drivers/dma/zx_dma.c 	d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
pool              794 drivers/dma/zx_dma.c 	if (!d->pool)
pool              897 drivers/dma/zx_dma.c 	dmam_pool_destroy(d->pool);
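
Several of the DMA engine drivers above (k3dma, sun6i, zx_dma) use the device-managed variant, dmam_pool_create(), so the pool is torn down automatically when the driver is unbound, and take zeroed descriptors with dma_pool_zalloc(). A minimal sketch of that combination; sizes are illustrative:

#include <linux/device.h>
#include <linux/dmapool.h>

static int ex_probe_lli_pool(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t lli_dma;
	void *lli;

	/* Freed automatically by devres when the device is unbound. */
	pool = dmam_pool_create("ex-lli", dev, 64, 32, 0);
	if (!pool)
		return -ENOMEM;

	lli = dma_pool_zalloc(pool, GFP_NOWAIT, &lli_dma);	/* zeroed block */
	if (!lli)
		return -ENOMEM;

	dma_pool_free(pool, lli, lli_dma);
	return 0;
}
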
pool               20 drivers/firmware/tegra/bpmp-tegra186.c 		struct gen_pool *pool;
pool              176 drivers/firmware/tegra/bpmp-tegra186.c 	priv->tx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 0);
pool              177 drivers/firmware/tegra/bpmp-tegra186.c 	if (!priv->tx.pool) {
pool              182 drivers/firmware/tegra/bpmp-tegra186.c 	priv->tx.virt = gen_pool_dma_alloc(priv->tx.pool, 4096, &priv->tx.phys);
pool              188 drivers/firmware/tegra/bpmp-tegra186.c 	priv->rx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 1);
pool              189 drivers/firmware/tegra/bpmp-tegra186.c 	if (!priv->rx.pool) {
pool              195 drivers/firmware/tegra/bpmp-tegra186.c 	priv->rx.virt = gen_pool_dma_alloc(priv->rx.pool, 4096, &priv->rx.phys);
pool              254 drivers/firmware/tegra/bpmp-tegra186.c 	gen_pool_free(priv->rx.pool, (unsigned long)priv->rx.virt, 4096);
pool              256 drivers/firmware/tegra/bpmp-tegra186.c 	gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.virt, 4096);
pool              274 drivers/firmware/tegra/bpmp-tegra186.c 	gen_pool_free(priv->rx.pool, (unsigned long)priv->rx.virt, 4096);
pool              275 drivers/firmware/tegra/bpmp-tegra186.c 	gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.virt, 4096);
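
The marvell/cesa and tegra bpmp lines above obtain a gen_pool described in the device tree via of_gen_pool_get() and then carve DMA-capable memory out of it with gen_pool_dma_alloc(). A minimal sketch; the "shmem" property name follows the bpmp code above and the 4096-byte size is an example:

#include <linux/device.h>
#include <linux/genalloc.h>
#include <linux/of.h>

static int ex_map_shmem(struct device *dev)
{
	struct gen_pool *pool;
	dma_addr_t phys;
	void *virt;

	pool = of_gen_pool_get(dev->of_node, "shmem", 0);
	if (!pool)
		return -EPROBE_DEFER;

	virt = gen_pool_dma_alloc(pool, 4096, &phys);	/* CPU address + bus address */
	if (!virt)
		return -ENOMEM;

	/* ... use the shared memory ... */

	gen_pool_free(pool, (unsigned long)virt, 4096);
	return 0;
}
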
pool              511 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c 		const struct resource_pool *pool,
pool              523 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c 	secondary_pipe->plane_res.mi = pool->mis[secondary_pipe->pipe_idx];
pool              524 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c 	secondary_pipe->plane_res.hubp = pool->hubps[secondary_pipe->pipe_idx];
pool              525 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c 	secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx];
pool              526 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c 	secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx];
pool              527 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c 	secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx];
pool              528 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c 	secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst;
pool              730 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c 	const struct resource_pool *pool = dc->res_pool;
pool              865 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c 	for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
pool             1171 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c 		for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
pool             1233 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c 						hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, pool, pipe);
pool             1235 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c 						split_stream_across_pipes(&context->res_ctx, pool, pipe, hsplit_pipe);
pool              249 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	struct resource_pool *pool,
pool              253 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	const struct resource_caps *caps = pool->res_cap;
pool              261 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	pool->audio_count = 0;
pool              270 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		update_num_audio(&straps, &num_audio, &pool->audio_support);
pool              282 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			pool->audios[i] = aud;
pool              283 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			pool->audio_count++;
pool              287 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	pool->stream_enc_count = 0;
pool              290 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			pool->stream_enc[i] = create_funcs->create_stream_encoder(i, ctx);
pool              291 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			if (pool->stream_enc[i] == NULL)
pool              293 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			pool->stream_enc_count++;
pool              298 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	if (pool->audio_count < pool->stream_enc_count) {
pool              302 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		pool->stream_enc[pool->stream_enc_count] =
pool              305 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		if (pool->stream_enc[pool->stream_enc_count] == NULL) {
pool              309 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		pool->stream_enc_count++;
pool              317 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		const struct resource_pool *pool,
pool              323 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	for (i = 0; i < pool->clk_src_count; i++) {
pool              324 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		if (pool->clock_sources[i] == clock_source)
pool              332 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		const struct resource_pool *pool,
pool              335 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	int i = find_matching_clock_source(pool, clock_source);
pool              340 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	if (pool->dp_clock_source == clock_source)
pool              346 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		const struct resource_pool *pool,
pool              349 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	int i = find_matching_clock_source(pool, clock_source);
pool              354 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	if (pool->dp_clock_source == clock_source)
pool              360 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		const struct resource_pool *pool,
pool              363 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	int i = find_matching_clock_source(pool, clock_source);
pool              368 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	if (pool->dp_clock_source == clock_source)
pool             1070 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		const struct resource_pool *pool,
pool             1105 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		int preferred_pipe_idx = (pool->pipe_count - 1) - primary_pipe->pipe_idx;
pool             1117 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		for (i = pool->pipe_count - 1; i >= 0; i--) {
pool             1165 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		const struct resource_pool *pool,
pool             1175 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	for (i = pool->pipe_count - 1; i >= 0; i--) {
pool             1187 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	if (!pool->funcs->acquire_idle_pipe_for_layer)
pool             1190 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	return pool->funcs->acquire_idle_pipe_for_layer(context, pool, head_pipe->stream);
pool             1196 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		const struct resource_pool *pool,
pool             1201 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	for (i = 0; i < pool->pipe_count; i++) {
pool             1214 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			split_pipe->stream_res.tg = pool->timing_generators[i];
pool             1215 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			split_pipe->plane_res.hubp = pool->hubps[i];
pool             1216 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			split_pipe->plane_res.ipp = pool->ipps[i];
pool             1217 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			split_pipe->plane_res.dpp = pool->dpps[i];
pool             1218 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			split_pipe->stream_res.opp = pool->opps[i];
pool             1219 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst;
pool             1237 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	struct resource_pool *pool = dc->res_pool;
pool             1272 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		free_pipe = acquire_free_pipe_for_head(context, pool, head_pipe);
pool             1276 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
pool             1316 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	struct resource_pool *pool = dc->res_pool;
pool             1330 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	for (i = pool->pipe_count - 1; i >= 0; i--) {
pool             1579 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		const struct resource_pool *pool,
pool             1585 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	for (i = 0; i < pool->stream_enc_count; i++) {
pool             1586 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		if (pool->stream_enc[i] == stream_enc)
pool             1594 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		const struct resource_pool *pool,
pool             1599 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	for (i = 0; i < pool->audio_count; i++) {
pool             1600 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		if (pool->audios[i] == audio)
pool             1607 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		const struct resource_pool *pool,
pool             1612 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	for (i = 0; i < pool->pipe_count; i++) {
pool             1616 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			pipe_ctx->stream_res.tg = pool->timing_generators[i];
pool             1617 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			pipe_ctx->plane_res.mi = pool->mis[i];
pool             1618 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			pipe_ctx->plane_res.hubp = pool->hubps[i];
pool             1619 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			pipe_ctx->plane_res.ipp = pool->ipps[i];
pool             1620 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			pipe_ctx->plane_res.xfm = pool->transforms[i];
pool             1621 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			pipe_ctx->plane_res.dpp = pool->dpps[i];
pool             1622 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			pipe_ctx->stream_res.opp = pool->opps[i];
pool             1623 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			if (pool->dpps[i])
pool             1624 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 				pipe_ctx->plane_res.mpcc_inst = pool->dpps[i]->inst;
pool             1637 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		const struct resource_pool *pool,
pool             1642 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	available_audio_count = pool->audio_count;
pool             1649 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			return pool->audios[i];
pool             1655 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		return pool->audios[id];
pool             1660 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			return pool->audios[i];
pool             1853 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		const struct resource_pool *pool,
pool             1872 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	if (inst >= pool->pipe_count)
pool             1875 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	if (inst >= pool->stream_enc_count)
pool             1878 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	tg_inst = pool->stream_enc[inst]->funcs->dig_source_otg(pool->stream_enc[inst]);
pool             1880 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	if (tg_inst >= pool->timing_generator_count)
pool             1886 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		pipe_ctx->stream_res.tg = pool->timing_generators[tg_inst];
pool             1887 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		pipe_ctx->plane_res.mi = pool->mis[tg_inst];
pool             1888 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		pipe_ctx->plane_res.hubp = pool->hubps[tg_inst];
pool             1889 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		pipe_ctx->plane_res.ipp = pool->ipps[tg_inst];
pool             1890 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		pipe_ctx->plane_res.xfm = pool->transforms[tg_inst];
pool             1891 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		pipe_ctx->plane_res.dpp = pool->dpps[tg_inst];
pool             1892 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		pipe_ctx->stream_res.opp = pool->opps[tg_inst];
pool             1894 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		if (pool->dpps[tg_inst])
pool             1895 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			pipe_ctx->plane_res.mpcc_inst = pool->dpps[tg_inst]->inst;
pool             1910 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	const struct resource_pool *pool = dc->res_pool;
pool             1940 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 				pool,
pool             1945 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);
pool             1949 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
pool             1959 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			&context->res_ctx, pool, stream);
pool             1965 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		&context->res_ctx, pool,
pool             1974 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		&context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
pool             1982 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			update_audio_usage(&context->res_ctx, pool,
pool             1988 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		pipe_ctx->stream_res.abm = pool->abm;
pool             2483 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		const struct resource_pool *pool)
pool             2487 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	for (i = 0; i < pool->clk_src_count; ++i) {
pool             2489 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 			return pool->clock_sources[i];
pool             2537 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 	const struct resource_pool *pool = dc->res_pool;
pool             2546 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		pipe_ctx->clock_source = pool->dp_clock_source;
pool             2559 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 					pool);
pool             2566 drivers/gpu/drm/amd/display/dc/core/dc_resource.c 		&context->res_ctx, pool,
pool               29 drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c 	struct resource_pool *pool,
pool               47 drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c 	dce_i2c_sw = dce_i2c_acquire_i2c_sw_engine(pool, ddc);
pool               50 drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c 		dce_i2c_hw = acquire_i2c_hw_engine(pool, ddc);
pool               55 drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c 		return dce_i2c_submit_command_hw(pool, ddc, cmd, dce_i2c_hw);
pool               58 drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c 	return dce_i2c_submit_command_sw(pool, ddc, cmd, dce_i2c_sw);
pool               34 drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h 	struct resource_pool *pool,
pool              380 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c 	struct resource_pool *pool,
pool              394 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c 		if (line < pool->res_cap->num_ddc)
pool              395 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c 			dce_i2c_hw = pool->hw_i2cs[line];
pool              401 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c 	if (pool->i2c_hw_buffer_in_use || !is_engine_available(dce_i2c_hw))
pool              433 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c 	pool->i2c_hw_buffer_in_use = true;
pool              577 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c 	struct resource_pool *pool,
pool              605 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c 	pool->i2c_hw_buffer_in_use = false;
pool              326 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h 	struct resource_pool *pool,
pool              332 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h 	struct resource_pool *pool,
pool               69 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c 	struct resource_pool *pool,
pool              497 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c 	struct resource_pool *pool,
pool              523 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c 	release_engine_dce_sw(pool, dce_i2c_sw);
pool              528 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c 	struct resource_pool *pool,
pool              535 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c 		engine = pool->sw_i2cs[line];
pool               47 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h 	struct resource_pool *pool,
pool               53 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h 	struct resource_pool *pool,
pool              682 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c static void destruct(struct dce110_resource_pool *pool)
pool              686 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	for (i = 0; i < pool->base.pipe_count; i++) {
pool              687 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		if (pool->base.opps[i] != NULL)
pool              688 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 			dce110_opp_destroy(&pool->base.opps[i]);
pool              690 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		if (pool->base.transforms[i] != NULL)
pool              691 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 			dce100_transform_destroy(&pool->base.transforms[i]);
pool              693 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		if (pool->base.ipps[i] != NULL)
pool              694 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 			dce_ipp_destroy(&pool->base.ipps[i]);
pool              696 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		if (pool->base.mis[i] != NULL) {
pool              697 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 			kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
pool              698 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 			pool->base.mis[i] = NULL;
pool              701 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		if (pool->base.timing_generators[i] != NULL)	{
pool              702 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 			kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool              703 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 			pool->base.timing_generators[i] = NULL;
pool              707 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool              708 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		if (pool->base.engines[i] != NULL)
pool              709 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 			dce110_engine_destroy(&pool->base.engines[i]);
pool              710 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		if (pool->base.hw_i2cs[i] != NULL) {
pool              711 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 			kfree(pool->base.hw_i2cs[i]);
pool              712 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 			pool->base.hw_i2cs[i] = NULL;
pool              714 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		if (pool->base.sw_i2cs[i] != NULL) {
pool              715 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 			kfree(pool->base.sw_i2cs[i]);
pool              716 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 			pool->base.sw_i2cs[i] = NULL;
pool              720 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	for (i = 0; i < pool->base.stream_enc_count; i++) {
pool              721 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		if (pool->base.stream_enc[i] != NULL)
pool              722 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 			kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
pool              725 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	for (i = 0; i < pool->base.clk_src_count; i++) {
pool              726 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		if (pool->base.clock_sources[i] != NULL)
pool              727 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 			dce100_clock_source_destroy(&pool->base.clock_sources[i]);
pool              730 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	if (pool->base.dp_clock_source != NULL)
pool              731 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		dce100_clock_source_destroy(&pool->base.dp_clock_source);
pool              733 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	for (i = 0; i < pool->base.audio_count; i++)	{
pool              734 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		if (pool->base.audios[i] != NULL)
pool              735 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 			dce_aud_destroy(&pool->base.audios[i]);
pool              738 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	if (pool->base.abm != NULL)
pool              739 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 				dce_abm_destroy(&pool->base.abm);
pool              741 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	if (pool->base.dmcu != NULL)
pool              742 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 			dce_dmcu_destroy(&pool->base.dmcu);
pool              744 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	if (pool->base.irqs != NULL)
pool              745 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		dal_irq_service_destroy(&pool->base.irqs);
pool              838 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c static void dce100_destroy_resource_pool(struct resource_pool **pool)
pool              840 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
pool              844 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	*pool = NULL;
pool              858 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		const struct resource_pool *pool,
pool              865 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	for (i = 0; i < pool->stream_enc_count; i++) {
pool              867 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 				pool->stream_enc[i]) {
pool              872 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 			if (pool->stream_enc[i]->id ==
pool              874 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 				return pool->stream_enc[i];
pool              892 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		return pool->stream_enc[j];
pool              910 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	struct dce110_resource_pool *pool)
pool              918 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	pool->base.res_cap = &res_cap;
pool              919 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	pool->base.funcs = &dce100_res_pool_funcs;
pool              920 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool              925 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		pool->base.dp_clock_source =
pool              928 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		pool->base.clock_sources[0] =
pool              930 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		pool->base.clock_sources[1] =
pool              932 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		pool->base.clock_sources[2] =
pool              934 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		pool->base.clk_src_count = 3;
pool              937 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		pool->base.dp_clock_source =
pool              940 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		pool->base.clock_sources[0] =
pool              942 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		pool->base.clock_sources[1] =
pool              944 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		pool->base.clk_src_count = 2;
pool              947 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	if (pool->base.dp_clock_source == NULL) {
pool              953 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	for (i = 0; i < pool->base.clk_src_count; i++) {
pool              954 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		if (pool->base.clock_sources[i] == NULL) {
pool              961 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	pool->base.dmcu = dce_dmcu_create(ctx,
pool              965 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	if (pool->base.dmcu == NULL) {
pool              971 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	pool->base.abm = dce_abm_create(ctx,
pool              975 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	if (pool->base.abm == NULL) {
pool              984 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		pool->base.irqs = dal_irq_service_dce110_create(&init_data);
pool              985 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		if (!pool->base.irqs)
pool              992 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool              993 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	pool->base.pipe_count = res_cap.num_timing_generator;
pool              994 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
pool             1000 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	for (i = 0; i < pool->base.pipe_count; i++) {
pool             1001 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		pool->base.timing_generators[i] =
pool             1006 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		if (pool->base.timing_generators[i] == NULL) {
pool             1012 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		pool->base.mis[i] = dce100_mem_input_create(ctx, i);
pool             1013 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		if (pool->base.mis[i] == NULL) {
pool             1020 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		pool->base.ipps[i] = dce100_ipp_create(ctx, i);
pool             1021 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		if (pool->base.ipps[i] == NULL) {
pool             1028 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		pool->base.transforms[i] = dce100_transform_create(ctx, i);
pool             1029 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		if (pool->base.transforms[i] == NULL) {
pool             1036 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		pool->base.opps[i] = dce100_opp_create(ctx, i);
pool             1037 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		if (pool->base.opps[i] == NULL) {
pool             1045 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool             1046 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		pool->base.engines[i] = dce100_aux_engine_create(ctx, i);
pool             1047 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		if (pool->base.engines[i] == NULL) {
pool             1053 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		pool->base.hw_i2cs[i] = dce100_i2c_hw_create(ctx, i);
pool             1054 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		if (pool->base.hw_i2cs[i] == NULL) {
pool             1060 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		pool->base.sw_i2cs[i] = NULL;
pool             1063 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	dc->caps.max_planes =  pool->base.pipe_count;
pool             1068 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	if (!resource_construct(num_virtual_links, dc, &pool->base,
pool             1077 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	destruct(pool);
pool             1086 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	struct dce110_resource_pool *pool =
pool             1089 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	if (!pool)
pool             1092 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	if (construct(num_virtual_links, dc, pool))
pool             1093 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 		return &pool->base;
pool             1095 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	kfree(pool);
pool               51 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h 		const struct resource_pool *pool,
pool             1684 drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c 		const struct resource_pool *pool)
pool             1687 drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c 	int underlay_idx = pool->underlay_pipe_index;
pool               64 drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h 		const struct resource_pool *pool);
pool              739 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c static void destruct(struct dce110_resource_pool *pool)
pool              743 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	for (i = 0; i < pool->base.pipe_count; i++) {
pool              744 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		if (pool->base.opps[i] != NULL)
pool              745 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 			dce110_opp_destroy(&pool->base.opps[i]);
pool              747 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		if (pool->base.transforms[i] != NULL)
pool              748 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 			dce110_transform_destroy(&pool->base.transforms[i]);
pool              750 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		if (pool->base.ipps[i] != NULL)
pool              751 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 			dce_ipp_destroy(&pool->base.ipps[i]);
pool              753 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		if (pool->base.mis[i] != NULL) {
pool              754 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 			kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
pool              755 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 			pool->base.mis[i] = NULL;
pool              758 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		if (pool->base.timing_generators[i] != NULL)	{
pool              759 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 			kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool              760 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 			pool->base.timing_generators[i] = NULL;
pool              764 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool              765 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		if (pool->base.engines[i] != NULL)
pool              766 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 			dce110_engine_destroy(&pool->base.engines[i]);
pool              767 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		if (pool->base.hw_i2cs[i] != NULL) {
pool              768 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 			kfree(pool->base.hw_i2cs[i]);
pool              769 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 			pool->base.hw_i2cs[i] = NULL;
pool              771 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		if (pool->base.sw_i2cs[i] != NULL) {
pool              772 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 			kfree(pool->base.sw_i2cs[i]);
pool              773 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 			pool->base.sw_i2cs[i] = NULL;
pool              777 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	for (i = 0; i < pool->base.stream_enc_count; i++) {
pool              778 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		if (pool->base.stream_enc[i] != NULL)
pool              779 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 			kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
pool              782 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	for (i = 0; i < pool->base.clk_src_count; i++) {
pool              783 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		if (pool->base.clock_sources[i] != NULL) {
pool              784 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 			dce110_clock_source_destroy(&pool->base.clock_sources[i]);
pool              788 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	if (pool->base.dp_clock_source != NULL)
pool              789 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		dce110_clock_source_destroy(&pool->base.dp_clock_source);
pool              791 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	for (i = 0; i < pool->base.audio_count; i++)	{
pool              792 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		if (pool->base.audios[i] != NULL) {
pool              793 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 			dce_aud_destroy(&pool->base.audios[i]);
pool              797 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	if (pool->base.abm != NULL)
pool              798 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		dce_abm_destroy(&pool->base.abm);
pool              800 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	if (pool->base.dmcu != NULL)
pool              801 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		dce_dmcu_destroy(&pool->base.dmcu);
pool              803 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	if (pool->base.irqs != NULL) {
pool              804 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		dal_irq_service_destroy(&pool->base.irqs);
pool             1050 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		const struct resource_pool *pool,
pool             1055 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	unsigned int underlay_idx = pool->underlay_pipe_index;
pool             1061 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	pipe_ctx->stream_res.tg = pool->timing_generators[underlay_idx];
pool             1062 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	pipe_ctx->plane_res.mi = pool->mis[underlay_idx];
pool             1064 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	pipe_ctx->plane_res.xfm = pool->transforms[underlay_idx];
pool             1065 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	pipe_ctx->stream_res.opp = pool->opps[underlay_idx];
pool             1114 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c static void dce110_destroy_resource_pool(struct resource_pool **pool)
pool             1116 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
pool             1120 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	*pool = NULL;
pool             1125 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		const struct resource_pool *pool,
pool             1132 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	for (i = 0; i < pool->stream_enc_count; i++) {
pool             1134 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 				pool->stream_enc[i]) {
pool             1139 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 			if (pool->stream_enc[i]->id ==
pool             1141 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 				return pool->stream_enc[i];
pool             1150 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		return pool->stream_enc[j];
pool             1167 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c static bool underlay_create(struct dc_context *ctx, struct resource_pool *pool)
pool             1192 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	pool->opps[pool->pipe_count] = &dce110_oppv->base;
pool             1193 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	pool->timing_generators[pool->pipe_count] = &dce110_tgv->base;
pool             1194 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	pool->mis[pool->pipe_count] = &dce110_miv->base;
pool             1195 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	pool->transforms[pool->pipe_count] = &dce110_xfmv->base;
pool             1196 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	pool->pipe_count++;
pool             1273 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	struct dce110_resource_pool *pool,
pool             1282 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	pool->base.res_cap = dce110_resource_cap(&ctx->asic_id);
pool             1283 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	pool->base.funcs = &dce110_res_pool_funcs;
pool             1289 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
pool             1290 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	pool->base.underlay_pipe_index = pool->base.pipe_count;
pool             1291 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
pool             1304 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		pool->base.dp_clock_source =
pool             1307 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		pool->base.clock_sources[0] =
pool             1310 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		pool->base.clock_sources[1] =
pool             1314 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		pool->base.clk_src_count = 2;
pool             1319 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	if (pool->base.dp_clock_source == NULL) {
pool             1325 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	for (i = 0; i < pool->base.clk_src_count; i++) {
pool             1326 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		if (pool->base.clock_sources[i] == NULL) {
pool             1333 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	pool->base.dmcu = dce_dmcu_create(ctx,
pool             1337 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	if (pool->base.dmcu == NULL) {
pool             1343 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	pool->base.abm = dce_abm_create(ctx,
pool             1347 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	if (pool->base.abm == NULL) {
pool             1356 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		pool->base.irqs = dal_irq_service_dce110_create(&init_data);
pool             1357 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		if (!pool->base.irqs)
pool             1361 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	for (i = 0; i < pool->base.pipe_count; i++) {
pool             1362 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		pool->base.timing_generators[i] = dce110_timing_generator_create(
pool             1364 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		if (pool->base.timing_generators[i] == NULL) {
pool             1370 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		pool->base.mis[i] = dce110_mem_input_create(ctx, i);
pool             1371 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		if (pool->base.mis[i] == NULL) {
pool             1378 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		pool->base.ipps[i] = dce110_ipp_create(ctx, i);
pool             1379 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		if (pool->base.ipps[i] == NULL) {
pool             1386 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		pool->base.transforms[i] = dce110_transform_create(ctx, i);
pool             1387 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		if (pool->base.transforms[i] == NULL) {
pool             1394 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		pool->base.opps[i] = dce110_opp_create(ctx, i);
pool             1395 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		if (pool->base.opps[i] == NULL) {
pool             1403 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool             1404 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		pool->base.engines[i] = dce110_aux_engine_create(ctx, i);
pool             1405 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		if (pool->base.engines[i] == NULL) {
pool             1411 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		pool->base.hw_i2cs[i] = dce110_i2c_hw_create(ctx, i);
pool             1412 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		if (pool->base.hw_i2cs[i] == NULL) {
pool             1418 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		pool->base.sw_i2cs[i] = NULL;
pool             1424 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	if (!underlay_create(ctx, &pool->base))
pool             1427 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	if (!resource_construct(num_virtual_links, dc, &pool->base,
pool             1434 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	dc->caps.max_planes =  pool->base.pipe_count;
pool             1436 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	for (i = 0; i < pool->base.underlay_pipe_index; ++i)
pool             1439 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	dc->caps.planes[pool->base.underlay_pipe_index] = underlay_plane_cap;
pool             1448 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	destruct(pool);
pool             1457 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	struct dce110_resource_pool *pool =
pool             1460 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	if (!pool)
pool             1463 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	if (construct(num_virtual_links, dc, pool, asic_id))
pool             1464 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 		return &pool->base;
pool             1466 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	kfree(pool);
pool               34 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h #define TO_DCE110_RES_POOL(pool)\
pool               35 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h 	container_of(pool, struct dce110_resource_pool, base)
pool               50 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h 		const struct resource_pool *pool,
pool              701 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c static void destruct(struct dce110_resource_pool *pool)
pool              705 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	for (i = 0; i < pool->base.pipe_count; i++) {
pool              706 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		if (pool->base.opps[i] != NULL)
pool              707 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 			dce110_opp_destroy(&pool->base.opps[i]);
pool              709 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		if (pool->base.transforms[i] != NULL)
pool              710 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 			dce112_transform_destroy(&pool->base.transforms[i]);
pool              712 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		if (pool->base.ipps[i] != NULL)
pool              713 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 			dce_ipp_destroy(&pool->base.ipps[i]);
pool              715 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		if (pool->base.mis[i] != NULL) {
pool              716 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 			kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
pool              717 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 			pool->base.mis[i] = NULL;
pool              720 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		if (pool->base.timing_generators[i] != NULL) {
pool              721 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 			kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool              722 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 			pool->base.timing_generators[i] = NULL;
pool              726 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool              727 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		if (pool->base.engines[i] != NULL)
pool              728 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 			dce110_engine_destroy(&pool->base.engines[i]);
pool              729 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		if (pool->base.hw_i2cs[i] != NULL) {
pool              730 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 			kfree(pool->base.hw_i2cs[i]);
pool              731 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 			pool->base.hw_i2cs[i] = NULL;
pool              733 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		if (pool->base.sw_i2cs[i] != NULL) {
pool              734 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 			kfree(pool->base.sw_i2cs[i]);
pool              735 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 			pool->base.sw_i2cs[i] = NULL;
pool              739 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	for (i = 0; i < pool->base.stream_enc_count; i++) {
pool              740 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		if (pool->base.stream_enc[i] != NULL)
pool              741 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 			kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
pool              744 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	for (i = 0; i < pool->base.clk_src_count; i++) {
pool              745 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		if (pool->base.clock_sources[i] != NULL) {
pool              746 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 			dce112_clock_source_destroy(&pool->base.clock_sources[i]);
pool              750 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	if (pool->base.dp_clock_source != NULL)
pool              751 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		dce112_clock_source_destroy(&pool->base.dp_clock_source);
pool              753 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	for (i = 0; i < pool->base.audio_count; i++)	{
pool              754 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		if (pool->base.audios[i] != NULL) {
pool              755 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 			dce_aud_destroy(&pool->base.audios[i]);
pool              759 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	if (pool->base.abm != NULL)
pool              760 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		dce_abm_destroy(&pool->base.abm);
pool              762 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	if (pool->base.dmcu != NULL)
pool              763 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		dce_dmcu_destroy(&pool->base.dmcu);
pool              765 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	if (pool->base.irqs != NULL) {
pool              766 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		dal_irq_service_destroy(&pool->base.irqs);
pool              772 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		const struct resource_pool *pool,
pool              777 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		return pool->clock_sources[DCE112_CLK_SRC_PLL0];
pool              779 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		return pool->clock_sources[DCE112_CLK_SRC_PLL1];
pool              781 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		return pool->clock_sources[DCE112_CLK_SRC_PLL2];
pool              783 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		return pool->clock_sources[DCE112_CLK_SRC_PLL3];
pool              785 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		return pool->clock_sources[DCE112_CLK_SRC_PLL4];
pool              787 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		return pool->clock_sources[DCE112_CLK_SRC_PLL5];
pool              966 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c static void dce112_destroy_resource_pool(struct resource_pool **pool)
pool              968 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
pool              972 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	*pool = NULL;
pool             1146 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	struct dce110_resource_pool *pool)
pool             1153 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	pool->base.res_cap = dce112_resource_cap(&ctx->asic_id);
pool             1154 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	pool->base.funcs = &dce112_res_pool_funcs;
pool             1159 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool             1160 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
pool             1161 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
pool             1172 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	pool->base.clock_sources[DCE112_CLK_SRC_PLL0] =
pool             1177 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	pool->base.clock_sources[DCE112_CLK_SRC_PLL1] =
pool             1182 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	pool->base.clock_sources[DCE112_CLK_SRC_PLL2] =
pool             1187 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	pool->base.clock_sources[DCE112_CLK_SRC_PLL3] =
pool             1192 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	pool->base.clock_sources[DCE112_CLK_SRC_PLL4] =
pool             1197 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	pool->base.clock_sources[DCE112_CLK_SRC_PLL5] =
pool             1202 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	pool->base.clk_src_count = DCE112_CLK_SRC_TOTAL;
pool             1204 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	pool->base.dp_clock_source =  dce112_clock_source_create(
pool             1209 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	for (i = 0; i < pool->base.clk_src_count; i++) {
pool             1210 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		if (pool->base.clock_sources[i] == NULL) {
pool             1217 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	pool->base.dmcu = dce_dmcu_create(ctx,
pool             1221 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	if (pool->base.dmcu == NULL) {
pool             1227 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	pool->base.abm = dce_abm_create(ctx,
pool             1231 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	if (pool->base.abm == NULL) {
pool             1240 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		pool->base.irqs = dal_irq_service_dce110_create(&init_data);
pool             1241 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		if (!pool->base.irqs)
pool             1245 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	for (i = 0; i < pool->base.pipe_count; i++) {
pool             1246 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		pool->base.timing_generators[i] =
pool             1251 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		if (pool->base.timing_generators[i] == NULL) {
pool             1257 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		pool->base.mis[i] = dce112_mem_input_create(ctx, i);
pool             1258 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		if (pool->base.mis[i] == NULL) {
pool             1265 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		pool->base.ipps[i] = dce112_ipp_create(ctx, i);
pool             1266 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		if (pool->base.ipps[i] == NULL) {
pool             1273 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		pool->base.transforms[i] = dce112_transform_create(ctx, i);
pool             1274 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		if (pool->base.transforms[i] == NULL) {
pool             1281 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		pool->base.opps[i] = dce112_opp_create(
pool             1284 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		if (pool->base.opps[i] == NULL) {
pool             1292 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool             1293 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		pool->base.engines[i] = dce112_aux_engine_create(ctx, i);
pool             1294 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		if (pool->base.engines[i] == NULL) {
pool             1300 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		pool->base.hw_i2cs[i] = dce112_i2c_hw_create(ctx, i);
pool             1301 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		if (pool->base.hw_i2cs[i] == NULL) {
pool             1307 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		pool->base.sw_i2cs[i] = NULL;
pool             1310 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	if (!resource_construct(num_virtual_links, dc, &pool->base,
pool             1314 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	dc->caps.max_planes =  pool->base.pipe_count;
pool             1329 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	destruct(pool);
pool             1337 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	struct dce110_resource_pool *pool =
pool             1340 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	if (!pool)
pool             1343 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	if (construct(num_virtual_links, dc, pool))
pool             1344 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 		return &pool->base;
pool             1346 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	kfree(pool);
pool              548 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c static void destruct(struct dce110_resource_pool *pool)
pool              552 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	for (i = 0; i < pool->base.pipe_count; i++) {
pool              553 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		if (pool->base.opps[i] != NULL)
pool              554 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 			dce110_opp_destroy(&pool->base.opps[i]);
pool              556 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		if (pool->base.transforms[i] != NULL)
pool              557 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 			dce120_transform_destroy(&pool->base.transforms[i]);
pool              559 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		if (pool->base.ipps[i] != NULL)
pool              560 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 			dce_ipp_destroy(&pool->base.ipps[i]);
pool              562 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		if (pool->base.mis[i] != NULL) {
pool              563 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 			kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
pool              564 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 			pool->base.mis[i] = NULL;
pool              567 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		if (pool->base.irqs != NULL) {
pool              568 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 			dal_irq_service_destroy(&pool->base.irqs);
pool              571 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		if (pool->base.timing_generators[i] != NULL) {
pool              572 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 			kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool              573 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 			pool->base.timing_generators[i] = NULL;
pool              577 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool              578 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		if (pool->base.engines[i] != NULL)
pool              579 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 			dce110_engine_destroy(&pool->base.engines[i]);
pool              580 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		if (pool->base.hw_i2cs[i] != NULL) {
pool              581 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 			kfree(pool->base.hw_i2cs[i]);
pool              582 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 			pool->base.hw_i2cs[i] = NULL;
pool              584 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		if (pool->base.sw_i2cs[i] != NULL) {
pool              585 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 			kfree(pool->base.sw_i2cs[i]);
pool              586 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 			pool->base.sw_i2cs[i] = NULL;
pool              590 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	for (i = 0; i < pool->base.audio_count; i++) {
pool              591 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		if (pool->base.audios[i])
pool              592 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 			dce_aud_destroy(&pool->base.audios[i]);
pool              595 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	for (i = 0; i < pool->base.stream_enc_count; i++) {
pool              596 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		if (pool->base.stream_enc[i] != NULL)
pool              597 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 			kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
pool              600 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	for (i = 0; i < pool->base.clk_src_count; i++) {
pool              601 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		if (pool->base.clock_sources[i] != NULL)
pool              603 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 				&pool->base.clock_sources[i]);
pool              606 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	if (pool->base.dp_clock_source != NULL)
pool              607 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		dce120_clock_source_destroy(&pool->base.dp_clock_source);
pool              609 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	if (pool->base.abm != NULL)
pool              610 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		dce_abm_destroy(&pool->base.abm);
pool              612 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	if (pool->base.dmcu != NULL)
pool              613 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		dce_dmcu_destroy(&pool->base.dmcu);
pool              825 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c static void dce120_destroy_resource_pool(struct resource_pool **pool)
pool              827 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
pool              831 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	*pool = NULL;
pool              984 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	struct dce110_resource_pool *pool)
pool              996 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	pool->base.res_cap = &res_cap;
pool              997 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	pool->base.funcs = &dce120_res_pool_funcs;
pool             1000 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	pool->base.pipe_count = res_cap.num_timing_generator;
pool             1001 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
pool             1002 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool             1016 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	pool->base.clock_sources[DCE120_CLK_SRC_PLL0] =
pool             1020 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	pool->base.clock_sources[DCE120_CLK_SRC_PLL1] =
pool             1024 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	pool->base.clock_sources[DCE120_CLK_SRC_PLL2] =
pool             1028 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	pool->base.clock_sources[DCE120_CLK_SRC_PLL3] =
pool             1032 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	pool->base.clock_sources[DCE120_CLK_SRC_PLL4] =
pool             1036 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	pool->base.clock_sources[DCE120_CLK_SRC_PLL5] =
pool             1040 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	pool->base.clk_src_count = DCE120_CLK_SRC_TOTAL;
pool             1042 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	pool->base.dp_clock_source =
pool             1047 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	for (i = 0; i < pool->base.clk_src_count; i++) {
pool             1048 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		if (pool->base.clock_sources[i] == NULL) {
pool             1055 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	pool->base.dmcu = dce_dmcu_create(ctx,
pool             1059 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	if (pool->base.dmcu == NULL) {
pool             1065 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	pool->base.abm = dce_abm_create(ctx,
pool             1069 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	if (pool->base.abm == NULL) {
pool             1077 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	pool->base.irqs = dal_irq_service_dce120_create(&irq_init_data);
pool             1078 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	if (!pool->base.irqs)
pool             1087 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	for (i = 0; i < pool->base.pipe_count; i++) {
pool             1095 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		pool->base.timing_generators[j] =
pool             1100 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		if (pool->base.timing_generators[j] == NULL) {
pool             1106 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		pool->base.mis[j] = dce120_mem_input_create(ctx, i);
pool             1108 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		if (pool->base.mis[j] == NULL) {
pool             1115 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		pool->base.ipps[j] = dce120_ipp_create(ctx, i);
pool             1116 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		if (pool->base.ipps[i] == NULL) {
pool             1123 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		pool->base.transforms[j] = dce120_transform_create(ctx, i);
pool             1124 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		if (pool->base.transforms[i] == NULL) {
pool             1131 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		pool->base.opps[j] = dce120_opp_create(
pool             1134 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		if (pool->base.opps[j] == NULL) {
pool             1144 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool             1145 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		pool->base.engines[i] = dce120_aux_engine_create(ctx, i);
pool             1146 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		if (pool->base.engines[i] == NULL) {
pool             1152 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		pool->base.hw_i2cs[i] = dce120_i2c_hw_create(ctx, i);
pool             1153 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		if (pool->base.hw_i2cs[i] == NULL) {
pool             1159 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		pool->base.sw_i2cs[i] = NULL;
pool             1163 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	pool->base.pipe_count = j;
pool             1164 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	pool->base.timing_generator_count = j;
pool             1171 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	if (!resource_construct(num_virtual_links, dc, &pool->base, res_funcs))
pool             1178 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	dc->caps.max_planes =  pool->base.pipe_count;
pool             1194 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	destruct(pool);
pool             1203 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	struct dce110_resource_pool *pool =
pool             1206 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	if (!pool)
pool             1209 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	if (construct(num_virtual_links, dc, pool))
pool             1210 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 		return &pool->base;
pool             1212 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	kfree(pool);
pool              730 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c static void destruct(struct dce110_resource_pool *pool)
pool              734 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	for (i = 0; i < pool->base.pipe_count; i++) {
pool              735 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.opps[i] != NULL)
pool              736 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 			dce110_opp_destroy(&pool->base.opps[i]);
pool              738 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.transforms[i] != NULL)
pool              739 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 			dce80_transform_destroy(&pool->base.transforms[i]);
pool              741 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.ipps[i] != NULL)
pool              742 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 			dce_ipp_destroy(&pool->base.ipps[i]);
pool              744 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.mis[i] != NULL) {
pool              745 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 			kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
pool              746 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 			pool->base.mis[i] = NULL;
pool              749 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.timing_generators[i] != NULL)	{
pool              750 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 			kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool              751 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 			pool->base.timing_generators[i] = NULL;
pool              755 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool              756 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.engines[i] != NULL)
pool              757 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 			dce110_engine_destroy(&pool->base.engines[i]);
pool              758 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.hw_i2cs[i] != NULL) {
pool              759 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 			kfree(pool->base.hw_i2cs[i]);
pool              760 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 			pool->base.hw_i2cs[i] = NULL;
pool              762 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.sw_i2cs[i] != NULL) {
pool              763 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 			kfree(pool->base.sw_i2cs[i]);
pool              764 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 			pool->base.sw_i2cs[i] = NULL;
pool              768 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	for (i = 0; i < pool->base.stream_enc_count; i++) {
pool              769 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.stream_enc[i] != NULL)
pool              770 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 			kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
pool              773 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	for (i = 0; i < pool->base.clk_src_count; i++) {
pool              774 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.clock_sources[i] != NULL) {
pool              775 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 			dce80_clock_source_destroy(&pool->base.clock_sources[i]);
pool              779 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	if (pool->base.abm != NULL)
pool              780 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 			dce_abm_destroy(&pool->base.abm);
pool              782 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	if (pool->base.dmcu != NULL)
pool              783 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 			dce_dmcu_destroy(&pool->base.dmcu);
pool              785 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	if (pool->base.dp_clock_source != NULL)
pool              786 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		dce80_clock_source_destroy(&pool->base.dp_clock_source);
pool              788 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	for (i = 0; i < pool->base.audio_count; i++)	{
pool              789 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.audios[i] != NULL) {
pool              790 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 			dce_aud_destroy(&pool->base.audios[i]);
pool              794 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	if (pool->base.irqs != NULL) {
pool              795 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		dal_irq_service_destroy(&pool->base.irqs);
pool              854 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c static void dce80_destroy_resource_pool(struct resource_pool **pool)
pool              856 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
pool              860 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	*pool = NULL;
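The dce80_destroy_resource_pool entries above show the usual DC teardown shape: downcast the caller's base pointer to the ASIC-specific pool, run the destructor, free the containing allocation, and clear the caller's handle through the double pointer. A minimal standalone sketch of that pattern follows; the struct names, the container_of-style macro and the use of malloc/free are illustrative stand-ins, not the actual dc/dce80 API.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

/* Hypothetical stand-ins for resource_pool / dce110_resource_pool. */
struct base_pool {
	int pipe_count;
};

struct derived_pool {
	struct base_pool base;          /* embedded base, recovered via container_of */
	int asic_specific_state;
};

/* container_of in plain C: map a pointer to the embedded base back to the derived object. */
#define to_derived_pool(ptr) \
	((struct derived_pool *)((char *)(ptr) - offsetof(struct derived_pool, base)))

static void destruct(struct derived_pool *pool)
{
	/* per-member teardown would go here; each member is NULL-checked in the real code */
	pool->asic_specific_state = 0;
}

/* Takes a double pointer so the caller's handle is cleared as well. */
static void destroy_pool(struct base_pool **pool)
{
	struct derived_pool *derived = to_derived_pool(*pool);

	destruct(derived);
	free(derived);
	*pool = NULL;
}

int main(void)
{
	struct derived_pool *p = calloc(1, sizeof(*p));
	struct base_pool *handle;

	if (!p)
		return 1;
	handle = &p->base;
	destroy_pool(&handle);
	printf("handle is %s\n", handle ? "set" : "NULL");      /* prints "NULL" */
	return 0;
}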
pool              876 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	struct dce110_resource_pool *pool)
pool              884 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	pool->base.res_cap = &res_cap;
pool              885 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	pool->base.funcs = &dce80_res_pool_funcs;
pool              891 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool              892 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	pool->base.pipe_count = res_cap.num_timing_generator;
pool              893 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	pool->base.timing_generator_count = res_cap.num_timing_generator;
pool              906 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.dp_clock_source =
pool              909 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.clock_sources[0] =
pool              911 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.clock_sources[1] =
pool              913 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.clock_sources[2] =
pool              915 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.clk_src_count = 3;
pool              918 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.dp_clock_source =
pool              921 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.clock_sources[0] =
pool              923 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.clock_sources[1] =
pool              925 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.clk_src_count = 2;
pool              928 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	if (pool->base.dp_clock_source == NULL) {
pool              934 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	for (i = 0; i < pool->base.clk_src_count; i++) {
pool              935 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.clock_sources[i] == NULL) {
pool              942 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	pool->base.dmcu = dce_dmcu_create(ctx,
pool              946 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	if (pool->base.dmcu == NULL) {
pool              952 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	pool->base.abm = dce_abm_create(ctx,
pool              956 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	if (pool->base.abm == NULL) {
pool              965 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.irqs = dal_irq_service_dce80_create(&init_data);
pool              966 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (!pool->base.irqs)
pool              970 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	for (i = 0; i < pool->base.pipe_count; i++) {
pool              971 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.timing_generators[i] = dce80_timing_generator_create(
pool              973 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.timing_generators[i] == NULL) {
pool              979 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.mis[i] = dce80_mem_input_create(ctx, i);
pool              980 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.mis[i] == NULL) {
pool              986 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.ipps[i] = dce80_ipp_create(ctx, i);
pool              987 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.ipps[i] == NULL) {
pool              993 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.transforms[i] = dce80_transform_create(ctx, i);
pool              994 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.transforms[i] == NULL) {
pool             1000 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.opps[i] = dce80_opp_create(ctx, i);
pool             1001 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.opps[i] == NULL) {
pool             1008 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool             1009 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.engines[i] = dce80_aux_engine_create(ctx, i);
pool             1010 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.engines[i] == NULL) {
pool             1016 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i);
pool             1017 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.hw_i2cs[i] == NULL) {
pool             1023 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.sw_i2cs[i] = dce80_i2c_sw_create(ctx);
pool             1024 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.sw_i2cs[i] == NULL) {
pool             1032 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	dc->caps.max_planes =  pool->base.pipe_count;
pool             1039 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	if (!resource_construct(num_virtual_links, dc, &pool->base,
pool             1049 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	destruct(pool);
pool             1057 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	struct dce110_resource_pool *pool =
pool             1060 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	if (!pool)
pool             1063 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	if (dce80_construct(num_virtual_links, dc, pool))
pool             1064 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		return &pool->base;
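The dce80_create_resource_pool lines above (allocate, construct, return &pool->base, otherwise free) pair with the destruct(pool) call on the construct error path: because every member teardown is NULL-checked, a construct that fails part-way can be unwound by the same destructor. A hedged, compile-only sketch of that create/construct/fail-path shape, with invented names and plain calloc/free in place of the kernel allocators:

#include <stdlib.h>
#include <stdbool.h>

#define NUM_PIPES 4

struct pool {
	void *opps[NUM_PIPES];                  /* per-pipe objects, NULL until created */
	void *timing_generators[NUM_PIPES];
};

static void destruct(struct pool *p)
{
	/* Tolerates partial construction: free(NULL) is a no-op, slots are re-cleared. */
	for (int i = 0; i < NUM_PIPES; i++) {
		free(p->opps[i]);
		p->opps[i] = NULL;
		free(p->timing_generators[i]);
		p->timing_generators[i] = NULL;
	}
}

static bool construct(struct pool *p)
{
	for (int i = 0; i < NUM_PIPES; i++) {
		p->opps[i] = calloc(1, 64);
		p->timing_generators[i] = calloc(1, 64);
		if (!p->opps[i] || !p->timing_generators[i])
			goto fail;              /* bail out mid-way */
	}
	return true;

fail:
	destruct(p);                            /* one cleanup path for every failure point */
	return false;
}

struct pool *create_pool(void)
{
	struct pool *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	if (construct(p))
		return p;

	free(p);                                /* construct failed: release the shell */
	return NULL;
}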
pool             1073 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	struct dce110_resource_pool *pool)
pool             1081 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	pool->base.res_cap = &res_cap_81;
pool             1082 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	pool->base.funcs = &dce80_res_pool_funcs;
pool             1088 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool             1089 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	pool->base.pipe_count = res_cap_81.num_timing_generator;
pool             1090 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	pool->base.timing_generator_count = res_cap_81.num_timing_generator;
pool             1103 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.dp_clock_source =
pool             1106 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.clock_sources[0] =
pool             1108 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.clock_sources[1] =
pool             1110 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.clock_sources[2] =
pool             1112 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.clk_src_count = 3;
pool             1115 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.dp_clock_source =
pool             1118 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.clock_sources[0] =
pool             1120 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.clock_sources[1] =
pool             1122 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.clk_src_count = 2;
pool             1125 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	if (pool->base.dp_clock_source == NULL) {
pool             1131 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	for (i = 0; i < pool->base.clk_src_count; i++) {
pool             1132 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.clock_sources[i] == NULL) {
pool             1139 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	pool->base.dmcu = dce_dmcu_create(ctx,
pool             1143 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	if (pool->base.dmcu == NULL) {
pool             1149 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	pool->base.abm = dce_abm_create(ctx,
pool             1153 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	if (pool->base.abm == NULL) {
pool             1162 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.irqs = dal_irq_service_dce80_create(&init_data);
pool             1163 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (!pool->base.irqs)
pool             1167 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	for (i = 0; i < pool->base.pipe_count; i++) {
pool             1168 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.timing_generators[i] = dce80_timing_generator_create(
pool             1170 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.timing_generators[i] == NULL) {
pool             1176 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.mis[i] = dce80_mem_input_create(ctx, i);
pool             1177 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.mis[i] == NULL) {
pool             1183 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.ipps[i] = dce80_ipp_create(ctx, i);
pool             1184 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.ipps[i] == NULL) {
pool             1190 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.transforms[i] = dce80_transform_create(ctx, i);
pool             1191 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.transforms[i] == NULL) {
pool             1197 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.opps[i] = dce80_opp_create(ctx, i);
pool             1198 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.opps[i] == NULL) {
pool             1205 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool             1206 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.engines[i] = dce80_aux_engine_create(ctx, i);
pool             1207 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.engines[i] == NULL) {
pool             1213 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i);
pool             1214 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.hw_i2cs[i] == NULL) {
pool             1220 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.sw_i2cs[i] = dce80_i2c_sw_create(ctx);
pool             1221 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.sw_i2cs[i] == NULL) {
pool             1229 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	dc->caps.max_planes =  pool->base.pipe_count;
pool             1236 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	if (!resource_construct(num_virtual_links, dc, &pool->base,
pool             1246 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	destruct(pool);
pool             1254 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	struct dce110_resource_pool *pool =
pool             1257 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	if (!pool)
pool             1260 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	if (dce81_construct(num_virtual_links, dc, pool))
pool             1261 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		return &pool->base;
pool             1270 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	struct dce110_resource_pool *pool)
pool             1278 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	pool->base.res_cap = &res_cap_83;
pool             1279 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	pool->base.funcs = &dce80_res_pool_funcs;
pool             1285 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool             1286 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	pool->base.pipe_count = res_cap_83.num_timing_generator;
pool             1287 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	pool->base.timing_generator_count = res_cap_83.num_timing_generator;
pool             1300 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.dp_clock_source =
pool             1303 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.clock_sources[0] =
pool             1305 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.clock_sources[1] =
pool             1307 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.clk_src_count = 2;
pool             1310 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.dp_clock_source =
pool             1313 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.clock_sources[0] =
pool             1315 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.clk_src_count = 1;
pool             1318 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	if (pool->base.dp_clock_source == NULL) {
pool             1324 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	for (i = 0; i < pool->base.clk_src_count; i++) {
pool             1325 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.clock_sources[i] == NULL) {
pool             1332 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	pool->base.dmcu = dce_dmcu_create(ctx,
pool             1336 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	if (pool->base.dmcu == NULL) {
pool             1342 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	pool->base.abm = dce_abm_create(ctx,
pool             1346 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	if (pool->base.abm == NULL) {
pool             1355 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.irqs = dal_irq_service_dce80_create(&init_data);
pool             1356 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (!pool->base.irqs)
pool             1360 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	for (i = 0; i < pool->base.pipe_count; i++) {
pool             1361 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.timing_generators[i] = dce80_timing_generator_create(
pool             1363 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.timing_generators[i] == NULL) {
pool             1369 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.mis[i] = dce80_mem_input_create(ctx, i);
pool             1370 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.mis[i] == NULL) {
pool             1376 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.ipps[i] = dce80_ipp_create(ctx, i);
pool             1377 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.ipps[i] == NULL) {
pool             1383 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.transforms[i] = dce80_transform_create(ctx, i);
pool             1384 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.transforms[i] == NULL) {
pool             1390 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.opps[i] = dce80_opp_create(ctx, i);
pool             1391 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.opps[i] == NULL) {
pool             1398 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool             1399 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.engines[i] = dce80_aux_engine_create(ctx, i);
pool             1400 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.engines[i] == NULL) {
pool             1406 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i);
pool             1407 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.hw_i2cs[i] == NULL) {
pool             1413 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		pool->base.sw_i2cs[i] = dce80_i2c_sw_create(ctx);
pool             1414 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		if (pool->base.sw_i2cs[i] == NULL) {
pool             1422 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	dc->caps.max_planes =  pool->base.pipe_count;
pool             1429 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	if (!resource_construct(num_virtual_links, dc, &pool->base,
pool             1439 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	destruct(pool);
pool             1447 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	struct dce110_resource_pool *pool =
pool             1450 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	if (!pool)
pool             1453 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	if (dce83_construct(num_virtual_links, dc, pool))
pool             1454 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 		return &pool->base;
pool              129 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 	struct resource_pool *pool = dc->res_pool;
pool              135 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 	for (i = 0; i < pool->pipe_count; i++) {
pool              136 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 		struct hubp *hubp = pool->hubps[i];
pool              167 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 	for (i = 0; i < pool->pipe_count; i++) {
pool              168 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
pool              173 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
pool              192 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 	for (i = 0; i < pool->pipe_count; i++) {
pool              193 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
pool              200 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
pool              224 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 	for (i = 0; i < pool->pipe_count; i++) {
pool              225 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
pool              230 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
pool              245 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 	struct resource_pool *pool = dc->res_pool;
pool              257 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 	for (i = 0; i < pool->pipe_count; i++) {
pool              258 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 		struct dpp *dpp = pool->dpps[i];
pool              299 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 	for (i = 0; i < pool->pipe_count; i++) {
pool              302 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
pool              314 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 	for (i = 0; i < pool->timing_generator_count; i++) {
pool              315 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 		struct timing_generator *tg = pool->timing_generators[i];
pool              355 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 	for (i = 0; i < pool->res_cap->num_dsc; i++) {
pool              356 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 		struct display_stream_compressor *dsc = pool->dscs[i];
pool              371 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 	for (i = 0; i < pool->stream_enc_count; i++) {
pool              372 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 		struct stream_encoder *enc = pool->stream_enc[i];
pool              112 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 	struct resource_pool *pool = dc->res_pool;
pool              133 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 	for (i = 0; i < pool->pipe_count; i++) {
pool              134 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 		struct hubp *hubp = pool->hubps[i];
pool              190 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 	struct resource_pool *pool = dc->res_pool;
pool              203 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 	for (i = 0; i < pool->pipe_count; i++) {
pool              204 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
pool              212 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
pool              232 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 	struct resource_pool *pool = dc->res_pool;
pool              248 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 	for (i = 0; i < pool->pipe_count; i++) {
pool              249 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
pool              260 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
pool              289 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 	struct resource_pool *pool = dc->res_pool;
pool              302 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 	for (i = 0; i < pool->pipe_count; i++) {
pool              303 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
pool              311 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
pool              329 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 	struct resource_pool *pool = dc->res_pool;
pool              341 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 	for (i = 0; i < pool->pipe_count; i++) {
pool              342 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 		struct dpp *dpp = pool->dpps[i];
pool              384 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 	struct resource_pool *pool = dc->res_pool;
pool              394 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 	for (i = 0; i < pool->pipe_count; i++) {
pool              397 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
pool              415 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 	struct resource_pool *pool = dc->res_pool;
pool              426 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 	for (i = 0; i < pool->timing_generator_count; i++) {
pool              427 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 		struct timing_generator *tg = pool->timing_generators[i];
pool              490 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 	struct resource_pool *pool = dc->res_pool;
pool              493 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 	for (i = 0; i < pool->timing_generator_count; i++) {
pool              494 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 		struct timing_generator *tg = pool->timing_generators[i];
pool              506 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 	struct resource_pool *pool = dc->res_pool;
pool              509 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 	for (i = 0; i < pool->pipe_count; i++) {
pool              510 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 		struct hubp *hubp = pool->hubps[i];
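The dcn10_hw_sequencer.c and dcn10_hw_sequencer_debug.c entries above all walk the same pool arrays (hubps, dpps, mpc, timing_generators, stream_enc) to log per-instance hardware state. A small sketch of that dump-loop shape over a hypothetical pool; the field names are invented for illustration, and the real helpers first read the values back from registers into a per-HUBP state struct.

#include <stdio.h>

#define PIPE_COUNT 4

/* Hypothetical per-pipe state snapshot. */
struct hubp_state {
	int inst;
	unsigned int qos_level_low_wm;
	unsigned int qos_level_high_wm;
};

struct pool_view {
	int pipe_count;
	struct hubp_state hubps[PIPE_COUNT];
};

static void dump_hubp_states(const struct pool_view *pool)
{
	/* One row per pipe, keyed by instance index, like the log helpers above. */
	printf("inst  qos_low  qos_high\n");
	for (int i = 0; i < pool->pipe_count; i++) {
		const struct hubp_state *s = &pool->hubps[i];

		printf("%4d  %7u  %8u\n", s->inst,
		       s->qos_level_low_wm, s->qos_level_high_wm);
	}
}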
pool              885 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c static void destruct(struct dcn10_resource_pool *pool)
pool              889 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	for (i = 0; i < pool->base.stream_enc_count; i++) {
pool              890 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		if (pool->base.stream_enc[i] != NULL) {
pool              891 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 			kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
pool              892 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 			pool->base.stream_enc[i] = NULL;
pool              896 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	if (pool->base.mpc != NULL) {
pool              897 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		kfree(TO_DCN10_MPC(pool->base.mpc));
pool              898 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		pool->base.mpc = NULL;
pool              901 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	if (pool->base.hubbub != NULL) {
pool              902 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		kfree(pool->base.hubbub);
pool              903 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		pool->base.hubbub = NULL;
pool              906 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	for (i = 0; i < pool->base.pipe_count; i++) {
pool              907 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		if (pool->base.opps[i] != NULL)
pool              908 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 			pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
pool              910 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		if (pool->base.dpps[i] != NULL)
pool              911 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 			dcn10_dpp_destroy(&pool->base.dpps[i]);
pool              913 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		if (pool->base.ipps[i] != NULL)
pool              914 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 			pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
pool              916 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		if (pool->base.hubps[i] != NULL) {
pool              917 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 			kfree(TO_DCN10_HUBP(pool->base.hubps[i]));
pool              918 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 			pool->base.hubps[i] = NULL;
pool              921 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		if (pool->base.irqs != NULL) {
pool              922 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 			dal_irq_service_destroy(&pool->base.irqs);
pool              925 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		if (pool->base.timing_generators[i] != NULL)	{
pool              926 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 			kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
pool              927 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 			pool->base.timing_generators[i] = NULL;
pool              931 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool              932 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		if (pool->base.engines[i] != NULL)
pool              933 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 			dce110_engine_destroy(&pool->base.engines[i]);
pool              934 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		if (pool->base.hw_i2cs[i] != NULL) {
pool              935 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 			kfree(pool->base.hw_i2cs[i]);
pool              936 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 			pool->base.hw_i2cs[i] = NULL;
pool              938 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		if (pool->base.sw_i2cs[i] != NULL) {
pool              939 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 			kfree(pool->base.sw_i2cs[i]);
pool              940 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 			pool->base.sw_i2cs[i] = NULL;
pool              944 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	for (i = 0; i < pool->base.audio_count; i++) {
pool              945 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		if (pool->base.audios[i])
pool              946 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 			dce_aud_destroy(&pool->base.audios[i]);
pool              949 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	for (i = 0; i < pool->base.clk_src_count; i++) {
pool              950 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		if (pool->base.clock_sources[i] != NULL) {
pool              951 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 			dcn10_clock_source_destroy(&pool->base.clock_sources[i]);
pool              952 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 			pool->base.clock_sources[i] = NULL;
pool              956 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	if (pool->base.dp_clock_source != NULL) {
pool              957 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		dcn10_clock_source_destroy(&pool->base.dp_clock_source);
pool              958 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		pool->base.dp_clock_source = NULL;
pool              961 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	if (pool->base.abm != NULL)
pool              962 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		dce_abm_destroy(&pool->base.abm);
pool              964 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	if (pool->base.dmcu != NULL)
pool              965 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		dce_dmcu_destroy(&pool->base.dmcu);
pool              967 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	kfree(pool->base.pp_smu);
pool             1090 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		const struct resource_pool *pool,
pool             1095 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe);
pool             1110 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
pool             1111 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
pool             1112 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
pool             1113 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;
pool             1128 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c static void dcn10_destroy_resource_pool(struct resource_pool **pool)
pool             1130 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool);
pool             1134 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	*pool = NULL;
pool             1217 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		const struct resource_pool *pool,
pool             1224 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	for (i = 0; i < pool->stream_enc_count; i++) {
pool             1226 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 				pool->stream_enc[i]) {
pool             1231 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 			if (pool->stream_enc[i]->id ==
pool             1233 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 				return pool->stream_enc[i];
pool             1242 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		return pool->stream_enc[j];
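The stream-encoder lookup above scans pool->stream_enc for an encoder whose id matches the link's preferred engine and otherwise falls back to the first free one. A compact sketch of that "prefer exact match, else first free" search; the availability flag here is a stand-in for the bookkeeping the real code keeps in the resource context.

#include <stddef.h>

struct encoder {
	int id;
	int in_use;
};

static struct encoder *find_encoder(struct encoder encs[], int count, int preferred_id)
{
	struct encoder *first_free = NULL;

	for (int i = 0; i < count; i++) {
		if (encs[i].in_use)
			continue;
		if (encs[i].id == preferred_id)
			return &encs[i];        /* exact match wins immediately */
		if (!first_free)
			first_free = &encs[i];  /* remember the first free fallback */
	}
	return first_free;                      /* NULL when everything is busy */
}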
pool             1274 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	struct dcn10_resource_pool *pool)
pool             1284 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		pool->base.res_cap = &rv2_res_cap;
pool             1286 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		pool->base.res_cap = &res_cap;
pool             1287 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	pool->base.funcs = &dcn10_res_pool_funcs;
pool             1297 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool             1300 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
pool             1303 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		pool->base.pipe_count = 3;
pool             1323 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	pool->base.clock_sources[DCN10_CLK_SRC_PLL0] =
pool             1327 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	pool->base.clock_sources[DCN10_CLK_SRC_PLL1] =
pool             1331 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	pool->base.clock_sources[DCN10_CLK_SRC_PLL2] =
pool             1337 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		pool->base.clock_sources[DCN10_CLK_SRC_PLL3] =
pool             1343 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	pool->base.clk_src_count = DCN10_CLK_SRC_TOTAL;
pool             1346 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		pool->base.clk_src_count = DCN101_CLK_SRC_TOTAL;
pool             1348 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	pool->base.dp_clock_source =
pool             1354 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	for (i = 0; i < pool->base.clk_src_count; i++) {
pool             1355 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		if (pool->base.clock_sources[i] == NULL) {
pool             1362 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	pool->base.dmcu = dcn10_dmcu_create(ctx,
pool             1366 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	if (pool->base.dmcu == NULL) {
pool             1372 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	pool->base.abm = dce_abm_create(ctx,
pool             1376 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	if (pool->base.abm == NULL) {
pool             1418 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	pool->base.pp_smu = dcn10_pp_smu_create(ctx);
pool             1424 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	if (pool->base.pp_smu != NULL
pool             1425 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 			&& pool->base.pp_smu->rv_funcs.set_pme_wa_enable != NULL)
pool             1432 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		dc->res_pool = &pool->base;
pool             1439 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		pool->base.irqs = dal_irq_service_dcn10_create(&init_data);
pool             1440 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		if (!pool->base.irqs)
pool             1447 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	for (i = 0; i < pool->base.pipe_count; i++) {
pool             1454 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		pool->base.hubps[j] = dcn10_hubp_create(ctx, i);
pool             1455 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		if (pool->base.hubps[j] == NULL) {
pool             1462 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		pool->base.ipps[j] = dcn10_ipp_create(ctx, i);
pool             1463 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		if (pool->base.ipps[j] == NULL) {
pool             1470 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		pool->base.dpps[j] = dcn10_dpp_create(ctx, i);
pool             1471 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		if (pool->base.dpps[j] == NULL) {
pool             1478 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		pool->base.opps[j] = dcn10_opp_create(ctx, i);
pool             1479 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		if (pool->base.opps[j] == NULL) {
pool             1486 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		pool->base.timing_generators[j] = dcn10_timing_generator_create(
pool             1488 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		if (pool->base.timing_generators[j] == NULL) {
pool             1497 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool             1498 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		pool->base.engines[i] = dcn10_aux_engine_create(ctx, i);
pool             1499 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		if (pool->base.engines[i] == NULL) {
pool             1505 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		pool->base.hw_i2cs[i] = dcn10_i2c_hw_create(ctx, i);
pool             1506 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		if (pool->base.hw_i2cs[i] == NULL) {
pool             1512 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		pool->base.sw_i2cs[i] = NULL;
pool             1516 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	pool->base.pipe_count = j;
pool             1517 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	pool->base.timing_generator_count = j;
pool             1522 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	dc->dml.ip.max_num_dpp = pool->base.pipe_count;
pool             1523 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	dc->dcn_ip->max_num_dpp = pool->base.pipe_count;
pool             1525 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	pool->base.mpc = dcn10_mpc_create(ctx);
pool             1526 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	if (pool->base.mpc == NULL) {
pool             1532 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	pool->base.hubbub = dcn10_hubbub_create(ctx);
pool             1533 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	if (pool->base.hubbub == NULL) {
pool             1539 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	if (!resource_construct(num_virtual_links, dc, &pool->base,
pool             1545 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	dc->caps.max_planes =  pool->base.pipe_count;
pool             1556 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	destruct(pool);
pool             1565 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	struct dcn10_resource_pool *pool =
pool             1568 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	if (!pool)
pool             1571 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	if (construct(init_data->num_virtual_links, dc, pool))
pool             1572 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 		return &pool->base;
pool             1574 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	kfree(pool);
pool               31 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h #define TO_DCN10_RES_POOL(pool)\
pool               32 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h 	container_of(pool, struct dcn10_resource_pool, base)
pool               47 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h 		const struct resource_pool *pool,
pool             1304 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c static void destruct(struct dcn20_resource_pool *pool)
pool             1308 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	for (i = 0; i < pool->base.stream_enc_count; i++) {
pool             1309 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.stream_enc[i] != NULL) {
pool             1310 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
pool             1311 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			pool->base.stream_enc[i] = NULL;
pool             1316 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
pool             1317 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.dscs[i] != NULL)
pool             1318 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			dcn20_dsc_destroy(&pool->base.dscs[i]);
pool             1322 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	if (pool->base.mpc != NULL) {
pool             1323 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		kfree(TO_DCN20_MPC(pool->base.mpc));
pool             1324 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		pool->base.mpc = NULL;
pool             1326 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	if (pool->base.hubbub != NULL) {
pool             1327 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		kfree(pool->base.hubbub);
pool             1328 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		pool->base.hubbub = NULL;
pool             1330 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	for (i = 0; i < pool->base.pipe_count; i++) {
pool             1331 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.dpps[i] != NULL)
pool             1332 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			dcn20_dpp_destroy(&pool->base.dpps[i]);
pool             1334 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.ipps[i] != NULL)
pool             1335 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
pool             1337 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.hubps[i] != NULL) {
pool             1338 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
pool             1339 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			pool->base.hubps[i] = NULL;
pool             1342 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.irqs != NULL) {
pool             1343 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			dal_irq_service_destroy(&pool->base.irqs);
pool             1347 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool             1348 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.engines[i] != NULL)
pool             1349 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			dce110_engine_destroy(&pool->base.engines[i]);
pool             1350 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.hw_i2cs[i] != NULL) {
pool             1351 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			kfree(pool->base.hw_i2cs[i]);
pool             1352 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			pool->base.hw_i2cs[i] = NULL;
pool             1354 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.sw_i2cs[i] != NULL) {
pool             1355 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			kfree(pool->base.sw_i2cs[i]);
pool             1356 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			pool->base.sw_i2cs[i] = NULL;
pool             1360 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	for (i = 0; i < pool->base.res_cap->num_opp; i++) {
pool             1361 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.opps[i] != NULL)
pool             1362 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
pool             1365 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
pool             1366 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.timing_generators[i] != NULL)	{
pool             1367 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
pool             1368 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			pool->base.timing_generators[i] = NULL;
pool             1372 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
pool             1373 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.dwbc[i] != NULL) {
pool             1374 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			kfree(TO_DCN20_DWBC(pool->base.dwbc[i]));
pool             1375 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			pool->base.dwbc[i] = NULL;
pool             1377 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.mcif_wb[i] != NULL) {
pool             1378 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			kfree(TO_DCN20_MMHUBBUB(pool->base.mcif_wb[i]));
pool             1379 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			pool->base.mcif_wb[i] = NULL;
pool             1383 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	for (i = 0; i < pool->base.audio_count; i++) {
pool             1384 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.audios[i])
pool             1385 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			dce_aud_destroy(&pool->base.audios[i]);
pool             1388 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	for (i = 0; i < pool->base.clk_src_count; i++) {
pool             1389 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.clock_sources[i] != NULL) {
pool             1390 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			dcn20_clock_source_destroy(&pool->base.clock_sources[i]);
pool             1391 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			pool->base.clock_sources[i] = NULL;
pool             1395 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	if (pool->base.dp_clock_source != NULL) {
pool             1396 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		dcn20_clock_source_destroy(&pool->base.dp_clock_source);
pool             1397 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		pool->base.dp_clock_source = NULL;
pool             1401 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	if (pool->base.abm != NULL)
pool             1402 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		dce_abm_destroy(&pool->base.abm);
pool             1404 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	if (pool->base.dmcu != NULL)
pool             1405 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		dce_dmcu_destroy(&pool->base.dmcu);
pool             1407 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	if (pool->base.dccg != NULL)
pool             1408 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		dcn_dccg_destroy(&pool->base.dccg);
pool             1410 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	if (pool->base.pp_smu != NULL)
pool             1411 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		dcn20_pp_smu_destroy(&pool->base.pp_smu);
pool             1532 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			const struct resource_pool *pool,
pool             1541 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
pool             1542 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		*dsc = pool->dscs[pipe_idx];
pool             1548 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	for (i = 0; i < pool->res_cap->num_dsc; i++)
pool             1550 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			*dsc = pool->dscs[i];
pool             1557 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			const struct resource_pool *pool,
pool             1562 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	for (i = 0; i < pool->res_cap->num_dsc; i++)
pool             1563 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->dscs[i] == *dsc) {
pool             1580 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	const struct resource_pool *pool = dc->res_pool;
pool             1589 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i);
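The acquire_dsc/release_dsc entries above show the two DSC assignment modes: when the pool has one DSC per OPP the DSC is taken 1:1 by pipe index, otherwise the first DSC not yet claimed in the resource context is handed out, and release walks the array to find and clear the matching slot. A self-contained sketch under those assumptions; the acquired-flag array stands in for the real per-state bookkeeping.

#include <stdbool.h>
#include <stddef.h>

#define NUM_DSC 4
#define NUM_OPP 4

struct dsc { int inst; };

struct res_ctx {
	bool dsc_acquired[NUM_DSC];     /* hypothetical per-state bookkeeping */
};

static struct dsc dsc_pool[NUM_DSC] = { {0}, {1}, {2}, {3} };

static struct dsc *acquire_dsc(struct res_ctx *ctx, int pipe_idx)
{
	/* 1:1 mapping when there is a DSC per OPP. */
	if (NUM_DSC == NUM_OPP) {
		ctx->dsc_acquired[pipe_idx] = true;
		return &dsc_pool[pipe_idx];
	}

	/* Otherwise hand out the first DSC not yet claimed by this state. */
	for (int i = 0; i < NUM_DSC; i++) {
		if (!ctx->dsc_acquired[i]) {
			ctx->dsc_acquired[i] = true;
			return &dsc_pool[i];
		}
	}
	return NULL;
}

static void release_dsc(struct res_ctx *ctx, struct dsc **dsc)
{
	for (int i = 0; i < NUM_DSC; i++) {
		if (&dsc_pool[i] == *dsc) {
			ctx->dsc_acquired[i] = false;
			*dsc = NULL;
			break;
		}
	}
}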
pool             1724 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		const struct resource_pool *pool,
pool             1733 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	next_odm_pipe->plane_res.mi = pool->mis[next_odm_pipe->pipe_idx];
pool             1734 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	next_odm_pipe->plane_res.hubp = pool->hubps[next_odm_pipe->pipe_idx];
pool             1735 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	next_odm_pipe->plane_res.ipp = pool->ipps[next_odm_pipe->pipe_idx];
pool             1736 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	next_odm_pipe->plane_res.xfm = pool->transforms[next_odm_pipe->pipe_idx];
pool             1737 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	next_odm_pipe->plane_res.dpp = pool->dpps[next_odm_pipe->pipe_idx];
pool             1738 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	next_odm_pipe->plane_res.mpcc_inst = pool->dpps[next_odm_pipe->pipe_idx]->inst;
pool             1787 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
pool             1790 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
pool             1802 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		const struct resource_pool *pool,
pool             1813 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	secondary_pipe->plane_res.mi = pool->mis[secondary_pipe->pipe_idx];
pool             1814 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	secondary_pipe->plane_res.hubp = pool->hubps[secondary_pipe->pipe_idx];
pool             1815 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx];
pool             1816 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx];
pool             1817 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx];
pool             1818 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst;
pool             2290 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		const struct resource_pool *pool,
pool             2936 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		const struct resource_pool *pool,
pool             2941 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe);
pool             2953 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
pool             2954 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
pool             2955 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
pool             2956 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;
pool             2971 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c static void dcn20_destroy_resource_pool(struct resource_pool **pool)
pool             2973 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	struct dcn20_resource_pool *dcn20_pool = TO_DCN20_RES_POOL(*pool);
pool             2977 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	*pool = NULL;
pool             3017 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
pool             3020 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	uint32_t pipe_count = pool->res_cap->num_dwb;
pool             3037 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		pool->dwbc[i] = &dwbc20->base;
pool             3042 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
pool             3045 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	uint32_t pipe_count = pool->res_cap->num_dwb;
pool             3064 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		pool->mcif_wb[i] = &mcif_wb20->base;
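dcn20_dwbc_create and dcn20_mmhubbub_create above follow the same per-instance creation loop: size the loop from res_cap->num_dwb, allocate each writeback object, and park its embedded base in the pool array, returning false so the caller can unwind when an allocation fails. A minimal sketch of that loop with invented types:

#include <stdlib.h>
#include <stdbool.h>

#define MAX_DWB 2

/* Hypothetical writeback object embedding a base, like dcn20_dwbc does. */
struct dwbc_base { int inst; };

struct dwbc_derived {
	struct dwbc_base base;
	int regs;
};

struct wb_pool {
	int num_dwb;
	struct dwbc_base *dwbc[MAX_DWB];
};

static bool dwbc_create(struct wb_pool *pool)
{
	for (int i = 0; i < pool->num_dwb; i++) {
		struct dwbc_derived *d = calloc(1, sizeof(*d));

		if (!d)
			return false;           /* caller unwinds via the pool destructor */

		d->base.inst = i;               /* minimal stand-in for the construct step */
		pool->dwbc[i] = &d->base;
	}
	return true;
}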
pool             3274 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 				  struct dcn20_resource_pool *pool)
pool             3393 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	if (pool->base.pp_smu) {
pool             3401 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states) {
pool             3402 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			status = (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states)
pool             3403 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 				(&pool->base.pp_smu->nv_funcs.pp_smu, uclk_states, &num_states);
pool             3408 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks) {
pool             3409 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			status = (*pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks)
pool             3410 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 					(&pool->base.pp_smu->nv_funcs.pp_smu, &max_clocks);
pool             3424 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
pool             3425 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	loaded_ip->max_num_dpp = pool->base.pipe_count;
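The init_soc_bounding_box entries above only call into pp_smu after checking both the interface pointer and the individual nv_funcs callbacks, since the firmware interface (or a given query) may be absent. A short sketch of that guarded optional-callback pattern; the function table and field names here are invented for illustration.

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical firmware-interface table; any entry may be left NULL. */
struct fw_funcs {
	bool (*get_clock_states)(unsigned int *states, int *num_states);
	bool (*get_max_clock)(unsigned int *max_khz);
};

struct soc_limits {
	unsigned int max_khz;
	int num_states;
};

static void query_limits(const struct fw_funcs *fw, struct soc_limits *out)
{
	unsigned int states[8];

	out->max_khz = 0;
	out->num_states = 0;

	/* Each callback is optional: check the table and the pointer before calling. */
	if (fw && fw->get_clock_states)
		fw->get_clock_states(states, &out->num_states);

	if (fw && fw->get_max_clock)
		fw->get_max_clock(&out->max_khz);
}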
pool             3434 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	struct dcn20_resource_pool *pool)
pool             3447 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	pool->base.funcs = &dcn20_res_pool_funcs;
pool             3450 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		pool->base.res_cap = &res_cap_nv14;
pool             3451 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		pool->base.pipe_count = 5;
pool             3452 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		pool->base.mpcc_count = 5;
pool             3454 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		pool->base.res_cap = &res_cap_nv10;
pool             3455 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		pool->base.pipe_count = 6;
pool             3456 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		pool->base.mpcc_count = 6;
pool             3461 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool             3476 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		pool->base.pipe_count = 4;
pool             3477 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		pool->base.mpcc_count = pool->base.pipe_count;
pool             3493 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	pool->base.clock_sources[DCN20_CLK_SRC_PLL0] =
pool             3497 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	pool->base.clock_sources[DCN20_CLK_SRC_PLL1] =
pool             3501 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	pool->base.clock_sources[DCN20_CLK_SRC_PLL2] =
pool             3505 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	pool->base.clock_sources[DCN20_CLK_SRC_PLL3] =
pool             3509 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	pool->base.clock_sources[DCN20_CLK_SRC_PLL4] =
pool             3513 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	pool->base.clock_sources[DCN20_CLK_SRC_PLL5] =
pool             3517 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL;
pool             3519 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	pool->base.dp_clock_source =
pool             3524 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	for (i = 0; i < pool->base.clk_src_count; i++) {
pool             3525 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.clock_sources[i] == NULL) {
pool             3532 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	pool->base.dccg = dccg2_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
pool             3533 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	if (pool->base.dccg == NULL) {
pool             3539 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	pool->base.dmcu = dcn20_dmcu_create(ctx,
pool             3543 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	if (pool->base.dmcu == NULL) {
pool             3549 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	pool->base.abm = dce_abm_create(ctx,
pool             3553 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	if (pool->base.abm == NULL) {
pool             3559 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	pool->base.pp_smu = dcn20_pp_smu_create(ctx);
pool             3562 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	if (!init_soc_bounding_box(dc, pool)) {
pool             3608 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.pp_smu->nv_funcs.set_wm_ranges)
pool             3609 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 			pool->base.pp_smu->nv_funcs.set_wm_ranges(&pool->base.pp_smu->nv_funcs.pp_smu, &ranges);
pool             3613 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	pool->base.irqs = dal_irq_service_dcn20_create(&init_data);
pool             3614 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	if (!pool->base.irqs)
pool             3618 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	for (i = 0; i < pool->base.pipe_count; i++) {
pool             3619 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		pool->base.hubps[i] = dcn20_hubp_create(ctx, i);
pool             3620 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.hubps[i] == NULL) {
pool             3627 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		pool->base.ipps[i] = dcn20_ipp_create(ctx, i);
pool             3628 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.ipps[i] == NULL) {
pool             3635 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		pool->base.dpps[i] = dcn20_dpp_create(ctx, i);
pool             3636 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.dpps[i] == NULL) {
pool             3643 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool             3644 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		pool->base.engines[i] = dcn20_aux_engine_create(ctx, i);
pool             3645 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.engines[i] == NULL) {
pool             3651 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		pool->base.hw_i2cs[i] = dcn20_i2c_hw_create(ctx, i);
pool             3652 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.hw_i2cs[i] == NULL) {
pool             3658 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		pool->base.sw_i2cs[i] = NULL;
pool             3661 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	for (i = 0; i < pool->base.res_cap->num_opp; i++) {
pool             3662 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		pool->base.opps[i] = dcn20_opp_create(ctx, i);
pool             3663 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.opps[i] == NULL) {
pool             3671 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
pool             3672 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		pool->base.timing_generators[i] = dcn20_timing_generator_create(
pool             3674 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.timing_generators[i] == NULL) {
pool             3681 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	pool->base.timing_generator_count = i;
pool             3683 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	pool->base.mpc = dcn20_mpc_create(ctx);
pool             3684 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	if (pool->base.mpc == NULL) {
pool             3690 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	pool->base.hubbub = dcn20_hubbub_create(ctx);
pool             3691 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	if (pool->base.hubbub == NULL) {
pool             3698 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
pool             3699 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		pool->base.dscs[i] = dcn20_dsc_create(ctx, i);
pool             3700 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		if (pool->base.dscs[i] == NULL) {
pool             3708 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	if (!dcn20_dwbc_create(ctx, &pool->base)) {
pool             3713 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	if (!dcn20_mmhubbub_create(ctx, &pool->base)) {
pool             3719 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	if (!resource_construct(num_virtual_links, dc, &pool->base,
pool             3726 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	dc->caps.max_planes =  pool->base.pipe_count;
pool             3737 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	destruct(pool);
pool             3746 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	struct dcn20_resource_pool *pool =
pool             3749 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	if (!pool)
pool             3752 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	if (construct(init_data->num_virtual_links, dc, pool))
pool             3753 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 		return &pool->base;
pool             3756 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	kfree(pool);
pool               31 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h #define TO_DCN20_RES_POOL(pool)\
pool               32 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h 	container_of(pool, struct dcn20_resource_pool, base)
pool               56 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h 		const struct resource_pool *pool,
pool              110 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool);
pool              111 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool);
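
The dcn20_resource.c/.h entries above trace DC's usual resource-pool creation shape: the create entry point allocates the derived dcn20_resource_pool, hands it to construct(), returns the embedded &pool->base on success and kfree()s the allocation on failure, while TO_DCN20_RES_POOL() recovers the derived type from a base pointer via container_of(). The following is a minimal hedged sketch of that shape only; the my_* names are invented and stand in for the real DC types.

/* Hedged sketch of the create/construct/base-pointer pattern (my_* names are invented). */
#include <linux/kernel.h>
#include <linux/slab.h>

struct my_pool_base {
	int pipe_count;
};

struct my_pool {
	struct my_pool_base base;	/* callers only ever hold &pool->base */
	/* derived, asic-specific state would follow */
};

#define TO_MY_POOL(p) container_of(p, struct my_pool, base)

static bool my_construct(struct my_pool *pool)
{
	/* create clock sources, hubps, opps, ...; on any failure return false */
	pool->base.pipe_count = 6;
	return true;
}

struct my_pool_base *my_create_pool(void)
{
	struct my_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);

	if (!pool)
		return NULL;

	if (my_construct(pool))
		return &pool->base;

	kfree(pool);			/* construct() failed part-way */
	return NULL;
}
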
pool              832 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c static void destruct(struct dcn21_resource_pool *pool)
pool              836 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	for (i = 0; i < pool->base.stream_enc_count; i++) {
pool              837 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		if (pool->base.stream_enc[i] != NULL) {
pool              838 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 			kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
pool              839 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 			pool->base.stream_enc[i] = NULL;
pool              844 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
pool              845 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		if (pool->base.dscs[i] != NULL)
pool              846 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 			dcn20_dsc_destroy(&pool->base.dscs[i]);
pool              850 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	if (pool->base.mpc != NULL) {
pool              851 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		kfree(TO_DCN20_MPC(pool->base.mpc));
pool              852 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		pool->base.mpc = NULL;
pool              854 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	if (pool->base.hubbub != NULL) {
pool              855 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		kfree(pool->base.hubbub);
pool              856 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		pool->base.hubbub = NULL;
pool              858 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	for (i = 0; i < pool->base.pipe_count; i++) {
pool              859 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		if (pool->base.dpps[i] != NULL)
pool              860 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 			dcn20_dpp_destroy(&pool->base.dpps[i]);
pool              862 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		if (pool->base.ipps[i] != NULL)
pool              863 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 			pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
pool              865 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		if (pool->base.hubps[i] != NULL) {
pool              866 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 			kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
pool              867 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 			pool->base.hubps[i] = NULL;
pool              870 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		if (pool->base.irqs != NULL) {
pool              871 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 			dal_irq_service_destroy(&pool->base.irqs);
pool              875 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool              876 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		if (pool->base.engines[i] != NULL)
pool              877 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 			dce110_engine_destroy(&pool->base.engines[i]);
pool              878 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		if (pool->base.hw_i2cs[i] != NULL) {
pool              879 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 			kfree(pool->base.hw_i2cs[i]);
pool              880 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 			pool->base.hw_i2cs[i] = NULL;
pool              882 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		if (pool->base.sw_i2cs[i] != NULL) {
pool              883 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 			kfree(pool->base.sw_i2cs[i]);
pool              884 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 			pool->base.sw_i2cs[i] = NULL;
pool              888 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	for (i = 0; i < pool->base.res_cap->num_opp; i++) {
pool              889 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		if (pool->base.opps[i] != NULL)
pool              890 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 			pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
pool              893 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
pool              894 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		if (pool->base.timing_generators[i] != NULL)	{
pool              895 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 			kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
pool              896 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 			pool->base.timing_generators[i] = NULL;
pool              900 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
pool              901 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		if (pool->base.dwbc[i] != NULL) {
pool              902 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 			kfree(TO_DCN20_DWBC(pool->base.dwbc[i]));
pool              903 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 			pool->base.dwbc[i] = NULL;
pool              905 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		if (pool->base.mcif_wb[i] != NULL) {
pool              906 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 			kfree(TO_DCN20_MMHUBBUB(pool->base.mcif_wb[i]));
pool              907 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 			pool->base.mcif_wb[i] = NULL;
pool              911 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	for (i = 0; i < pool->base.audio_count; i++) {
pool              912 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		if (pool->base.audios[i])
pool              913 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 			dce_aud_destroy(&pool->base.audios[i]);
pool              916 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	for (i = 0; i < pool->base.clk_src_count; i++) {
pool              917 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		if (pool->base.clock_sources[i] != NULL) {
pool              918 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 			dcn20_clock_source_destroy(&pool->base.clock_sources[i]);
pool              919 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 			pool->base.clock_sources[i] = NULL;
pool              923 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	if (pool->base.dp_clock_source != NULL) {
pool              924 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		dcn20_clock_source_destroy(&pool->base.dp_clock_source);
pool              925 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		pool->base.dp_clock_source = NULL;
pool              929 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	if (pool->base.abm != NULL)
pool              930 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		dce_abm_destroy(&pool->base.abm);
pool              932 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	if (pool->base.dmcu != NULL)
pool              933 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		dce_dmcu_destroy(&pool->base.dmcu);
pool              936 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	if (pool->base.dmcub != NULL)
pool              937 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		dcn21_dmcub_destroy(&pool->base.dmcub);
pool              940 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	if (pool->base.dccg != NULL)
pool              941 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		dcn_dccg_destroy(&pool->base.dccg);
pool              943 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	if (pool->base.pp_smu != NULL)
pool              944 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		dcn20_pp_smu_destroy(&pool->base.pp_smu);
pool             1113 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c static void dcn21_destroy_resource_pool(struct resource_pool **pool)
pool             1115 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	struct dcn21_resource_pool *dcn21_pool = TO_DCN21_RES_POOL(*pool);
pool             1119 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	*pool = NULL;
pool             1276 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
pool             1280 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
pool             1281 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
pool             1440 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	struct dcn21_resource_pool *pool)
pool             1448 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	pool->base.res_cap = &res_cap_rn;
pool             1452 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		pool->base.res_cap = &res_cap_rn_FPGA_4pipe;
pool             1455 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	pool->base.funcs = &dcn21_res_pool_funcs;
pool             1460 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool             1462 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	pool->base.pipe_count = 4;
pool             1476 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		pool->base.pipe_count = 4;
pool             1489 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	pool->base.clock_sources[DCN20_CLK_SRC_PLL0] =
pool             1493 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	pool->base.clock_sources[DCN20_CLK_SRC_PLL1] =
pool             1497 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	pool->base.clock_sources[DCN20_CLK_SRC_PLL2] =
pool             1502 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21;
pool             1505 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	pool->base.dp_clock_source =
pool             1510 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	for (i = 0; i < pool->base.clk_src_count; i++) {
pool             1511 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		if (pool->base.clock_sources[i] == NULL) {
pool             1518 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	pool->base.dccg = dccg2_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
pool             1519 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	if (pool->base.dccg == NULL) {
pool             1526 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	pool->base.dmcub = dcn21_dmcub_create(ctx,
pool             1530 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	if (pool->base.dmcub == NULL) {
pool             1537 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	pool->base.pp_smu = dcn21_pp_smu_create(ctx);
pool             1542 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	pool->base.irqs = dal_irq_service_dcn21_create(&init_data);
pool             1543 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	if (!pool->base.irqs)
pool             1547 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	for (i = 0; i < pool->base.pipe_count; i++) {
pool             1548 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		pool->base.hubps[i] = dcn21_hubp_create(ctx, i);
pool             1549 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		if (pool->base.hubps[i] == NULL) {
pool             1556 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		pool->base.ipps[i] = dcn21_ipp_create(ctx, i);
pool             1557 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		if (pool->base.ipps[i] == NULL) {
pool             1564 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		pool->base.dpps[i] = dcn21_dpp_create(ctx, i);
pool             1565 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		if (pool->base.dpps[i] == NULL) {
pool             1573 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool             1574 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		pool->base.engines[i] = dcn21_aux_engine_create(ctx, i);
pool             1575 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		if (pool->base.engines[i] == NULL) {
pool             1581 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		pool->base.hw_i2cs[i] = dcn21_i2c_hw_create(ctx, i);
pool             1582 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		if (pool->base.hw_i2cs[i] == NULL) {
pool             1588 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		pool->base.sw_i2cs[i] = NULL;
pool             1591 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	for (i = 0; i < pool->base.res_cap->num_opp; i++) {
pool             1592 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		pool->base.opps[i] = dcn21_opp_create(ctx, i);
pool             1593 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		if (pool->base.opps[i] == NULL) {
pool             1601 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
pool             1602 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		pool->base.timing_generators[i] = dcn21_timing_generator_create(
pool             1604 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		if (pool->base.timing_generators[i] == NULL) {
pool             1611 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	pool->base.timing_generator_count = i;
pool             1613 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	pool->base.mpc = dcn21_mpc_create(ctx);
pool             1614 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	if (pool->base.mpc == NULL) {
pool             1620 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	pool->base.hubbub = dcn21_hubbub_create(ctx);
pool             1621 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	if (pool->base.hubbub == NULL) {
pool             1628 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
pool             1629 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		pool->base.dscs[i] = dcn21_dsc_create(ctx, i);
pool             1630 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		if (pool->base.dscs[i] == NULL) {
pool             1638 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	if (!dcn20_dwbc_create(ctx, &pool->base)) {
pool             1643 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	if (!dcn20_mmhubbub_create(ctx, &pool->base)) {
pool             1649 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	if (!resource_construct(num_virtual_links, dc, &pool->base,
pool             1656 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	dc->caps.max_planes =  pool->base.pipe_count;
pool             1667 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	destruct(pool);
pool             1676 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	struct dcn21_resource_pool *pool =
pool             1679 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	if (!pool)
pool             1682 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	if (construct(init_data->num_virtual_links, dc, pool))
pool             1683 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 		return &pool->base;
pool             1686 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	kfree(pool);
pool               31 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.h #define TO_DCN21_RES_POOL(pool)\
pool               32 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.h 	container_of(pool, struct dcn21_resource_pool, base)
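
The dcn21 destruct() and dcn21_destroy_resource_pool() entries above repeat one teardown idiom per sub-block: check the slot, release it (a plain kfree() or the block's destroy hook, which takes a double pointer), and leave the slot NULL so a partially constructed pool can still be torn down; the destroy entry point then runs destruct(), frees the pool and clears the caller's pointer. A simplified, hedged sketch of that idiom with invented my_* names follows.

/* Hedged sketch of the null-check / release / clear teardown idiom (my_* names are invented). */
#include <linux/slab.h>

struct my_opp {
	void (*destroy)(struct my_opp **opp);	/* assumed to free *opp and set it NULL */
};

struct my_pool {
	struct my_opp *opps[6];
	void *hubbub;				/* plain allocation */
	int num_opp;
};

static void my_destruct(struct my_pool *pool)
{
	int i;

	for (i = 0; i < pool->num_opp; i++) {
		if (pool->opps[i] == NULL)
			continue;		/* construct() may have bailed out early */
		pool->opps[i]->destroy(&pool->opps[i]);
	}

	kfree(pool->hubbub);			/* simple blocks are kfree'd ... */
	pool->hubbub = NULL;			/* ... and the slot cleared explicitly */
}

void my_destroy_pool(struct my_pool **pool)
{
	my_destruct(*pool);
	kfree(*pool);
	*pool = NULL;				/* caller's pointer is cleared, as in the listing */
}
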
pool               95 drivers/gpu/drm/amd/display/dc/inc/core_types.h 	void (*destroy)(struct resource_pool **pool);
pool              115 drivers/gpu/drm/amd/display/dc/inc/core_types.h 			const struct resource_pool *pool,
pool              134 drivers/gpu/drm/amd/display/dc/inc/core_types.h 			const struct resource_pool *pool,
pool               80 drivers/gpu/drm/amd/display/dc/inc/resource.h 	struct resource_pool *pool,
pool              104 drivers/gpu/drm/amd/display/dc/inc/resource.h 		const struct resource_pool *pool,
pool              109 drivers/gpu/drm/amd/display/dc/inc/resource.h 		const struct resource_pool *pool,
pool              114 drivers/gpu/drm/amd/display/dc/inc/resource.h 		const struct resource_pool *pool,
pool              127 drivers/gpu/drm/amd/display/dc/inc/resource.h 		const struct resource_pool *pool);
pool              138 drivers/gpu/drm/amd/display/dc/inc/resource.h 		const struct resource_pool *pool);
pool              142 drivers/gpu/drm/amd/display/dc/inc/resource.h 		const struct resource_pool *pool,
pool              153 drivers/gpu/drm/amd/display/dc/inc/resource.h 		const struct resource_pool *pool);
pool              178 drivers/gpu/drm/amd/display/dc/inc/resource.h 		const struct resource_pool *pool,
pool             1147 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	struct intel_engine_pool_node *pool;
pool             1153 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	pool = intel_engine_pool_get(&eb->engine->pool, PAGE_SIZE);
pool             1154 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	if (IS_ERR(pool))
pool             1155 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		return PTR_ERR(pool);
pool             1157 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	cmd = i915_gem_object_pin_map(pool->obj,
pool             1166 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	batch = i915_vma_instance(pool->obj, vma->vm, NULL);
pool             1182 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	err = intel_engine_pool_mark_active(pool, rq);
pool             1221 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	i915_gem_object_unpin_map(pool->obj);
pool             1223 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	intel_engine_pool_put(pool);
pool             1992 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	struct intel_engine_pool_node *pool;
pool             1998 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len);
pool             1999 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	if (IS_ERR(pool))
pool             2000 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		return ERR_CAST(pool);
pool             2002 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	vma = shadow_batch_pin(eb, pool->obj);
pool             2017 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 				      pool->obj,
pool             2051 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	vma->private = pool;
pool             2055 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	intel_engine_pool_put(pool);
pool               20 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	struct intel_engine_pool_node *pool;
pool               35 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	pool = intel_engine_pool_get(&ce->engine->pool, size);
pool               36 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	if (IS_ERR(pool)) {
pool               37 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 		err = PTR_ERR(pool);
pool               41 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
pool               82 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	i915_gem_object_unpin_map(pool->obj);
pool               84 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	batch = i915_vma_instance(pool->obj, ce->vm, NULL);
pool               94 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	batch->private = pool;
pool               98 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	intel_engine_pool_put(pool);
pool              204 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	struct intel_engine_pool_node *pool;
pool              219 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	pool = intel_engine_pool_get(&ce->engine->pool, size);
pool              220 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	if (IS_ERR(pool)) {
pool              221 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 		err = PTR_ERR(pool);
pool              225 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
pool              281 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	i915_gem_object_unpin_map(pool->obj);
pool              283 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	batch = i915_vma_instance(pool->obj, ce->vm, NULL);
pool              293 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	batch->private = pool;
pool              297 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	intel_engine_pool_put(pool);
pool              604 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	intel_engine_pool_init(&engine->pool);
pool              810 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	intel_engine_pool_fini(&engine->pool);
pool              146 drivers/gpu/drm/i915/gt/intel_engine_pm.c 	intel_engine_pool_park(&engine->pool);
pool               13 drivers/gpu/drm/i915/gt/intel_engine_pool.c static struct intel_engine_cs *to_engine(struct intel_engine_pool *pool)
pool               15 drivers/gpu/drm/i915/gt/intel_engine_pool.c 	return container_of(pool, struct intel_engine_cs, pool);
pool               19 drivers/gpu/drm/i915/gt/intel_engine_pool.c bucket_for_size(struct intel_engine_pool *pool, size_t sz)
pool               29 drivers/gpu/drm/i915/gt/intel_engine_pool.c 	if (n >= ARRAY_SIZE(pool->cache_list))
pool               30 drivers/gpu/drm/i915/gt/intel_engine_pool.c 		n = ARRAY_SIZE(pool->cache_list) - 1;
pool               32 drivers/gpu/drm/i915/gt/intel_engine_pool.c 	return &pool->cache_list[n];
pool               68 drivers/gpu/drm/i915/gt/intel_engine_pool.c 	struct intel_engine_pool *pool = node->pool;
pool               69 drivers/gpu/drm/i915/gt/intel_engine_pool.c 	struct list_head *list = bucket_for_size(pool, node->obj->base.size);
pool               72 drivers/gpu/drm/i915/gt/intel_engine_pool.c 	GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
pool               79 drivers/gpu/drm/i915/gt/intel_engine_pool.c 	spin_lock_irqsave(&pool->lock, flags);
pool               81 drivers/gpu/drm/i915/gt/intel_engine_pool.c 	spin_unlock_irqrestore(&pool->lock, flags);
pool               85 drivers/gpu/drm/i915/gt/intel_engine_pool.c node_create(struct intel_engine_pool *pool, size_t sz)
pool               87 drivers/gpu/drm/i915/gt/intel_engine_pool.c 	struct intel_engine_cs *engine = to_engine(pool);
pool               96 drivers/gpu/drm/i915/gt/intel_engine_pool.c 	node->pool = pool;
pool              113 drivers/gpu/drm/i915/gt/intel_engine_pool.c intel_engine_pool_get(struct intel_engine_pool *pool, size_t size)
pool              120 drivers/gpu/drm/i915/gt/intel_engine_pool.c 	GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
pool              123 drivers/gpu/drm/i915/gt/intel_engine_pool.c 	list = bucket_for_size(pool, size);
pool              125 drivers/gpu/drm/i915/gt/intel_engine_pool.c 	spin_lock_irqsave(&pool->lock, flags);
pool              132 drivers/gpu/drm/i915/gt/intel_engine_pool.c 	spin_unlock_irqrestore(&pool->lock, flags);
pool              135 drivers/gpu/drm/i915/gt/intel_engine_pool.c 		node = node_create(pool, size);
pool              149 drivers/gpu/drm/i915/gt/intel_engine_pool.c void intel_engine_pool_init(struct intel_engine_pool *pool)
pool              153 drivers/gpu/drm/i915/gt/intel_engine_pool.c 	spin_lock_init(&pool->lock);
pool              154 drivers/gpu/drm/i915/gt/intel_engine_pool.c 	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
pool              155 drivers/gpu/drm/i915/gt/intel_engine_pool.c 		INIT_LIST_HEAD(&pool->cache_list[n]);
pool              158 drivers/gpu/drm/i915/gt/intel_engine_pool.c void intel_engine_pool_park(struct intel_engine_pool *pool)
pool              162 drivers/gpu/drm/i915/gt/intel_engine_pool.c 	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
pool              163 drivers/gpu/drm/i915/gt/intel_engine_pool.c 		struct list_head *list = &pool->cache_list[n];
pool              173 drivers/gpu/drm/i915/gt/intel_engine_pool.c void intel_engine_pool_fini(struct intel_engine_pool *pool)
pool              177 drivers/gpu/drm/i915/gt/intel_engine_pool.c 	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
pool              178 drivers/gpu/drm/i915/gt/intel_engine_pool.c 		GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
pool               15 drivers/gpu/drm/i915/gt/intel_engine_pool.h intel_engine_pool_get(struct intel_engine_pool *pool, size_t size);
pool               30 drivers/gpu/drm/i915/gt/intel_engine_pool.h void intel_engine_pool_init(struct intel_engine_pool *pool);
pool               31 drivers/gpu/drm/i915/gt/intel_engine_pool.h void intel_engine_pool_park(struct intel_engine_pool *pool);
pool               32 drivers/gpu/drm/i915/gt/intel_engine_pool.h void intel_engine_pool_fini(struct intel_engine_pool *pool);
pool               26 drivers/gpu/drm/i915/gt/intel_engine_pool_types.h 	struct intel_engine_pool *pool;
pool              395 drivers/gpu/drm/i915/gt/intel_engine_types.h 	struct intel_engine_pool pool;
pool              286 drivers/gpu/drm/i915/gt/mock_engine.c 	intel_engine_pool_init(&engine->pool);
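
The intel_engine_pool.{c,h} entries above describe a per-engine cache of scratch buffer objects bucketed by size: bucket_for_size() clamps a size-derived index into pool->cache_list[], intel_engine_pool_get() takes pool->lock and reuses a node from that bucket or falls back to node_create(), and each node keeps a back-pointer so it can be returned to the right pool later. The sketch below shows only that bucketing/reuse shape; the my_* names are invented and plain kzalloc'd nodes stand in for the real GEM objects.

/* Hedged sketch of a size-bucketed node cache (my_* names are invented). */
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define MY_NR_BUCKETS 4

struct my_node_pool {
	spinlock_t lock;
	struct list_head cache_list[MY_NR_BUCKETS];
};

struct my_node {
	struct list_head link;
	struct my_node_pool *pool;	/* back-pointer consumed by my_node_put() */
	size_t size;
};

/* One bucket per power-of-two page count, clamped to the last bucket. */
static struct list_head *my_bucket_for_size(struct my_node_pool *pool, size_t sz)
{
	int n = fls(sz >> PAGE_SHIFT);

	if (n >= MY_NR_BUCKETS)
		n = MY_NR_BUCKETS - 1;
	return &pool->cache_list[n];
}

static struct my_node *my_node_get(struct my_node_pool *pool, size_t size)
{
	struct list_head *list = my_bucket_for_size(pool, size);
	struct my_node *node = NULL, *it;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(it, list, link) {
		if (it->size >= size) {		/* reuse the first large-enough node */
			list_del(&it->link);
			node = it;
			break;
		}
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	if (node)
		return node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);	/* cache miss: make a new one */
	if (!node)
		return ERR_PTR(-ENOMEM);
	node->pool = pool;
	node->size = size;
	return node;
}

static void my_node_put(struct my_node *node)
{
	struct my_node_pool *pool = node->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	list_add(&node->link, my_bucket_for_size(pool, node->size));
	spin_unlock_irqrestore(&pool->lock, flags);
}

The i915_gem_execbuffer.c and i915_gem_object_blt.c entries higher up show the call side of the real API: fetch a node sized for the scratch batch from &engine->pool, pin and fill pool->obj, release it with intel_engine_pool_put() on error, and otherwise stash it (vma->private / batch->private) with intel_engine_pool_mark_active() so it is not recycled while the work is in flight.
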
pool               46 drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c static void guc_ct_pool_entries_init(struct guc_ct_pool_entry *pool, u32 num)
pool               48 drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c 	memset(pool, 0, num * sizeof(*pool));
pool              235 drivers/gpu/drm/i915/i915_gpu_error.c 	struct pagevec pool;
pool              244 drivers/gpu/drm/i915/i915_gpu_error.c 	if (pool_init(&c->pool, ALLOW_FAIL))
pool              251 drivers/gpu/drm/i915/i915_gpu_error.c 		pool_fini(&c->pool);
pool              257 drivers/gpu/drm/i915/i915_gpu_error.c 		c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);
pool              281 drivers/gpu/drm/i915/i915_gpu_error.c 	page = pool_alloc(&c->pool, ALLOW_FAIL);
pool              357 drivers/gpu/drm/i915/i915_gpu_error.c 		pool_free(&c->pool, c->tmp);
pool              358 drivers/gpu/drm/i915/i915_gpu_error.c 	pool_fini(&c->pool);
pool              369 drivers/gpu/drm/i915/i915_gpu_error.c 	struct pagevec pool;
pool              374 drivers/gpu/drm/i915/i915_gpu_error.c 	return pool_init(&c->pool, ALLOW_FAIL) == 0;
pool              388 drivers/gpu/drm/i915/i915_gpu_error.c 	ptr = pool_alloc(&c->pool, ALLOW_FAIL);
pool              411 drivers/gpu/drm/i915/i915_gpu_error.c 	pool_fini(&c->pool);
pool             1006 drivers/gpu/drm/i915/i915_gpu_error.c 			pool_free(&compress->pool, dst->pages[dst->page_count]);
pool             1379 drivers/gpu/drm/i915/i915_gpu_error.c 		pool_refill(&compress->pool, ALLOW_FAIL);
pool               86 drivers/gpu/drm/sis/sis_mm.c 			 void *data, int pool)
pool               97 drivers/gpu/drm/sis/sis_mm.c 	if (0 == ((pool == 0) ? dev_priv->vram_initialized :
pool              112 drivers/gpu/drm/sis/sis_mm.c 	if (pool == AGP_TYPE) {
pool              142 drivers/gpu/drm/sis/sis_mm.c 	mem->offset = ((pool == 0) ?
pool              160 drivers/gpu/drm/sis/sis_mm.c 	DRM_DEBUG("alloc %d, size = %ld, offset = %ld\n", pool, mem->size,
pool              266 drivers/gpu/drm/ttm/ttm_page_alloc.c static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
pool              269 drivers/gpu/drm/ttm/ttm_page_alloc.c 	pool->npages -= freed_pages;
pool              270 drivers/gpu/drm/ttm/ttm_page_alloc.c 	pool->nfrees += freed_pages;
pool              283 drivers/gpu/drm/ttm/ttm_page_alloc.c static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
pool              308 drivers/gpu/drm/ttm/ttm_page_alloc.c 	spin_lock_irqsave(&pool->lock, irq_flags);
pool              310 drivers/gpu/drm/ttm/ttm_page_alloc.c 	list_for_each_entry_reverse(p, &pool->list, lru) {
pool              318 drivers/gpu/drm/ttm/ttm_page_alloc.c 			__list_del(p->lru.prev, &pool->list);
pool              320 drivers/gpu/drm/ttm/ttm_page_alloc.c 			ttm_pool_update_free_locked(pool, freed_pages);
pool              325 drivers/gpu/drm/ttm/ttm_page_alloc.c 			spin_unlock_irqrestore(&pool->lock, irq_flags);
pool              327 drivers/gpu/drm/ttm/ttm_page_alloc.c 			ttm_pages_put(pages_to_free, freed_pages, pool->order);
pool              353 drivers/gpu/drm/ttm/ttm_page_alloc.c 		__list_del(&p->lru, &pool->list);
pool              355 drivers/gpu/drm/ttm/ttm_page_alloc.c 		ttm_pool_update_free_locked(pool, freed_pages);
pool              359 drivers/gpu/drm/ttm/ttm_page_alloc.c 	spin_unlock_irqrestore(&pool->lock, irq_flags);
pool              362 drivers/gpu/drm/ttm/ttm_page_alloc.c 		ttm_pages_put(pages_to_free, freed_pages, pool->order);
pool              383 drivers/gpu/drm/ttm/ttm_page_alloc.c 	struct ttm_page_pool *pool;
pool              399 drivers/gpu/drm/ttm/ttm_page_alloc.c 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
pool              400 drivers/gpu/drm/ttm/ttm_page_alloc.c 		page_nr = (1 << pool->order);
pool              402 drivers/gpu/drm/ttm/ttm_page_alloc.c 		nr_free_pool = roundup(nr_free, page_nr) >> pool->order;
pool              403 drivers/gpu/drm/ttm/ttm_page_alloc.c 		shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
pool              404 drivers/gpu/drm/ttm/ttm_page_alloc.c 		freed += (nr_free_pool - shrink_pages) << pool->order;
pool              407 drivers/gpu/drm/ttm/ttm_page_alloc.c 		shrink_pages <<= pool->order;
pool              419 drivers/gpu/drm/ttm/ttm_page_alloc.c 	struct ttm_page_pool *pool;
pool              422 drivers/gpu/drm/ttm/ttm_page_alloc.c 		pool = &_manager->pools[i];
pool              423 drivers/gpu/drm/ttm/ttm_page_alloc.c 		count += (pool->npages << pool->order);
pool              571 drivers/gpu/drm/ttm/ttm_page_alloc.c static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
pool              583 drivers/gpu/drm/ttm/ttm_page_alloc.c 	if (pool->fill_lock)
pool              586 drivers/gpu/drm/ttm/ttm_page_alloc.c 	pool->fill_lock = true;
pool              591 drivers/gpu/drm/ttm/ttm_page_alloc.c 		&& count > pool->npages) {
pool              599 drivers/gpu/drm/ttm/ttm_page_alloc.c 		spin_unlock_irqrestore(&pool->lock, *irq_flags);
pool              602 drivers/gpu/drm/ttm/ttm_page_alloc.c 		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
pool              604 drivers/gpu/drm/ttm/ttm_page_alloc.c 		spin_lock_irqsave(&pool->lock, *irq_flags);
pool              607 drivers/gpu/drm/ttm/ttm_page_alloc.c 			list_splice(&new_pages, &pool->list);
pool              608 drivers/gpu/drm/ttm/ttm_page_alloc.c 			++pool->nrefills;
pool              609 drivers/gpu/drm/ttm/ttm_page_alloc.c 			pool->npages += alloc_size;
pool              611 drivers/gpu/drm/ttm/ttm_page_alloc.c 			pr_debug("Failed to fill pool (%p)\n", pool);
pool              616 drivers/gpu/drm/ttm/ttm_page_alloc.c 			list_splice(&new_pages, &pool->list);
pool              617 drivers/gpu/drm/ttm/ttm_page_alloc.c 			pool->npages += cpages;
pool              621 drivers/gpu/drm/ttm/ttm_page_alloc.c 	pool->fill_lock = false;
pool              629 drivers/gpu/drm/ttm/ttm_page_alloc.c static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
pool              640 drivers/gpu/drm/ttm/ttm_page_alloc.c 	spin_lock_irqsave(&pool->lock, irq_flags);
pool              642 drivers/gpu/drm/ttm/ttm_page_alloc.c 		ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count,
pool              645 drivers/gpu/drm/ttm/ttm_page_alloc.c 	if (count >= pool->npages) {
pool              647 drivers/gpu/drm/ttm/ttm_page_alloc.c 		list_splice_init(&pool->list, pages);
pool              648 drivers/gpu/drm/ttm/ttm_page_alloc.c 		count -= pool->npages;
pool              649 drivers/gpu/drm/ttm/ttm_page_alloc.c 		pool->npages = 0;
pool              654 drivers/gpu/drm/ttm/ttm_page_alloc.c 	if (count <= pool->npages/2) {
pool              656 drivers/gpu/drm/ttm/ttm_page_alloc.c 		list_for_each(p, &pool->list) {
pool              661 drivers/gpu/drm/ttm/ttm_page_alloc.c 		i = pool->npages + 1;
pool              662 drivers/gpu/drm/ttm/ttm_page_alloc.c 		list_for_each_prev(p, &pool->list) {
pool              668 drivers/gpu/drm/ttm/ttm_page_alloc.c 	list_cut_position(pages, &pool->list, p);
pool              669 drivers/gpu/drm/ttm/ttm_page_alloc.c 	pool->npages -= count;
pool              672 drivers/gpu/drm/ttm/ttm_page_alloc.c 	spin_unlock_irqrestore(&pool->lock, irq_flags);
pool              688 drivers/gpu/drm/ttm/ttm_page_alloc.c 		gfp_t gfp_flags = pool->gfp_flags;
pool              711 drivers/gpu/drm/ttm/ttm_page_alloc.c 	struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
pool              718 drivers/gpu/drm/ttm/ttm_page_alloc.c 	if (pool == NULL) {
pool              797 drivers/gpu/drm/ttm/ttm_page_alloc.c 	spin_lock_irqsave(&pool->lock, irq_flags);
pool              802 drivers/gpu/drm/ttm/ttm_page_alloc.c 			list_add_tail(&pages[i]->lru, &pool->list);
pool              804 drivers/gpu/drm/ttm/ttm_page_alloc.c 			pool->npages++;
pool              810 drivers/gpu/drm/ttm/ttm_page_alloc.c 	if (pool->npages > _manager->options.max_size) {
pool              811 drivers/gpu/drm/ttm/ttm_page_alloc.c 		npages = pool->npages - _manager->options.max_size;
pool              817 drivers/gpu/drm/ttm/ttm_page_alloc.c 	spin_unlock_irqrestore(&pool->lock, irq_flags);
pool              819 drivers/gpu/drm/ttm/ttm_page_alloc.c 		ttm_page_pool_free(pool, npages, false);
pool              829 drivers/gpu/drm/ttm/ttm_page_alloc.c 	struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
pool              839 drivers/gpu/drm/ttm/ttm_page_alloc.c 	if (pool == NULL) {
pool              917 drivers/gpu/drm/ttm/ttm_page_alloc.c 	r = ttm_page_pool_get_pages(pool, &plist, flags, cstate,
pool              942 drivers/gpu/drm/ttm/ttm_page_alloc.c static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
pool              945 drivers/gpu/drm/ttm/ttm_page_alloc.c 	spin_lock_init(&pool->lock);
pool              946 drivers/gpu/drm/ttm/ttm_page_alloc.c 	pool->fill_lock = false;
pool              947 drivers/gpu/drm/ttm/ttm_page_alloc.c 	INIT_LIST_HEAD(&pool->list);
pool              948 drivers/gpu/drm/ttm/ttm_page_alloc.c 	pool->npages = pool->nfrees = 0;
pool              949 drivers/gpu/drm/ttm/ttm_page_alloc.c 	pool->gfp_flags = flags;
pool              950 drivers/gpu/drm/ttm/ttm_page_alloc.c 	pool->name = name;
pool              951 drivers/gpu/drm/ttm/ttm_page_alloc.c 	pool->order = order;
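
Taken together, the ttm_page_alloc.c entries above outline TTM's cached page pool: a spinlock-protected list with an npages count, a fill step that drops the lock while calling the page allocator and then splices the new pages in, and a get step that moves up to the requested number of pages back out. Below is a deliberately simplified, hedged sketch of that fill-then-splice idiom; the my_* names are invented, and the real code additionally tracks caching state, allocation order, statistics and a fill_lock guard.

/* Hedged sketch of a locked page cache with refill (my_* names are invented). */
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>

struct my_page_pool {
	spinlock_t lock;
	struct list_head list;		/* cached pages, linked through page->lru */
	unsigned int npages;
	gfp_t gfp_flags;
};

/* Called with pool->lock held; drops it around the allocator like the real code. */
static void my_pool_fill_locked(struct my_page_pool *pool, unsigned int want,
				unsigned long *irq_flags)
{
	LIST_HEAD(new_pages);
	unsigned int got = 0;

	spin_unlock_irqrestore(&pool->lock, *irq_flags);
	while (got < want) {
		struct page *p = alloc_page(pool->gfp_flags);

		if (!p)
			break;
		list_add_tail(&p->lru, &new_pages);
		got++;
	}
	spin_lock_irqsave(&pool->lock, *irq_flags);

	list_splice(&new_pages, &pool->list);
	pool->npages += got;
}

/* Move up to @count cached pages onto @pages; returns how many were handed out. */
static unsigned int my_pool_get_pages(struct my_page_pool *pool,
				      struct list_head *pages,
				      unsigned int count)
{
	unsigned long irq_flags;
	unsigned int taken, i;

	spin_lock_irqsave(&pool->lock, irq_flags);
	if (count > pool->npages)
		my_pool_fill_locked(pool, count - pool->npages, &irq_flags);

	taken = min(count, pool->npages);
	for (i = 0; i < taken; i++) {
		struct page *p = list_first_entry(&pool->list, struct page, lru);

		list_move_tail(&p->lru, pages);
	}
	pool->npages -= taken;
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	return taken;
}
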
pool              151 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct dma_pool *pool;
pool              266 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c static int ttm_set_pages_caching(struct dma_pool *pool,
pool              271 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	if (pool->type & IS_UC) {
pool              275 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			       pool->dev_name, cpages);
pool              277 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	if (pool->type & IS_WC) {
pool              281 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			       pool->dev_name, cpages);
pool              286 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
pool              291 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	if (pool->type & IS_HUGE)
pool              294 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	dma_free_attrs(pool->dev, pool->size, (void *)d_page->vaddr, dma, attrs);
pool              299 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
pool              309 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	if (pool->type & IS_HUGE)
pool              312 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	vaddr = dma_alloc_attrs(pool->dev, pool->size, &d_page->dma,
pool              313 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 				pool->gfp_flags, attrs);
pool              320 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		if (pool->type & IS_HUGE)
pool              344 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c static void ttm_pool_update_free_locked(struct dma_pool *pool,
pool              347 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	pool->npages_free -= freed_pages;
pool              348 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	pool->nfrees += freed_pages;
pool              353 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
pool              359 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	if (!(pool->type & IS_CACHED)) {
pool              360 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		num_pages = pool->size / PAGE_SIZE;
pool              363 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			       pool->dev_name, num_pages);
pool              367 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	__ttm_dma_free_page(pool, d_page);
pool              370 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
pool              375 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	if (pool->type & IS_HUGE) {
pool              377 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			ttm_dma_page_put(pool, d_page);
pool              383 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	if (npages && !(pool->type & IS_CACHED) &&
pool              386 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		       pool->dev_name, npages);
pool              390 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		__ttm_dma_free_page(pool, d_page);
pool              404 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
pool              427 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		       pool->dev_name);
pool              432 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	spin_lock_irqsave(&pool->lock, irq_flags);
pool              435 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
pool              447 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			ttm_pool_update_free_locked(pool, freed_pages);
pool              452 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			spin_unlock_irqrestore(&pool->lock, irq_flags);
pool              454 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			ttm_dma_pages_put(pool, &d_pages, pages_to_free,
pool              484 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		ttm_pool_update_free_locked(pool, freed_pages);
pool              488 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	spin_unlock_irqrestore(&pool->lock, irq_flags);
pool              491 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
pool              501 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct dma_pool *pool;
pool              510 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		pool = p->pool;
pool              511 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		if (pool->type != type)
pool              519 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
pool              520 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		if (pool->type != type)
pool              524 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
pool              525 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
pool              530 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		list_del(&pool->pools);
pool              531 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		kfree(pool);
pool              543 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct dma_pool *pool = *(struct dma_pool **)res;
pool              545 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	if (pool)
pool              546 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		ttm_dma_free_pool(dev, pool->type);
pool              560 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct dma_pool *pool = NULL, **ptr;
pool              574 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
pool              576 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	if (!pool)
pool              586 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	sec_pool->pool =  pool;
pool              588 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	INIT_LIST_HEAD(&pool->free_list);
pool              589 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	INIT_LIST_HEAD(&pool->pools);
pool              590 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	spin_lock_init(&pool->lock);
pool              591 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	pool->dev = dev;
pool              592 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	pool->npages_free = pool->npages_in_use = 0;
pool              593 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	pool->nfrees = 0;
pool              594 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	pool->gfp_flags = flags;
pool              597 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		pool->size = HPAGE_PMD_SIZE;
pool              602 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		pool->size = PAGE_SIZE;
pool              603 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	pool->type = type;
pool              604 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	pool->nrefills = 0;
pool              605 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	p = pool->name;
pool              608 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
pool              615 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
pool              622 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	list_add(&pool->pools, &dev->dma_pools);
pool              625 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	*ptr = pool;
pool              628 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	return pool;
pool              632 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	kfree(pool);
pool              639 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct dma_pool *pool, *tmp;
pool              655 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools)
pool              656 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		if (pool->type == type)
pool              657 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			return pool;
pool              666 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
pool              684 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		__ttm_dma_free_page(pool, d_page);
pool              699 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
pool              717 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		       pool->dev_name);
pool              723 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			 pool->dev_name, pool->name, current->pid, count);
pool              726 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		dma_p = __ttm_dma_alloc_page(pool);
pool              729 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 				 pool->dev_name, i);
pool              734 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 				r = ttm_set_pages_caching(pool, caching_array,
pool              738 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 						pool, d_pages, caching_array,
pool              755 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		npages = pool->size / PAGE_SIZE;
pool              760 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 				r = ttm_set_pages_caching(pool, caching_array,
pool              764 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 					     pool, d_pages, caching_array,
pool              774 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		r = ttm_set_pages_caching(pool, caching_array, cpages);
pool              776 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			ttm_dma_handle_caching_state_failure(pool, d_pages,
pool              787 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
pool              791 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	int r = pool->npages_free;
pool              793 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	if (count > pool->npages_free) {
pool              798 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		spin_unlock_irqrestore(&pool->lock, *irq_flags);
pool              802 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
pool              804 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		spin_lock_irqsave(&pool->lock, *irq_flags);
pool              807 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			list_splice(&d_pages, &pool->free_list);
pool              808 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			++pool->nrefills;
pool              809 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			pool->npages_free += count;
pool              816 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 				 pool->dev_name, pool->name, r);
pool              821 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			list_splice_tail(&d_pages, &pool->free_list);
pool              822 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			pool->npages_free += cpages;
pool              834 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
pool              843 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	spin_lock_irqsave(&pool->lock, irq_flags);
pool              844 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
pool              846 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
pool              850 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		pool->npages_in_use += 1;
pool              851 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		pool->npages_free -= 1;
pool              853 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	spin_unlock_irqrestore(&pool->lock, irq_flags);
pool              892 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct dma_pool *pool;
pool              913 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
pool              914 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	if (!pool) {
pool              917 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE);
pool              918 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		if (IS_ERR_OR_NULL(pool))
pool              925 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
pool              930 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 						pool->size, ctx);
pool              950 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	pool = ttm_dma_find_pool(dev, type);
pool              951 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	if (!pool) {
pool              954 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		pool = ttm_dma_pool_init(dev, gfp_flags, type);
pool              955 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		if (IS_ERR_OR_NULL(pool))
pool              960 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
pool              967 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 						pool->size, ctx);
pool              996 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct dma_pool *pool;
pool             1006 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
pool             1007 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	if (pool) {
pool             1017 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 							 pool->size);
pool             1020 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			ttm_dma_page_put(pool, d_page);
pool             1023 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		spin_lock_irqsave(&pool->lock, irq_flags);
pool             1024 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		pool->npages_in_use -= count;
pool             1025 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		pool->nfrees += count;
pool             1026 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		spin_unlock_irqrestore(&pool->lock, irq_flags);
pool             1030 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	pool = ttm_dma_find_pool(dev, type);
pool             1031 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	if (!pool)
pool             1034 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	is_cached = (ttm_dma_find_pool(pool->dev,
pool             1035 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);
pool             1046 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 						 pool->size);
pool             1051 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			ttm_dma_page_put(pool, d_page);
pool             1054 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	spin_lock_irqsave(&pool->lock, irq_flags);
pool             1055 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	pool->npages_in_use -= count;
pool             1057 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		pool->nfrees += count;
pool             1059 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		pool->npages_free += count;
pool             1060 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		list_splice(&ttm_dma->pages_list, &pool->free_list);
pool             1065 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		if (pool->npages_free >= (_manager->options.max_size +
pool             1067 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			npages = pool->npages_free - _manager->options.max_size;
pool             1069 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	spin_unlock_irqrestore(&pool->lock, irq_flags);
pool             1079 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		ttm_dma_page_pool_free(pool, npages, false);
pool             1122 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
pool             1126 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			 p->pool->dev_name, p->pool->name, current->pid,
pool             1143 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		count += p->pool->npages_free;
pool             1205 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
pool             1208 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			ttm_dma_pool_match, p->pool));
pool             1209 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		ttm_dma_free_pool(p->dev, p->pool->type);
pool             1218 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct dma_pool *pool = NULL;
pool             1230 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		pool = p->pool;
pool             1232 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 				pool->name, pool->nrefills,
pool             1233 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 				pool->nfrees, pool->npages_in_use,
pool             1234 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 				pool->npages_free,
pool             1235 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 				pool->dev_name);
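The ttm_page_alloc_dma.c hits above follow a lookup-or-create pattern: the allocator first looks for an existing per-device pool of the requested caching type (with the IS_HUGE flag mixed in), creates one on demand, and falls back from the huge pool to the plain pool when either step fails; the free path uses the same lookup to route pages back and adjusts npages_in_use/nfrees under pool->lock. A minimal sketch of that pattern, with find_pool()/create_pool()/pool_get_pages() as hypothetical stand-ins for the driver's static helpers:

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gfp.h>

struct demo_pool;	/* opaque per-device pool, hypothetical */

/* hypothetical helpers standing in for ttm_dma_find_pool()/ttm_dma_pool_init() */
struct demo_pool *find_pool(struct device *dev, unsigned int type);
struct demo_pool *create_pool(struct device *dev, gfp_t gfp, unsigned int type);
int pool_get_pages(struct demo_pool *pool, unsigned int count);

#define IS_HUGE_FLAG	BIT(31)		/* assumption: a flag bit OR-ed into the type */

static int demo_populate(struct device *dev, unsigned int type, gfp_t gfp,
			 unsigned int count)
{
	struct demo_pool *pool;

	/* try the huge-page pool first, creating it on demand */
	pool = find_pool(dev, type | IS_HUGE_FLAG);
	if (!pool)
		pool = create_pool(dev, gfp | __GFP_NOWARN, type | IS_HUGE_FLAG);
	if (!IS_ERR_OR_NULL(pool) && pool_get_pages(pool, count) == 0)
		return 0;

	/* fall back to the order-0 pool */
	pool = find_pool(dev, type);
	if (!pool)
		pool = create_pool(dev, gfp, type);
	if (IS_ERR_OR_NULL(pool))
		return -ENOMEM;

	return pool_get_pages(pool, count);
}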
pool              158 drivers/hid/usbhid/hid-pidff.c 	struct pidff_usage pool[sizeof(pidff_pool)];
pool             1137 drivers/hid/usbhid/hid-pidff.c 	PIDFF_FIND_FIELDS(pool, PID_POOL, 0);
pool             1169 drivers/hid/usbhid/hid-pidff.c 	if (pidff->pool[PID_SIMULTANEOUS_MAX].value) {
pool             1170 drivers/hid/usbhid/hid-pidff.c 		while (pidff->pool[PID_SIMULTANEOUS_MAX].value[0] < 2) {
pool             1174 drivers/hid/usbhid/hid-pidff.c 					 pidff->pool[PID_SIMULTANEOUS_MAX].value[0]);
pool             1285 drivers/hid/usbhid/hid-pidff.c 	if (pidff->pool[PID_SIMULTANEOUS_MAX].value)
pool             1287 drivers/hid/usbhid/hid-pidff.c 			pidff->pool[PID_SIMULTANEOUS_MAX].value[0]);
pool             1289 drivers/hid/usbhid/hid-pidff.c 	if (pidff->pool[PID_RAM_POOL_SIZE].value)
pool             1291 drivers/hid/usbhid/hid-pidff.c 			pidff->pool[PID_RAM_POOL_SIZE].value[0]);
pool             1293 drivers/hid/usbhid/hid-pidff.c 	if (pidff->pool[PID_DEVICE_MANAGED_POOL].value &&
pool             1294 drivers/hid/usbhid/hid-pidff.c 	    pidff->pool[PID_DEVICE_MANAGED_POOL].value[0] == 0) {
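hid-pidff keeps one pidff_usage slot per pool-report field and leaves .value NULL when the device does not expose that usage, which is why every read above is guarded. A small sketch of that guard pattern (struct layout and names are illustrative, not the driver's exact definitions):

#include <linux/errno.h>
#include <linux/types.h>

struct demo_usage {
	s32 *value;		/* NULL if the device lacks this usage */
};

enum { DEMO_SIMULTANEOUS_MAX, DEMO_RAM_POOL_SIZE, DEMO_NUM_FIELDS };

static int demo_effect_count(struct demo_usage *pool_fields)
{
	/* only trust the report value when the usage was actually found */
	if (pool_fields[DEMO_SIMULTANEOUS_MAX].value &&
	    pool_fields[DEMO_SIMULTANEOUS_MAX].value[0] >= 2)
		return pool_fields[DEMO_SIMULTANEOUS_MAX].value[0];

	return -ENODEV;		/* or re-read the pool report and retry */
}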
pool              378 drivers/hv/hv_kvp.c 	__u8 pool = kvp_transaction.kvp_msg->kvp_hdr.pool;
pool              392 drivers/hv/hv_kvp.c 	message->kvp_hdr.pool = pool;
pool             2234 drivers/i3c/master.c void i3c_generic_ibi_free_pool(struct i3c_generic_ibi_pool *pool)
pool             2239 drivers/i3c/master.c 	while (!list_empty(&pool->free_slots)) {
pool             2240 drivers/i3c/master.c 		slot = list_first_entry(&pool->free_slots,
pool             2250 drivers/i3c/master.c 	WARN_ON(nslots != pool->num_slots);
pool             2252 drivers/i3c/master.c 	kfree(pool->payload_buf);
pool             2253 drivers/i3c/master.c 	kfree(pool->slots);
pool             2254 drivers/i3c/master.c 	kfree(pool);
pool             2271 drivers/i3c/master.c 	struct i3c_generic_ibi_pool *pool;
pool             2276 drivers/i3c/master.c 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
pool             2277 drivers/i3c/master.c 	if (!pool)
pool             2280 drivers/i3c/master.c 	spin_lock_init(&pool->lock);
pool             2281 drivers/i3c/master.c 	INIT_LIST_HEAD(&pool->free_slots);
pool             2282 drivers/i3c/master.c 	INIT_LIST_HEAD(&pool->pending);
pool             2284 drivers/i3c/master.c 	pool->slots = kcalloc(req->num_slots, sizeof(*slot), GFP_KERNEL);
pool             2285 drivers/i3c/master.c 	if (!pool->slots) {
pool             2291 drivers/i3c/master.c 		pool->payload_buf = kcalloc(req->num_slots,
pool             2293 drivers/i3c/master.c 		if (!pool->payload_buf) {
pool             2300 drivers/i3c/master.c 		slot = &pool->slots[i];
pool             2304 drivers/i3c/master.c 			slot->base.data = pool->payload_buf +
pool             2307 drivers/i3c/master.c 		list_add_tail(&slot->node, &pool->free_slots);
pool             2308 drivers/i3c/master.c 		pool->num_slots++;
pool             2311 drivers/i3c/master.c 	return pool;
pool             2314 drivers/i3c/master.c 	i3c_generic_ibi_free_pool(pool);
pool             2330 drivers/i3c/master.c i3c_generic_ibi_get_free_slot(struct i3c_generic_ibi_pool *pool)
pool             2335 drivers/i3c/master.c 	spin_lock_irqsave(&pool->lock, flags);
pool             2336 drivers/i3c/master.c 	slot = list_first_entry_or_null(&pool->free_slots,
pool             2340 drivers/i3c/master.c 	spin_unlock_irqrestore(&pool->lock, flags);
pool             2354 drivers/i3c/master.c void i3c_generic_ibi_recycle_slot(struct i3c_generic_ibi_pool *pool,
pool             2364 drivers/i3c/master.c 	spin_lock_irqsave(&pool->lock, flags);
pool             2365 drivers/i3c/master.c 	list_add_tail(&slot->node, &pool->free_slots);
pool             2366 drivers/i3c/master.c 	spin_unlock_irqrestore(&pool->lock, flags);
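The i3c core exports this slot pool to controller drivers: allocate it from the ibi_setup request, pull a free slot when an IBI arrives, hand the filled slot to the core, and recycle it once the payload has been consumed. A brief sketch of how a controller driver would typically use it; only the i3c_generic_ibi_* and i3c_master_queue_ibi() calls are core API, the rest is assumed driver-private glue:

#include <linux/err.h>
#include <linux/i3c/master.h>
#include <linux/string.h>

struct demo_master_ibi {
	struct i3c_generic_ibi_pool *pool;
};

static int demo_request_ibi(struct i3c_dev_desc *dev,
			    const struct i3c_ibi_setup *req,
			    struct demo_master_ibi *priv)
{
	priv->pool = i3c_generic_ibi_alloc_pool(dev, req);
	if (IS_ERR(priv->pool))
		return PTR_ERR(priv->pool);
	return 0;
}

static void demo_handle_ibi(struct i3c_dev_desc *dev,
			    struct demo_master_ibi *priv,
			    const void *payload, unsigned int len)
{
	struct i3c_ibi_slot *slot;

	slot = i3c_generic_ibi_get_free_slot(priv->pool);
	if (!slot)
		return;		/* pool exhausted, drop this IBI */

	/* real drivers clamp len to the max_payload_len they requested */
	memcpy(slot->data, payload, len);
	slot->len = len;
	i3c_master_queue_ibi(dev, slot);
}

static void demo_free_ibi(struct demo_master_ibi *priv)
{
	i3c_generic_ibi_free_pool(priv->pool);
}

The matching recycle hook would simply forward the consumed slot to i3c_generic_ibi_recycle_slot(), which puts it back on free_slots under pool->lock as the lines above show.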
pool              218 drivers/iio/industrialio-trigger.c 	ret = bitmap_find_free_region(trig->pool,
pool              231 drivers/iio/industrialio-trigger.c 	clear_bit(irq - trig->subirq_base, trig->pool);
pool              247 drivers/iio/industrialio-trigger.c 		= bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
pool              298 drivers/iio/industrialio-trigger.c 		= (bitmap_weight(trig->pool,
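The IIO trigger code treats its per-trigger sub-IRQ pool as a plain bitmap: bitmap_find_free_region() hands out an entry, clear_bit() returns it, and bitmap_empty()/bitmap_weight() answer "any consumers?" and "how many?". A self-contained sketch of that allocation scheme with an assumed pool size standing in for CONFIG_IIO_CONSUMERS_PER_TRIGGER:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define DEMO_POOL_SIZE	8	/* assumption: number of consumer slots */

static DECLARE_BITMAP(demo_pool, DEMO_POOL_SIZE);

static int demo_get_slot(void)
{
	/* order 0: grab a single free bit; returns its index or -ENOMEM */
	return bitmap_find_free_region(demo_pool, DEMO_POOL_SIZE, 0);
}

static void demo_put_slot(int index)
{
	clear_bit(index, demo_pool);
}

static bool demo_pool_idle(void)
{
	return bitmap_empty(demo_pool, DEMO_POOL_SIZE);
}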
pool               95 drivers/infiniband/core/fmr_pool.c 	void                     (*flush_function)(struct ib_fmr_pool *pool,
pool              115 drivers/infiniband/core/fmr_pool.c static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
pool              123 drivers/infiniband/core/fmr_pool.c 	if (!pool->cache_bucket)
pool              126 drivers/infiniband/core/fmr_pool.c 	bucket = pool->cache_bucket + ib_fmr_hash(*page_list);
pool              138 drivers/infiniband/core/fmr_pool.c static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
pool              145 drivers/infiniband/core/fmr_pool.c 	spin_lock_irq(&pool->pool_lock);
pool              147 drivers/infiniband/core/fmr_pool.c 	list_for_each_entry(fmr, &pool->dirty_list, list) {
pool              153 drivers/infiniband/core/fmr_pool.c 	list_splice_init(&pool->dirty_list, &unmap_list);
pool              154 drivers/infiniband/core/fmr_pool.c 	pool->dirty_len = 0;
pool              156 drivers/infiniband/core/fmr_pool.c 	spin_unlock_irq(&pool->pool_lock);
pool              166 drivers/infiniband/core/fmr_pool.c 	spin_lock_irq(&pool->pool_lock);
pool              167 drivers/infiniband/core/fmr_pool.c 	list_splice(&unmap_list, &pool->free_list);
pool              168 drivers/infiniband/core/fmr_pool.c 	spin_unlock_irq(&pool->pool_lock);
pool              173 drivers/infiniband/core/fmr_pool.c 	struct ib_fmr_pool *pool = container_of(work, struct ib_fmr_pool, work);
pool              175 drivers/infiniband/core/fmr_pool.c 	ib_fmr_batch_release(pool);
pool              176 drivers/infiniband/core/fmr_pool.c 	atomic_inc(&pool->flush_ser);
pool              177 drivers/infiniband/core/fmr_pool.c 	wake_up_interruptible(&pool->force_wait);
pool              179 drivers/infiniband/core/fmr_pool.c 	if (pool->flush_function)
pool              180 drivers/infiniband/core/fmr_pool.c 		pool->flush_function(pool, pool->flush_arg);
pool              182 drivers/infiniband/core/fmr_pool.c 	if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0)
pool              183 drivers/infiniband/core/fmr_pool.c 		kthread_queue_work(pool->worker, &pool->work);
pool              198 drivers/infiniband/core/fmr_pool.c 	struct ib_fmr_pool *pool;
pool              218 drivers/infiniband/core/fmr_pool.c 	pool = kmalloc(sizeof *pool, GFP_KERNEL);
pool              219 drivers/infiniband/core/fmr_pool.c 	if (!pool)
pool              222 drivers/infiniband/core/fmr_pool.c 	pool->cache_bucket   = NULL;
pool              223 drivers/infiniband/core/fmr_pool.c 	pool->flush_function = params->flush_function;
pool              224 drivers/infiniband/core/fmr_pool.c 	pool->flush_arg      = params->flush_arg;
pool              226 drivers/infiniband/core/fmr_pool.c 	INIT_LIST_HEAD(&pool->free_list);
pool              227 drivers/infiniband/core/fmr_pool.c 	INIT_LIST_HEAD(&pool->dirty_list);
pool              230 drivers/infiniband/core/fmr_pool.c 		pool->cache_bucket =
pool              232 drivers/infiniband/core/fmr_pool.c 				      sizeof(*pool->cache_bucket),
pool              234 drivers/infiniband/core/fmr_pool.c 		if (!pool->cache_bucket) {
pool              240 drivers/infiniband/core/fmr_pool.c 			INIT_HLIST_HEAD(pool->cache_bucket + i);
pool              243 drivers/infiniband/core/fmr_pool.c 	pool->pool_size       = 0;
pool              244 drivers/infiniband/core/fmr_pool.c 	pool->max_pages       = params->max_pages_per_fmr;
pool              245 drivers/infiniband/core/fmr_pool.c 	pool->max_remaps      = max_remaps;
pool              246 drivers/infiniband/core/fmr_pool.c 	pool->dirty_watermark = params->dirty_watermark;
pool              247 drivers/infiniband/core/fmr_pool.c 	pool->dirty_len       = 0;
pool              248 drivers/infiniband/core/fmr_pool.c 	spin_lock_init(&pool->pool_lock);
pool              249 drivers/infiniband/core/fmr_pool.c 	atomic_set(&pool->req_ser,   0);
pool              250 drivers/infiniband/core/fmr_pool.c 	atomic_set(&pool->flush_ser, 0);
pool              251 drivers/infiniband/core/fmr_pool.c 	init_waitqueue_head(&pool->force_wait);
pool              253 drivers/infiniband/core/fmr_pool.c 	pool->worker =
pool              255 drivers/infiniband/core/fmr_pool.c 	if (IS_ERR(pool->worker)) {
pool              257 drivers/infiniband/core/fmr_pool.c 		ret = PTR_ERR(pool->worker);
pool              260 drivers/infiniband/core/fmr_pool.c 	kthread_init_work(&pool->work, ib_fmr_cleanup_func);
pool              266 drivers/infiniband/core/fmr_pool.c 			.max_maps   = pool->max_remaps,
pool              271 drivers/infiniband/core/fmr_pool.c 		if (pool->cache_bucket)
pool              279 drivers/infiniband/core/fmr_pool.c 			fmr->pool             = pool;
pool              292 drivers/infiniband/core/fmr_pool.c 			list_add_tail(&fmr->list, &pool->free_list);
pool              293 drivers/infiniband/core/fmr_pool.c 			++pool->pool_size;
pool              297 drivers/infiniband/core/fmr_pool.c 	return pool;
pool              300 drivers/infiniband/core/fmr_pool.c 	kfree(pool->cache_bucket);
pool              301 drivers/infiniband/core/fmr_pool.c 	kfree(pool);
pool              306 drivers/infiniband/core/fmr_pool.c 	ib_destroy_fmr_pool(pool);
pool              318 drivers/infiniband/core/fmr_pool.c void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
pool              325 drivers/infiniband/core/fmr_pool.c 	kthread_destroy_worker(pool->worker);
pool              326 drivers/infiniband/core/fmr_pool.c 	ib_fmr_batch_release(pool);
pool              329 drivers/infiniband/core/fmr_pool.c 	list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
pool              341 drivers/infiniband/core/fmr_pool.c 	if (i < pool->pool_size)
pool              343 drivers/infiniband/core/fmr_pool.c 			pool->pool_size - i);
pool              345 drivers/infiniband/core/fmr_pool.c 	kfree(pool->cache_bucket);
pool              346 drivers/infiniband/core/fmr_pool.c 	kfree(pool);
pool              356 drivers/infiniband/core/fmr_pool.c int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
pool              367 drivers/infiniband/core/fmr_pool.c 	spin_lock_irq(&pool->pool_lock);
pool              368 drivers/infiniband/core/fmr_pool.c 	list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
pool              370 drivers/infiniband/core/fmr_pool.c 			list_move(&fmr->list, &pool->dirty_list);
pool              372 drivers/infiniband/core/fmr_pool.c 	spin_unlock_irq(&pool->pool_lock);
pool              374 drivers/infiniband/core/fmr_pool.c 	serial = atomic_inc_return(&pool->req_ser);
pool              375 drivers/infiniband/core/fmr_pool.c 	kthread_queue_work(pool->worker, &pool->work);
pool              377 drivers/infiniband/core/fmr_pool.c 	if (wait_event_interruptible(pool->force_wait,
pool              378 drivers/infiniband/core/fmr_pool.c 				     atomic_read(&pool->flush_ser) - serial >= 0))
pool              397 drivers/infiniband/core/fmr_pool.c 	struct ib_fmr_pool *pool = pool_handle;
pool              402 drivers/infiniband/core/fmr_pool.c 	if (list_len < 1 || list_len > pool->max_pages)
pool              405 drivers/infiniband/core/fmr_pool.c 	spin_lock_irqsave(&pool->pool_lock, flags);
pool              406 drivers/infiniband/core/fmr_pool.c 	fmr = ib_fmr_cache_lookup(pool,
pool              417 drivers/infiniband/core/fmr_pool.c 		spin_unlock_irqrestore(&pool->pool_lock, flags);
pool              422 drivers/infiniband/core/fmr_pool.c 	if (list_empty(&pool->free_list)) {
pool              423 drivers/infiniband/core/fmr_pool.c 		spin_unlock_irqrestore(&pool->pool_lock, flags);
pool              427 drivers/infiniband/core/fmr_pool.c 	fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
pool              430 drivers/infiniband/core/fmr_pool.c 	spin_unlock_irqrestore(&pool->pool_lock, flags);
pool              436 drivers/infiniband/core/fmr_pool.c 		spin_lock_irqsave(&pool->pool_lock, flags);
pool              437 drivers/infiniband/core/fmr_pool.c 		list_add(&fmr->list, &pool->free_list);
pool              438 drivers/infiniband/core/fmr_pool.c 		spin_unlock_irqrestore(&pool->pool_lock, flags);
pool              448 drivers/infiniband/core/fmr_pool.c 	if (pool->cache_bucket) {
pool              453 drivers/infiniband/core/fmr_pool.c 		spin_lock_irqsave(&pool->pool_lock, flags);
pool              455 drivers/infiniband/core/fmr_pool.c 			       pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
pool              456 drivers/infiniband/core/fmr_pool.c 		spin_unlock_irqrestore(&pool->pool_lock, flags);
pool              472 drivers/infiniband/core/fmr_pool.c 	struct ib_fmr_pool *pool;
pool              475 drivers/infiniband/core/fmr_pool.c 	pool = fmr->pool;
pool              477 drivers/infiniband/core/fmr_pool.c 	spin_lock_irqsave(&pool->pool_lock, flags);
pool              481 drivers/infiniband/core/fmr_pool.c 		if (fmr->remap_count < pool->max_remaps) {
pool              482 drivers/infiniband/core/fmr_pool.c 			list_add_tail(&fmr->list, &pool->free_list);
pool              484 drivers/infiniband/core/fmr_pool.c 			list_add_tail(&fmr->list, &pool->dirty_list);
pool              485 drivers/infiniband/core/fmr_pool.c 			if (++pool->dirty_len >= pool->dirty_watermark) {
pool              486 drivers/infiniband/core/fmr_pool.c 				atomic_inc(&pool->req_ser);
pool              487 drivers/infiniband/core/fmr_pool.c 				kthread_queue_work(pool->worker, &pool->work);
pool              492 drivers/infiniband/core/fmr_pool.c 	spin_unlock_irqrestore(&pool->pool_lock, flags);
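The fmr_pool.c lines show a "request serial / flush serial" handshake: ib_flush_fmr_pool() bumps req_ser, queues the cleanup work on a kthread worker, and sleeps until flush_ser catches up, while the worker re-queues itself as long as it is behind. A stripped-down sketch of that synchronization pattern (pool contents omitted; initialization mirrors the atomic_set()/init_waitqueue_head()/kthread_create_worker()/kthread_init_work() calls visible above):

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/kthread.h>
#include <linux/wait.h>

struct demo_flush {
	atomic_t req_ser;
	atomic_t flush_ser;
	wait_queue_head_t force_wait;
	struct kthread_worker *worker;
	struct kthread_work work;
};

static void demo_cleanup_func(struct kthread_work *work)
{
	struct demo_flush *f = container_of(work, struct demo_flush, work);

	/* ... release dirty entries here ... */

	atomic_inc(&f->flush_ser);
	wake_up_interruptible(&f->force_wait);

	/* more flush requests arrived while we ran: go around again */
	if (atomic_read(&f->flush_ser) - atomic_read(&f->req_ser) < 0)
		kthread_queue_work(f->worker, &f->work);
}

static int demo_flush(struct demo_flush *f)
{
	int serial = atomic_inc_return(&f->req_ser);

	kthread_queue_work(f->worker, &f->work);

	if (wait_event_interruptible(f->force_wait,
				     atomic_read(&f->flush_ser) - serial >= 0))
		return -EINTR;
	return 0;
}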
pool              322 drivers/infiniband/hw/hfi1/pio.c 		int pool;
pool              354 drivers/infiniband/hw/hfi1/pio.c 		pool = wildcard_to_pool(size);
pool              355 drivers/infiniband/hw/hfi1/pio.c 		if (pool == -1) {			/* non-wildcard */
pool              357 drivers/infiniband/hw/hfi1/pio.c 		} else if (pool < NUM_SC_POOLS) {	/* valid wildcard */
pool              358 drivers/infiniband/hw/hfi1/pio.c 			mem_pool_info[pool].count += count;
pool              421 drivers/infiniband/hw/hfi1/pio.c 			unsigned pool = wildcard_to_pool(dd->sc_sizes[i].size);
pool              423 drivers/infiniband/hw/hfi1/pio.c 			WARN_ON_ONCE(pool >= NUM_SC_POOLS);
pool              424 drivers/infiniband/hw/hfi1/pio.c 			dd->sc_sizes[i].size = mem_pool_info[pool].size;
pool              215 drivers/infiniband/hw/hns/hns_roce_cmd.c 	hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev,
pool              218 drivers/infiniband/hw/hns/hns_roce_cmd.c 	if (!hr_dev->cmd.pool)
pool              226 drivers/infiniband/hw/hns/hns_roce_cmd.c 	dma_pool_destroy(hr_dev->cmd.pool);
pool              274 drivers/infiniband/hw/hns/hns_roce_cmd.c 	mailbox->buf = dma_pool_alloc(hr_dev->cmd.pool, GFP_KERNEL,
pool              290 drivers/infiniband/hw/hns/hns_roce_cmd.c 	dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma);
pool              603 drivers/infiniband/hw/hns/hns_roce_device.h 	struct dma_pool		*pool;
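hns_roce_cmd.c uses struct dma_pool purely as a mailbox allocator: one dma_pool_create() at init, a dma_pool_alloc() per command mailbox that yields both the CPU address and the DMA handle, and dma_pool_free() on completion. A minimal, generic sketch of that lifecycle (size, alignment and names are illustrative):

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>

#define DEMO_MAILBOX_SIZE	0x100	/* assumption: per-command buffer size */
#define DEMO_MAILBOX_ALIGN	0x100

struct demo_cmd {
	struct dma_pool *pool;
};

static int demo_cmd_init(struct demo_cmd *cmd, struct device *dev)
{
	cmd->pool = dma_pool_create("demo_cmd", dev, DEMO_MAILBOX_SIZE,
				    DEMO_MAILBOX_ALIGN, 0);
	return cmd->pool ? 0 : -ENOMEM;
}

static void *demo_mailbox_alloc(struct demo_cmd *cmd, dma_addr_t *dma)
{
	/* buf is CPU-addressable; *dma is what the hardware gets told about */
	return dma_pool_alloc(cmd->pool, GFP_KERNEL, dma);
}

static void demo_mailbox_free(struct demo_cmd *cmd, void *buf, dma_addr_t dma)
{
	dma_pool_free(cmd->pool, buf, dma);
}

static void demo_cmd_cleanup(struct demo_cmd *cmd)
{
	dma_pool_destroy(cmd->pool);
}

The mthca_av.c and mthca_cmd.c hits further down follow the same create/alloc (or zalloc)/free/destroy shape, only with different element sizes.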
pool               66 drivers/infiniband/hw/i40iw/i40iw_pble.c 	if (pinfo->pool) {
pool               73 drivers/infiniband/hw/i40iw/i40iw_pble.c 		gen_pool_destroy(pinfo->pool);
pool               99 drivers/infiniband/hw/i40iw/i40iw_pble.c 	pble_rsrc->pinfo.pool = gen_pool_create(pble_rsrc->pinfo.pool_shift, -1);
pool              101 drivers/infiniband/hw/i40iw/i40iw_pble.c 	if (!pble_rsrc->pinfo.pool)
pool              385 drivers/infiniband/hw/i40iw/i40iw_pble.c 	if (gen_pool_add_virt(pble_rsrc->pinfo.pool, (unsigned long)chunk->vaddr,
pool              426 drivers/infiniband/hw/i40iw/i40iw_pble.c 	struct gen_pool *pool;
pool              431 drivers/infiniband/hw/i40iw/i40iw_pble.c 	pool = pble_rsrc->pinfo.pool;
pool              435 drivers/infiniband/hw/i40iw/i40iw_pble.c 			gen_pool_free(pool, leaf->addr, (leaf->cnt << 3));
pool              441 drivers/infiniband/hw/i40iw/i40iw_pble.c 		gen_pool_free(pool, root->addr, (root->cnt << 3));
pool              455 drivers/infiniband/hw/i40iw/i40iw_pble.c 					    struct gen_pool *pool)
pool              475 drivers/infiniband/hw/i40iw/i40iw_pble.c 	root->addr = gen_pool_alloc(pool, (total << 3));
pool              482 drivers/infiniband/hw/i40iw/i40iw_pble.c 			       (u64)gen_pool_virt_to_phys(pool, root->addr));
pool              487 drivers/infiniband/hw/i40iw/i40iw_pble.c 		leaf->addr = gen_pool_alloc(pool, (pblcnt << 3));
pool              490 drivers/infiniband/hw/i40iw/i40iw_pble.c 		leaf->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool, leaf->addr));
pool              515 drivers/infiniband/hw/i40iw/i40iw_pble.c 	struct gen_pool *pool;
pool              518 drivers/infiniband/hw/i40iw/i40iw_pble.c 	pool = pble_rsrc->pinfo.pool;
pool              519 drivers/infiniband/hw/i40iw/i40iw_pble.c 	addr = (u64 *)gen_pool_alloc(pool, (palloc->total_cnt << 3));
pool              526 drivers/infiniband/hw/i40iw/i40iw_pble.c 	lvl1->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool,
pool              543 drivers/infiniband/hw/i40iw/i40iw_pble.c 							struct gen_pool *pool)
pool              549 drivers/infiniband/hw/i40iw/i40iw_pble.c 		status = get_lvl2_pble(pble_rsrc, palloc, pool);
pool              565 drivers/infiniband/hw/i40iw/i40iw_pble.c 	struct gen_pool *pool;
pool              570 drivers/infiniband/hw/i40iw/i40iw_pble.c 	pool = pble_rsrc->pinfo.pool;
pool              574 drivers/infiniband/hw/i40iw/i40iw_pble.c 	status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
pool              582 drivers/infiniband/hw/i40iw/i40iw_pble.c 		status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
pool              603 drivers/infiniband/hw/i40iw/i40iw_pble.c 	struct gen_pool *pool;
pool              605 drivers/infiniband/hw/i40iw/i40iw_pble.c 	pool = pble_rsrc->pinfo.pool;
pool              609 drivers/infiniband/hw/i40iw/i40iw_pble.c 		gen_pool_free(pool, palloc->level1.addr,
pool              101 drivers/infiniband/hw/i40iw/i40iw_pble.h 	struct gen_pool *pool;
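i40iw builds its PBLE resource on top of the genalloc API: a gen_pool is created once with a shift that fixes the minimum allocation granularity, backing chunks are registered with gen_pool_add_virt() so virtual-to-physical translation works later, and allocations are taken in multiples of 8 bytes (the "<< 3" sizes above). A hedged sketch of that flow:

#include <linux/genalloc.h>
#include <linux/types.h>

#define DEMO_POOL_SHIFT	3	/* assumption: 8-byte granule, matching the "<< 3" sizes */

static struct gen_pool *demo_create_pble_pool(void *vaddr, phys_addr_t paddr,
					      size_t size)
{
	struct gen_pool *pool;

	pool = gen_pool_create(DEMO_POOL_SHIFT, -1);	/* -1: no NUMA preference */
	if (!pool)
		return NULL;

	/* register a backing chunk; keeps the virt<->phys mapping for lookups */
	if (gen_pool_add_virt(pool, (unsigned long)vaddr, paddr, size, -1)) {
		gen_pool_destroy(pool);
		return NULL;
	}
	return pool;
}

static u64 *demo_alloc_pbles(struct gen_pool *pool, unsigned int cnt,
			     phys_addr_t *pa)
{
	unsigned long addr = gen_pool_alloc(pool, cnt << 3);

	if (!addr)
		return NULL;
	*pa = gen_pool_virt_to_phys(pool, addr);
	return (u64 *)addr;
}

static void demo_free_pbles(struct gen_pool *pool, u64 *pbles, unsigned int cnt)
{
	gen_pool_free(pool, (unsigned long)pbles, cnt << 3);
}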
pool              943 drivers/infiniband/hw/mlx5/mlx5_ib.h 	mempool_t *pool;
pool             1358 drivers/infiniband/hw/mlx5/odp.c 	mempool_free(pfault, eq->pool);
pool             1369 drivers/infiniband/hw/mlx5/odp.c 		pfault = mempool_alloc(eq->pool, GFP_ATOMIC);
pool             1469 drivers/infiniband/hw/mlx5/odp.c static void mempool_refill(mempool_t *pool)
pool             1471 drivers/infiniband/hw/mlx5/odp.c 	while (pool->curr_nr < pool->min_nr)
pool             1472 drivers/infiniband/hw/mlx5/odp.c 		mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
pool             1480 drivers/infiniband/hw/mlx5/odp.c 	mempool_refill(eq->pool);
pool             1502 drivers/infiniband/hw/mlx5/odp.c 	eq->pool = mempool_create_kmalloc_pool(MLX5_IB_NUM_PF_DRAIN,
pool             1504 drivers/infiniband/hw/mlx5/odp.c 	if (!eq->pool)
pool             1538 drivers/infiniband/hw/mlx5/odp.c 	mempool_destroy(eq->pool);
pool             1551 drivers/infiniband/hw/mlx5/odp.c 	mempool_destroy(eq->pool);
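The mlx5 ODP event queue pre-allocates page-fault work items with mempool_create_kmalloc_pool() so the fault path can take them with GFP_ATOMIC, and it tops the reserve back up from process context with the mempool_refill() loop shown above. A small sketch of that pattern (struct and reserve size are placeholders):

#include <linux/gfp.h>
#include <linux/mempool.h>
#include <linux/types.h>

#define DEMO_NUM_RESERVED	64	/* assumption: plays the role of MLX5_IB_NUM_PF_DRAIN */

struct demo_pfault { u32 token; };	/* placeholder work item */

static mempool_t *demo_create(void)
{
	return mempool_create_kmalloc_pool(DEMO_NUM_RESERVED,
					   sizeof(struct demo_pfault));
}

/* hot path: may run from atomic context, never sleeps */
static struct demo_pfault *demo_get(mempool_t *pool)
{
	return mempool_alloc(pool, GFP_ATOMIC);
}

/* slow path: refill the emergency reserve after a burst of faults */
static void demo_refill(mempool_t *pool)
{
	while (pool->curr_nr < pool->min_nr)
		mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
}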
pool              189 drivers/infiniband/hw/mthca/mthca_av.c 		ah->av = dma_pool_zalloc(dev->av_table.pool,
pool              253 drivers/infiniband/hw/mthca/mthca_av.c 		dma_pool_free(dev->av_table.pool, ah->av, ah->avdma);
pool              340 drivers/infiniband/hw/mthca/mthca_av.c 	dev->av_table.pool = dma_pool_create("mthca_av", &dev->pdev->dev,
pool              343 drivers/infiniband/hw/mthca/mthca_av.c 	if (!dev->av_table.pool)
pool              360 drivers/infiniband/hw/mthca/mthca_av.c 	dma_pool_destroy(dev->av_table.pool);
pool              374 drivers/infiniband/hw/mthca/mthca_av.c 	dma_pool_destroy(dev->av_table.pool);
pool              535 drivers/infiniband/hw/mthca/mthca_cmd.c 	dev->cmd.pool = dma_pool_create("mthca_cmd", &dev->pdev->dev,
pool              538 drivers/infiniband/hw/mthca/mthca_cmd.c 	if (!dev->cmd.pool) {
pool              548 drivers/infiniband/hw/mthca/mthca_cmd.c 	dma_pool_destroy(dev->cmd.pool);
pool              618 drivers/infiniband/hw/mthca/mthca_cmd.c 	mailbox->buf = dma_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma);
pool              632 drivers/infiniband/hw/mthca/mthca_cmd.c 	dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
pool              121 drivers/infiniband/hw/mthca/mthca_dev.h 	struct dma_pool          *pool;
pool              266 drivers/infiniband/hw/mthca/mthca_dev.h 	struct dma_pool   *pool;
pool               82 drivers/infiniband/sw/rxe/rxe_mr.c 	if (mem->pelem.pool->type == RXE_TYPE_MR) {
pool              108 drivers/infiniband/sw/rxe/rxe_pool.c static inline const char *pool_name(struct rxe_pool *pool)
pool              110 drivers/infiniband/sw/rxe/rxe_pool.c 	return rxe_type_info[pool->type].name;
pool              113 drivers/infiniband/sw/rxe/rxe_pool.c static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
pool              115 drivers/infiniband/sw/rxe/rxe_pool.c 	return rxe_type_info[pool->type].cache;
pool              169 drivers/infiniband/sw/rxe/rxe_pool.c static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
pool              174 drivers/infiniband/sw/rxe/rxe_pool.c 	if ((max - min + 1) < pool->max_elem) {
pool              180 drivers/infiniband/sw/rxe/rxe_pool.c 	pool->max_index = max;
pool              181 drivers/infiniband/sw/rxe/rxe_pool.c 	pool->min_index = min;
pool              184 drivers/infiniband/sw/rxe/rxe_pool.c 	pool->table = kmalloc(size, GFP_KERNEL);
pool              185 drivers/infiniband/sw/rxe/rxe_pool.c 	if (!pool->table) {
pool              190 drivers/infiniband/sw/rxe/rxe_pool.c 	pool->table_size = size;
pool              191 drivers/infiniband/sw/rxe/rxe_pool.c 	bitmap_zero(pool->table, max - min + 1);
pool              199 drivers/infiniband/sw/rxe/rxe_pool.c 	struct rxe_pool		*pool,
pool              206 drivers/infiniband/sw/rxe/rxe_pool.c 	memset(pool, 0, sizeof(*pool));
pool              208 drivers/infiniband/sw/rxe/rxe_pool.c 	pool->rxe		= rxe;
pool              209 drivers/infiniband/sw/rxe/rxe_pool.c 	pool->type		= type;
pool              210 drivers/infiniband/sw/rxe/rxe_pool.c 	pool->max_elem		= max_elem;
pool              211 drivers/infiniband/sw/rxe/rxe_pool.c 	pool->elem_size		= ALIGN(size, RXE_POOL_ALIGN);
pool              212 drivers/infiniband/sw/rxe/rxe_pool.c 	pool->flags		= rxe_type_info[type].flags;
pool              213 drivers/infiniband/sw/rxe/rxe_pool.c 	pool->tree		= RB_ROOT;
pool              214 drivers/infiniband/sw/rxe/rxe_pool.c 	pool->cleanup		= rxe_type_info[type].cleanup;
pool              216 drivers/infiniband/sw/rxe/rxe_pool.c 	atomic_set(&pool->num_elem, 0);
pool              218 drivers/infiniband/sw/rxe/rxe_pool.c 	kref_init(&pool->ref_cnt);
pool              220 drivers/infiniband/sw/rxe/rxe_pool.c 	rwlock_init(&pool->pool_lock);
pool              223 drivers/infiniband/sw/rxe/rxe_pool.c 		err = rxe_pool_init_index(pool,
pool              231 drivers/infiniband/sw/rxe/rxe_pool.c 		pool->key_offset = rxe_type_info[type].key_offset;
pool              232 drivers/infiniband/sw/rxe/rxe_pool.c 		pool->key_size = rxe_type_info[type].key_size;
pool              235 drivers/infiniband/sw/rxe/rxe_pool.c 	pool->state = RXE_POOL_STATE_VALID;
pool              243 drivers/infiniband/sw/rxe/rxe_pool.c 	struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);
pool              245 drivers/infiniband/sw/rxe/rxe_pool.c 	pool->state = RXE_POOL_STATE_INVALID;
pool              246 drivers/infiniband/sw/rxe/rxe_pool.c 	kfree(pool->table);
pool              249 drivers/infiniband/sw/rxe/rxe_pool.c static void rxe_pool_put(struct rxe_pool *pool)
pool              251 drivers/infiniband/sw/rxe/rxe_pool.c 	kref_put(&pool->ref_cnt, rxe_pool_release);
pool              254 drivers/infiniband/sw/rxe/rxe_pool.c void rxe_pool_cleanup(struct rxe_pool *pool)
pool              258 drivers/infiniband/sw/rxe/rxe_pool.c 	write_lock_irqsave(&pool->pool_lock, flags);
pool              259 drivers/infiniband/sw/rxe/rxe_pool.c 	pool->state = RXE_POOL_STATE_INVALID;
pool              260 drivers/infiniband/sw/rxe/rxe_pool.c 	if (atomic_read(&pool->num_elem) > 0)
pool              262 drivers/infiniband/sw/rxe/rxe_pool.c 			pool_name(pool));
pool              263 drivers/infiniband/sw/rxe/rxe_pool.c 	write_unlock_irqrestore(&pool->pool_lock, flags);
pool              265 drivers/infiniband/sw/rxe/rxe_pool.c 	rxe_pool_put(pool);
pool              268 drivers/infiniband/sw/rxe/rxe_pool.c static u32 alloc_index(struct rxe_pool *pool)
pool              271 drivers/infiniband/sw/rxe/rxe_pool.c 	u32 range = pool->max_index - pool->min_index + 1;
pool              273 drivers/infiniband/sw/rxe/rxe_pool.c 	index = find_next_zero_bit(pool->table, range, pool->last);
pool              275 drivers/infiniband/sw/rxe/rxe_pool.c 		index = find_first_zero_bit(pool->table, range);
pool              278 drivers/infiniband/sw/rxe/rxe_pool.c 	set_bit(index, pool->table);
pool              279 drivers/infiniband/sw/rxe/rxe_pool.c 	pool->last = index;
pool              280 drivers/infiniband/sw/rxe/rxe_pool.c 	return index + pool->min_index;
pool              283 drivers/infiniband/sw/rxe/rxe_pool.c static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
pool              285 drivers/infiniband/sw/rxe/rxe_pool.c 	struct rb_node **link = &pool->tree.rb_node;
pool              305 drivers/infiniband/sw/rxe/rxe_pool.c 	rb_insert_color(&new->node, &pool->tree);
pool              310 drivers/infiniband/sw/rxe/rxe_pool.c static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
pool              312 drivers/infiniband/sw/rxe/rxe_pool.c 	struct rb_node **link = &pool->tree.rb_node;
pool              321 drivers/infiniband/sw/rxe/rxe_pool.c 		cmp = memcmp((u8 *)elem + pool->key_offset,
pool              322 drivers/infiniband/sw/rxe/rxe_pool.c 			     (u8 *)new + pool->key_offset, pool->key_size);
pool              336 drivers/infiniband/sw/rxe/rxe_pool.c 	rb_insert_color(&new->node, &pool->tree);
pool              344 drivers/infiniband/sw/rxe/rxe_pool.c 	struct rxe_pool *pool = elem->pool;
pool              347 drivers/infiniband/sw/rxe/rxe_pool.c 	write_lock_irqsave(&pool->pool_lock, flags);
pool              348 drivers/infiniband/sw/rxe/rxe_pool.c 	memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
pool              349 drivers/infiniband/sw/rxe/rxe_pool.c 	insert_key(pool, elem);
pool              350 drivers/infiniband/sw/rxe/rxe_pool.c 	write_unlock_irqrestore(&pool->pool_lock, flags);
pool              356 drivers/infiniband/sw/rxe/rxe_pool.c 	struct rxe_pool *pool = elem->pool;
pool              359 drivers/infiniband/sw/rxe/rxe_pool.c 	write_lock_irqsave(&pool->pool_lock, flags);
pool              360 drivers/infiniband/sw/rxe/rxe_pool.c 	rb_erase(&elem->node, &pool->tree);
pool              361 drivers/infiniband/sw/rxe/rxe_pool.c 	write_unlock_irqrestore(&pool->pool_lock, flags);
pool              367 drivers/infiniband/sw/rxe/rxe_pool.c 	struct rxe_pool *pool = elem->pool;
pool              370 drivers/infiniband/sw/rxe/rxe_pool.c 	write_lock_irqsave(&pool->pool_lock, flags);
pool              371 drivers/infiniband/sw/rxe/rxe_pool.c 	elem->index = alloc_index(pool);
pool              372 drivers/infiniband/sw/rxe/rxe_pool.c 	insert_index(pool, elem);
pool              373 drivers/infiniband/sw/rxe/rxe_pool.c 	write_unlock_irqrestore(&pool->pool_lock, flags);
pool              379 drivers/infiniband/sw/rxe/rxe_pool.c 	struct rxe_pool *pool = elem->pool;
pool              382 drivers/infiniband/sw/rxe/rxe_pool.c 	write_lock_irqsave(&pool->pool_lock, flags);
pool              383 drivers/infiniband/sw/rxe/rxe_pool.c 	clear_bit(elem->index - pool->min_index, pool->table);
pool              384 drivers/infiniband/sw/rxe/rxe_pool.c 	rb_erase(&elem->node, &pool->tree);
pool              385 drivers/infiniband/sw/rxe/rxe_pool.c 	write_unlock_irqrestore(&pool->pool_lock, flags);
pool              388 drivers/infiniband/sw/rxe/rxe_pool.c void *rxe_alloc(struct rxe_pool *pool)
pool              393 drivers/infiniband/sw/rxe/rxe_pool.c 	might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
pool              395 drivers/infiniband/sw/rxe/rxe_pool.c 	read_lock_irqsave(&pool->pool_lock, flags);
pool              396 drivers/infiniband/sw/rxe/rxe_pool.c 	if (pool->state != RXE_POOL_STATE_VALID) {
pool              397 drivers/infiniband/sw/rxe/rxe_pool.c 		read_unlock_irqrestore(&pool->pool_lock, flags);
pool              400 drivers/infiniband/sw/rxe/rxe_pool.c 	kref_get(&pool->ref_cnt);
pool              401 drivers/infiniband/sw/rxe/rxe_pool.c 	read_unlock_irqrestore(&pool->pool_lock, flags);
pool              403 drivers/infiniband/sw/rxe/rxe_pool.c 	if (!ib_device_try_get(&pool->rxe->ib_dev))
pool              406 drivers/infiniband/sw/rxe/rxe_pool.c 	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
pool              409 drivers/infiniband/sw/rxe/rxe_pool.c 	elem = kmem_cache_zalloc(pool_cache(pool),
pool              410 drivers/infiniband/sw/rxe/rxe_pool.c 				 (pool->flags & RXE_POOL_ATOMIC) ?
pool              415 drivers/infiniband/sw/rxe/rxe_pool.c 	elem->pool = pool;
pool              421 drivers/infiniband/sw/rxe/rxe_pool.c 	atomic_dec(&pool->num_elem);
pool              422 drivers/infiniband/sw/rxe/rxe_pool.c 	ib_device_put(&pool->rxe->ib_dev);
pool              424 drivers/infiniband/sw/rxe/rxe_pool.c 	rxe_pool_put(pool);
pool              428 drivers/infiniband/sw/rxe/rxe_pool.c int rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
pool              432 drivers/infiniband/sw/rxe/rxe_pool.c 	might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
pool              434 drivers/infiniband/sw/rxe/rxe_pool.c 	read_lock_irqsave(&pool->pool_lock, flags);
pool              435 drivers/infiniband/sw/rxe/rxe_pool.c 	if (pool->state != RXE_POOL_STATE_VALID) {
pool              436 drivers/infiniband/sw/rxe/rxe_pool.c 		read_unlock_irqrestore(&pool->pool_lock, flags);
pool              439 drivers/infiniband/sw/rxe/rxe_pool.c 	kref_get(&pool->ref_cnt);
pool              440 drivers/infiniband/sw/rxe/rxe_pool.c 	read_unlock_irqrestore(&pool->pool_lock, flags);
pool              442 drivers/infiniband/sw/rxe/rxe_pool.c 	if (!ib_device_try_get(&pool->rxe->ib_dev))
pool              445 drivers/infiniband/sw/rxe/rxe_pool.c 	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
pool              448 drivers/infiniband/sw/rxe/rxe_pool.c 	elem->pool = pool;
pool              454 drivers/infiniband/sw/rxe/rxe_pool.c 	atomic_dec(&pool->num_elem);
pool              455 drivers/infiniband/sw/rxe/rxe_pool.c 	ib_device_put(&pool->rxe->ib_dev);
pool              457 drivers/infiniband/sw/rxe/rxe_pool.c 	rxe_pool_put(pool);
pool              465 drivers/infiniband/sw/rxe/rxe_pool.c 	struct rxe_pool *pool = elem->pool;
pool              467 drivers/infiniband/sw/rxe/rxe_pool.c 	if (pool->cleanup)
pool              468 drivers/infiniband/sw/rxe/rxe_pool.c 		pool->cleanup(elem);
pool              470 drivers/infiniband/sw/rxe/rxe_pool.c 	if (!(pool->flags & RXE_POOL_NO_ALLOC))
pool              471 drivers/infiniband/sw/rxe/rxe_pool.c 		kmem_cache_free(pool_cache(pool), elem);
pool              472 drivers/infiniband/sw/rxe/rxe_pool.c 	atomic_dec(&pool->num_elem);
pool              473 drivers/infiniband/sw/rxe/rxe_pool.c 	ib_device_put(&pool->rxe->ib_dev);
pool              474 drivers/infiniband/sw/rxe/rxe_pool.c 	rxe_pool_put(pool);
pool              477 drivers/infiniband/sw/rxe/rxe_pool.c void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
pool              483 drivers/infiniband/sw/rxe/rxe_pool.c 	read_lock_irqsave(&pool->pool_lock, flags);
pool              485 drivers/infiniband/sw/rxe/rxe_pool.c 	if (pool->state != RXE_POOL_STATE_VALID)
pool              488 drivers/infiniband/sw/rxe/rxe_pool.c 	node = pool->tree.rb_node;
pool              504 drivers/infiniband/sw/rxe/rxe_pool.c 	read_unlock_irqrestore(&pool->pool_lock, flags);
pool              508 drivers/infiniband/sw/rxe/rxe_pool.c void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
pool              515 drivers/infiniband/sw/rxe/rxe_pool.c 	read_lock_irqsave(&pool->pool_lock, flags);
pool              517 drivers/infiniband/sw/rxe/rxe_pool.c 	if (pool->state != RXE_POOL_STATE_VALID)
pool              520 drivers/infiniband/sw/rxe/rxe_pool.c 	node = pool->tree.rb_node;
pool              525 drivers/infiniband/sw/rxe/rxe_pool.c 		cmp = memcmp((u8 *)elem + pool->key_offset,
pool              526 drivers/infiniband/sw/rxe/rxe_pool.c 			     key, pool->key_size);
pool              540 drivers/infiniband/sw/rxe/rxe_pool.c 	read_unlock_irqrestore(&pool->pool_lock, flags);
pool               83 drivers/infiniband/sw/rxe/rxe_pool.h 	struct rxe_pool		*pool;
pool              126 drivers/infiniband/sw/rxe/rxe_pool.h int rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
pool              130 drivers/infiniband/sw/rxe/rxe_pool.h void rxe_pool_cleanup(struct rxe_pool *pool);
pool              133 drivers/infiniband/sw/rxe/rxe_pool.h void *rxe_alloc(struct rxe_pool *pool);
pool              136 drivers/infiniband/sw/rxe/rxe_pool.h int rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem);
pool              155 drivers/infiniband/sw/rxe/rxe_pool.h void *rxe_pool_get_index(struct rxe_pool *pool, u32 index);
pool              158 drivers/infiniband/sw/rxe/rxe_pool.h void *rxe_pool_get_key(struct rxe_pool *pool, void *key);
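rxe_pool.c combines three mechanisms per object pool: a kref on the pool itself, a bitmap for index allocation, and an rb-tree for index/key lookup, all under pool_lock. The index allocator is the piece worth spelling out: alloc_index() searches from the last handed-out slot and wraps around so indices rotate instead of being reused immediately. A self-contained sketch of that wrap-around search:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

struct demo_index_pool {
	unsigned long *table;	/* one bit per index in [min_index, max_index] */
	u32 min_index;
	u32 max_index;
	u32 last;		/* offset of the most recently allocated bit */
};

/* caller holds the pool lock; assumes at least one bit is still clear */
static u32 demo_alloc_index(struct demo_index_pool *pool)
{
	u32 range = pool->max_index - pool->min_index + 1;
	u32 index;

	/* start after the previous allocation so indices rotate ... */
	index = find_next_zero_bit(pool->table, range, pool->last);
	if (index >= range)
		/* ... and wrap to the beginning when we run off the end */
		index = find_first_zero_bit(pool->table, range);

	set_bit(index, pool->table);
	pool->last = index;
	return index + pool->min_index;
}

static void demo_free_index(struct demo_index_pool *pool, u32 index)
{
	clear_bit(index - pool->min_index, pool->table);
}

In rxe the freshly allocated index is also inserted into pool->tree, so the rxe_pool_get_index() and rxe_pool_get_key() hits above resolve handles with an rb-tree walk under the same lock.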
pool              413 drivers/infiniband/ulp/srp/ib_srp.c static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
pool              418 drivers/infiniband/ulp/srp/ib_srp.c 	if (!pool)
pool              421 drivers/infiniband/ulp/srp/ib_srp.c 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
pool              425 drivers/infiniband/ulp/srp/ib_srp.c 	kfree(pool);
pool              439 drivers/infiniband/ulp/srp/ib_srp.c 	struct srp_fr_pool *pool;
pool              448 drivers/infiniband/ulp/srp/ib_srp.c 	pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
pool              449 drivers/infiniband/ulp/srp/ib_srp.c 	if (!pool)
pool              451 drivers/infiniband/ulp/srp/ib_srp.c 	pool->size = pool_size;
pool              452 drivers/infiniband/ulp/srp/ib_srp.c 	pool->max_page_list_len = max_page_list_len;
pool              453 drivers/infiniband/ulp/srp/ib_srp.c 	spin_lock_init(&pool->lock);
pool              454 drivers/infiniband/ulp/srp/ib_srp.c 	INIT_LIST_HEAD(&pool->free_list);
pool              461 drivers/infiniband/ulp/srp/ib_srp.c 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
pool              471 drivers/infiniband/ulp/srp/ib_srp.c 		list_add_tail(&d->entry, &pool->free_list);
pool              475 drivers/infiniband/ulp/srp/ib_srp.c 	return pool;
pool              478 drivers/infiniband/ulp/srp/ib_srp.c 	srp_destroy_fr_pool(pool);
pool              481 drivers/infiniband/ulp/srp/ib_srp.c 	pool = ERR_PTR(ret);
pool              489 drivers/infiniband/ulp/srp/ib_srp.c static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
pool              494 drivers/infiniband/ulp/srp/ib_srp.c 	spin_lock_irqsave(&pool->lock, flags);
pool              495 drivers/infiniband/ulp/srp/ib_srp.c 	if (!list_empty(&pool->free_list)) {
pool              496 drivers/infiniband/ulp/srp/ib_srp.c 		d = list_first_entry(&pool->free_list, typeof(*d), entry);
pool              499 drivers/infiniband/ulp/srp/ib_srp.c 	spin_unlock_irqrestore(&pool->lock, flags);
pool              513 drivers/infiniband/ulp/srp/ib_srp.c static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
pool              519 drivers/infiniband/ulp/srp/ib_srp.c 	spin_lock_irqsave(&pool->lock, flags);
pool              521 drivers/infiniband/ulp/srp/ib_srp.c 		list_add(&desc[i]->entry, &pool->free_list);
pool              522 drivers/infiniband/ulp/srp/ib_srp.c 	spin_unlock_irqrestore(&pool->lock, flags);
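The SRP fast-registration pool is a fixed-size array of descriptors threaded onto a free list: get pops the head under a spinlock, put pushes descriptors back. A compact sketch of that free-list pool (descriptor contents omitted):

#include <linux/list.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_desc {
	struct list_head entry;
	/* ... registration state would live here ... */
};

struct demo_fr_pool {
	spinlock_t lock;
	struct list_head free_list;
	int size;
	struct demo_desc desc[];	/* flexible array, as in struct_size() above */
};

static struct demo_fr_pool *demo_fr_pool_create(int pool_size)
{
	struct demo_fr_pool *pool;
	int i;

	pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->size = pool_size;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);
	for (i = 0; i < pool_size; i++)
		list_add_tail(&pool->desc[i].entry, &pool->free_list);
	return pool;
}

static struct demo_desc *demo_fr_pool_get(struct demo_fr_pool *pool)
{
	struct demo_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	d = list_first_entry_or_null(&pool->free_list, struct demo_desc, entry);
	if (d)
		list_del(&d->entry);
	spin_unlock_irqrestore(&pool->lock, flags);
	return d;
}

static void demo_fr_pool_put(struct demo_fr_pool *pool, struct demo_desc *d)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	list_add(&d->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}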
pool              271 drivers/lightnvm/pblk-core.c 	mempool_t *pool;
pool              278 drivers/lightnvm/pblk-core.c 		pool = &pblk->w_rq_pool;
pool              282 drivers/lightnvm/pblk-core.c 		pool = &pblk->r_rq_pool;
pool              286 drivers/lightnvm/pblk-core.c 		pool = &pblk->e_rq_pool;
pool              290 drivers/lightnvm/pblk-core.c 	rqd = mempool_alloc(pool, GFP_KERNEL);
pool              299 drivers/lightnvm/pblk-core.c 	mempool_t *pool;
pool              306 drivers/lightnvm/pblk-core.c 		pool = &pblk->w_rq_pool;
pool              309 drivers/lightnvm/pblk-core.c 		pool = &pblk->r_rq_pool;
pool              312 drivers/lightnvm/pblk-core.c 		pool = &pblk->e_rq_pool;
pool              320 drivers/lightnvm/pblk-core.c 	mempool_free(rqd, pool);
pool             1186 drivers/md/bcache/bset.c 	mempool_exit(&state->pool);
pool             1197 drivers/md/bcache/bset.c 	return mempool_init_page_pool(&state->pool, 1, page_order);
pool             1255 drivers/md/bcache/bset.c 		outp = mempool_alloc(&state->pool, GFP_NOIO);
pool             1284 drivers/md/bcache/bset.c 		mempool_free(virt_to_page(out), &state->pool);
pool              364 drivers/md/bcache/bset.h 	mempool_t		pool;
pool               25 drivers/md/dm-io.c 	mempool_t pool;
pool               58 drivers/md/dm-io.c 	ret = mempool_init_slab_pool(&client->pool, min_ios, _dm_io_cache);
pool               69 drivers/md/dm-io.c 	mempool_exit(&client->pool);
pool               77 drivers/md/dm-io.c 	mempool_exit(&client->pool);
pool              124 drivers/md/dm-io.c 	mempool_free(io, &io->client->pool);
pool              446 drivers/md/dm-io.c 	io = mempool_alloc(&client->pool, GFP_NOIO);
pool              478 drivers/md/dm-io.c 	io = mempool_alloc(&client->pool, GFP_NOIO);
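dm-io embeds a mempool_t directly in its client and backs it with a slab cache, so every request is guaranteed a struct io even under memory pressure: mempool_init_slab_pool() at client-create time, mempool_alloc(..., GFP_NOIO) per request, mempool_exit() on teardown. A small sketch of an embedded slab-backed mempool (names are illustrative; the cache is assumed to be created elsewhere with kmem_cache_create()):

#include <linux/gfp.h>
#include <linux/mempool.h>
#include <linux/slab.h>

struct demo_io { int result; };		/* placeholder per-request state */

static struct kmem_cache *demo_io_cache;	/* created at module init (assumed) */

struct demo_client {
	mempool_t pool;		/* embedded, not a pointer: init/exit, not create/destroy */
};

static int demo_client_create(struct demo_client *client, unsigned int min_ios)
{
	return mempool_init_slab_pool(&client->pool, min_ios, demo_io_cache);
}

static struct demo_io *demo_io_get(struct demo_client *client)
{
	/* GFP_NOIO: we may be on the writeback path, don't recurse into I/O */
	return mempool_alloc(&client->pool, GFP_NOIO);
}

static void demo_io_put(struct demo_client *client, struct demo_io *io)
{
	mempool_free(io, &client->pool);
}

static void demo_client_destroy(struct demo_client *client)
{
	mempool_exit(&client->pool);
}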
pool              287 drivers/md/dm-thin.c static void metadata_operation_failed(struct pool *pool, const char *op, int r);
pool              289 drivers/md/dm-thin.c static enum pool_mode get_pool_mode(struct pool *pool)
pool              291 drivers/md/dm-thin.c 	return pool->pf.mode;
pool              294 drivers/md/dm-thin.c static void notify_of_pool_mode_change(struct pool *pool)
pool              304 drivers/md/dm-thin.c 	enum pool_mode mode = get_pool_mode(pool);
pool              307 drivers/md/dm-thin.c 		if (!pool->pf.error_if_no_space)
pool              313 drivers/md/dm-thin.c 	dm_table_event(pool->ti->table);
pool              315 drivers/md/dm-thin.c 	       dm_device_name(pool->pool_md),
pool              324 drivers/md/dm-thin.c 	struct pool *pool;
pool              345 drivers/md/dm-thin.c 	struct pool *pool;
pool              366 drivers/md/dm-thin.c static bool block_size_is_power_of_two(struct pool *pool)
pool              368 drivers/md/dm-thin.c 	return pool->sectors_per_block_shift >= 0;
pool              371 drivers/md/dm-thin.c static sector_t block_to_sectors(struct pool *pool, dm_block_t b)
pool              373 drivers/md/dm-thin.c 	return block_size_is_power_of_two(pool) ?
pool              374 drivers/md/dm-thin.c 		(b << pool->sectors_per_block_shift) :
pool              375 drivers/md/dm-thin.c 		(b * pool->sectors_per_block);
pool              400 drivers/md/dm-thin.c 	sector_t s = block_to_sectors(tc->pool, data_b);
pool              401 drivers/md/dm-thin.c 	sector_t len = block_to_sectors(tc->pool, data_e - data_b);
pool              436 drivers/md/dm-thin.c static void wake_worker(struct pool *pool)
pool              438 drivers/md/dm-thin.c 	queue_work(pool->wq, &pool->worker);
pool              443 drivers/md/dm-thin.c static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
pool              453 drivers/md/dm-thin.c 	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);
pool              455 drivers/md/dm-thin.c 	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
pool              461 drivers/md/dm-thin.c 		dm_bio_prison_free_cell(pool->prison, cell_prealloc);
pool              466 drivers/md/dm-thin.c static void cell_release(struct pool *pool,
pool              470 drivers/md/dm-thin.c 	dm_cell_release(pool->prison, cell, bios);
pool              471 drivers/md/dm-thin.c 	dm_bio_prison_free_cell(pool->prison, cell);
pool              474 drivers/md/dm-thin.c static void cell_visit_release(struct pool *pool,
pool              479 drivers/md/dm-thin.c 	dm_cell_visit_release(pool->prison, fn, context, cell);
pool              480 drivers/md/dm-thin.c 	dm_bio_prison_free_cell(pool->prison, cell);
pool              483 drivers/md/dm-thin.c static void cell_release_no_holder(struct pool *pool,
pool              487 drivers/md/dm-thin.c 	dm_cell_release_no_holder(pool->prison, cell, bios);
pool              488 drivers/md/dm-thin.c 	dm_bio_prison_free_cell(pool->prison, cell);
pool              491 drivers/md/dm-thin.c static void cell_error_with_code(struct pool *pool,
pool              494 drivers/md/dm-thin.c 	dm_cell_error(pool->prison, cell, error_code);
pool              495 drivers/md/dm-thin.c 	dm_bio_prison_free_cell(pool->prison, cell);
pool              498 drivers/md/dm-thin.c static blk_status_t get_pool_io_error_code(struct pool *pool)
pool              500 drivers/md/dm-thin.c 	return pool->out_of_data_space ? BLK_STS_NOSPC : BLK_STS_IOERR;
pool              503 drivers/md/dm-thin.c static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
pool              505 drivers/md/dm-thin.c 	cell_error_with_code(pool, cell, get_pool_io_error_code(pool));
pool              508 drivers/md/dm-thin.c static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
pool              510 drivers/md/dm-thin.c 	cell_error_with_code(pool, cell, 0);
pool              513 drivers/md/dm-thin.c static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
pool              515 drivers/md/dm-thin.c 	cell_error_with_code(pool, cell, BLK_STS_DM_REQUEUE);
pool              539 drivers/md/dm-thin.c static void __pool_table_insert(struct pool *pool)
pool              542 drivers/md/dm-thin.c 	list_add(&pool->list, &dm_thin_pool_table.pools);
pool              545 drivers/md/dm-thin.c static void __pool_table_remove(struct pool *pool)
pool              548 drivers/md/dm-thin.c 	list_del(&pool->list);
pool              551 drivers/md/dm-thin.c static struct pool *__pool_table_lookup(struct mapped_device *md)
pool              553 drivers/md/dm-thin.c 	struct pool *pool = NULL, *tmp;
pool              559 drivers/md/dm-thin.c 			pool = tmp;
pool              564 drivers/md/dm-thin.c 	return pool;
pool              567 drivers/md/dm-thin.c static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
pool              569 drivers/md/dm-thin.c 	struct pool *pool = NULL, *tmp;
pool              575 drivers/md/dm-thin.c 			pool = tmp;
pool              580 drivers/md/dm-thin.c 	return pool;
pool              627 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool              639 drivers/md/dm-thin.c 		cell_requeue(pool, cell);
pool              658 drivers/md/dm-thin.c static void error_retry_list_with_code(struct pool *pool, blk_status_t error)
pool              663 drivers/md/dm-thin.c 	list_for_each_entry_rcu(tc, &pool->active_thins, list)
pool              668 drivers/md/dm-thin.c static void error_retry_list(struct pool *pool)
pool              670 drivers/md/dm-thin.c 	error_retry_list_with_code(pool, get_pool_io_error_code(pool));
pool              682 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool              685 drivers/md/dm-thin.c 	if (block_size_is_power_of_two(pool))
pool              686 drivers/md/dm-thin.c 		block_nr >>= pool->sectors_per_block_shift;
pool              688 drivers/md/dm-thin.c 		(void) sector_div(block_nr, pool->sectors_per_block);
pool              699 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool              703 drivers/md/dm-thin.c 	b += pool->sectors_per_block - 1ull; /* so we round up */
pool              705 drivers/md/dm-thin.c 	if (block_size_is_power_of_two(pool)) {
pool              706 drivers/md/dm-thin.c 		b >>= pool->sectors_per_block_shift;
pool              707 drivers/md/dm-thin.c 		e >>= pool->sectors_per_block_shift;
pool              709 drivers/md/dm-thin.c 		(void) sector_div(b, pool->sectors_per_block);
pool              710 drivers/md/dm-thin.c 		(void) sector_div(e, pool->sectors_per_block);
pool              723 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool              727 drivers/md/dm-thin.c 	if (block_size_is_power_of_two(pool))
pool              729 drivers/md/dm-thin.c 			(block << pool->sectors_per_block_shift) |
pool              730 drivers/md/dm-thin.c 			(bi_sector & (pool->sectors_per_block - 1));
pool              732 drivers/md/dm-thin.c 		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
pool              733 drivers/md/dm-thin.c 				 sector_div(bi_sector, pool->sectors_per_block);
pool              747 drivers/md/dm-thin.c static void inc_all_io_entry(struct pool *pool, struct bio *bio)
pool              755 drivers/md/dm-thin.c 	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
pool              760 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool              782 drivers/md/dm-thin.c 	spin_lock_irqsave(&pool->lock, flags);
pool              783 drivers/md/dm-thin.c 	bio_list_add(&pool->deferred_flush_bios, bio);
pool              784 drivers/md/dm-thin.c 	spin_unlock_irqrestore(&pool->lock, flags);
pool              836 drivers/md/dm-thin.c 	struct pool *pool = m->tc->pool;
pool              839 drivers/md/dm-thin.c 		list_add_tail(&m->list, &pool->prepared_mappings);
pool              840 drivers/md/dm-thin.c 		wake_worker(pool);
pool              847 drivers/md/dm-thin.c 	struct pool *pool = m->tc->pool;
pool              849 drivers/md/dm-thin.c 	spin_lock_irqsave(&pool->lock, flags);
pool              851 drivers/md/dm-thin.c 	spin_unlock_irqrestore(&pool->lock, flags);
pool              889 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool              893 drivers/md/dm-thin.c 	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
pool              896 drivers/md/dm-thin.c 	wake_worker(pool);
pool              917 drivers/md/dm-thin.c 			inc_all_io_entry(info->tc->pool, bio);
pool              945 drivers/md/dm-thin.c 	cell_visit_release(tc->pool, __inc_remap_and_issue_cell,
pool              957 drivers/md/dm-thin.c 	cell_error(m->tc->pool, m->cell);
pool              959 drivers/md/dm-thin.c 	mempool_free(m, &m->tc->pool->mapping_pool);
pool              964 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool              990 drivers/md/dm-thin.c 	spin_lock_irqsave(&pool->lock, flags);
pool              991 drivers/md/dm-thin.c 	bio_list_add(&pool->deferred_flush_completions, bio);
pool              992 drivers/md/dm-thin.c 	spin_unlock_irqrestore(&pool->lock, flags);
pool              998 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool             1003 drivers/md/dm-thin.c 		cell_error(pool, m->cell);
pool             1014 drivers/md/dm-thin.c 		metadata_operation_failed(pool, "dm_thin_insert_block", r);
pool             1015 drivers/md/dm-thin.c 		cell_error(pool, m->cell);
pool             1029 drivers/md/dm-thin.c 		inc_all_io_entry(tc->pool, m->cell->holder);
pool             1036 drivers/md/dm-thin.c 	mempool_free(m, &pool->mapping_pool);
pool             1046 drivers/md/dm-thin.c 	mempool_free(m, &tc->pool->mapping_pool);
pool             1068 drivers/md/dm-thin.c 		metadata_operation_failed(tc->pool, "dm_thin_remove_range", r);
pool             1074 drivers/md/dm-thin.c 	mempool_free(m, &tc->pool->mapping_pool);
pool             1089 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool             1097 drivers/md/dm-thin.c 			r = dm_pool_block_is_shared(pool->pmd, b, &shared);
pool             1110 drivers/md/dm-thin.c 			r = dm_pool_block_is_shared(pool->pmd, e, &shared);
pool             1131 drivers/md/dm-thin.c 	struct pool *pool = m->tc->pool;
pool             1133 drivers/md/dm-thin.c 	spin_lock_irqsave(&pool->lock, flags);
pool             1134 drivers/md/dm-thin.c 	list_add_tail(&m->list, &pool->prepared_discards_pt2);
pool             1135 drivers/md/dm-thin.c 	spin_unlock_irqrestore(&pool->lock, flags);
pool             1136 drivers/md/dm-thin.c 	wake_worker(pool);
pool             1153 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool             1164 drivers/md/dm-thin.c 		metadata_operation_failed(pool, "dm_thin_remove_range", r);
pool             1167 drivers/md/dm-thin.c 		mempool_free(m, &pool->mapping_pool);
pool             1175 drivers/md/dm-thin.c 	r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
pool             1177 drivers/md/dm-thin.c 		metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
pool             1180 drivers/md/dm-thin.c 		mempool_free(m, &pool->mapping_pool);
pool             1187 drivers/md/dm-thin.c 		       dm_device_name(tc->pool->pool_md));
pool             1210 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool             1216 drivers/md/dm-thin.c 	r = dm_pool_dec_data_range(pool->pmd, m->data_block,
pool             1219 drivers/md/dm-thin.c 		metadata_operation_failed(pool, "dm_pool_dec_data_range", r);
pool             1225 drivers/md/dm-thin.c 	mempool_free(m, &pool->mapping_pool);
pool             1228 drivers/md/dm-thin.c static void process_prepared(struct pool *pool, struct list_head *head,
pool             1236 drivers/md/dm-thin.c 	spin_lock_irqsave(&pool->lock, flags);
pool             1238 drivers/md/dm-thin.c 	spin_unlock_irqrestore(&pool->lock, flags);
pool             1247 drivers/md/dm-thin.c static int io_overlaps_block(struct pool *pool, struct bio *bio)
pool             1250 drivers/md/dm-thin.c 		(pool->sectors_per_block << SECTOR_SHIFT);
pool             1253 drivers/md/dm-thin.c static int io_overwrites_block(struct pool *pool, struct bio *bio)
pool             1256 drivers/md/dm-thin.c 		io_overlaps_block(pool, bio);
pool             1266 drivers/md/dm-thin.c static int ensure_next_mapping(struct pool *pool)
pool             1268 drivers/md/dm-thin.c 	if (pool->next_mapping)
pool             1271 drivers/md/dm-thin.c 	pool->next_mapping = mempool_alloc(&pool->mapping_pool, GFP_ATOMIC);
pool             1273 drivers/md/dm-thin.c 	return pool->next_mapping ? 0 : -ENOMEM;
pool             1276 drivers/md/dm-thin.c static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
pool             1278 drivers/md/dm-thin.c 	struct dm_thin_new_mapping *m = pool->next_mapping;
pool             1280 drivers/md/dm-thin.c 	BUG_ON(!pool->next_mapping);
pool             1286 drivers/md/dm-thin.c 	pool->next_mapping = NULL;
pool             1300 drivers/md/dm-thin.c 	dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
pool             1307 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool             1313 drivers/md/dm-thin.c 	inc_all_io_entry(pool, bio);
pool             1326 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool             1327 drivers/md/dm-thin.c 	struct dm_thin_new_mapping *m = get_next_mapping(pool);
pool             1342 drivers/md/dm-thin.c 	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
pool             1351 drivers/md/dm-thin.c 	if (io_overwrites_block(pool, bio))
pool             1357 drivers/md/dm-thin.c 		from.sector = data_origin * pool->sectors_per_block;
pool             1361 drivers/md/dm-thin.c 		to.sector = data_dest * pool->sectors_per_block;
pool             1364 drivers/md/dm-thin.c 		dm_kcopyd_copy(pool->copier, &from, 1, &to,
pool             1370 drivers/md/dm-thin.c 		if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) {
pool             1373 drivers/md/dm-thin.c 				data_dest * pool->sectors_per_block + len,
pool             1374 drivers/md/dm-thin.c 				(data_dest + 1) * pool->sectors_per_block);
pool             1387 drivers/md/dm-thin.c 		      tc->pool->sectors_per_block);
pool             1394 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool             1395 drivers/md/dm-thin.c 	struct dm_thin_new_mapping *m = get_next_mapping(pool);
pool             1409 drivers/md/dm-thin.c 	if (pool->pf.zero_new_blocks) {
pool             1410 drivers/md/dm-thin.c 		if (io_overwrites_block(pool, bio))
pool             1413 drivers/md/dm-thin.c 			ll_zero(tc, m, data_block * pool->sectors_per_block,
pool             1414 drivers/md/dm-thin.c 				(data_block + 1) * pool->sectors_per_block);
pool             1423 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool             1424 drivers/md/dm-thin.c 	sector_t virt_block_begin = virt_block * pool->sectors_per_block;
pool             1425 drivers/md/dm-thin.c 	sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;
pool             1430 drivers/md/dm-thin.c 			      pool->sectors_per_block);
pool             1441 drivers/md/dm-thin.c static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
pool             1443 drivers/md/dm-thin.c static void requeue_bios(struct pool *pool);
pool             1450 drivers/md/dm-thin.c static bool is_read_only(struct pool *pool)
pool             1452 drivers/md/dm-thin.c 	return is_read_only_pool_mode(get_pool_mode(pool));
pool             1455 drivers/md/dm-thin.c static void check_for_metadata_space(struct pool *pool)
pool             1461 drivers/md/dm-thin.c 	r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free);
pool             1467 drivers/md/dm-thin.c 	if (ooms_reason && !is_read_only(pool)) {
pool             1469 drivers/md/dm-thin.c 		set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE);
pool             1473 drivers/md/dm-thin.c static void check_for_data_space(struct pool *pool)
pool             1478 drivers/md/dm-thin.c 	if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE)
pool             1481 drivers/md/dm-thin.c 	r = dm_pool_get_free_block_count(pool->pmd, &nr_free);
pool             1486 drivers/md/dm-thin.c 		set_pool_mode(pool, PM_WRITE);
pool             1487 drivers/md/dm-thin.c 		requeue_bios(pool);
pool             1495 drivers/md/dm-thin.c static int commit(struct pool *pool)
pool             1499 drivers/md/dm-thin.c 	if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE)
pool             1502 drivers/md/dm-thin.c 	r = dm_pool_commit_metadata(pool->pmd);
pool             1504 drivers/md/dm-thin.c 		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
pool             1506 drivers/md/dm-thin.c 		check_for_metadata_space(pool);
pool             1507 drivers/md/dm-thin.c 		check_for_data_space(pool);
pool             1513 drivers/md/dm-thin.c static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
pool             1517 drivers/md/dm-thin.c 	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
pool             1519 drivers/md/dm-thin.c 		       dm_device_name(pool->pool_md));
pool             1520 drivers/md/dm-thin.c 		spin_lock_irqsave(&pool->lock, flags);
pool             1521 drivers/md/dm-thin.c 		pool->low_water_triggered = true;
pool             1522 drivers/md/dm-thin.c 		spin_unlock_irqrestore(&pool->lock, flags);
pool             1523 drivers/md/dm-thin.c 		dm_table_event(pool->ti->table);
pool             1531 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool             1533 drivers/md/dm-thin.c 	if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
pool             1536 drivers/md/dm-thin.c 	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
pool             1538 drivers/md/dm-thin.c 		metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
pool             1542 drivers/md/dm-thin.c 	check_low_water_mark(pool, free_blocks);
pool             1549 drivers/md/dm-thin.c 		r = commit(pool);
pool             1553 drivers/md/dm-thin.c 		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
pool             1555 drivers/md/dm-thin.c 			metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
pool             1560 drivers/md/dm-thin.c 			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
pool             1565 drivers/md/dm-thin.c 	r = dm_pool_alloc_data_block(pool->pmd, result);
pool             1568 drivers/md/dm-thin.c 			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
pool             1570 drivers/md/dm-thin.c 			metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
pool             1574 drivers/md/dm-thin.c 	r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);
pool             1576 drivers/md/dm-thin.c 		metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r);
pool             1582 drivers/md/dm-thin.c 		r = commit(pool);
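The drivers/md/dm-thin.c entries between lines 1531 and 1582 reference alloc_data_block(): it reads the free data-block count, commits metadata when the count hits zero (a commit can return blocks freed in the current transaction), and only then allocates. A minimal sketch of that check-commit-retry shape, using only the dm_pool_* calls visible in the entries above; the real function also handles low-water marks, PM_OUT_OF_DATA_SPACE transitions and metadata errors, all omitted here:

/*
 * Sketch only, assumed to live in dm-thin.c so that struct pool and
 * commit() are in scope; not the full alloc_data_block().
 */
static int alloc_data_block_sketch(struct pool *pool, dm_block_t *result)
{
	int r;
	dm_block_t free_blocks;

	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
	if (r)
		return r;

	if (!free_blocks) {
		/* A commit may release blocks freed in this transaction. */
		r = commit(pool);
		if (r)
			return r;

		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
		if (r)
			return r;
		if (!free_blocks)
			return -ENOSPC;	/* real code switches pool mode here */
	}

	return dm_pool_alloc_data_block(pool->pmd, result);
}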
pool             1605 drivers/md/dm-thin.c static blk_status_t should_error_unserviceable_bio(struct pool *pool)
pool             1607 drivers/md/dm-thin.c 	enum pool_mode m = get_pool_mode(pool);
pool             1616 drivers/md/dm-thin.c 		return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0;
pool             1629 drivers/md/dm-thin.c static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
pool             1631 drivers/md/dm-thin.c 	blk_status_t error = should_error_unserviceable_bio(pool);
pool             1640 drivers/md/dm-thin.c static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
pool             1646 drivers/md/dm-thin.c 	error = should_error_unserviceable_bio(pool);
pool             1648 drivers/md/dm-thin.c 		cell_error_with_code(pool, cell, error);
pool             1653 drivers/md/dm-thin.c 	cell_release(pool, cell, &bios);
pool             1662 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool             1663 drivers/md/dm-thin.c 	struct dm_thin_new_mapping *m = get_next_mapping(pool);
pool             1675 drivers/md/dm-thin.c 	if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
pool             1676 drivers/md/dm-thin.c 		pool->process_prepared_discard(m);
pool             1682 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool             1692 drivers/md/dm-thin.c 		r = ensure_next_mapping(pool);
pool             1707 drivers/md/dm-thin.c 		if (bio_detain(tc->pool, &data_key, NULL, &data_cell)) {
pool             1717 drivers/md/dm-thin.c 		m = get_next_mapping(pool);
pool             1735 drivers/md/dm-thin.c 		if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
pool             1736 drivers/md/dm-thin.c 			pool->process_prepared_discard(m);
pool             1779 drivers/md/dm-thin.c 	if (bio_detain(tc->pool, &virt_key, bio, &virt_cell))
pool             1789 drivers/md/dm-thin.c 	tc->pool->process_discard_cell(tc, virt_cell);
pool             1799 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool             1809 drivers/md/dm-thin.c 		retry_bios_on_resume(pool, cell);
pool             1815 drivers/md/dm-thin.c 		cell_error(pool, cell);
pool             1833 drivers/md/dm-thin.c 			h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds);
pool             1834 drivers/md/dm-thin.c 			inc_all_io_entry(info->tc->pool, bio);
pool             1851 drivers/md/dm-thin.c 	cell_visit_release(tc->pool, __remap_and_issue_shared_cell,
pool             1867 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool             1875 drivers/md/dm-thin.c 	if (bio_detain(pool, &key, bio, &data_cell)) {
pool             1886 drivers/md/dm-thin.c 		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
pool             1887 drivers/md/dm-thin.c 		inc_all_io_entry(pool, bio);
pool             1900 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool             1906 drivers/md/dm-thin.c 		inc_all_io_entry(pool, bio);
pool             1933 drivers/md/dm-thin.c 		retry_bios_on_resume(pool, cell);
pool             1939 drivers/md/dm-thin.c 		cell_error(pool, cell);
pool             1947 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool             1953 drivers/md/dm-thin.c 		cell_requeue(pool, cell);
pool             1963 drivers/md/dm-thin.c 			inc_all_io_entry(pool, bio);
pool             1971 drivers/md/dm-thin.c 			inc_all_io_entry(pool, bio);
pool             2001 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool             2011 drivers/md/dm-thin.c 	if (bio_detain(pool, &key, bio, &cell))
pool             2029 drivers/md/dm-thin.c 			handle_unserviceable_bio(tc->pool, bio);
pool             2033 drivers/md/dm-thin.c 			inc_all_io_entry(tc->pool, bio);
pool             2044 drivers/md/dm-thin.c 			handle_unserviceable_bio(tc->pool, bio);
pool             2049 drivers/md/dm-thin.c 			inc_all_io_entry(tc->pool, bio);
pool             2090 drivers/md/dm-thin.c 	cell_success(tc->pool, cell);
pool             2095 drivers/md/dm-thin.c 	cell_error(tc->pool, cell);
pool             2102 drivers/md/dm-thin.c static int need_commit_due_to_time(struct pool *pool)
pool             2104 drivers/md/dm-thin.c 	return !time_in_range(jiffies, pool->last_commit_jiffies,
pool             2105 drivers/md/dm-thin.c 			      pool->last_commit_jiffies + COMMIT_PERIOD);
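need_commit_due_to_time() above is the usual jiffies age check: commit once last_commit_jiffies is more than COMMIT_PERIOD in the past. A hedged, stand-alone illustration of the same time_in_range() idiom; the helper name and parameters are placeholders, not dm-thin code:

#include <linux/jiffies.h>

/* Returns true once 'last' is more than 'period' jiffies in the past. */
static bool past_period(unsigned long last, unsigned long period)
{
	return !time_in_range(jiffies, last, last + period);
}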
pool             2174 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool             2210 drivers/md/dm-thin.c 		if (ensure_next_mapping(pool)) {
pool             2219 drivers/md/dm-thin.c 			pool->process_discard(tc, bio);
pool             2221 drivers/md/dm-thin.c 			pool->process_bio(tc, bio);
pool             2224 drivers/md/dm-thin.c 			throttle_work_update(&pool->throttle);
pool             2225 drivers/md/dm-thin.c 			dm_pool_issue_prefetches(pool->pmd);
pool             2248 drivers/md/dm-thin.c static unsigned sort_cells(struct pool *pool, struct list_head *cells)
pool             2257 drivers/md/dm-thin.c 		pool->cell_sort_array[count++] = cell;
pool             2261 drivers/md/dm-thin.c 	sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL);
pool             2268 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool             2284 drivers/md/dm-thin.c 		count = sort_cells(tc->pool, &cells);
pool             2287 drivers/md/dm-thin.c 			cell = pool->cell_sort_array[i];
pool             2295 drivers/md/dm-thin.c 			if (ensure_next_mapping(pool)) {
pool             2297 drivers/md/dm-thin.c 					list_add(&pool->cell_sort_array[j]->user_list, &cells);
pool             2306 drivers/md/dm-thin.c 				pool->process_discard_cell(tc, cell);
pool             2308 drivers/md/dm-thin.c 				pool->process_cell(tc, cell);
pool             2321 drivers/md/dm-thin.c static struct thin_c *get_first_thin(struct pool *pool)
pool             2326 drivers/md/dm-thin.c 	if (!list_empty(&pool->active_thins)) {
pool             2327 drivers/md/dm-thin.c 		tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
pool             2335 drivers/md/dm-thin.c static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
pool             2340 drivers/md/dm-thin.c 	list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
pool             2352 drivers/md/dm-thin.c static void process_deferred_bios(struct pool *pool)
pool             2359 drivers/md/dm-thin.c 	tc = get_first_thin(pool);
pool             2363 drivers/md/dm-thin.c 		tc = get_next_thin(pool, tc);
pool             2373 drivers/md/dm-thin.c 	spin_lock_irqsave(&pool->lock, flags);
pool             2374 drivers/md/dm-thin.c 	bio_list_merge(&bios, &pool->deferred_flush_bios);
pool             2375 drivers/md/dm-thin.c 	bio_list_init(&pool->deferred_flush_bios);
pool             2377 drivers/md/dm-thin.c 	bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
pool             2378 drivers/md/dm-thin.c 	bio_list_init(&pool->deferred_flush_completions);
pool             2379 drivers/md/dm-thin.c 	spin_unlock_irqrestore(&pool->lock, flags);
pool             2382 drivers/md/dm-thin.c 	    !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
pool             2385 drivers/md/dm-thin.c 	if (commit(pool)) {
pool             2392 drivers/md/dm-thin.c 	pool->last_commit_jiffies = jiffies;
pool             2411 drivers/md/dm-thin.c 	struct pool *pool = container_of(ws, struct pool, worker);
pool             2413 drivers/md/dm-thin.c 	throttle_work_start(&pool->throttle);
pool             2414 drivers/md/dm-thin.c 	dm_pool_issue_prefetches(pool->pmd);
pool             2415 drivers/md/dm-thin.c 	throttle_work_update(&pool->throttle);
pool             2416 drivers/md/dm-thin.c 	process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
pool             2417 drivers/md/dm-thin.c 	throttle_work_update(&pool->throttle);
pool             2418 drivers/md/dm-thin.c 	process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
pool             2419 drivers/md/dm-thin.c 	throttle_work_update(&pool->throttle);
pool             2420 drivers/md/dm-thin.c 	process_prepared(pool, &pool->prepared_discards_pt2, &pool->process_prepared_discard_pt2);
pool             2421 drivers/md/dm-thin.c 	throttle_work_update(&pool->throttle);
pool             2422 drivers/md/dm-thin.c 	process_deferred_bios(pool);
pool             2423 drivers/md/dm-thin.c 	throttle_work_complete(&pool->throttle);
pool             2432 drivers/md/dm-thin.c 	struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
pool             2433 drivers/md/dm-thin.c 	wake_worker(pool);
pool             2434 drivers/md/dm-thin.c 	queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
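The do_waker() entries show a self-rearming delayed work item: it kicks the main worker and immediately requeues itself for the next period. A minimal sketch of that idiom with hypothetical names (struct my_dev, my_waker, MY_PERIOD), not the dm-thin types:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_dev {
	struct workqueue_struct *wq;
	struct work_struct	worker;
	struct delayed_work	waker;
};

#define MY_PERIOD	(HZ)	/* placeholder period */

static void my_waker(struct work_struct *ws)
{
	struct my_dev *d = container_of(to_delayed_work(ws), struct my_dev, waker);

	queue_work(d->wq, &d->worker);			/* wake the real worker */
	queue_delayed_work(d->wq, &d->waker, MY_PERIOD);	/* re-arm ourselves */
}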
pool             2444 drivers/md/dm-thin.c 	struct pool *pool = container_of(to_delayed_work(ws), struct pool,
pool             2447 drivers/md/dm-thin.c 	if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
pool             2448 drivers/md/dm-thin.c 		pool->pf.error_if_no_space = true;
pool             2449 drivers/md/dm-thin.c 		notify_of_pool_mode_change(pool);
pool             2450 drivers/md/dm-thin.c 		error_retry_list_with_code(pool, BLK_STS_NOSPC);
pool             2471 drivers/md/dm-thin.c static void pool_work_wait(struct pool_work *pw, struct pool *pool,
pool             2476 drivers/md/dm-thin.c 	queue_work(pool->wq, &pw->worker);
pool             2512 drivers/md/dm-thin.c 	pool_work_wait(&w.pw, tc->pool, fn);
pool             2522 drivers/md/dm-thin.c static void set_discard_callbacks(struct pool *pool)
pool             2524 drivers/md/dm-thin.c 	struct pool_c *pt = pool->ti->private;
pool             2527 drivers/md/dm-thin.c 		pool->process_discard_cell = process_discard_cell_passdown;
pool             2528 drivers/md/dm-thin.c 		pool->process_prepared_discard = process_prepared_discard_passdown_pt1;
pool             2529 drivers/md/dm-thin.c 		pool->process_prepared_discard_pt2 = process_prepared_discard_passdown_pt2;
pool             2531 drivers/md/dm-thin.c 		pool->process_discard_cell = process_discard_cell_no_passdown;
pool             2532 drivers/md/dm-thin.c 		pool->process_prepared_discard = process_prepared_discard_no_passdown;
pool             2536 drivers/md/dm-thin.c static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
pool             2538 drivers/md/dm-thin.c 	struct pool_c *pt = pool->ti->private;
pool             2539 drivers/md/dm-thin.c 	bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
pool             2540 drivers/md/dm-thin.c 	enum pool_mode old_mode = get_pool_mode(pool);
pool             2549 drivers/md/dm-thin.c 		      dm_device_name(pool->pool_md));
pool             2565 drivers/md/dm-thin.c 		dm_pool_metadata_read_only(pool->pmd);
pool             2566 drivers/md/dm-thin.c 		pool->process_bio = process_bio_fail;
pool             2567 drivers/md/dm-thin.c 		pool->process_discard = process_bio_fail;
pool             2568 drivers/md/dm-thin.c 		pool->process_cell = process_cell_fail;
pool             2569 drivers/md/dm-thin.c 		pool->process_discard_cell = process_cell_fail;
pool             2570 drivers/md/dm-thin.c 		pool->process_prepared_mapping = process_prepared_mapping_fail;
pool             2571 drivers/md/dm-thin.c 		pool->process_prepared_discard = process_prepared_discard_fail;
pool             2573 drivers/md/dm-thin.c 		error_retry_list(pool);
pool             2578 drivers/md/dm-thin.c 		dm_pool_metadata_read_only(pool->pmd);
pool             2579 drivers/md/dm-thin.c 		pool->process_bio = process_bio_read_only;
pool             2580 drivers/md/dm-thin.c 		pool->process_discard = process_bio_success;
pool             2581 drivers/md/dm-thin.c 		pool->process_cell = process_cell_read_only;
pool             2582 drivers/md/dm-thin.c 		pool->process_discard_cell = process_cell_success;
pool             2583 drivers/md/dm-thin.c 		pool->process_prepared_mapping = process_prepared_mapping_fail;
pool             2584 drivers/md/dm-thin.c 		pool->process_prepared_discard = process_prepared_discard_success;
pool             2586 drivers/md/dm-thin.c 		error_retry_list(pool);
pool             2598 drivers/md/dm-thin.c 		pool->out_of_data_space = true;
pool             2599 drivers/md/dm-thin.c 		pool->process_bio = process_bio_read_only;
pool             2600 drivers/md/dm-thin.c 		pool->process_discard = process_discard_bio;
pool             2601 drivers/md/dm-thin.c 		pool->process_cell = process_cell_read_only;
pool             2602 drivers/md/dm-thin.c 		pool->process_prepared_mapping = process_prepared_mapping;
pool             2603 drivers/md/dm-thin.c 		set_discard_callbacks(pool);
pool             2605 drivers/md/dm-thin.c 		if (!pool->pf.error_if_no_space && no_space_timeout)
pool             2606 drivers/md/dm-thin.c 			queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
pool             2611 drivers/md/dm-thin.c 			cancel_delayed_work_sync(&pool->no_space_timeout);
pool             2612 drivers/md/dm-thin.c 		pool->out_of_data_space = false;
pool             2613 drivers/md/dm-thin.c 		pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
pool             2614 drivers/md/dm-thin.c 		dm_pool_metadata_read_write(pool->pmd);
pool             2615 drivers/md/dm-thin.c 		pool->process_bio = process_bio;
pool             2616 drivers/md/dm-thin.c 		pool->process_discard = process_discard_bio;
pool             2617 drivers/md/dm-thin.c 		pool->process_cell = process_cell;
pool             2618 drivers/md/dm-thin.c 		pool->process_prepared_mapping = process_prepared_mapping;
pool             2619 drivers/md/dm-thin.c 		set_discard_callbacks(pool);
pool             2623 drivers/md/dm-thin.c 	pool->pf.mode = new_mode;
pool             2631 drivers/md/dm-thin.c 		notify_of_pool_mode_change(pool);
pool             2634 drivers/md/dm-thin.c static void abort_transaction(struct pool *pool)
pool             2636 drivers/md/dm-thin.c 	const char *dev_name = dm_device_name(pool->pool_md);
pool             2639 drivers/md/dm-thin.c 	if (dm_pool_abort_metadata(pool->pmd)) {
pool             2641 drivers/md/dm-thin.c 		set_pool_mode(pool, PM_FAIL);
pool             2644 drivers/md/dm-thin.c 	if (dm_pool_metadata_set_needs_check(pool->pmd)) {
pool             2646 drivers/md/dm-thin.c 		set_pool_mode(pool, PM_FAIL);
pool             2650 drivers/md/dm-thin.c static void metadata_operation_failed(struct pool *pool, const char *op, int r)
pool             2653 drivers/md/dm-thin.c 		    dm_device_name(pool->pool_md), op, r);
pool             2655 drivers/md/dm-thin.c 	abort_transaction(pool);
pool             2656 drivers/md/dm-thin.c 	set_pool_mode(pool, PM_READ_ONLY);
pool             2671 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool             2677 drivers/md/dm-thin.c 	wake_worker(pool);
pool             2682 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool             2684 drivers/md/dm-thin.c 	throttle_lock(&pool->throttle);
pool             2686 drivers/md/dm-thin.c 	throttle_unlock(&pool->throttle);
pool             2692 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool             2694 drivers/md/dm-thin.c 	throttle_lock(&pool->throttle);
pool             2698 drivers/md/dm-thin.c 	throttle_unlock(&pool->throttle);
pool             2700 drivers/md/dm-thin.c 	wake_worker(pool);
pool             2735 drivers/md/dm-thin.c 	if (get_pool_mode(tc->pool) == PM_FAIL) {
pool             2750 drivers/md/dm-thin.c 	if (bio_detain(tc->pool, &key, bio, &virt_cell))
pool             2780 drivers/md/dm-thin.c 		if (bio_detain(tc->pool, &key, bio, &data_cell)) {
pool             2785 drivers/md/dm-thin.c 		inc_all_io_entry(tc->pool, bio);
pool             2814 drivers/md/dm-thin.c 	if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
pool             2821 drivers/md/dm-thin.c static void requeue_bios(struct pool *pool)
pool             2827 drivers/md/dm-thin.c 	list_for_each_entry_rcu(tc, &pool->active_thins, list) {
pool             2857 drivers/md/dm-thin.c 	struct pool *pool = pt->pool;
pool             2869 drivers/md/dm-thin.c 	else if (data_limits->max_discard_sectors < pool->sectors_per_block)
pool             2878 drivers/md/dm-thin.c static int bind_control_target(struct pool *pool, struct dm_target *ti)
pool             2885 drivers/md/dm-thin.c 	enum pool_mode old_mode = get_pool_mode(pool);
pool             2895 drivers/md/dm-thin.c 	pool->ti = ti;
pool             2896 drivers/md/dm-thin.c 	pool->pf = pt->adjusted_pf;
pool             2897 drivers/md/dm-thin.c 	pool->low_water_blocks = pt->low_water_blocks;
pool             2899 drivers/md/dm-thin.c 	set_pool_mode(pool, new_mode);
pool             2904 drivers/md/dm-thin.c static void unbind_control_target(struct pool *pool, struct dm_target *ti)
pool             2906 drivers/md/dm-thin.c 	if (pool->ti == ti)
pool             2907 drivers/md/dm-thin.c 		pool->ti = NULL;
pool             2923 drivers/md/dm-thin.c static void __pool_destroy(struct pool *pool)
pool             2925 drivers/md/dm-thin.c 	__pool_table_remove(pool);
pool             2927 drivers/md/dm-thin.c 	vfree(pool->cell_sort_array);
pool             2928 drivers/md/dm-thin.c 	if (dm_pool_metadata_close(pool->pmd) < 0)
pool             2931 drivers/md/dm-thin.c 	dm_bio_prison_destroy(pool->prison);
pool             2932 drivers/md/dm-thin.c 	dm_kcopyd_client_destroy(pool->copier);
pool             2934 drivers/md/dm-thin.c 	if (pool->wq)
pool             2935 drivers/md/dm-thin.c 		destroy_workqueue(pool->wq);
pool             2937 drivers/md/dm-thin.c 	if (pool->next_mapping)
pool             2938 drivers/md/dm-thin.c 		mempool_free(pool->next_mapping, &pool->mapping_pool);
pool             2939 drivers/md/dm-thin.c 	mempool_exit(&pool->mapping_pool);
pool             2940 drivers/md/dm-thin.c 	dm_deferred_set_destroy(pool->shared_read_ds);
pool             2941 drivers/md/dm-thin.c 	dm_deferred_set_destroy(pool->all_io_ds);
pool             2942 drivers/md/dm-thin.c 	kfree(pool);
pool             2947 drivers/md/dm-thin.c static struct pool *pool_create(struct mapped_device *pool_md,
pool             2955 drivers/md/dm-thin.c 	struct pool *pool;
pool             2962 drivers/md/dm-thin.c 		return (struct pool *)pmd;
pool             2965 drivers/md/dm-thin.c 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
pool             2966 drivers/md/dm-thin.c 	if (!pool) {
pool             2972 drivers/md/dm-thin.c 	pool->pmd = pmd;
pool             2973 drivers/md/dm-thin.c 	pool->sectors_per_block = block_size;
pool             2975 drivers/md/dm-thin.c 		pool->sectors_per_block_shift = -1;
pool             2977 drivers/md/dm-thin.c 		pool->sectors_per_block_shift = __ffs(block_size);
pool             2978 drivers/md/dm-thin.c 	pool->low_water_blocks = 0;
pool             2979 drivers/md/dm-thin.c 	pool_features_init(&pool->pf);
pool             2980 drivers/md/dm-thin.c 	pool->prison = dm_bio_prison_create();
pool             2981 drivers/md/dm-thin.c 	if (!pool->prison) {
pool             2987 drivers/md/dm-thin.c 	pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
pool             2988 drivers/md/dm-thin.c 	if (IS_ERR(pool->copier)) {
pool             2989 drivers/md/dm-thin.c 		r = PTR_ERR(pool->copier);
pool             2999 drivers/md/dm-thin.c 	pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
pool             3000 drivers/md/dm-thin.c 	if (!pool->wq) {
pool             3006 drivers/md/dm-thin.c 	throttle_init(&pool->throttle);
pool             3007 drivers/md/dm-thin.c 	INIT_WORK(&pool->worker, do_worker);
pool             3008 drivers/md/dm-thin.c 	INIT_DELAYED_WORK(&pool->waker, do_waker);
pool             3009 drivers/md/dm-thin.c 	INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
pool             3010 drivers/md/dm-thin.c 	spin_lock_init(&pool->lock);
pool             3011 drivers/md/dm-thin.c 	bio_list_init(&pool->deferred_flush_bios);
pool             3012 drivers/md/dm-thin.c 	bio_list_init(&pool->deferred_flush_completions);
pool             3013 drivers/md/dm-thin.c 	INIT_LIST_HEAD(&pool->prepared_mappings);
pool             3014 drivers/md/dm-thin.c 	INIT_LIST_HEAD(&pool->prepared_discards);
pool             3015 drivers/md/dm-thin.c 	INIT_LIST_HEAD(&pool->prepared_discards_pt2);
pool             3016 drivers/md/dm-thin.c 	INIT_LIST_HEAD(&pool->active_thins);
pool             3017 drivers/md/dm-thin.c 	pool->low_water_triggered = false;
pool             3018 drivers/md/dm-thin.c 	pool->suspended = true;
pool             3019 drivers/md/dm-thin.c 	pool->out_of_data_space = false;
pool             3021 drivers/md/dm-thin.c 	pool->shared_read_ds = dm_deferred_set_create();
pool             3022 drivers/md/dm-thin.c 	if (!pool->shared_read_ds) {
pool             3028 drivers/md/dm-thin.c 	pool->all_io_ds = dm_deferred_set_create();
pool             3029 drivers/md/dm-thin.c 	if (!pool->all_io_ds) {
pool             3035 drivers/md/dm-thin.c 	pool->next_mapping = NULL;
pool             3036 drivers/md/dm-thin.c 	r = mempool_init_slab_pool(&pool->mapping_pool, MAPPING_POOL_SIZE,
pool             3044 drivers/md/dm-thin.c 	pool->cell_sort_array =
pool             3046 drivers/md/dm-thin.c 				   sizeof(*pool->cell_sort_array)));
pool             3047 drivers/md/dm-thin.c 	if (!pool->cell_sort_array) {
pool             3053 drivers/md/dm-thin.c 	pool->ref_count = 1;
pool             3054 drivers/md/dm-thin.c 	pool->last_commit_jiffies = jiffies;
pool             3055 drivers/md/dm-thin.c 	pool->pool_md = pool_md;
pool             3056 drivers/md/dm-thin.c 	pool->md_dev = metadata_dev;
pool             3057 drivers/md/dm-thin.c 	pool->data_dev = data_dev;
pool             3058 drivers/md/dm-thin.c 	__pool_table_insert(pool);
pool             3060 drivers/md/dm-thin.c 	return pool;
pool             3063 drivers/md/dm-thin.c 	mempool_exit(&pool->mapping_pool);
pool             3065 drivers/md/dm-thin.c 	dm_deferred_set_destroy(pool->all_io_ds);
pool             3067 drivers/md/dm-thin.c 	dm_deferred_set_destroy(pool->shared_read_ds);
pool             3069 drivers/md/dm-thin.c 	destroy_workqueue(pool->wq);
pool             3071 drivers/md/dm-thin.c 	dm_kcopyd_client_destroy(pool->copier);
pool             3073 drivers/md/dm-thin.c 	dm_bio_prison_destroy(pool->prison);
pool             3075 drivers/md/dm-thin.c 	kfree(pool);
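The pool_create() entries (drivers/md/dm-thin.c 2947-3075) follow the standard constructor shape: kzalloc the object, create each resource in turn, and unwind through goto labels in reverse order on failure. A condensed sketch of that unwind pattern using two of the same resource types (an ordered workqueue and an embedded mempool); struct my_obj and the labels are illustrative:

#include <linux/err.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_obj {
	struct workqueue_struct *wq;
	mempool_t pool;
};

static struct my_obj *my_obj_create(struct kmem_cache *cache)
{
	struct my_obj *o;
	int r;

	o = kzalloc(sizeof(*o), GFP_KERNEL);
	if (!o)
		return ERR_PTR(-ENOMEM);

	o->wq = alloc_ordered_workqueue("my-obj", WQ_MEM_RECLAIM);
	if (!o->wq) {
		r = -ENOMEM;
		goto bad_wq;
	}

	r = mempool_init_slab_pool(&o->pool, 16, cache);
	if (r)
		goto bad_mempool;

	return o;

bad_mempool:
	destroy_workqueue(o->wq);
bad_wq:
	kfree(o);
	return ERR_PTR(r);
}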
pool             3083 drivers/md/dm-thin.c static void __pool_inc(struct pool *pool)
pool             3086 drivers/md/dm-thin.c 	pool->ref_count++;
pool             3089 drivers/md/dm-thin.c static void __pool_dec(struct pool *pool)
pool             3092 drivers/md/dm-thin.c 	BUG_ON(!pool->ref_count);
pool             3093 drivers/md/dm-thin.c 	if (!--pool->ref_count)
pool             3094 drivers/md/dm-thin.c 		__pool_destroy(pool);
pool             3097 drivers/md/dm-thin.c static struct pool *__pool_find(struct mapped_device *pool_md,
pool             3103 drivers/md/dm-thin.c 	struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
pool             3105 drivers/md/dm-thin.c 	if (pool) {
pool             3106 drivers/md/dm-thin.c 		if (pool->pool_md != pool_md) {
pool             3110 drivers/md/dm-thin.c 		if (pool->data_dev != data_dev) {
pool             3114 drivers/md/dm-thin.c 		__pool_inc(pool);
pool             3117 drivers/md/dm-thin.c 		pool = __pool_table_lookup(pool_md);
pool             3118 drivers/md/dm-thin.c 		if (pool) {
pool             3119 drivers/md/dm-thin.c 			if (pool->md_dev != metadata_dev || pool->data_dev != data_dev) {
pool             3123 drivers/md/dm-thin.c 			__pool_inc(pool);
pool             3126 drivers/md/dm-thin.c 			pool = pool_create(pool_md, metadata_dev, data_dev, block_size, read_only, error);
pool             3131 drivers/md/dm-thin.c 	return pool;
pool             3143 drivers/md/dm-thin.c 	unbind_control_target(pt->pool, ti);
pool             3144 drivers/md/dm-thin.c 	__pool_dec(pt->pool);
pool             3205 drivers/md/dm-thin.c 	struct pool *pool = context;
pool             3208 drivers/md/dm-thin.c 	       dm_device_name(pool->pool_md));
pool             3210 drivers/md/dm-thin.c 	dm_table_event(pool->ti->table);
pool             3304 drivers/md/dm-thin.c 	struct pool *pool;
pool             3379 drivers/md/dm-thin.c 	pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, data_dev->bdev,
pool             3381 drivers/md/dm-thin.c 	if (IS_ERR(pool)) {
pool             3382 drivers/md/dm-thin.c 		r = PTR_ERR(pool);
pool             3392 drivers/md/dm-thin.c 	if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
pool             3398 drivers/md/dm-thin.c 	pt->pool = pool;
pool             3424 drivers/md/dm-thin.c 	r = dm_pool_register_metadata_threshold(pt->pool->pmd,
pool             3427 drivers/md/dm-thin.c 						pool);
pool             3439 drivers/md/dm-thin.c 	__pool_dec(pool);
pool             3456 drivers/md/dm-thin.c 	struct pool *pool = pt->pool;
pool             3462 drivers/md/dm-thin.c 	spin_lock_irqsave(&pool->lock, flags);
pool             3465 drivers/md/dm-thin.c 	spin_unlock_irqrestore(&pool->lock, flags);
pool             3474 drivers/md/dm-thin.c 	struct pool *pool = pt->pool;
pool             3480 drivers/md/dm-thin.c 	(void) sector_div(data_size, pool->sectors_per_block);
pool             3482 drivers/md/dm-thin.c 	r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
pool             3485 drivers/md/dm-thin.c 		      dm_device_name(pool->pool_md));
pool             3491 drivers/md/dm-thin.c 		      dm_device_name(pool->pool_md),
pool             3496 drivers/md/dm-thin.c 		if (dm_pool_metadata_needs_check(pool->pmd)) {
pool             3498 drivers/md/dm-thin.c 			      dm_device_name(pool->pool_md));
pool             3504 drivers/md/dm-thin.c 			       dm_device_name(pool->pool_md),
pool             3506 drivers/md/dm-thin.c 		r = dm_pool_resize_data_dev(pool->pmd, data_size);
pool             3508 drivers/md/dm-thin.c 			metadata_operation_failed(pool, "dm_pool_resize_data_dev", r);
pool             3522 drivers/md/dm-thin.c 	struct pool *pool = pt->pool;
pool             3527 drivers/md/dm-thin.c 	metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);
pool             3529 drivers/md/dm-thin.c 	r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
pool             3532 drivers/md/dm-thin.c 		      dm_device_name(pool->pool_md));
pool             3538 drivers/md/dm-thin.c 		      dm_device_name(pool->pool_md),
pool             3543 drivers/md/dm-thin.c 		if (dm_pool_metadata_needs_check(pool->pmd)) {
pool             3545 drivers/md/dm-thin.c 			      dm_device_name(pool->pool_md));
pool             3549 drivers/md/dm-thin.c 		warn_if_metadata_device_too_big(pool->md_dev);
pool             3551 drivers/md/dm-thin.c 		       dm_device_name(pool->pool_md),
pool             3554 drivers/md/dm-thin.c 		if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE)
pool             3555 drivers/md/dm-thin.c 			set_pool_mode(pool, PM_WRITE);
pool             3557 drivers/md/dm-thin.c 		r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
pool             3559 drivers/md/dm-thin.c 			metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
pool             3585 drivers/md/dm-thin.c 	struct pool *pool = pt->pool;
pool             3590 drivers/md/dm-thin.c 	r = bind_control_target(pool, ti);
pool             3594 drivers/md/dm-thin.c 	dm_pool_register_pre_commit_callback(pool->pmd,
pool             3606 drivers/md/dm-thin.c 		(void) commit(pool);
pool             3611 drivers/md/dm-thin.c static void pool_suspend_active_thins(struct pool *pool)
pool             3616 drivers/md/dm-thin.c 	tc = get_first_thin(pool);
pool             3619 drivers/md/dm-thin.c 		tc = get_next_thin(pool, tc);
pool             3623 drivers/md/dm-thin.c static void pool_resume_active_thins(struct pool *pool)
pool             3628 drivers/md/dm-thin.c 	tc = get_first_thin(pool);
pool             3631 drivers/md/dm-thin.c 		tc = get_next_thin(pool, tc);
pool             3638 drivers/md/dm-thin.c 	struct pool *pool = pt->pool;
pool             3645 drivers/md/dm-thin.c 	requeue_bios(pool);
pool             3646 drivers/md/dm-thin.c 	pool_resume_active_thins(pool);
pool             3648 drivers/md/dm-thin.c 	spin_lock_irqsave(&pool->lock, flags);
pool             3649 drivers/md/dm-thin.c 	pool->low_water_triggered = false;
pool             3650 drivers/md/dm-thin.c 	pool->suspended = false;
pool             3651 drivers/md/dm-thin.c 	spin_unlock_irqrestore(&pool->lock, flags);
pool             3653 drivers/md/dm-thin.c 	do_waker(&pool->waker.work);
pool             3659 drivers/md/dm-thin.c 	struct pool *pool = pt->pool;
pool             3662 drivers/md/dm-thin.c 	spin_lock_irqsave(&pool->lock, flags);
pool             3663 drivers/md/dm-thin.c 	pool->suspended = true;
pool             3664 drivers/md/dm-thin.c 	spin_unlock_irqrestore(&pool->lock, flags);
pool             3666 drivers/md/dm-thin.c 	pool_suspend_active_thins(pool);
pool             3672 drivers/md/dm-thin.c 	struct pool *pool = pt->pool;
pool             3675 drivers/md/dm-thin.c 	pool_resume_active_thins(pool);
pool             3677 drivers/md/dm-thin.c 	spin_lock_irqsave(&pool->lock, flags);
pool             3678 drivers/md/dm-thin.c 	pool->suspended = false;
pool             3679 drivers/md/dm-thin.c 	spin_unlock_irqrestore(&pool->lock, flags);
pool             3685 drivers/md/dm-thin.c 	struct pool *pool = pt->pool;
pool             3687 drivers/md/dm-thin.c 	cancel_delayed_work_sync(&pool->waker);
pool             3688 drivers/md/dm-thin.c 	cancel_delayed_work_sync(&pool->no_space_timeout);
pool             3689 drivers/md/dm-thin.c 	flush_workqueue(pool->wq);
pool             3690 drivers/md/dm-thin.c 	(void) commit(pool);
pool             3716 drivers/md/dm-thin.c static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
pool             3729 drivers/md/dm-thin.c 	r = dm_pool_create_thin(pool->pmd, dev_id);
pool             3739 drivers/md/dm-thin.c static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
pool             3757 drivers/md/dm-thin.c 	r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
pool             3767 drivers/md/dm-thin.c static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
pool             3780 drivers/md/dm-thin.c 	r = dm_pool_delete_thin_device(pool->pmd, dev_id);
pool             3787 drivers/md/dm-thin.c static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
pool             3806 drivers/md/dm-thin.c 	r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
pool             3816 drivers/md/dm-thin.c static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
pool             3824 drivers/md/dm-thin.c 	(void) commit(pool);
pool             3826 drivers/md/dm-thin.c 	r = dm_pool_reserve_metadata_snap(pool->pmd);
pool             3833 drivers/md/dm-thin.c static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
pool             3841 drivers/md/dm-thin.c 	r = dm_pool_release_metadata_snap(pool->pmd);
pool             3862 drivers/md/dm-thin.c 	struct pool *pool = pt->pool;
pool             3864 drivers/md/dm-thin.c 	if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) {
pool             3866 drivers/md/dm-thin.c 		      dm_device_name(pool->pool_md));
pool             3871 drivers/md/dm-thin.c 		r = process_create_thin_mesg(argc, argv, pool);
pool             3874 drivers/md/dm-thin.c 		r = process_create_snap_mesg(argc, argv, pool);
pool             3877 drivers/md/dm-thin.c 		r = process_delete_mesg(argc, argv, pool);
pool             3880 drivers/md/dm-thin.c 		r = process_set_transaction_id_mesg(argc, argv, pool);
pool             3883 drivers/md/dm-thin.c 		r = process_reserve_metadata_snap_mesg(argc, argv, pool);
pool             3886 drivers/md/dm-thin.c 		r = process_release_metadata_snap_mesg(argc, argv, pool);
pool             3892 drivers/md/dm-thin.c 		(void) commit(pool);
pool             3942 drivers/md/dm-thin.c 	struct pool *pool = pt->pool;
pool             3946 drivers/md/dm-thin.c 		if (get_pool_mode(pool) == PM_FAIL) {
pool             3953 drivers/md/dm-thin.c 			(void) commit(pool);
pool             3955 drivers/md/dm-thin.c 		r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
pool             3958 drivers/md/dm-thin.c 			      dm_device_name(pool->pool_md), r);
pool             3962 drivers/md/dm-thin.c 		r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
pool             3965 drivers/md/dm-thin.c 			      dm_device_name(pool->pool_md), r);
pool             3969 drivers/md/dm-thin.c 		r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
pool             3972 drivers/md/dm-thin.c 			      dm_device_name(pool->pool_md), r);
pool             3976 drivers/md/dm-thin.c 		r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
pool             3979 drivers/md/dm-thin.c 			      dm_device_name(pool->pool_md), r);
pool             3983 drivers/md/dm-thin.c 		r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
pool             3986 drivers/md/dm-thin.c 			      dm_device_name(pool->pool_md), r);
pool             3990 drivers/md/dm-thin.c 		r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
pool             3993 drivers/md/dm-thin.c 			      dm_device_name(pool->pool_md), r);
pool             4009 drivers/md/dm-thin.c 		mode = get_pool_mode(pool);
pool             4017 drivers/md/dm-thin.c 		if (!pool->pf.discard_enabled)
pool             4019 drivers/md/dm-thin.c 		else if (pool->pf.discard_passdown)
pool             4024 drivers/md/dm-thin.c 		if (pool->pf.error_if_no_space)
pool             4029 drivers/md/dm-thin.c 		if (dm_pool_metadata_needs_check(pool->pmd))
pool             4042 drivers/md/dm-thin.c 		       (unsigned long)pool->sectors_per_block,
pool             4064 drivers/md/dm-thin.c 	struct pool *pool = pt->pool;
pool             4076 drivers/md/dm-thin.c 	if (limits->max_sectors < pool->sectors_per_block) {
pool             4077 drivers/md/dm-thin.c 		while (!is_factor(pool->sectors_per_block, limits->max_sectors)) {
pool             4088 drivers/md/dm-thin.c 	if (io_opt_sectors < pool->sectors_per_block ||
pool             4089 drivers/md/dm-thin.c 	    !is_factor(io_opt_sectors, pool->sectors_per_block)) {
pool             4090 drivers/md/dm-thin.c 		if (is_factor(pool->sectors_per_block, limits->max_sectors))
pool             4093 drivers/md/dm-thin.c 			blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT);
pool             4094 drivers/md/dm-thin.c 		blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
pool             4160 drivers/md/dm-thin.c 	spin_lock_irqsave(&tc->pool->lock, flags);
pool             4162 drivers/md/dm-thin.c 	spin_unlock_irqrestore(&tc->pool->lock, flags);
pool             4170 drivers/md/dm-thin.c 	__pool_dec(tc->pool);
pool             4256 drivers/md/dm-thin.c 	tc->pool = __pool_table_lookup(pool_md);
pool             4257 drivers/md/dm-thin.c 	if (!tc->pool) {
pool             4262 drivers/md/dm-thin.c 	__pool_inc(tc->pool);
pool             4264 drivers/md/dm-thin.c 	if (get_pool_mode(tc->pool) == PM_FAIL) {
pool             4270 drivers/md/dm-thin.c 	r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
pool             4276 drivers/md/dm-thin.c 	r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
pool             4285 drivers/md/dm-thin.c 	if (tc->pool->pf.discard_enabled) {
pool             4292 drivers/md/dm-thin.c 	spin_lock_irqsave(&tc->pool->lock, flags);
pool             4293 drivers/md/dm-thin.c 	if (tc->pool->suspended) {
pool             4294 drivers/md/dm-thin.c 		spin_unlock_irqrestore(&tc->pool->lock, flags);
pool             4302 drivers/md/dm-thin.c 	list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
pool             4303 drivers/md/dm-thin.c 	spin_unlock_irqrestore(&tc->pool->lock, flags);
pool             4319 drivers/md/dm-thin.c 	__pool_dec(tc->pool);
pool             4349 drivers/md/dm-thin.c 	struct pool *pool = h->tc->pool;
pool             4355 drivers/md/dm-thin.c 		spin_lock_irqsave(&pool->lock, flags);
pool             4360 drivers/md/dm-thin.c 		spin_unlock_irqrestore(&pool->lock, flags);
pool             4367 drivers/md/dm-thin.c 			spin_lock_irqsave(&pool->lock, flags);
pool             4369 drivers/md/dm-thin.c 				list_add_tail(&m->list, &pool->prepared_discards);
pool             4370 drivers/md/dm-thin.c 			spin_unlock_irqrestore(&pool->lock, flags);
pool             4371 drivers/md/dm-thin.c 			wake_worker(pool);
pool             4422 drivers/md/dm-thin.c 	if (get_pool_mode(tc->pool) == PM_FAIL) {
pool             4444 drivers/md/dm-thin.c 			DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
pool             4447 drivers/md/dm-thin.c 						tc->pool->sectors_per_block) - 1);
pool             4473 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool             4479 drivers/md/dm-thin.c 	if (!pool->ti)
pool             4482 drivers/md/dm-thin.c 	blocks = pool->ti->len;
pool             4483 drivers/md/dm-thin.c 	(void) sector_div(blocks, pool->sectors_per_block);
pool             4485 drivers/md/dm-thin.c 		return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
pool             4493 drivers/md/dm-thin.c 	struct pool *pool = tc->pool;
pool             4495 drivers/md/dm-thin.c 	if (!pool->pf.discard_enabled)
pool             4498 drivers/md/dm-thin.c 	limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
pool               75 drivers/md/md-multipath.c 	mempool_free(mp_bh, &conf->pool);
pool              111 drivers/md/md-multipath.c 	mp_bh = mempool_alloc(&conf->pool, GFP_NOIO);
pool              119 drivers/md/md-multipath.c 		mempool_free(mp_bh, &conf->pool);
pool              427 drivers/md/md-multipath.c 	ret = mempool_init_kmalloc_pool(&conf->pool, NR_RESERVED_BUFS,
pool              451 drivers/md/md-multipath.c 	mempool_exit(&conf->pool);
pool              463 drivers/md/md-multipath.c 	mempool_exit(&conf->pool);
pool               16 drivers/md/md-multipath.h 	mempool_t		pool;
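The md-multipath entries show an embedded mempool (a mempool_t struct member) managed with mempool_init_kmalloc_pool()/mempool_exit() rather than the pointer-returning mempool_create()/mempool_destroy() pair. A short, self-contained sketch of that lifecycle; the struct names, reserve count and GFP flag mirror the listing but are placeholders:

#include <linux/mempool.h>
#include <linux/slab.h>

struct my_conf {
	mempool_t pool;
};

struct my_buf {
	int data;
};

static int my_conf_init(struct my_conf *conf)
{
	/* Keep at least 16 kmalloc'd buffers in reserve. */
	return mempool_init_kmalloc_pool(&conf->pool, 16, sizeof(struct my_buf));
}

static void my_conf_use(struct my_conf *conf)
{
	struct my_buf *b = mempool_alloc(&conf->pool, GFP_NOIO);

	/* ... use b ... */
	mempool_free(b, &conf->pool);
}

static void my_conf_exit(struct my_conf *conf)
{
	mempool_exit(&conf->pool);
}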
pool             2945 drivers/media/platform/coda/coda-common.c 	struct gen_pool *pool;
pool             3007 drivers/media/platform/coda/coda-common.c 	pool = of_gen_pool_get(np, "iram", 0);
pool             3008 drivers/media/platform/coda/coda-common.c 	if (!pool && pdata)
pool             3009 drivers/media/platform/coda/coda-common.c 		pool = gen_pool_get(pdata->iram_dev, NULL);
pool             3010 drivers/media/platform/coda/coda-common.c 	if (!pool) {
pool             3014 drivers/media/platform/coda/coda-common.c 	dev->iram_pool = pool;
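The coda-common.c entries show the common lookup order for an on-chip SRAM pool: try the "iram" phandle via of_gen_pool_get(), then fall back to gen_pool_get() on a platform-data supplied device. A hedged sketch of that probe-time lookup; the helper and its parameters are placeholders:

#include <linux/genalloc.h>
#include <linux/of.h>

/* Returns the iram pool, or NULL if neither source provides one. */
static struct gen_pool *find_iram_pool(struct device_node *np,
				       struct device *iram_dev)
{
	struct gen_pool *pool;

	pool = of_gen_pool_get(np, "iram", 0);
	if (!pool && iram_dev)
		pool = gen_pool_get(iram_dev, NULL);

	return pool;
}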
pool               48 drivers/media/platform/vsp1/vsp1_clu.c 	dlb = vsp1_dl_body_get(clu->pool);
pool              225 drivers/media/platform/vsp1/vsp1_clu.c 	vsp1_dl_body_pool_destroy(clu->pool);
pool              263 drivers/media/platform/vsp1/vsp1_clu.c 	clu->pool = vsp1_dl_body_pool_create(clu->entity.vsp1, 3, CLU_SIZE + 1,
pool              265 drivers/media/platform/vsp1/vsp1_clu.c 	if (!clu->pool)
pool               35 drivers/media/platform/vsp1/vsp1_clu.h 	struct vsp1_dl_body_pool *pool;
pool              110 drivers/media/platform/vsp1/vsp1_dl.c 	struct vsp1_dl_body_pool *pool;
pool              227 drivers/media/platform/vsp1/vsp1_dl.c 	struct vsp1_dl_body_pool *pool;
pool              251 drivers/media/platform/vsp1/vsp1_dl.c 	struct vsp1_dl_body_pool *pool;
pool              255 drivers/media/platform/vsp1/vsp1_dl.c 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
pool              256 drivers/media/platform/vsp1/vsp1_dl.c 	if (!pool)
pool              259 drivers/media/platform/vsp1/vsp1_dl.c 	pool->vsp1 = vsp1;
pool              268 drivers/media/platform/vsp1/vsp1_dl.c 	pool->size = dlb_size * num_bodies;
pool              270 drivers/media/platform/vsp1/vsp1_dl.c 	pool->bodies = kcalloc(num_bodies, sizeof(*pool->bodies), GFP_KERNEL);
pool              271 drivers/media/platform/vsp1/vsp1_dl.c 	if (!pool->bodies) {
pool              272 drivers/media/platform/vsp1/vsp1_dl.c 		kfree(pool);
pool              276 drivers/media/platform/vsp1/vsp1_dl.c 	pool->mem = dma_alloc_wc(vsp1->bus_master, pool->size, &pool->dma,
pool              278 drivers/media/platform/vsp1/vsp1_dl.c 	if (!pool->mem) {
pool              279 drivers/media/platform/vsp1/vsp1_dl.c 		kfree(pool->bodies);
pool              280 drivers/media/platform/vsp1/vsp1_dl.c 		kfree(pool);
pool              284 drivers/media/platform/vsp1/vsp1_dl.c 	spin_lock_init(&pool->lock);
pool              285 drivers/media/platform/vsp1/vsp1_dl.c 	INIT_LIST_HEAD(&pool->free);
pool              288 drivers/media/platform/vsp1/vsp1_dl.c 		struct vsp1_dl_body *dlb = &pool->bodies[i];
pool              290 drivers/media/platform/vsp1/vsp1_dl.c 		dlb->pool = pool;
pool              293 drivers/media/platform/vsp1/vsp1_dl.c 		dlb->dma = pool->dma + i * dlb_size;
pool              294 drivers/media/platform/vsp1/vsp1_dl.c 		dlb->entries = pool->mem + i * dlb_size;
pool              296 drivers/media/platform/vsp1/vsp1_dl.c 		list_add_tail(&dlb->free, &pool->free);
pool              299 drivers/media/platform/vsp1/vsp1_dl.c 	return pool;
pool              308 drivers/media/platform/vsp1/vsp1_dl.c void vsp1_dl_body_pool_destroy(struct vsp1_dl_body_pool *pool)
pool              310 drivers/media/platform/vsp1/vsp1_dl.c 	if (!pool)
pool              313 drivers/media/platform/vsp1/vsp1_dl.c 	if (pool->mem)
pool              314 drivers/media/platform/vsp1/vsp1_dl.c 		dma_free_wc(pool->vsp1->bus_master, pool->size, pool->mem,
pool              315 drivers/media/platform/vsp1/vsp1_dl.c 			    pool->dma);
pool              317 drivers/media/platform/vsp1/vsp1_dl.c 	kfree(pool->bodies);
pool              318 drivers/media/platform/vsp1/vsp1_dl.c 	kfree(pool);
pool              329 drivers/media/platform/vsp1/vsp1_dl.c struct vsp1_dl_body *vsp1_dl_body_get(struct vsp1_dl_body_pool *pool)
pool              334 drivers/media/platform/vsp1/vsp1_dl.c 	spin_lock_irqsave(&pool->lock, flags);
pool              336 drivers/media/platform/vsp1/vsp1_dl.c 	if (!list_empty(&pool->free)) {
pool              337 drivers/media/platform/vsp1/vsp1_dl.c 		dlb = list_first_entry(&pool->free, struct vsp1_dl_body, free);
pool              342 drivers/media/platform/vsp1/vsp1_dl.c 	spin_unlock_irqrestore(&pool->lock, flags);
pool              365 drivers/media/platform/vsp1/vsp1_dl.c 	spin_lock_irqsave(&dlb->pool->lock, flags);
pool              366 drivers/media/platform/vsp1/vsp1_dl.c 	list_add_tail(&dlb->free, &dlb->pool->free);
pool              367 drivers/media/platform/vsp1/vsp1_dl.c 	spin_unlock_irqrestore(&dlb->pool->lock, flags);
pool              426 drivers/media/platform/vsp1/vsp1_dl.c 	struct vsp1_dl_cmd_pool *pool;
pool              430 drivers/media/platform/vsp1/vsp1_dl.c 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
pool              431 drivers/media/platform/vsp1/vsp1_dl.c 	if (!pool)
pool              434 drivers/media/platform/vsp1/vsp1_dl.c 	spin_lock_init(&pool->lock);
pool              435 drivers/media/platform/vsp1/vsp1_dl.c 	INIT_LIST_HEAD(&pool->free);
pool              437 drivers/media/platform/vsp1/vsp1_dl.c 	pool->cmds = kcalloc(num_cmds, sizeof(*pool->cmds), GFP_KERNEL);
pool              438 drivers/media/platform/vsp1/vsp1_dl.c 	if (!pool->cmds) {
pool              439 drivers/media/platform/vsp1/vsp1_dl.c 		kfree(pool);
pool              447 drivers/media/platform/vsp1/vsp1_dl.c 	pool->size = cmd_size * num_cmds;
pool              448 drivers/media/platform/vsp1/vsp1_dl.c 	pool->mem = dma_alloc_wc(vsp1->bus_master, pool->size, &pool->dma,
pool              450 drivers/media/platform/vsp1/vsp1_dl.c 	if (!pool->mem) {
pool              451 drivers/media/platform/vsp1/vsp1_dl.c 		kfree(pool->cmds);
pool              452 drivers/media/platform/vsp1/vsp1_dl.c 		kfree(pool);
pool              457 drivers/media/platform/vsp1/vsp1_dl.c 		struct vsp1_dl_ext_cmd *cmd = &pool->cmds[i];
pool              463 drivers/media/platform/vsp1/vsp1_dl.c 		cmd->pool = pool;
pool              471 drivers/media/platform/vsp1/vsp1_dl.c 		cmd->cmds = pool->mem + cmd_offset;
pool              472 drivers/media/platform/vsp1/vsp1_dl.c 		cmd->cmd_dma = pool->dma + cmd_offset;
pool              474 drivers/media/platform/vsp1/vsp1_dl.c 		cmd->data = pool->mem + data_offset;
pool              475 drivers/media/platform/vsp1/vsp1_dl.c 		cmd->data_dma = pool->dma + data_offset;
pool              477 drivers/media/platform/vsp1/vsp1_dl.c 		list_add_tail(&cmd->free, &pool->free);
pool              480 drivers/media/platform/vsp1/vsp1_dl.c 	return pool;
pool              484 drivers/media/platform/vsp1/vsp1_dl.c struct vsp1_dl_ext_cmd *vsp1_dl_ext_cmd_get(struct vsp1_dl_cmd_pool *pool)
pool              489 drivers/media/platform/vsp1/vsp1_dl.c 	spin_lock_irqsave(&pool->lock, flags);
pool              491 drivers/media/platform/vsp1/vsp1_dl.c 	if (!list_empty(&pool->free)) {
pool              492 drivers/media/platform/vsp1/vsp1_dl.c 		cmd = list_first_entry(&pool->free, struct vsp1_dl_ext_cmd,
pool              497 drivers/media/platform/vsp1/vsp1_dl.c 	spin_unlock_irqrestore(&pool->lock, flags);
pool              512 drivers/media/platform/vsp1/vsp1_dl.c 	spin_lock_irqsave(&cmd->pool->lock, flags);
pool              513 drivers/media/platform/vsp1/vsp1_dl.c 	list_add_tail(&cmd->free, &cmd->pool->free);
pool              514 drivers/media/platform/vsp1/vsp1_dl.c 	spin_unlock_irqrestore(&cmd->pool->lock, flags);
pool              517 drivers/media/platform/vsp1/vsp1_dl.c static void vsp1_dl_ext_cmd_pool_destroy(struct vsp1_dl_cmd_pool *pool)
pool              519 drivers/media/platform/vsp1/vsp1_dl.c 	if (!pool)
pool              522 drivers/media/platform/vsp1/vsp1_dl.c 	if (pool->mem)
pool              523 drivers/media/platform/vsp1/vsp1_dl.c 		dma_free_wc(pool->vsp1->bus_master, pool->size, pool->mem,
pool              524 drivers/media/platform/vsp1/vsp1_dl.c 			    pool->dma);
pool              526 drivers/media/platform/vsp1/vsp1_dl.c 	kfree(pool->cmds);
pool              527 drivers/media/platform/vsp1/vsp1_dl.c 	kfree(pool);
pool              559 drivers/media/platform/vsp1/vsp1_dl.c 	dl->body0 = vsp1_dl_body_get(dlm->pool);
pool             1084 drivers/media/platform/vsp1/vsp1_dl.c 	return vsp1_dl_body_get(dlm->pool);
pool             1119 drivers/media/platform/vsp1/vsp1_dl.c 	dlm->pool = vsp1_dl_body_pool_create(vsp1, prealloc + 1,
pool             1121 drivers/media/platform/vsp1/vsp1_dl.c 	if (!dlm->pool)
pool             1165 drivers/media/platform/vsp1/vsp1_dl.c 	vsp1_dl_body_pool_destroy(dlm->pool);
pool               38 drivers/media/platform/vsp1/vsp1_dl.h 	struct vsp1_dl_cmd_pool *pool;
pool               71 drivers/media/platform/vsp1/vsp1_dl.h void vsp1_dl_body_pool_destroy(struct vsp1_dl_body_pool *pool);
pool               72 drivers/media/platform/vsp1/vsp1_dl.h struct vsp1_dl_body *vsp1_dl_body_get(struct vsp1_dl_body_pool *pool);
pool               45 drivers/media/platform/vsp1/vsp1_lut.c 	dlb = vsp1_dl_body_get(lut->pool);
pool              184 drivers/media/platform/vsp1/vsp1_lut.c 	vsp1_dl_body_pool_destroy(lut->pool);
pool              221 drivers/media/platform/vsp1/vsp1_lut.c 	lut->pool = vsp1_dl_body_pool_create(vsp1, 3, LUT_SIZE, 0);
pool              222 drivers/media/platform/vsp1/vsp1_lut.c 	if (!lut->pool)
pool               32 drivers/media/platform/vsp1/vsp1_lut.h 	struct vsp1_dl_body_pool *pool;
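The vsp1_dl.c entries show both the body and the extended-command pools built the same way: one dma_alloc_wc() region carved into fixed-size elements, each threaded onto a free list guarded by a spinlock, with get/put simply moving entries on and off that list under the lock. A generic sketch of the get/put half of that pattern; the names are illustrative, not the vsp1 API:

#include <linux/list.h>
#include <linux/spinlock.h>

struct elem {
	struct list_head free;
	/* payload ... */
};

struct elem_pool {
	spinlock_t lock;
	struct list_head free;
};

static struct elem *elem_get(struct elem_pool *pool)
{
	struct elem *e = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free)) {
		e = list_first_entry(&pool->free, struct elem, free);
		list_del(&e->free);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return e;
}

static void elem_put(struct elem_pool *pool, struct elem *e)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	list_add_tail(&e->free, &pool->free);
	spin_unlock_irqrestore(&pool->lock, flags);
}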
pool               81 drivers/misc/sram-exec.c void *sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
pool               91 drivers/misc/sram-exec.c 		if (p->pool == pool)
pool               99 drivers/misc/sram-exec.c 	if (!addr_in_gen_pool(pool, (unsigned long)dst, size))
pool               60 drivers/misc/sram.c 	part->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
pool               62 drivers/misc/sram.c 	if (IS_ERR(part->pool))
pool               63 drivers/misc/sram.c 		return PTR_ERR(part->pool);
pool               65 drivers/misc/sram.c 	ret = gen_pool_add_virt(part->pool, (unsigned long)part->base, start,
pool              102 drivers/misc/sram.c 	if (block->pool) {
pool              141 drivers/misc/sram.c 		if (part->pool &&
pool              142 drivers/misc/sram.c 		    gen_pool_avail(part->pool) < gen_pool_size(part->pool))
pool              207 drivers/misc/sram.c 			block->pool = true;
pool              212 drivers/misc/sram.c 		if ((block->export || block->pool || block->protect_exec) &&
pool              275 drivers/misc/sram.c 		if ((block->export || block->pool || block->protect_exec) &&
pool              301 drivers/misc/sram.c 		ret = gen_pool_add_virt(sram->pool,
pool              374 drivers/misc/sram.c 	sram->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
pool              376 drivers/misc/sram.c 	if (IS_ERR(sram->pool))
pool              377 drivers/misc/sram.c 		return PTR_ERR(sram->pool);
pool              399 drivers/misc/sram.c 		gen_pool_size(sram->pool) / 1024, sram->virt_base);
pool              418 drivers/misc/sram.c 	if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))
pool               11 drivers/misc/sram.h 	struct gen_pool *pool;
pool               21 drivers/misc/sram.h 	struct gen_pool *pool;
pool               33 drivers/misc/sram.h 	bool pool;
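The sram.c entries show a genalloc pool created with devm_gen_pool_create() and seeded with gen_pool_add_virt(), so later allocations can be translated back to physical addresses with gen_pool_virt_to_phys(). A minimal sketch of that setup; the granularity matches the listing, but the helper itself is illustrative:

#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/log2.h>
#include <linux/numa.h>

#define SRAM_GRANULARITY	32	/* as used by drivers/misc/sram.c */

static int sram_pool_setup(struct device *dev, void __iomem *virt,
			   phys_addr_t phys, size_t size,
			   struct gen_pool **out)
{
	struct gen_pool *pool;

	pool = devm_gen_pool_create(dev, ilog2(SRAM_GRANULARITY), NUMA_NO_NODE,
				    NULL);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	/* Register the virt<->phys mapping so gen_pool_virt_to_phys() works. */
	*out = pool;
	return gen_pool_add_virt(pool, (unsigned long)virt, phys, size,
				 NUMA_NO_NODE);
}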
pool              254 drivers/mtd/nand/raw/atmel/nand-controller.c 		struct gen_pool *pool;
pool             2221 drivers/mtd/nand/raw/atmel/nand-controller.c 	nc->sram.pool = of_gen_pool_get(nc->base.dev->of_node,
pool             2223 drivers/mtd/nand/raw/atmel/nand-controller.c 	if (!nc->sram.pool) {
pool             2228 drivers/mtd/nand/raw/atmel/nand-controller.c 	nc->sram.virt = (void __iomem *)gen_pool_dma_alloc(nc->sram.pool,
pool             2251 drivers/mtd/nand/raw/atmel/nand-controller.c 	if (hsmc_nc->sram.pool)
pool             2252 drivers/mtd/nand/raw/atmel/nand-controller.c 		gen_pool_free(hsmc_nc->sram.pool,
pool               48 drivers/mtd/ubi/fastmap-wl.c 				    struct ubi_fm_pool *pool)
pool               53 drivers/mtd/ubi/fastmap-wl.c 	for (i = pool->used; i < pool->size; i++) {
pool               54 drivers/mtd/ubi/fastmap-wl.c 		e = ubi->lookuptbl[pool->pebs[i]];
pool              113 drivers/mtd/ubi/fastmap-wl.c 	struct ubi_fm_pool *pool = &ubi->fm_pool;
pool              120 drivers/mtd/ubi/fastmap-wl.c 	return_unused_pool_pebs(ubi, pool);
pool              123 drivers/mtd/ubi/fastmap-wl.c 	pool->size = 0;
pool              127 drivers/mtd/ubi/fastmap-wl.c 		if (pool->size < pool->max_size) {
pool              135 drivers/mtd/ubi/fastmap-wl.c 			pool->pebs[pool->size] = e->pnum;
pool              136 drivers/mtd/ubi/fastmap-wl.c 			pool->size++;
pool              160 drivers/mtd/ubi/fastmap-wl.c 	pool->used = 0;
pool              200 drivers/mtd/ubi/fastmap-wl.c 	struct ubi_fm_pool *pool = &ubi->fm_pool;
pool              209 drivers/mtd/ubi/fastmap-wl.c 	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
pool              222 drivers/mtd/ubi/fastmap-wl.c 	if (pool->used == pool->size) {
pool              239 drivers/mtd/ubi/fastmap-wl.c 	ubi_assert(pool->used < pool->size);
pool              240 drivers/mtd/ubi/fastmap-wl.c 	ret = pool->pebs[pool->used++];
pool              253 drivers/mtd/ubi/fastmap-wl.c 	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
pool              258 drivers/mtd/ubi/fastmap-wl.c 	if (pool->used == pool->size) {
pool              269 drivers/mtd/ubi/fastmap-wl.c 	pnum = pool->pebs[pool->used++];
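The fastmap-wl.c entries show UBI's fixed-size PEB pools: an array of block numbers plus used/size cursors, drained with pool->pebs[pool->used++] and refilled once used reaches size. A tiny illustrative model of that drain/refill bookkeeping, not the UBI code paths themselves:

struct int_pool {
	int *slots;
	int used;	/* next slot to hand out */
	int size;	/* slots currently filled */
	int max_size;
};

/* Take one entry, or -1 if the pool must be refilled first. */
static int int_pool_take(struct int_pool *p)
{
	if (p->used == p->size)
		return -1;
	return p->slots[p->used++];
}

/* Refill resets both cursors; the caller supplies new entries. */
static void int_pool_refill(struct int_pool *p, const int *entries, int n)
{
	int i;

	p->size = 0;
	for (i = 0; i < n && p->size < p->max_size; i++)
		p->slots[p->size++] = entries[i];
	p->used = 0;
}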
pool              551 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	struct bnx2x_alloc_pool *pool = &fp->page_pool;
pool              554 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	if (!pool->page) {
pool              555 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
pool              556 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		if (unlikely(!pool->page))
pool              559 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		pool->offset = 0;
pool              562 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	mapping = dma_map_page(&bp->pdev->dev, pool->page,
pool              563 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			       pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
pool              569 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	sw_buf->page = pool->page;
pool              570 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	sw_buf->offset = pool->offset;
pool              577 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	pool->offset += SGE_PAGE_SIZE;
pool              578 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
pool              579 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		get_page(pool->page);
pool              581 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		pool->page = NULL;
pool             1005 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h 					  struct bnx2x_alloc_pool *pool)
pool             1007 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h 	if (!pool->page)
pool             1010 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h 	put_page(pool->page);
pool             1012 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h 	pool->page = NULL;
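The bnx2x entries show a page-slicing allocator: one higher-order page is handed out one SGE-sized slice at a time, with get_page() taken for every slice except the last so that the final put_page() releases the backing page only once all users are done. A simplified sketch of that refcount-per-slice idea; the struct, constants and return convention are placeholders, and the real code also DMA-maps each slice:

#include <linux/gfp.h>
#include <linux/mm.h>

struct page_slicer {
	struct page *page;
	unsigned int offset;
	unsigned int slice;	/* bytes handed out per call */
	unsigned int span;	/* total bytes in the backing page(s) */
};

/* Returns the offset of a fresh slice within s->page, or -ENOMEM. */
static int slice_get(struct page_slicer *s, gfp_t gfp, unsigned int order)
{
	unsigned int off;

	if (!s->page) {
		s->page = alloc_pages(gfp, order);
		if (!s->page)
			return -ENOMEM;
		s->offset = 0;
	}

	off = s->offset;
	s->offset += s->slice;

	if (s->span - s->offset >= s->slice)
		get_page(s->page);	/* another slice will follow */
	else
		s->page = NULL;		/* last slice keeps the base reference */

	return off;
}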
pool              122 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 	struct cxgbi_ppm_pool *pool;
pool              126 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 	if (!ppm->pool)
pool              130 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 	pool = per_cpu_ptr(ppm->pool, cpu);
pool              131 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 	spin_lock_bh(&pool->lock);
pool              134 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 	i = ppm_find_unused_entries(pool->bmap, ppm->pool_index_max,
pool              135 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 				    pool->next, count, 0);
pool              137 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 		pool->next = 0;
pool              138 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 		spin_unlock_bh(&pool->lock);
pool              142 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 	pool->next = i + count;
pool              143 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 	if (pool->next >= ppm->pool_index_max)
pool              144 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 		pool->next = 0;
pool              146 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 	spin_unlock_bh(&pool->lock);
pool              150 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 		pool->next);
pool              198 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 		struct cxgbi_ppm_pool *pool;
pool              203 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 		pool = per_cpu_ptr(ppm->pool, cpu);
pool              204 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 		spin_lock_bh(&pool->lock);
pool              205 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 		bitmap_clear(pool->bmap, i, count);
pool              207 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 		if (i < pool->next)
pool              208 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 			pool->next = i;
pool              209 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 		spin_unlock_bh(&pool->lock);
pool              212 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 			 __func__, cpu, i, pool->next);
pool              330 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 	free_percpu(ppm->pool);
pool              396 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 	struct cxgbi_ppm_pool *pool = NULL;
pool              430 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 		pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max);
pool              431 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 		if (!pool) {
pool              485 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 	ppm->pool = pool;
pool              512 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 	free_percpu(pool);
pool              141 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h 	struct cxgbi_ppm_pool __percpu *pool;
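
libcxgb's page-pod manager keeps one pool per CPU, each with its own bitmap, next-fit hint and spinlock, and searches the local bitmap under spin_lock_bh(). A minimal sketch of that pattern, with assumed (non-libcxgb) names:

/*
 * Minimal sketch of the per-cpu bitmap pool pattern; struct and function
 * names are assumptions, not the libcxgb API.
 */
#include <linux/bitmap.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

struct idx_pool {
	spinlock_t	lock;
	unsigned int	next;		/* next-fit search hint */
	unsigned long	bmap[];		/* sized when the per-cpu area is allocated */
};

/* Take 'count' consecutive indices from @cpu's pool; returns -1 when full. */
static int idx_pool_get(struct idx_pool __percpu *pools, unsigned int cpu,
			unsigned int max, unsigned int count)
{
	struct idx_pool *pool = per_cpu_ptr(pools, cpu);
	unsigned long i;

	spin_lock_bh(&pool->lock);
	i = bitmap_find_next_zero_area(pool->bmap, max, pool->next, count, 0);
	if (i >= max)	/* nothing past the hint, retry from the start */
		i = bitmap_find_next_zero_area(pool->bmap, max, 0, count, 0);
	if (i >= max) {
		spin_unlock_bh(&pool->lock);
		return -1;
	}
	bitmap_set(pool->bmap, i, count);
	pool->next = (i + count >= max) ? 0 : i + count;
	spin_unlock_bh(&pool->lock);

	return i;
}

/* Release is the mirror image: spin_lock_bh(); bitmap_clear(bmap, i, count); */
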
pool              518 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		dpaa_bp->pool = bman_new_pool();
pool              519 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		if (!dpaa_bp->pool) {
pool              525 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool);
pool              540 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	bman_free_pool(dpaa_bp->pool);
pool              555 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		ret = bman_acquire(bp->pool, bmb, num);
pool              594 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	bman_free_pool(bp->pool);
pool              757 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		u32 pool;
pool              760 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		ret = qman_alloc_pool(&pool);
pool              763 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 			rx_pool_channel = pool;
pool              778 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
pool              785 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		qman_p_static_dequeue_add(portal, pool);
pool             1281 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	err = bman_release(dpaa_bp->pool, bmb, cnt);
pool               98 drivers/net/ethernet/freescale/dpaa/dpaa_eth.h 	struct bman_pool *pool;
pool               40 drivers/net/ethernet/freescale/fman/fman_muram.c 	struct gen_pool *pool;
pool               75 drivers/net/ethernet/freescale/fman/fman_muram.c 	muram->pool = gen_pool_create(ilog2(64), -1);
pool               76 drivers/net/ethernet/freescale/fman/fman_muram.c 	if (!muram->pool) {
pool               87 drivers/net/ethernet/freescale/fman/fman_muram.c 	ret = gen_pool_add_virt(muram->pool, (unsigned long)vaddr,
pool              102 drivers/net/ethernet/freescale/fman/fman_muram.c 	gen_pool_destroy(muram->pool);
pool              136 drivers/net/ethernet/freescale/fman/fman_muram.c 	vaddr = gen_pool_alloc(muram->pool, size);
pool              158 drivers/net/ethernet/freescale/fman/fman_muram.c 	gen_pool_free(muram->pool, addr, size);
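
fman_muram manages its on-chip memory with the generic allocator: create a gen_pool with a minimum allocation order, seed it with the ioremapped range via gen_pool_add_virt(), then serve requests with gen_pool_alloc()/gen_pool_free(). A compact sketch of that sequence (the region arguments are placeholders):

/*
 * Sketch of the genalloc sequence used above; vaddr/paddr/size are
 * placeholders for whatever region the caller ioremapped.
 */
#include <linux/genalloc.h>
#include <linux/log2.h>

static struct gen_pool *region_pool_init(void *vaddr, phys_addr_t paddr,
					 size_t size)
{
	struct gen_pool *pool;

	/* 64-byte minimum allocation granularity, no NUMA node preference */
	pool = gen_pool_create(ilog2(64), -1);
	if (!pool)
		return NULL;

	if (gen_pool_add_virt(pool, (unsigned long)vaddr, paddr, size, -1)) {
		gen_pool_destroy(pool);
		return NULL;
	}
	return pool;
}

/*
 * Callers then pair:
 *	addr = gen_pool_alloc(pool, len);
 *	...
 *	gen_pool_free(pool, addr, len);
 */
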
pool              241 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 	struct hix5hd2_desc_sw pool[QUEUE_NUMS];
pool              242 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c #define rx_fq		pool[0]
pool              243 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c #define rx_bq		pool[1]
pool              244 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c #define tx_bq		pool[2]
pool              245 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c #define tx_rq		pool[3]
pool              982 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		if (priv->pool[i].desc) {
pool              983 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 			dma_free_coherent(priv->dev, priv->pool[i].size,
pool              984 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 					  priv->pool[i].desc,
pool              985 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 					  priv->pool[i].phys_addr);
pool              986 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 			priv->pool[i].desc = NULL;
pool             1004 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		size = priv->pool[i].count * sizeof(struct hix5hd2_desc);
pool             1010 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		priv->pool[i].size = size;
pool             1011 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		priv->pool[i].desc = virt_addr;
pool             1012 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		priv->pool[i].phys_addr = phys_addr;
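
hix5hd2 backs each of its four queues with a coherent descriptor ring, so the pool array above is little more than dma_alloc_coherent() at setup and dma_free_coherent() at teardown. A hypothetical per-ring helper pair:

/*
 * Sketch of the per-queue coherent descriptor ring setup/teardown pattern;
 * the struct name and fields are illustrative, not the hix5hd2 types.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct desc_ring {
	void		*desc;		/* CPU virtual address of the ring */
	dma_addr_t	phys_addr;	/* address the device will use */
	size_t		size;
};

static int desc_ring_alloc(struct device *dev, struct desc_ring *ring,
			   unsigned int count, size_t desc_size)
{
	ring->size = count * desc_size;
	ring->desc = dma_alloc_coherent(dev, ring->size, &ring->phys_addr,
					GFP_KERNEL);
	return ring->desc ? 0 : -ENOMEM;
}

static void desc_ring_free(struct device *dev, struct desc_ring *ring)
{
	if (!ring->desc)
		return;
	dma_free_coherent(dev, ring->size, ring->desc, ring->phys_addr);
	ring->desc = NULL;
}
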
pool              145 drivers/net/ethernet/ibm/ibmveth.c static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
pool              149 drivers/net/ethernet/ibm/ibmveth.c 	pool->size = pool_size;
pool              150 drivers/net/ethernet/ibm/ibmveth.c 	pool->index = pool_index;
pool              151 drivers/net/ethernet/ibm/ibmveth.c 	pool->buff_size = buff_size;
pool              152 drivers/net/ethernet/ibm/ibmveth.c 	pool->threshold = pool_size * 7 / 8;
pool              153 drivers/net/ethernet/ibm/ibmveth.c 	pool->active = pool_active;
pool              157 drivers/net/ethernet/ibm/ibmveth.c static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
pool              161 drivers/net/ethernet/ibm/ibmveth.c 	pool->free_map = kmalloc_array(pool->size, sizeof(u16), GFP_KERNEL);
pool              163 drivers/net/ethernet/ibm/ibmveth.c 	if (!pool->free_map)
pool              166 drivers/net/ethernet/ibm/ibmveth.c 	pool->dma_addr = kcalloc(pool->size, sizeof(dma_addr_t), GFP_KERNEL);
pool              167 drivers/net/ethernet/ibm/ibmveth.c 	if (!pool->dma_addr) {
pool              168 drivers/net/ethernet/ibm/ibmveth.c 		kfree(pool->free_map);
pool              169 drivers/net/ethernet/ibm/ibmveth.c 		pool->free_map = NULL;
pool              173 drivers/net/ethernet/ibm/ibmveth.c 	pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
pool              175 drivers/net/ethernet/ibm/ibmveth.c 	if (!pool->skbuff) {
pool              176 drivers/net/ethernet/ibm/ibmveth.c 		kfree(pool->dma_addr);
pool              177 drivers/net/ethernet/ibm/ibmveth.c 		pool->dma_addr = NULL;
pool              179 drivers/net/ethernet/ibm/ibmveth.c 		kfree(pool->free_map);
pool              180 drivers/net/ethernet/ibm/ibmveth.c 		pool->free_map = NULL;
pool              184 drivers/net/ethernet/ibm/ibmveth.c 	for (i = 0; i < pool->size; ++i)
pool              185 drivers/net/ethernet/ibm/ibmveth.c 		pool->free_map[i] = i;
pool              187 drivers/net/ethernet/ibm/ibmveth.c 	atomic_set(&pool->available, 0);
pool              188 drivers/net/ethernet/ibm/ibmveth.c 	pool->producer_index = 0;
pool              189 drivers/net/ethernet/ibm/ibmveth.c 	pool->consumer_index = 0;
pool              206 drivers/net/ethernet/ibm/ibmveth.c 					  struct ibmveth_buff_pool *pool)
pool              209 drivers/net/ethernet/ibm/ibmveth.c 	u32 count = pool->size - atomic_read(&pool->available);
pool              222 drivers/net/ethernet/ibm/ibmveth.c 		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
pool              231 drivers/net/ethernet/ibm/ibmveth.c 		free_index = pool->consumer_index;
pool              232 drivers/net/ethernet/ibm/ibmveth.c 		pool->consumer_index++;
pool              233 drivers/net/ethernet/ibm/ibmveth.c 		if (pool->consumer_index >= pool->size)
pool              234 drivers/net/ethernet/ibm/ibmveth.c 			pool->consumer_index = 0;
pool              235 drivers/net/ethernet/ibm/ibmveth.c 		index = pool->free_map[free_index];
pool              238 drivers/net/ethernet/ibm/ibmveth.c 		BUG_ON(pool->skbuff[index] != NULL);
pool              241 drivers/net/ethernet/ibm/ibmveth.c 				pool->buff_size, DMA_FROM_DEVICE);
pool              246 drivers/net/ethernet/ibm/ibmveth.c 		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
pool              247 drivers/net/ethernet/ibm/ibmveth.c 		pool->dma_addr[index] = dma_addr;
pool              248 drivers/net/ethernet/ibm/ibmveth.c 		pool->skbuff[index] = skb;
pool              250 drivers/net/ethernet/ibm/ibmveth.c 		correlator = ((u64)pool->index << 32) | index;
pool              253 drivers/net/ethernet/ibm/ibmveth.c 		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
pool              257 drivers/net/ethernet/ibm/ibmveth.c 			unsigned int len = min(pool->buff_size,
pool              274 drivers/net/ethernet/ibm/ibmveth.c 	atomic_add(buffers_added, &(pool->available));
pool              278 drivers/net/ethernet/ibm/ibmveth.c 	pool->free_map[free_index] = index;
pool              279 drivers/net/ethernet/ibm/ibmveth.c 	pool->skbuff[index] = NULL;
pool              280 drivers/net/ethernet/ibm/ibmveth.c 	if (pool->consumer_index == 0)
pool              281 drivers/net/ethernet/ibm/ibmveth.c 		pool->consumer_index = pool->size - 1;
pool              283 drivers/net/ethernet/ibm/ibmveth.c 		pool->consumer_index--;
pool              286 drivers/net/ethernet/ibm/ibmveth.c 		                 pool->dma_addr[index], pool->buff_size,
pool              292 drivers/net/ethernet/ibm/ibmveth.c 	atomic_add(buffers_added, &(pool->available));
pool              315 drivers/net/ethernet/ibm/ibmveth.c 		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];
pool              317 drivers/net/ethernet/ibm/ibmveth.c 		if (pool->active &&
pool              318 drivers/net/ethernet/ibm/ibmveth.c 		    (atomic_read(&pool->available) < pool->threshold))
pool              319 drivers/net/ethernet/ibm/ibmveth.c 			ibmveth_replenish_buffer_pool(adapter, pool);
pool              327 drivers/net/ethernet/ibm/ibmveth.c 				     struct ibmveth_buff_pool *pool)
pool              331 drivers/net/ethernet/ibm/ibmveth.c 	kfree(pool->free_map);
pool              332 drivers/net/ethernet/ibm/ibmveth.c 	pool->free_map = NULL;
pool              334 drivers/net/ethernet/ibm/ibmveth.c 	if (pool->skbuff && pool->dma_addr) {
pool              335 drivers/net/ethernet/ibm/ibmveth.c 		for (i = 0; i < pool->size; ++i) {
pool              336 drivers/net/ethernet/ibm/ibmveth.c 			struct sk_buff *skb = pool->skbuff[i];
pool              339 drivers/net/ethernet/ibm/ibmveth.c 						 pool->dma_addr[i],
pool              340 drivers/net/ethernet/ibm/ibmveth.c 						 pool->buff_size,
pool              343 drivers/net/ethernet/ibm/ibmveth.c 				pool->skbuff[i] = NULL;
pool              348 drivers/net/ethernet/ibm/ibmveth.c 	if (pool->dma_addr) {
pool              349 drivers/net/ethernet/ibm/ibmveth.c 		kfree(pool->dma_addr);
pool              350 drivers/net/ethernet/ibm/ibmveth.c 		pool->dma_addr = NULL;
pool              353 drivers/net/ethernet/ibm/ibmveth.c 	if (pool->skbuff) {
pool              354 drivers/net/ethernet/ibm/ibmveth.c 		kfree(pool->skbuff);
pool              355 drivers/net/ethernet/ibm/ibmveth.c 		pool->skbuff = NULL;
pool              363 drivers/net/ethernet/ibm/ibmveth.c 	unsigned int pool  = correlator >> 32;
pool              368 drivers/net/ethernet/ibm/ibmveth.c 	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
pool              369 drivers/net/ethernet/ibm/ibmveth.c 	BUG_ON(index >= adapter->rx_buff_pool[pool].size);
pool              371 drivers/net/ethernet/ibm/ibmveth.c 	skb = adapter->rx_buff_pool[pool].skbuff[index];
pool              375 drivers/net/ethernet/ibm/ibmveth.c 	adapter->rx_buff_pool[pool].skbuff[index] = NULL;
pool              378 drivers/net/ethernet/ibm/ibmveth.c 			 adapter->rx_buff_pool[pool].dma_addr[index],
pool              379 drivers/net/ethernet/ibm/ibmveth.c 			 adapter->rx_buff_pool[pool].buff_size,
pool              382 drivers/net/ethernet/ibm/ibmveth.c 	free_index = adapter->rx_buff_pool[pool].producer_index;
pool              383 drivers/net/ethernet/ibm/ibmveth.c 	adapter->rx_buff_pool[pool].producer_index++;
pool              384 drivers/net/ethernet/ibm/ibmveth.c 	if (adapter->rx_buff_pool[pool].producer_index >=
pool              385 drivers/net/ethernet/ibm/ibmveth.c 	    adapter->rx_buff_pool[pool].size)
pool              386 drivers/net/ethernet/ibm/ibmveth.c 		adapter->rx_buff_pool[pool].producer_index = 0;
pool              387 drivers/net/ethernet/ibm/ibmveth.c 	adapter->rx_buff_pool[pool].free_map[free_index] = index;
pool              391 drivers/net/ethernet/ibm/ibmveth.c 	atomic_dec(&(adapter->rx_buff_pool[pool].available));
pool              398 drivers/net/ethernet/ibm/ibmveth.c 	unsigned int pool = correlator >> 32;
pool              401 drivers/net/ethernet/ibm/ibmveth.c 	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
pool              402 drivers/net/ethernet/ibm/ibmveth.c 	BUG_ON(index >= adapter->rx_buff_pool[pool].size);
pool              404 drivers/net/ethernet/ibm/ibmveth.c 	return adapter->rx_buff_pool[pool].skbuff[index];
pool              412 drivers/net/ethernet/ibm/ibmveth.c 	unsigned int pool = correlator >> 32;
pool              418 drivers/net/ethernet/ibm/ibmveth.c 	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
pool              419 drivers/net/ethernet/ibm/ibmveth.c 	BUG_ON(index >= adapter->rx_buff_pool[pool].size);
pool              421 drivers/net/ethernet/ibm/ibmveth.c 	if (!adapter->rx_buff_pool[pool].active) {
pool              423 drivers/net/ethernet/ibm/ibmveth.c 		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
pool              428 drivers/net/ethernet/ibm/ibmveth.c 		adapter->rx_buff_pool[pool].buff_size;
pool              429 drivers/net/ethernet/ibm/ibmveth.c 	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
pool             1747 drivers/net/ethernet/ibm/ibmveth.c 	struct ibmveth_buff_pool *pool = container_of(kobj,
pool             1752 drivers/net/ethernet/ibm/ibmveth.c 		return sprintf(buf, "%d\n", pool->active);
pool             1754 drivers/net/ethernet/ibm/ibmveth.c 		return sprintf(buf, "%d\n", pool->size);
pool             1756 drivers/net/ethernet/ibm/ibmveth.c 		return sprintf(buf, "%d\n", pool->buff_size);
pool             1763 drivers/net/ethernet/ibm/ibmveth.c 	struct ibmveth_buff_pool *pool = container_of(kobj,
pool             1773 drivers/net/ethernet/ibm/ibmveth.c 		if (value && !pool->active) {
pool             1775 drivers/net/ethernet/ibm/ibmveth.c 				if (ibmveth_alloc_buffer_pool(pool)) {
pool             1780 drivers/net/ethernet/ibm/ibmveth.c 				pool->active = 1;
pool             1787 drivers/net/ethernet/ibm/ibmveth.c 				pool->active = 1;
pool             1789 drivers/net/ethernet/ibm/ibmveth.c 		} else if (!value && pool->active) {
pool             1795 drivers/net/ethernet/ibm/ibmveth.c 				if (pool == &adapter->rx_buff_pool[i])
pool             1811 drivers/net/ethernet/ibm/ibmveth.c 				pool->active = 0;
pool             1816 drivers/net/ethernet/ibm/ibmveth.c 			pool->active = 0;
pool             1826 drivers/net/ethernet/ibm/ibmveth.c 				pool->size = value;
pool             1830 drivers/net/ethernet/ibm/ibmveth.c 				pool->size = value;
pool             1841 drivers/net/ethernet/ibm/ibmveth.c 				pool->buff_size = value;
pool             1845 drivers/net/ethernet/ibm/ibmveth.c 				pool->buff_size = value;
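
ibmveth's buffer pools recycle slot indices through the free_map array: the consumer index hands out free slots as buffers are posted to the hypervisor, the producer index returns them as buffers come back. A sketch of just that index ring, with illustrative names (callers are assumed to balance get and put, as the driver does):

/*
 * Sketch of the free_map index ring only; names are illustrative.
 */
#include <linux/types.h>

#define SLOT_INVALID	0xffff

struct slot_ring {
	u16		*free_map;
	unsigned int	size;
	unsigned int	consumer_index;
	unsigned int	producer_index;
};

static u16 slot_get(struct slot_ring *r)
{
	unsigned int i = r->consumer_index;
	u16 slot = r->free_map[i];

	r->free_map[i] = SLOT_INVALID;		/* mark the entry consumed */
	r->consumer_index = (i + 1) % r->size;
	return slot;
}

static void slot_put(struct slot_ring *r, u16 slot)
{
	r->free_map[r->producer_index] = slot;
	r->producer_index = (r->producer_index + 1) % r->size;
}
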
pool              243 drivers/net/ethernet/ibm/ibmvnic.c 			      struct ibmvnic_rx_pool *pool)
pool              245 drivers/net/ethernet/ibm/ibmvnic.c 	int count = pool->size - atomic_read(&pool->available);
pool              259 drivers/net/ethernet/ibm/ibmvnic.c 	if (!pool->active)
pool              267 drivers/net/ethernet/ibm/ibmvnic.c 		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
pool              274 drivers/net/ethernet/ibm/ibmvnic.c 		index = pool->free_map[pool->next_free];
pool              276 drivers/net/ethernet/ibm/ibmvnic.c 		if (pool->rx_buff[index].skb)
pool              280 drivers/net/ethernet/ibm/ibmvnic.c 		offset = index * pool->buff_size;
pool              281 drivers/net/ethernet/ibm/ibmvnic.c 		dst = pool->long_term_buff.buff + offset;
pool              282 drivers/net/ethernet/ibm/ibmvnic.c 		memset(dst, 0, pool->buff_size);
pool              283 drivers/net/ethernet/ibm/ibmvnic.c 		dma_addr = pool->long_term_buff.addr + offset;
pool              284 drivers/net/ethernet/ibm/ibmvnic.c 		pool->rx_buff[index].data = dst;
pool              286 drivers/net/ethernet/ibm/ibmvnic.c 		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
pool              287 drivers/net/ethernet/ibm/ibmvnic.c 		pool->rx_buff[index].dma = dma_addr;
pool              288 drivers/net/ethernet/ibm/ibmvnic.c 		pool->rx_buff[index].skb = skb;
pool              289 drivers/net/ethernet/ibm/ibmvnic.c 		pool->rx_buff[index].pool_index = pool->index;
pool              290 drivers/net/ethernet/ibm/ibmvnic.c 		pool->rx_buff[index].size = pool->buff_size;
pool              295 drivers/net/ethernet/ibm/ibmvnic.c 		    cpu_to_be64((u64)&pool->rx_buff[index]);
pool              297 drivers/net/ethernet/ibm/ibmvnic.c 		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
pool              307 drivers/net/ethernet/ibm/ibmvnic.c 		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
pool              309 drivers/net/ethernet/ibm/ibmvnic.c 		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
pool              316 drivers/net/ethernet/ibm/ibmvnic.c 		pool->next_free = (pool->next_free + 1) % pool->size;
pool              318 drivers/net/ethernet/ibm/ibmvnic.c 	atomic_add(buffers_added, &pool->available);
pool              324 drivers/net/ethernet/ibm/ibmvnic.c 	pool->free_map[pool->next_free] = index;
pool              325 drivers/net/ethernet/ibm/ibmvnic.c 	pool->rx_buff[index].skb = NULL;
pool              329 drivers/net/ethernet/ibm/ibmvnic.c 	atomic_add(buffers_added, &pool->available);
pool             2203 drivers/net/ethernet/ibm/ibmvnic.c 	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
pool             2207 drivers/net/ethernet/ibm/ibmvnic.c 	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
pool             2208 drivers/net/ethernet/ibm/ibmvnic.c 	pool->next_alloc = (pool->next_alloc + 1) % pool->size;
pool             2210 drivers/net/ethernet/ibm/ibmvnic.c 	atomic_dec(&pool->available);
pool             2913 drivers/net/ethernet/ibm/ibmvnic.c 		unsigned int pool = scrq->pool_index;
pool             2925 drivers/net/ethernet/ibm/ibmvnic.c 				tx_pool = &adapter->tso_pool[pool];
pool             2928 drivers/net/ethernet/ibm/ibmvnic.c 				tx_pool = &adapter->tx_pool[pool];
pool              287 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	int pool;
pool              538 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	u16 pool;
pool              112 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	if (ddp->pool) {
pool              113 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 		dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
pool              114 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 		ddp->pool = NULL;
pool              182 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	if (!ddp_pool->pool) {
pool              195 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp);
pool              200 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	ddp->pool = ddp_pool->pool;
pool              319 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
pool              602 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	dma_pool_destroy(ddp_pool->pool);
pool              603 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	ddp_pool->pool = NULL;
pool              611 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	struct dma_pool *pool;
pool              616 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX,
pool              618 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	if (!pool)
pool              622 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	ddp_pool->pool = pool;
pool               42 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h 	struct dma_pool *pool;
pool               47 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h 	struct dma_pool *pool;
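
The ixgbe FCoE DDP code keeps a struct dma_pool per CPU for user descriptor lists, pairing dma_pool_create()/dma_pool_destroy() at setup/teardown with dma_pool_alloc()/dma_pool_free() per DDP context. A short sketch of that life cycle (the pool name, block size and alignment are placeholders):

/*
 * Sketch of the struct dma_pool life cycle seen above; values are
 * placeholders, not the FCoE DDP parameters.
 */
#include <linux/dmapool.h>
#include <linux/gfp.h>

static struct dma_pool *ddp_pool_setup(struct device *dev)
{
	/* fixed-size blocks, 16-byte aligned, no boundary-crossing constraint */
	return dma_pool_create("example_ddp", dev, 1024, 16, 0);
}

static void *ddp_buf_get(struct dma_pool *pool, dma_addr_t *dma)
{
	return dma_pool_alloc(pool, GFP_KERNEL, dma);	/* NULL on failure */
}

static void ddp_buf_put(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	dma_pool_free(pool, vaddr, dma);
}

/* teardown: dma_pool_destroy(pool); a NULL pool is tolerated */
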
pool               24 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 	u16 reg_idx, pool;
pool               37 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 	for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
pool               40 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 			pool++;
pool               44 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
pool              189 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 	u16 reg_idx, pool;
pool              197 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 	pool = 0;
pool              207 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 			pool++;
pool              211 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
pool             3864 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		u16 pool = adapter->num_rx_pools;
pool             3870 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		while (pool--)
pool             3872 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 					IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)),
pool             3915 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		u16 pool = adapter->num_rx_pools;
pool             3917 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		while (pool--)
pool             3919 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 					IXGBE_PFVFRSSRK(i, VMDQ_P(pool)),
pool             3990 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		u16 pool = adapter->num_rx_pools;
pool             4001 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		while (pool--)
pool             4003 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 					IXGBE_PFVFMRQC(VMDQ_P(pool)),
pool             4170 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	u16 pool = adapter->num_rx_pools;
pool             4187 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	while (pool--)
pool             4188 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
pool             4194 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	u16 pool = adapter->num_rx_pools;
pool             4213 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	while (pool--)
pool             4214 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(pool)), vmolr);
pool             4739 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 					    mac_table->pool,
pool             4762 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 					    mac_table->pool,
pool             4783 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool)
pool             4796 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			if (mac_table->pool != pool)
pool             4813 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	mac_table->pool = VMDQ_P(0);
pool             4817 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool,
pool             4822 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			 const u8 *addr, u16 pool)
pool             4836 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		mac_table->pool = pool;
pool             4850 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			 const u8 *addr, u16 pool)
pool             4865 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (mac_table->pool != pool)
pool             5350 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	baseq = accel->pool * adapter->num_rx_queues_per_pool;
pool             5352 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		   accel->pool, adapter->num_rx_pools,
pool             5375 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				   VMDQ_P(accel->pool));
pool             5391 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	clear_bit(accel->pool, adapter->fwd_bitmask);
pool             9044 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	int pool;
pool             9056 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
pool             9057 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (pool < adapter->num_rx_pools) {
pool             9058 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		set_bit(pool, adapter->fwd_bitmask);
pool             9059 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		accel->pool = pool;
pool             9931 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		u16 pool = VMDQ_P(0);
pool             9933 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool))
pool             10069 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	int pool, err;
pool             10090 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
pool             10091 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (pool == adapter->num_rx_pools) {
pool             10137 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (pool >= adapter->num_rx_pools)
pool             10145 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	set_bit(pool, adapter->fwd_bitmask);
pool             10146 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	netdev_set_sb_channel(vdev, pool);
pool             10147 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	accel->pool = pool;
pool             10169 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			     VMDQ_P(accel->pool));
pool             10192 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	clear_bit(accel->pool, adapter->fwd_bitmask);
pool             3531 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c 						  unsigned int pool)
pool             3536 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c 	if (pool > 63)
pool             3543 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c 		pfflp |= (1ULL << pool);
pool             3545 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c 		pfflp &= ~(1ULL << pool);
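
ixgbe tracks which VMDq/macvlan offload pools are in use with the fwd_bitmask bitmap: find_first_zero_bit() picks a free pool, set_bit() claims it, clear_bit() releases it on teardown. Stripped down to the bitmap handling (helper names and the error convention are illustrative):

/*
 * Stripped-down sketch of the fwd_bitmask handling above.
 */
#include <linux/bitops.h>
#include <linux/errno.h>

static int claim_pool(unsigned long *bitmask, unsigned int num_pools)
{
	unsigned int pool = find_first_zero_bit(bitmask, num_pools);

	if (pool >= num_pools)
		return -ENOSPC;		/* every pool already in use */
	set_bit(pool, bitmask);
	return pool;
}

static void release_pool(unsigned long *bitmask, unsigned int pool)
{
	clear_bit(pool, bitmask);
}
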
pool               61 drivers/net/ethernet/marvell/mvneta.c #define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool)	(0x1700 + ((pool) << 2))
pool              175 drivers/net/ethernet/marvell/mvneta.c #define      MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
pool               33 drivers/net/ethernet/marvell/mvneta_bm.h #define MVNETA_BM_XBAR_POOL_REG(pool)		\
pool               34 drivers/net/ethernet/marvell/mvneta_bm.h 		(((pool) < 2) ? MVNETA_BM_XBAR_01_REG : MVNETA_BM_XBAR_23_REG)
pool               35 drivers/net/ethernet/marvell/mvneta_bm.h #define     MVNETA_BM_TARGET_ID_OFFS(pool)	(((pool) & 1) ? 16 : 0)
pool               36 drivers/net/ethernet/marvell/mvneta_bm.h #define     MVNETA_BM_TARGET_ID_MASK(pool)	\
pool               37 drivers/net/ethernet/marvell/mvneta_bm.h 		(0xf << MVNETA_BM_TARGET_ID_OFFS(pool))
pool               38 drivers/net/ethernet/marvell/mvneta_bm.h #define     MVNETA_BM_TARGET_ID_VAL(pool, id)	\
pool               39 drivers/net/ethernet/marvell/mvneta_bm.h 		((id) << MVNETA_BM_TARGET_ID_OFFS(pool))
pool               40 drivers/net/ethernet/marvell/mvneta_bm.h #define     MVNETA_BM_XBAR_ATTR_OFFS(pool)	(((pool) & 1) ? 20 : 4)
pool               41 drivers/net/ethernet/marvell/mvneta_bm.h #define     MVNETA_BM_XBAR_ATTR_MASK(pool)	\
pool               42 drivers/net/ethernet/marvell/mvneta_bm.h 		(0xff << MVNETA_BM_XBAR_ATTR_OFFS(pool))
pool               43 drivers/net/ethernet/marvell/mvneta_bm.h #define     MVNETA_BM_XBAR_ATTR_VAL(pool, attr)	\
pool               44 drivers/net/ethernet/marvell/mvneta_bm.h 		((attr) << MVNETA_BM_XBAR_ATTR_OFFS(pool))
pool               47 drivers/net/ethernet/marvell/mvneta_bm.h #define MVNETA_BM_POOL_BASE_REG(pool)		(0x10 + ((pool) << 4))
pool               51 drivers/net/ethernet/marvell/mvneta_bm.h #define MVNETA_BM_POOL_READ_PTR_REG(pool)	(0x14 + ((pool) << 4))
pool               57 drivers/net/ethernet/marvell/mvneta_bm.h #define MVNETA_BM_POOL_WRITE_PTR_REG(pool)	(0x18 + ((pool) << 4))
pool               64 drivers/net/ethernet/marvell/mvneta_bm.h #define MVNETA_BM_POOL_SIZE_REG(pool)		(0x1c + ((pool) << 4))
pool               31 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
pool              283 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
pool              285 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
pool              287 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
pool              289 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
pool              291 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
pool              292 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
pool              296 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
pool              308 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
pool              314 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
pool              315 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
pool              322 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
pool              327 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
pool              329 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	if (likely(pool->frag_size <= PAGE_SIZE))
pool              330 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		return netdev_alloc_frag(pool->frag_size);
pool              332 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		return kmalloc(pool->frag_size, GFP_ATOMIC);
pool              335 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
pool              337 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	if (likely(pool->frag_size <= PAGE_SIZE))
pool              657 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
pool              691 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
pool              753 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
pool              755 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
pool              758 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
pool              759 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	    (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
pool              760 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		netdev_err(port->dev, "Invalid pool %d\n", pool);
pool              776 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 				if (pool < port->nrxqs)
pool              781 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 				pkts_num = mvpp2_pools[pool].buf_num;
pool              810 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			 unsigned int pool, int pkt_size)
pool              812 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
pool              815 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	if (pool > port->nrxqs * 2) {
pool              816 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		netdev_err(port->dev, "Invalid pool %d\n", pool);
pool             2410 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		int pool;
pool             2412 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
pool             2415 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		mvpp2_bm_pool_put(port, pool,
pool             2868 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			   struct mvpp2_bm_pool *bm_pool, int pool)
pool             2880 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
pool             2940 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		int pool, rx_bytes, err;
pool             2951 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
pool             2953 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		bm_pool = &port->priv->bm_pools[pool];
pool             2965 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
pool             2980 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		err = mvpp2_rx_refill(port, bm_pool, pool);
pool              377 drivers/net/ethernet/marvell/octeontx2/af/mbox.h 		struct npa_pool_s pool;
pool              394 drivers/net/ethernet/marvell/octeontx2/af/mbox.h 		struct npa_pool_s pool;
pool              114 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 			memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
pool              128 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 			memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
pool              170 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 		if (req->op == NPA_AQ_INSTOP_INIT && req->pool.ena)
pool              173 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 			ena = (req->pool.ena & req->pool_mask.ena) |
pool              191 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 				memcpy(&rsp->pool, ctx,
pool              214 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 		aq_req.pool.ena = 0;
pool             2531 drivers/net/ethernet/mellanox/mlx4/cmd.c 	if (!priv->cmd.pool) {
pool             2532 drivers/net/ethernet/mellanox/mlx4/cmd.c 		priv->cmd.pool = dma_pool_create("mlx4_cmd",
pool             2536 drivers/net/ethernet/mellanox/mlx4/cmd.c 		if (!priv->cmd.pool)
pool             2599 drivers/net/ethernet/mellanox/mlx4/cmd.c 	if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) {
pool             2600 drivers/net/ethernet/mellanox/mlx4/cmd.c 		dma_pool_destroy(priv->cmd.pool);
pool             2601 drivers/net/ethernet/mellanox/mlx4/cmd.c 		priv->cmd.pool = NULL;
pool             2701 drivers/net/ethernet/mellanox/mlx4/cmd.c 	mailbox->buf = dma_pool_zalloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
pool             2718 drivers/net/ethernet/mellanox/mlx4/cmd.c 	dma_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
pool              628 drivers/net/ethernet/mellanox/mlx4/mlx4.h 	struct dma_pool	       *pool;
pool             1192 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	mailbox->buf = dma_pool_zalloc(dev->cmd.pool, flags,
pool             1207 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
pool             1930 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	cmd->pool = dma_pool_create("mlx5_cmd", dev->device, size, align, 0);
pool             1931 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	if (!cmd->pool)
pool             2013 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	dma_pool_destroy(cmd->pool);
pool             2027 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	dma_pool_destroy(cmd->pool);
pool               12 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	struct mlx5dr_icm_pool *pool;
pool               61 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	struct mlx5dr_icm_pool *pool;
pool               98 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool,
pool              102 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	struct mlx5_core_dev *mdev = pool->dmn->mdev;
pool              111 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	icm_mr->pool = pool;
pool              117 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
pool              118 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 							       pool->icm_type) * 2;
pool              123 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		mlx5dr_err(pool->dmn, "Failed to allocate SW ICM memory, err (%d)\n", err);
pool              128 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	err = dr_icm_create_dm_mkey(mdev, pool->dmn->pdn,
pool              134 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		mlx5dr_err(pool->dmn, "Failed to create SW ICM MKEY, err (%d)\n", err);
pool              145 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	list_add_tail(&icm_mr->mr_list, &pool->icm_mr_list);
pool              159 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	struct mlx5_core_dev *mdev = icm_mr->pool->dmn->mdev;
pool              200 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	struct mlx5dr_icm_pool *pool = bucket->pool;
pool              208 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	mr_row_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
pool              209 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 							 pool->icm_type);
pool              211 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	if (pool->icm_type == DR_ICM_TYPE_STE) {
pool              221 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	mutex_lock(&pool->mr_mutex);
pool              222 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	if (!list_empty(&pool->icm_mr_list)) {
pool              223 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		icm_mr = list_last_entry(&pool->icm_mr_list,
pool              231 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		icm_mr = dr_icm_pool_mr_create(pool, dm_type, align_base);
pool              255 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		if (pool->icm_type == DR_ICM_TYPE_STE) {
pool              266 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	mutex_unlock(&pool->mr_mutex);
pool              272 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	mutex_unlock(&pool->mr_mutex);
pool              290 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	if (bucket->pool->icm_type == DR_ICM_TYPE_STE)
pool              296 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c static void dr_icm_bucket_init(struct mlx5dr_icm_pool *pool,
pool              300 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	if (pool->icm_type == DR_ICM_TYPE_STE)
pool              306 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	bucket->pool = pool;
pool              332 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c static u64 dr_icm_hot_mem_size(struct mlx5dr_icm_pool *pool)
pool              337 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	for (chunk_order = 0; chunk_order < pool->num_of_buckets; chunk_order++)
pool              338 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		hot_size += pool->buckets[chunk_order].hot_list_count *
pool              339 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 			    mlx5dr_icm_pool_chunk_size_to_byte(chunk_order, pool->icm_type);
pool              344 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c static bool dr_icm_reuse_hot_entries(struct mlx5dr_icm_pool *pool,
pool              349 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	bytes_for_sync = dr_icm_hot_mem_size(pool);
pool              377 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c static void dr_icm_chill_buckets_start(struct mlx5dr_icm_pool *pool,
pool              384 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	for (i = 0; i < pool->num_of_buckets; i++) {
pool              385 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		bucket = &pool->buckets[i];
pool              401 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c static void dr_icm_chill_buckets_end(struct mlx5dr_icm_pool *pool,
pool              408 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	for (i = 0; i < pool->num_of_buckets; i++) {
pool              409 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		bucket = &pool->buckets[i];
pool              423 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c static void dr_icm_chill_buckets_abort(struct mlx5dr_icm_pool *pool,
pool              430 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	for (i = 0; i < pool->num_of_buckets; i++) {
pool              431 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		bucket = &pool->buckets[i];
pool              449 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
pool              457 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	if (chunk_size > pool->max_log_chunk_sz)
pool              460 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	bucket = &pool->buckets[chunk_size];
pool              466 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		if (dr_icm_reuse_hot_entries(pool, bucket)) {
pool              467 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 			dr_icm_chill_buckets_start(pool, bucket, buckets);
pool              468 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 			err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
pool              470 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 				dr_icm_chill_buckets_abort(pool, bucket, buckets);
pool              471 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 				mlx5dr_dbg(pool->dmn, "Sync_steering failed\n");
pool              475 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 			dr_icm_chill_buckets_end(pool, bucket, buckets);
pool              501 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	if (bucket->pool->icm_type == DR_ICM_TYPE_STE) {
pool              520 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	struct mlx5dr_icm_pool *pool;
pool              528 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
pool              529 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	if (!pool)
pool              532 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	pool->buckets = kcalloc(max_log_chunk_sz + 1,
pool              533 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 				sizeof(pool->buckets[0]),
pool              535 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	if (!pool->buckets)
pool              538 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	pool->dmn = dmn;
pool              539 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	pool->icm_type = icm_type;
pool              540 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	pool->max_log_chunk_sz = max_log_chunk_sz;
pool              541 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	pool->num_of_buckets = max_log_chunk_sz + 1;
pool              542 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	INIT_LIST_HEAD(&pool->icm_mr_list);
pool              544 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	for (i = 0; i < pool->num_of_buckets; i++)
pool              545 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		dr_icm_bucket_init(pool, &pool->buckets[i], i);
pool              547 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	mutex_init(&pool->mr_mutex);
pool              549 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	return pool;
pool              552 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	kvfree(pool);
pool              556 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool)
pool              561 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	mutex_destroy(&pool->mr_mutex);
pool              563 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	list_for_each_entry_safe(icm_mr, next, &pool->icm_mr_list, mr_list)
pool              566 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	for (i = 0; i < pool->num_of_buckets; i++)
pool              567 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		dr_icm_bucket_cleanup(&pool->buckets[i]);
pool              569 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	kfree(pool->buckets);
pool              570 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	kvfree(pool);
pool              662 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
pool              674 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c 	chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size);
pool              195 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
pool              946 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool);
pool              949 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
pool             10068 drivers/net/ethernet/mellanox/mlxsw/reg.h MLXSW_ITEM32(reg, sbpr, pool, 0x00, 0, 4);
pool             10094 drivers/net/ethernet/mellanox/mlxsw/reg.h static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool,
pool             10100 drivers/net/ethernet/mellanox/mlxsw/reg.h 	mlxsw_reg_sbpr_pool_set(payload, pool);
pool             10176 drivers/net/ethernet/mellanox/mlxsw/reg.h MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4);
pool             10181 drivers/net/ethernet/mellanox/mlxsw/reg.h 				       bool infi_max, u8 pool)
pool             10190 drivers/net/ethernet/mellanox/mlxsw/reg.h 	mlxsw_reg_sbcm_pool_set(payload, pool);
pool             10216 drivers/net/ethernet/mellanox/mlxsw/reg.h MLXSW_ITEM32(reg, sbpm, pool, 0x00, 8, 4);
pool             10264 drivers/net/ethernet/mellanox/mlxsw/reg.h static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool,
pool             10270 drivers/net/ethernet/mellanox/mlxsw/reg.h 	mlxsw_reg_sbpm_pool_set(payload, pool);
pool             10324 drivers/net/ethernet/mellanox/mlxsw/reg.h MLXSW_ITEM32(reg, sbmm, pool, 0x24, 0, 4);
pool             10327 drivers/net/ethernet/mellanox/mlxsw/reg.h 				       u32 max_buff, u8 pool)
pool             10333 drivers/net/ethernet/mellanox/mlxsw/reg.h 	mlxsw_reg_sbmm_pool_set(payload, pool);
pool               53 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c 	u8 pool;
pool              181 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c 	mlxsw_reg_sbpr_pack(sbpr_pl, des->pool, des->dir, mode,
pool              206 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c 			    min_buff, max_buff, infi_max, des->pool);
pool              234 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c 	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false,
pool              257 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c 	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
pool              285 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c 	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
pool              841 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c 				    des->pool);
pool               70 drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c 	struct mlxsw_sp_counter_pool *pool;
pool               87 drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
pool               88 drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c 	if (!pool)
pool               91 drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c 	pool->pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
pool               92 drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c 	map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);
pool               94 drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c 	pool->usage = kzalloc(map_size, GFP_KERNEL);
pool               95 drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c 	if (!pool->usage) {
pool              100 drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c 	pool->sub_pools = mlxsw_sp_counter_sub_pools;
pool              106 drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c 		sub_pool = &pool->sub_pools[i];
pool              112 drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c 		if (sub_pool->base_index + sub_pool->size > pool->pool_size)
pool              113 drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c 			sub_pool->size = pool->pool_size - sub_pool->base_index;
pool              116 drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c 	mlxsw_sp->counter_pool = pool;
pool              120 drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c 	kfree(pool);
pool              126 drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c 	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
pool              128 drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c 	WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
pool              129 drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c 			       pool->pool_size);
pool              130 drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c 	kfree(pool->usage);
pool              131 drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c 	kfree(pool);
pool              138 drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c 	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
pool              148 drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c 	entry_index = find_next_zero_bit(pool->usage, stop_index, entry_index);
pool              157 drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c 		__set_bit(entry_index + i, pool->usage);
pool              167 drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c 	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
pool              171 drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c 	if (WARN_ON(counter_index >= pool->pool_size))
pool              175 drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c 		__clear_bit(counter_index + i, pool->usage);
pool               85 drivers/net/ethernet/netronome/nfp/nfp_abi.h 	__le32 pool;
pool               32 drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c 		.pool		= cpu_to_le32(pool_index),
pool               63 drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c 			.pool		= cpu_to_le32(pool_index),
pool              559 drivers/net/ethernet/ti/cpsw.c 	struct page_pool *pool;
pool              568 drivers/net/ethernet/ti/cpsw.c 	pool = page_pool_create(&pp_params);
pool              569 drivers/net/ethernet/ti/cpsw.c 	if (IS_ERR(pool))
pool              572 drivers/net/ethernet/ti/cpsw.c 	return pool;
pool              579 drivers/net/ethernet/ti/cpsw.c 	struct page_pool *pool;
pool              582 drivers/net/ethernet/ti/cpsw.c 	pool = cpsw->page_pool[ch];
pool              589 drivers/net/ethernet/ti/cpsw.c 	ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
pool              608 drivers/net/ethernet/ti/cpsw.c 	struct page_pool *pool;
pool              612 drivers/net/ethernet/ti/cpsw.c 	pool = cpsw_create_page_pool(cpsw, pool_size);
pool              613 drivers/net/ethernet/ti/cpsw.c 	if (IS_ERR(pool))
pool              614 drivers/net/ethernet/ti/cpsw.c 		ret = PTR_ERR(pool);
pool              616 drivers/net/ethernet/ti/cpsw.c 		cpsw->page_pool[ch] = pool;
pool              683 drivers/net/ethernet/ti/cpsw.c 	struct page_pool	*pool;
pool              695 drivers/net/ethernet/ti/cpsw.c 	pool = cpsw->page_pool[ch];
pool              711 drivers/net/ethernet/ti/cpsw.c 		page_pool_recycle_direct(pool, page);
pool              715 drivers/net/ethernet/ti/cpsw.c 	new_page = page_pool_dev_alloc_pages(pool);
pool              754 drivers/net/ethernet/ti/cpsw.c 		page_pool_recycle_direct(pool, page);
pool              768 drivers/net/ethernet/ti/cpsw.c 	page_pool_release_page(pool, page);
pool              784 drivers/net/ethernet/ti/cpsw.c 		page_pool_recycle_direct(pool, new_page);
pool             1355 drivers/net/ethernet/ti/cpsw.c 	struct page_pool *pool;
pool             1362 drivers/net/ethernet/ti/cpsw.c 		pool = cpsw->page_pool[ch];
pool             1365 drivers/net/ethernet/ti/cpsw.c 			page = page_pool_dev_alloc_pages(pool);
pool             1384 drivers/net/ethernet/ti/cpsw.c 				page_pool_recycle_direct(pool, page);
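
cpsw creates one page_pool per RX channel, registers it as the XDP rxq memory model, and then refills rings with page_pool_dev_alloc_pages() while recycling pages back with page_pool_recycle_direct(). A hedged sketch of the creation step (the parameter values here are placeholders, not cpsw's):

/*
 * Hedged sketch of a per-channel page_pool setup; parameter values are
 * placeholders chosen for illustration.
 */
#include <linux/dma-mapping.h>
#include <linux/numa.h>
#include <net/page_pool.h>

static struct page_pool *rx_pool_create(struct device *dev, int ring_size)
{
	struct page_pool_params pp_params = {
		.order		= 0,			/* one page per buffer */
		.flags		= PP_FLAG_DMA_MAP,	/* pool handles DMA mapping */
		.pool_size	= ring_size,
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
	};

	return page_pool_create(&pp_params);	/* ERR_PTR() on failure */
}

/*
 * refill:    page = page_pool_dev_alloc_pages(pool);
 * rx clean:  page_pool_recycle_direct(pool, page);   (fast-path recycle)
 */
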
pool               68 drivers/net/ethernet/ti/cpts.c 			list_add(&event->list, &cpts->pool);
pool              152 drivers/net/ethernet/ti/cpts.c 		if (list_empty(&cpts->pool) && cpts_purge_events(cpts)) {
pool              157 drivers/net/ethernet/ti/cpts.c 		event = list_first_entry(&cpts->pool, struct cpts_event, list);
pool              205 drivers/net/ethernet/ti/cpts.c 			list_add(&event->list, &cpts->pool);
pool              386 drivers/net/ethernet/ti/cpts.c 			list_add(&event->list, &cpts->pool);
pool              395 drivers/net/ethernet/ti/cpts.c 			list_add(&event->list, &cpts->pool);
pool              453 drivers/net/ethernet/ti/cpts.c 	INIT_LIST_HEAD(&cpts->pool);
pool              455 drivers/net/ethernet/ti/cpts.c 		list_add(&cpts->pool_data[i].list, &cpts->pool);
pool              113 drivers/net/ethernet/ti/cpts.h 	struct list_head pool;
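
cpts keeps its timestamp events in a plain list_head free pool: the fixed pool_data array is seeded onto the list at init, events are taken with list_first_entry() when a hardware timestamp arrives and put back once matched or purged. A minimal sketch of that free-list pattern with illustrative names:

/*
 * Minimal sketch of a list_head free pool; struct and function names
 * are illustrative.
 */
#include <linux/list.h>
#include <linux/types.h>

struct my_event {
	struct list_head	list;
	u64			timestamp;	/* payload, for illustration */
};

static LIST_HEAD(event_pool);

static void event_pool_seed(struct my_event *events, int n)
{
	int i;

	for (i = 0; i < n; i++)
		list_add(&events[i].list, &event_pool);
}

static struct my_event *event_get(void)
{
	struct my_event *ev;

	if (list_empty(&event_pool))
		return NULL;
	ev = list_first_entry(&event_pool, struct my_event, list);
	list_del_init(&ev->list);
	return ev;
}

static void event_put(struct my_event *ev)
{
	list_add(&ev->list, &event_pool);
}
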
pool              100 drivers/net/ethernet/ti/davinci_cpdma.c 	struct cpdma_desc_pool	*pool;
pool              192 drivers/net/ethernet/ti/davinci_cpdma.c 	struct cpdma_desc_pool *pool = ctlr->pool;
pool              194 drivers/net/ethernet/ti/davinci_cpdma.c 	if (!pool)
pool              197 drivers/net/ethernet/ti/davinci_cpdma.c 	WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
pool              199 drivers/net/ethernet/ti/davinci_cpdma.c 	     gen_pool_size(pool->gen_pool),
pool              200 drivers/net/ethernet/ti/davinci_cpdma.c 	     gen_pool_avail(pool->gen_pool));
pool              201 drivers/net/ethernet/ti/davinci_cpdma.c 	if (pool->cpumap)
pool              202 drivers/net/ethernet/ti/davinci_cpdma.c 		dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap,
pool              203 drivers/net/ethernet/ti/davinci_cpdma.c 				  pool->phys);
pool              215 drivers/net/ethernet/ti/davinci_cpdma.c 	struct cpdma_desc_pool *pool;
pool              218 drivers/net/ethernet/ti/davinci_cpdma.c 	pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL);
pool              219 drivers/net/ethernet/ti/davinci_cpdma.c 	if (!pool)
pool              221 drivers/net/ethernet/ti/davinci_cpdma.c 	ctlr->pool = pool;
pool              223 drivers/net/ethernet/ti/davinci_cpdma.c 	pool->mem_size	= cpdma_params->desc_mem_size;
pool              224 drivers/net/ethernet/ti/davinci_cpdma.c 	pool->desc_size	= ALIGN(sizeof(struct cpdma_desc),
pool              226 drivers/net/ethernet/ti/davinci_cpdma.c 	pool->num_desc	= pool->mem_size / pool->desc_size;
pool              234 drivers/net/ethernet/ti/davinci_cpdma.c 		pool->num_desc = cpdma_params->descs_pool_size;
pool              235 drivers/net/ethernet/ti/davinci_cpdma.c 		pool->mem_size = pool->desc_size * pool->num_desc;
pool              236 drivers/net/ethernet/ti/davinci_cpdma.c 		if (pool->mem_size > cpdma_params->desc_mem_size)
pool              240 drivers/net/ethernet/ti/davinci_cpdma.c 	pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size),
pool              242 drivers/net/ethernet/ti/davinci_cpdma.c 	if (IS_ERR(pool->gen_pool)) {
pool              243 drivers/net/ethernet/ti/davinci_cpdma.c 		ret = PTR_ERR(pool->gen_pool);
pool              249 drivers/net/ethernet/ti/davinci_cpdma.c 		pool->phys  = cpdma_params->desc_mem_phys;
pool              250 drivers/net/ethernet/ti/davinci_cpdma.c 		pool->iomap = devm_ioremap(ctlr->dev, pool->phys,
pool              251 drivers/net/ethernet/ti/davinci_cpdma.c 					   pool->mem_size);
pool              252 drivers/net/ethernet/ti/davinci_cpdma.c 		pool->hw_addr = cpdma_params->desc_hw_addr;
pool              254 drivers/net/ethernet/ti/davinci_cpdma.c 		pool->cpumap = dma_alloc_coherent(ctlr->dev,  pool->mem_size,
pool              255 drivers/net/ethernet/ti/davinci_cpdma.c 						  &pool->hw_addr, GFP_KERNEL);
pool              256 drivers/net/ethernet/ti/davinci_cpdma.c 		pool->iomap = (void __iomem __force *)pool->cpumap;
pool              257 drivers/net/ethernet/ti/davinci_cpdma.c 		pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
pool              260 drivers/net/ethernet/ti/davinci_cpdma.c 	if (!pool->iomap)
pool              263 drivers/net/ethernet/ti/davinci_cpdma.c 	ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
pool              264 drivers/net/ethernet/ti/davinci_cpdma.c 				pool->phys, pool->mem_size, -1);
pool              275 drivers/net/ethernet/ti/davinci_cpdma.c 	ctlr->pool = NULL;
pool              279 drivers/net/ethernet/ti/davinci_cpdma.c static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
pool              284 drivers/net/ethernet/ti/davinci_cpdma.c 	return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
pool              288 drivers/net/ethernet/ti/davinci_cpdma.c desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
pool              290 drivers/net/ethernet/ti/davinci_cpdma.c 	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
pool              294 drivers/net/ethernet/ti/davinci_cpdma.c cpdma_desc_alloc(struct cpdma_desc_pool *pool)
pool              297 drivers/net/ethernet/ti/davinci_cpdma.c 		gen_pool_alloc(pool->gen_pool, pool->desc_size);
pool              300 drivers/net/ethernet/ti/davinci_cpdma.c static void cpdma_desc_free(struct cpdma_desc_pool *pool,
pool              303 drivers/net/ethernet/ti/davinci_cpdma.c 	gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
pool              378 drivers/net/ethernet/ti/davinci_cpdma.c 	struct cpdma_desc_pool	*pool = ctlr->pool;
pool              393 drivers/net/ethernet/ti/davinci_cpdma.c 		chan_write(chan, hdp, desc_phys(pool, chan->head));
pool              527 drivers/net/ethernet/ti/davinci_cpdma.c 	ctlr->num_tx_desc = ctlr->pool->num_desc / 2;
pool              528 drivers/net/ethernet/ti/davinci_cpdma.c 	ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc;
pool              985 drivers/net/ethernet/ti/davinci_cpdma.c 	struct cpdma_desc_pool		*pool = ctlr->pool;
pool              989 drivers/net/ethernet/ti/davinci_cpdma.c 	desc_dma = desc_phys(pool, desc);
pool             1031 drivers/net/ethernet/ti/davinci_cpdma.c 	desc = cpdma_desc_alloc(ctlr->pool);
pool             1052 drivers/net/ethernet/ti/davinci_cpdma.c 			cpdma_desc_free(ctlr->pool, desc, 1);
pool             1182 drivers/net/ethernet/ti/davinci_cpdma.c 	struct cpdma_desc_pool	*pool = ctlr->pool;
pool             1188 drivers/net/ethernet/ti/davinci_cpdma.c 			 gen_pool_avail(pool->gen_pool);
pool             1198 drivers/net/ethernet/ti/davinci_cpdma.c 	struct cpdma_desc_pool		*pool = ctlr->pool;
pool             1215 drivers/net/ethernet/ti/davinci_cpdma.c 	cpdma_desc_free(pool, desc, 1);
pool             1225 drivers/net/ethernet/ti/davinci_cpdma.c 	struct cpdma_desc_pool		*pool = ctlr->pool;
pool             1237 drivers/net/ethernet/ti/davinci_cpdma.c 	desc_dma = desc_phys(pool, desc);
pool             1253 drivers/net/ethernet/ti/davinci_cpdma.c 	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
pool             1260 drivers/net/ethernet/ti/davinci_cpdma.c 		chan_write(chan, hdp, desc_phys(pool, chan->head));
pool             1315 drivers/net/ethernet/ti/davinci_cpdma.c 	struct cpdma_desc_pool	*pool = ctlr->pool;
pool             1359 drivers/net/ethernet/ti/davinci_cpdma.c 		chan->head = desc_from_phys(pool, next_dma);
pool             1434 drivers/net/ethernet/ti/davinci_cpdma.c 	ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
pool             1438 drivers/net/ethernet/ti/davinci_cpdma.c 		ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
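The davinci_cpdma references above carve hardware descriptors out of a genalloc pool layered over one coherent allocation (gen_pool_add_virt registers the CPU/DMA views, gen_pool_alloc/gen_pool_free hand out descriptor-sized chunks). A hedged sketch of the same idea with generic names, not the cpdma code; it assumes desc_size is a power of two:

#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/log2.h>

struct desc_pool {
	struct gen_pool *gp;
	void *cpu;		/* CPU view of the descriptor memory */
	dma_addr_t hw;		/* device view */
	size_t mem_size;
	size_t desc_size;
};

static int desc_pool_init(struct device *dev, struct desc_pool *p,
			  size_t desc_size, size_t mem_size)
{
	p->desc_size = desc_size;
	p->mem_size = mem_size;

	/* one chunk of coherent memory holds every descriptor */
	p->cpu = dma_alloc_coherent(dev, mem_size, &p->hw, GFP_KERNEL);
	if (!p->cpu)
		return -ENOMEM;

	/* allocation granularity == descriptor size, as cpdma does */
	p->gp = gen_pool_create(ilog2(desc_size), -1);
	if (!p->gp)
		goto err_free;

	/* register the virt<->dma mapping so address translation works */
	if (gen_pool_add_virt(p->gp, (unsigned long)p->cpu, p->hw,
			      mem_size, -1))
		goto err_pool;
	return 0;

err_pool:
	gen_pool_destroy(p->gp);
err_free:
	dma_free_coherent(dev, mem_size, p->cpu, p->hw);
	return -ENOMEM;
}

static void *desc_alloc(struct desc_pool *p)
{
	return (void *)gen_pool_alloc(p->gp, p->desc_size);
}

static void desc_free(struct desc_pool *p, void *desc)
{
	gen_pool_free(p->gp, (unsigned long)desc, p->desc_size);
}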
pool             1466 drivers/net/ethernet/via/via-velocity.c 	void *pool;
pool             1475 drivers/net/ethernet/via/via-velocity.c 	pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq +
pool             1477 drivers/net/ethernet/via/via-velocity.c 	if (!pool) {
pool             1483 drivers/net/ethernet/via/via-velocity.c 	vptr->rx.ring = pool;
pool             1486 drivers/net/ethernet/via/via-velocity.c 	pool += rx_ring_size;
pool             1490 drivers/net/ethernet/via/via-velocity.c 		vptr->tx.rings[i] = pool;
pool             1492 drivers/net/ethernet/via/via-velocity.c 		pool += tx_ring_size;
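via-velocity above takes a single dma_alloc_coherent() region and hands out slices of it: the RX ring first, then one TX ring per queue. A short sketch of that carving with made-up parameter names (not the driver's code):

#include <linux/dma-mapping.h>

/* One coherent allocation split by pointer arithmetic into rings. */
static void *rings_alloc(struct device *dev, size_t rx_size,
			 size_t tx_size, int num_txq,
			 void **rx_ring, void **tx_rings,
			 dma_addr_t *dma)
{
	void *pool;
	int i;

	pool = dma_alloc_coherent(dev, rx_size + tx_size * num_txq,
				  dma, GFP_KERNEL);
	if (!pool)
		return NULL;

	*rx_ring = pool;		/* RX ring at the start of the block */
	pool += rx_size;
	for (i = 0; i < num_txq; i++) {
		tx_rings[i] = pool;	/* TX rings packed back to back */
		pool += tx_size;
	}
	return *rx_ring;		/* base address, needed to free later */
}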
pool              236 drivers/net/wireless/ath/wcn36xx/dxe.c 				   struct wcn36xx_dxe_mem_pool *pool)
pool              238 drivers/net/wireless/ath/wcn36xx/dxe.c 	int i, chunk_size = pool->chunk_size;
pool              239 drivers/net/wireless/ath/wcn36xx/dxe.c 	dma_addr_t bd_phy_addr = pool->phy_addr;
pool              240 drivers/net/wireless/ath/wcn36xx/dxe.c 	void *bd_cpu_addr = pool->virt_addr;
pool             3192 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct dma_pool *pool;
pool             3209 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	pool = dma_pool_create("ipw2200", &priv->pci_dev->dev, CB_MAX_LENGTH, 0,
pool             3211 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	if (!pool) {
pool             3237 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			virts[total_nr] = dma_pool_alloc(pool, GFP_KERNEL,
pool             3281 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		dma_pool_free(pool, virts[i], phys[i]);
pool             3283 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	dma_pool_destroy(pool);
pool             3444 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		if (rxq->pool[i].skb != NULL) {
pool             3445 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
pool             3447 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			dev_kfree_skb(rxq->pool[i].skb);
pool             3448 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			rxq->pool[i].skb = NULL;
pool             3450 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
pool             5234 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		if (rxq->pool[i].skb != NULL) {
pool             5235 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
pool             5237 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			dev_kfree_skb(rxq->pool[i].skb);
pool             5260 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
pool              707 drivers/net/wireless/intel/ipw2x00/ipw2200.h 	struct ipw_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
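ipw2200 above allocates its firmware command blocks from a struct dma_pool. The basic create/alloc/free/destroy cycle of that API looks like this; a generic sketch, not the driver's code:

#include <linux/dmapool.h>

static int dma_pool_demo(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t phys;
	void *cmd;

	/* fixed-size blocks of coherent DMA memory, 512 bytes each */
	pool = dma_pool_create("demo", dev, 512, 0, 0);
	if (!pool)
		return -ENOMEM;

	cmd = dma_pool_alloc(pool, GFP_KERNEL, &phys);
	if (!cmd) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... fill 'cmd' via the CPU, program the device with 'phys' ... */

	dma_pool_free(pool, cmd, phys);
	dma_pool_destroy(pool);
	return 0;
}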
pool             1068 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		if (rxq->pool[i].page != NULL) {
pool             1069 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
pool             1072 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			__il_free_pages(il, rxq->pool[i].page);
pool             1073 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			rxq->pool[i].page = NULL;
pool             1075 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
pool             1117 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		if (rxq->pool[i].page != NULL) {
pool             1118 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
pool             1121 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			__il_free_pages(il, rxq->pool[i].page);
pool             1122 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			rxq->pool[i].page = NULL;
pool               95 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		if (rxq->pool[i].page != NULL) {
pool               96 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
pool               99 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			__il_free_pages(il, rxq->pool[i].page);
pool              100 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			rxq->pool[i].page = NULL;
pool              102 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
pool              410 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		if (rxq->pool[i].page != NULL) {
pool              411 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
pool              414 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			__il_free_pages(il, rxq->pool[i].page);
pool              415 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			rxq->pool[i].page = NULL;
pool             2619 drivers/net/wireless/intel/iwlegacy/common.c 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
pool              595 drivers/net/wireless/intel/iwlegacy/common.h 	struct il_rx_buf pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
pool              181 drivers/net/wireless/st/cw1200/queue.c 	queue->pool = kcalloc(capacity, sizeof(struct cw1200_queue_item),
pool              183 drivers/net/wireless/st/cw1200/queue.c 	if (!queue->pool)
pool              189 drivers/net/wireless/st/cw1200/queue.c 		kfree(queue->pool);
pool              190 drivers/net/wireless/st/cw1200/queue.c 		queue->pool = NULL;
pool              195 drivers/net/wireless/st/cw1200/queue.c 		list_add_tail(&queue->pool[i].head, &queue->free_pool);
pool              247 drivers/net/wireless/st/cw1200/queue.c 	kfree(queue->pool);
pool              249 drivers/net/wireless/st/cw1200/queue.c 	queue->pool = NULL;
pool              301 drivers/net/wireless/st/cw1200/queue.c 							    item - queue->pool);
pool              380 drivers/net/wireless/st/cw1200/queue.c 	item = &queue->pool[item_id];
pool              431 drivers/net/wireless/st/cw1200/queue.c 							    item - queue->pool);
pool              451 drivers/net/wireless/st/cw1200/queue.c 	item = &queue->pool[item_id];
pool              500 drivers/net/wireless/st/cw1200/queue.c 	item = &queue->pool[item_id];
pool               32 drivers/net/wireless/st/cw1200/queue.h 	struct cw1200_queue_item *pool;
pool              715 drivers/nvme/host/lightnvm.c static void nvme_nvm_destroy_dma_pool(void *pool)
pool              717 drivers/nvme/host/lightnvm.c 	struct dma_pool *dma_pool = pool;
pool              722 drivers/nvme/host/lightnvm.c static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
pool              725 drivers/nvme/host/lightnvm.c 	return dma_pool_alloc(pool, mem_flags, dma_handler);
pool              728 drivers/nvme/host/lightnvm.c static void nvme_nvm_dev_dma_free(void *pool, void *addr,
pool              731 drivers/nvme/host/lightnvm.c 	dma_pool_free(pool, addr, dma_handler);
pool              589 drivers/nvme/host/pci.c 	struct dma_pool *pool;
pool              623 drivers/nvme/host/pci.c 		pool = dev->prp_small_pool;
pool              626 drivers/nvme/host/pci.c 		pool = dev->prp_page_pool;
pool              630 drivers/nvme/host/pci.c 	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
pool              642 drivers/nvme/host/pci.c 			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
pool              703 drivers/nvme/host/pci.c 	struct dma_pool *pool;
pool              718 drivers/nvme/host/pci.c 		pool = dev->prp_small_pool;
pool              721 drivers/nvme/host/pci.c 		pool = dev->prp_page_pool;
pool              725 drivers/nvme/host/pci.c 	sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
pool              741 drivers/nvme/host/pci.c 			sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
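nvme/pci.c above keeps two dma_pools and picks one per command depending on how large the PRP or SGL list will be. A hedged sketch of that size-based selection; the structure, field names, and threshold here are illustrative, not the driver's:

#include <linux/dmapool.h>

struct prp_pools {
	struct dma_pool *small;	/* small blocks for short lists */
	struct dma_pool *page;	/* full-page blocks for long lists */
};

static void *prp_list_alloc(struct prp_pools *p, int nents,
			    struct dma_pool **used, dma_addr_t *dma)
{
	/*
	 * Short lists fit in a small block; longer ones take a whole
	 * page, mirroring the prp_small_pool/prp_page_pool split above.
	 */
	*used = (nents <= 32) ? p->small : p->page;
	return dma_pool_alloc(*used, GFP_ATOMIC, dma);
}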
pool               31 drivers/pci/p2pdma.c 	struct gen_pool *pool;
pool               53 drivers/pci/p2pdma.c 	if (pdev->p2pdma->pool)
pool               54 drivers/pci/p2pdma.c 		size = gen_pool_size(pdev->p2pdma->pool);
pool               66 drivers/pci/p2pdma.c 	if (pdev->p2pdma->pool)
pool               67 drivers/pci/p2pdma.c 		avail = gen_pool_avail(pdev->p2pdma->pool);
pool              107 drivers/pci/p2pdma.c 	gen_pool_destroy(p2pdma->pool);
pool              123 drivers/pci/p2pdma.c 	p2p->pool = gen_pool_create(PAGE_SHIFT, dev_to_node(&pdev->dev));
pool              124 drivers/pci/p2pdma.c 	if (!p2p->pool)
pool              141 drivers/pci/p2pdma.c 	gen_pool_destroy(p2p->pool);
pool              203 drivers/pci/p2pdma.c 	error = gen_pool_add_owner(pdev->p2pdma->pool, (unsigned long)addr,
pool              679 drivers/pci/p2pdma.c 	ret = (void *)gen_pool_alloc_owner(pdev->p2pdma->pool, size,
pool              685 drivers/pci/p2pdma.c 		gen_pool_free(pdev->p2pdma->pool, (unsigned long) ret, size);
pool              705 drivers/pci/p2pdma.c 	gen_pool_free_owner(pdev->p2pdma->pool, (uintptr_t)addr, size,
pool              729 drivers/pci/p2pdma.c 	return gen_pool_virt_to_phys(pdev->p2pdma->pool, (unsigned long)addr);
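The p2pdma references use a gen_pool to expose device memory as page-granular allocations and report totals via gen_pool_size()/gen_pool_avail(). A minimal sketch of those calls on an already-populated pool (not the p2pdma code):

#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/printk.h>

static void genpool_stats_demo(struct gen_pool *pool)
{
	size_t size = gen_pool_size(pool);	/* total bytes managed */
	size_t avail = gen_pool_avail(pool);	/* bytes still free */
	unsigned long va;
	phys_addr_t pa;

	pr_info("pool: %zu bytes total, %zu available\n", size, avail);

	va = gen_pool_alloc(pool, PAGE_SIZE);
	if (!va)
		return;

	/* translate a pool virtual address back to its physical/bus address */
	pa = gen_pool_virt_to_phys(pool, va);
	pr_info("allocated %#lx -> %pap\n", va, &pa);

	gen_pool_free(pool, va, PAGE_SIZE);
}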
pool             1120 drivers/s390/cio/css.c static void __gp_dma_free_dma(struct gen_pool *pool,
pool              207 drivers/s390/scsi/zfcp_aux.c 	adapter->pool.erp_req =
pool              209 drivers/s390/scsi/zfcp_aux.c 	if (!adapter->pool.erp_req)
pool              212 drivers/s390/scsi/zfcp_aux.c 	adapter->pool.gid_pn_req =
pool              214 drivers/s390/scsi/zfcp_aux.c 	if (!adapter->pool.gid_pn_req)
pool              217 drivers/s390/scsi/zfcp_aux.c 	adapter->pool.scsi_req =
pool              219 drivers/s390/scsi/zfcp_aux.c 	if (!adapter->pool.scsi_req)
pool              222 drivers/s390/scsi/zfcp_aux.c 	adapter->pool.scsi_abort =
pool              224 drivers/s390/scsi/zfcp_aux.c 	if (!adapter->pool.scsi_abort)
pool              227 drivers/s390/scsi/zfcp_aux.c 	adapter->pool.status_read_req =
pool              230 drivers/s390/scsi/zfcp_aux.c 	if (!adapter->pool.status_read_req)
pool              233 drivers/s390/scsi/zfcp_aux.c 	adapter->pool.qtcb_pool =
pool              235 drivers/s390/scsi/zfcp_aux.c 	if (!adapter->pool.qtcb_pool)
pool              239 drivers/s390/scsi/zfcp_aux.c 	adapter->pool.sr_data =
pool              241 drivers/s390/scsi/zfcp_aux.c 	if (!adapter->pool.sr_data)
pool              244 drivers/s390/scsi/zfcp_aux.c 	adapter->pool.gid_pn =
pool              246 drivers/s390/scsi/zfcp_aux.c 	if (!adapter->pool.gid_pn)
pool              254 drivers/s390/scsi/zfcp_aux.c 	mempool_destroy(adapter->pool.erp_req);
pool              255 drivers/s390/scsi/zfcp_aux.c 	mempool_destroy(adapter->pool.scsi_req);
pool              256 drivers/s390/scsi/zfcp_aux.c 	mempool_destroy(adapter->pool.scsi_abort);
pool              257 drivers/s390/scsi/zfcp_aux.c 	mempool_destroy(adapter->pool.qtcb_pool);
pool              258 drivers/s390/scsi/zfcp_aux.c 	mempool_destroy(adapter->pool.status_read_req);
pool              259 drivers/s390/scsi/zfcp_aux.c 	mempool_destroy(adapter->pool.sr_data);
pool              260 drivers/s390/scsi/zfcp_aux.c 	mempool_destroy(adapter->pool.gid_pn);
pool              189 drivers/s390/scsi/zfcp_def.h 	struct zfcp_adapter_mempool	pool;      /* Adapter memory pools */
pool              331 drivers/s390/scsi/zfcp_def.h 	mempool_t		*pool;
pool              812 drivers/s390/scsi/zfcp_erp.c 	if (mempool_resize(act->adapter->pool.sr_data,
pool              816 drivers/s390/scsi/zfcp_erp.c 	if (mempool_resize(act->adapter->pool.status_read_req,
pool              396 drivers/s390/scsi/zfcp_fc.c 			       adapter->pool.gid_pn_req,
pool              416 drivers/s390/scsi/zfcp_fc.c 	fc_req = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
pool              430 drivers/s390/scsi/zfcp_fc.c 	mempool_free(fc_req, adapter->pool.gid_pn);
pool               92 drivers/s390/scsi/zfcp_fsf.c 	if (likely(req->pool)) {
pool               94 drivers/s390/scsi/zfcp_fsf.c 			mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
pool               95 drivers/s390/scsi/zfcp_fsf.c 		mempool_free(req, req->pool);
pool              227 drivers/s390/scsi/zfcp_fsf.c 		mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
pool              280 drivers/s390/scsi/zfcp_fsf.c 	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
pool              673 drivers/s390/scsi/zfcp_fsf.c static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
pool              677 drivers/s390/scsi/zfcp_fsf.c 	if (likely(pool))
pool              678 drivers/s390/scsi/zfcp_fsf.c 		req = mempool_alloc(pool, GFP_ATOMIC);
pool              686 drivers/s390/scsi/zfcp_fsf.c 	req->pool = pool;
pool              690 drivers/s390/scsi/zfcp_fsf.c static struct fsf_qtcb *zfcp_fsf_qtcb_alloc(mempool_t *pool)
pool              694 drivers/s390/scsi/zfcp_fsf.c 	if (likely(pool))
pool              695 drivers/s390/scsi/zfcp_fsf.c 		qtcb = mempool_alloc(pool, GFP_ATOMIC);
pool              708 drivers/s390/scsi/zfcp_fsf.c 						mempool_t *pool)
pool              711 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
pool              727 drivers/s390/scsi/zfcp_fsf.c 		if (likely(pool))
pool              729 drivers/s390/scsi/zfcp_fsf.c 				adapter->pool.qtcb_pool);
pool              811 drivers/s390/scsi/zfcp_fsf.c 				  adapter->pool.status_read_req);
pool              817 drivers/s390/scsi/zfcp_fsf.c 	page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
pool              838 drivers/s390/scsi/zfcp_fsf.c 	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
pool              923 drivers/s390/scsi/zfcp_fsf.c 				  qdio->adapter->pool.scsi_abort);
pool             1094 drivers/s390/scsi/zfcp_fsf.c 		     struct zfcp_fsf_ct_els *ct, mempool_t *pool,
pool             1106 drivers/s390/scsi/zfcp_fsf.c 				  SBAL_SFLAGS0_TYPE_WRITE_READ, pool);
pool             1252 drivers/s390/scsi/zfcp_fsf.c 				  qdio->adapter->pool.erp_req);
pool             1345 drivers/s390/scsi/zfcp_fsf.c 				  qdio->adapter->pool.erp_req);
pool             1507 drivers/s390/scsi/zfcp_fsf.c 				  qdio->adapter->pool.erp_req);
pool             1574 drivers/s390/scsi/zfcp_fsf.c 				  qdio->adapter->pool.erp_req);
pool             1649 drivers/s390/scsi/zfcp_fsf.c 				  qdio->adapter->pool.erp_req);
pool             1708 drivers/s390/scsi/zfcp_fsf.c 				  qdio->adapter->pool.erp_req);
pool             1802 drivers/s390/scsi/zfcp_fsf.c 				  qdio->adapter->pool.erp_req);
pool             1922 drivers/s390/scsi/zfcp_fsf.c 				  adapter->pool.erp_req);
pool             2014 drivers/s390/scsi/zfcp_fsf.c 				  qdio->adapter->pool.erp_req);
pool             2296 drivers/s390/scsi/zfcp_fsf.c 				  sbtype, adapter->pool.scsi_req);
pool             2403 drivers/s390/scsi/zfcp_fsf.c 				  qdio->adapter->pool.scsi_req);
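zfcp above backs each request type with its own mempool so that requests can still be built under memory pressure. The general pattern is shown below as a sketch with a kmalloc-backed pool and invented names; zfcp itself uses slab-cache-backed pools:

#include <linux/mempool.h>
#include <linux/slab.h>

struct my_req {
	int opcode;
	/* ... */
};

static mempool_t *req_pool;

static int req_pool_setup(void)
{
	/* keep at least 4 preallocated requests in reserve */
	req_pool = mempool_create_kmalloc_pool(4, sizeof(struct my_req));
	return req_pool ? 0 : -ENOMEM;
}

static struct my_req *req_get(void)
{
	/* may dip into the reserve when regular allocation fails */
	return mempool_alloc(req_pool, GFP_ATOMIC);
}

static void req_put(struct my_req *req)
{
	mempool_free(req, req_pool);
}

static void req_pool_teardown(void)
{
	mempool_destroy(req_pool);
}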
pool              564 drivers/scsi/fnic/fnic_main.c 	mempool_t *pool;
pool              748 drivers/scsi/fnic/fnic_main.c 	pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
pool              749 drivers/scsi/fnic/fnic_main.c 	if (!pool)
pool              751 drivers/scsi/fnic/fnic_main.c 	fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;
pool              753 drivers/scsi/fnic/fnic_main.c 	pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
pool              754 drivers/scsi/fnic/fnic_main.c 	if (!pool)
pool              756 drivers/scsi/fnic/fnic_main.c 	fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;
pool              739 drivers/scsi/ibmvscsi/ibmvfc.c static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
pool              742 drivers/scsi/ibmvscsi/ibmvfc.c 	int index = evt - pool->events;
pool              743 drivers/scsi/ibmvscsi/ibmvfc.c 	if (index < 0 || index >= pool->size)	/* outside of bounds */
pool              745 drivers/scsi/ibmvscsi/ibmvfc.c 	if (evt != pool->events + index)	/* unaligned */
pool              758 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_event_pool *pool = &vhost->pool;
pool              760 drivers/scsi/ibmvscsi/ibmvfc.c 	BUG_ON(!ibmvfc_valid_event(pool, evt));
pool             1198 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_event_pool *pool = &vhost->pool;
pool             1201 drivers/scsi/ibmvscsi/ibmvfc.c 	pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
pool             1202 drivers/scsi/ibmvscsi/ibmvfc.c 	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
pool             1203 drivers/scsi/ibmvscsi/ibmvfc.c 	if (!pool->events)
pool             1206 drivers/scsi/ibmvscsi/ibmvfc.c 	pool->iu_storage = dma_alloc_coherent(vhost->dev,
pool             1207 drivers/scsi/ibmvscsi/ibmvfc.c 					      pool->size * sizeof(*pool->iu_storage),
pool             1208 drivers/scsi/ibmvscsi/ibmvfc.c 					      &pool->iu_token, 0);
pool             1210 drivers/scsi/ibmvscsi/ibmvfc.c 	if (!pool->iu_storage) {
pool             1211 drivers/scsi/ibmvscsi/ibmvfc.c 		kfree(pool->events);
pool             1215 drivers/scsi/ibmvscsi/ibmvfc.c 	for (i = 0; i < pool->size; ++i) {
pool             1216 drivers/scsi/ibmvscsi/ibmvfc.c 		struct ibmvfc_event *evt = &pool->events[i];
pool             1219 drivers/scsi/ibmvscsi/ibmvfc.c 		evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
pool             1220 drivers/scsi/ibmvscsi/ibmvfc.c 		evt->xfer_iu = pool->iu_storage + i;
pool             1238 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_event_pool *pool = &vhost->pool;
pool             1241 drivers/scsi/ibmvscsi/ibmvfc.c 	for (i = 0; i < pool->size; ++i) {
pool             1242 drivers/scsi/ibmvscsi/ibmvfc.c 		list_del(&pool->events[i].queue);
pool             1243 drivers/scsi/ibmvscsi/ibmvfc.c 		BUG_ON(atomic_read(&pool->events[i].free) != 1);
pool             1244 drivers/scsi/ibmvscsi/ibmvfc.c 		if (pool->events[i].ext_list)
pool             1246 drivers/scsi/ibmvscsi/ibmvfc.c 				      pool->events[i].ext_list,
pool             1247 drivers/scsi/ibmvscsi/ibmvfc.c 				      pool->events[i].ext_list_token);
pool             1250 drivers/scsi/ibmvscsi/ibmvfc.c 	kfree(pool->events);
pool             1252 drivers/scsi/ibmvscsi/ibmvfc.c 			  pool->size * sizeof(*pool->iu_storage),
pool             1253 drivers/scsi/ibmvscsi/ibmvfc.c 			  pool->iu_storage, pool->iu_token);
pool             2775 drivers/scsi/ibmvscsi/ibmvfc.c 	if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) {
pool              693 drivers/scsi/ibmvscsi/ibmvfc.h 	struct ibmvfc_event_pool pool;
pool              441 drivers/scsi/ibmvscsi/ibmvscsi.c static int initialize_event_pool(struct event_pool *pool,
pool              446 drivers/scsi/ibmvscsi/ibmvscsi.c 	pool->size = size;
pool              447 drivers/scsi/ibmvscsi/ibmvscsi.c 	pool->next = 0;
pool              448 drivers/scsi/ibmvscsi/ibmvscsi.c 	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
pool              449 drivers/scsi/ibmvscsi/ibmvscsi.c 	if (!pool->events)
pool              452 drivers/scsi/ibmvscsi/ibmvscsi.c 	pool->iu_storage =
pool              454 drivers/scsi/ibmvscsi/ibmvscsi.c 			       pool->size * sizeof(*pool->iu_storage),
pool              455 drivers/scsi/ibmvscsi/ibmvscsi.c 			       &pool->iu_token, 0);
pool              456 drivers/scsi/ibmvscsi/ibmvscsi.c 	if (!pool->iu_storage) {
pool              457 drivers/scsi/ibmvscsi/ibmvscsi.c 		kfree(pool->events);
pool              461 drivers/scsi/ibmvscsi/ibmvscsi.c 	for (i = 0; i < pool->size; ++i) {
pool              462 drivers/scsi/ibmvscsi/ibmvscsi.c 		struct srp_event_struct *evt = &pool->events[i];
pool              467 drivers/scsi/ibmvscsi/ibmvscsi.c 		evt->crq.IU_data_ptr = cpu_to_be64(pool->iu_token +
pool              469 drivers/scsi/ibmvscsi/ibmvscsi.c 		evt->xfer_iu = pool->iu_storage + i;
pool              485 drivers/scsi/ibmvscsi/ibmvscsi.c static void release_event_pool(struct event_pool *pool,
pool              489 drivers/scsi/ibmvscsi/ibmvscsi.c 	for (i = 0; i < pool->size; ++i) {
pool              490 drivers/scsi/ibmvscsi/ibmvscsi.c 		if (atomic_read(&pool->events[i].free) != 1)
pool              492 drivers/scsi/ibmvscsi/ibmvscsi.c 		if (pool->events[i].ext_list) {
pool              495 drivers/scsi/ibmvscsi/ibmvscsi.c 				  pool->events[i].ext_list,
pool              496 drivers/scsi/ibmvscsi/ibmvscsi.c 				  pool->events[i].ext_list_token);
pool              502 drivers/scsi/ibmvscsi/ibmvscsi.c 	kfree(pool->events);
pool              504 drivers/scsi/ibmvscsi/ibmvscsi.c 			  pool->size * sizeof(*pool->iu_storage),
pool              505 drivers/scsi/ibmvscsi/ibmvscsi.c 			  pool->iu_storage, pool->iu_token);
pool              515 drivers/scsi/ibmvscsi/ibmvscsi.c static int valid_event_struct(struct event_pool *pool,
pool              518 drivers/scsi/ibmvscsi/ibmvscsi.c 	int index = evt - pool->events;
pool              519 drivers/scsi/ibmvscsi/ibmvscsi.c 	if (index < 0 || index >= pool->size)	/* outside of bounds */
pool              521 drivers/scsi/ibmvscsi/ibmvscsi.c 	if (evt != pool->events + index)	/* unaligned */
pool              532 drivers/scsi/ibmvscsi/ibmvscsi.c static void free_event_struct(struct event_pool *pool,
pool              535 drivers/scsi/ibmvscsi/ibmvscsi.c 	if (!valid_event_struct(pool, evt)) {
pool              537 drivers/scsi/ibmvscsi/ibmvscsi.c 			"(not in pool %p)\n", evt, pool->events);
pool              555 drivers/scsi/ibmvscsi/ibmvscsi.c static struct srp_event_struct *get_event_struct(struct event_pool *pool)
pool              558 drivers/scsi/ibmvscsi/ibmvscsi.c 	int poolsize = pool->size;
pool              559 drivers/scsi/ibmvscsi/ibmvscsi.c 	int offset = pool->next;
pool              563 drivers/scsi/ibmvscsi/ibmvscsi.c 		if (!atomic_dec_if_positive(&pool->events[offset].free)) {
pool              564 drivers/scsi/ibmvscsi/ibmvscsi.c 			pool->next = offset;
pool              565 drivers/scsi/ibmvscsi/ibmvscsi.c 			return &pool->events[offset];
pool              801 drivers/scsi/ibmvscsi/ibmvscsi.c 		free_event_struct(&evt->hostdata->pool, evt);
pool              951 drivers/scsi/ibmvscsi/ibmvscsi.c 	free_event_struct(&hostdata->pool, evt_struct);
pool              965 drivers/scsi/ibmvscsi/ibmvscsi.c 	free_event_struct(&hostdata->pool, evt_struct);
pool             1034 drivers/scsi/ibmvscsi/ibmvscsi.c 	evt_struct = get_event_struct(&hostdata->pool);
pool             1049 drivers/scsi/ibmvscsi/ibmvscsi.c 		free_event_struct(&hostdata->pool, evt_struct);
pool             1183 drivers/scsi/ibmvscsi/ibmvscsi.c 	struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
pool             1252 drivers/scsi/ibmvscsi/ibmvscsi.c 	evt_struct = get_event_struct(&hostdata->pool);
pool             1344 drivers/scsi/ibmvscsi/ibmvscsi.c 	evt_struct = get_event_struct(&hostdata->pool);
pool             1420 drivers/scsi/ibmvscsi/ibmvscsi.c 	evt_struct = get_event_struct(&hostdata->pool);
pool             1500 drivers/scsi/ibmvscsi/ibmvscsi.c 		evt = get_event_struct(&hostdata->pool);
pool             1597 drivers/scsi/ibmvscsi/ibmvscsi.c 	free_event_struct(&found_evt->hostdata->pool, found_evt);
pool             1623 drivers/scsi/ibmvscsi/ibmvscsi.c 		evt = get_event_struct(&hostdata->pool);
pool             1702 drivers/scsi/ibmvscsi/ibmvscsi.c 			free_event_struct(&tmp_evt->hostdata->pool,
pool             1808 drivers/scsi/ibmvscsi/ibmvscsi.c 	if (!valid_event_struct(&hostdata->pool, evt_struct)) {
pool             1839 drivers/scsi/ibmvscsi/ibmvscsi.c 	free_event_struct(&evt_struct->hostdata->pool, evt_struct);
pool             2249 drivers/scsi/ibmvscsi/ibmvscsi.c 	if (initialize_event_pool(&hostdata->pool, max_events, hostdata) != 0) {
pool             2307 drivers/scsi/ibmvscsi/ibmvscsi.c 	release_event_pool(&hostdata->pool, hostdata);
pool             2328 drivers/scsi/ibmvscsi/ibmvscsi.c 	release_event_pool(&hostdata->pool, hostdata);
pool               91 drivers/scsi/ibmvscsi/ibmvscsi.h 	struct event_pool pool;
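ibmvscsi above preallocates its event structs with kcalloc(), pairs them with a dma_alloc_coherent() IU array, and claims a free slot with atomic_dec_if_positive(). A reduced sketch of just the claim/release logic, with illustrative types and without the DMA side:

#include <linux/atomic.h>
#include <linux/slab.h>

struct my_evt {
	atomic_t free;		/* 1 = available, 0 = in use */
};

struct my_evt_pool {
	int size;
	int next;
	struct my_evt *events;
};

static int evt_pool_init(struct my_evt_pool *pool, int size)
{
	int i;

	pool->size = size;
	pool->next = 0;
	pool->events = kcalloc(size, sizeof(*pool->events), GFP_KERNEL);
	if (!pool->events)
		return -ENOMEM;
	for (i = 0; i < size; i++)
		atomic_set(&pool->events[i].free, 1);
	return 0;
}

static struct my_evt *evt_get(struct my_evt_pool *pool)
{
	int i, offset = pool->next;

	for (i = 0; i < pool->size; i++) {
		offset = (offset + 1) % pool->size;
		/* returns 0 exactly when this CPU took the flag from 1 to 0 */
		if (!atomic_dec_if_positive(&pool->events[offset].free)) {
			pool->next = offset;
			return &pool->events[offset];
		}
	}
	return NULL;
}

static void evt_put(struct my_evt *evt)
{
	atomic_set(&evt->free, 1);
}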
pool               30 drivers/scsi/ibmvscsi_tgt/libsrp.c 	q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL);
pool               31 drivers/scsi/ibmvscsi_tgt/libsrp.c 	if (!q->pool)
pool               38 drivers/scsi/ibmvscsi_tgt/libsrp.c 	kfifo_init(&q->queue, (void *)q->pool, max * sizeof(void *));
pool               48 drivers/scsi/ibmvscsi_tgt/libsrp.c 	kfree(q->pool);
pool               55 drivers/scsi/ibmvscsi_tgt/libsrp.c 	kfree(q->pool);
pool               77 drivers/scsi/ibmvscsi_tgt/libsrp.h 	void *pool;
pool               83 drivers/scsi/libfc/fc_exch.c 	struct fc_exch_pool __percpu *pool;
pool              411 drivers/scsi/libfc/fc_exch.c static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
pool              414 drivers/scsi/libfc/fc_exch.c 	struct fc_exch **exches = (struct fc_exch **)(pool + 1);
pool              424 drivers/scsi/libfc/fc_exch.c static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
pool              427 drivers/scsi/libfc/fc_exch.c 	((struct fc_exch **)(pool + 1))[index] = ep;
pool              436 drivers/scsi/libfc/fc_exch.c 	struct fc_exch_pool *pool;
pool              439 drivers/scsi/libfc/fc_exch.c 	pool = ep->pool;
pool              440 drivers/scsi/libfc/fc_exch.c 	spin_lock_bh(&pool->lock);
pool              441 drivers/scsi/libfc/fc_exch.c 	WARN_ON(pool->total_exches <= 0);
pool              442 drivers/scsi/libfc/fc_exch.c 	pool->total_exches--;
pool              447 drivers/scsi/libfc/fc_exch.c 		if (pool->left == FC_XID_UNKNOWN)
pool              448 drivers/scsi/libfc/fc_exch.c 			pool->left = index;
pool              449 drivers/scsi/libfc/fc_exch.c 		else if (pool->right == FC_XID_UNKNOWN)
pool              450 drivers/scsi/libfc/fc_exch.c 			pool->right = index;
pool              452 drivers/scsi/libfc/fc_exch.c 			pool->next_index = index;
pool              453 drivers/scsi/libfc/fc_exch.c 		fc_exch_ptr_set(pool, index, NULL);
pool              455 drivers/scsi/libfc/fc_exch.c 		fc_exch_ptr_set(pool, index, &fc_quarantine_exch);
pool              458 drivers/scsi/libfc/fc_exch.c 	spin_unlock_bh(&pool->lock);
pool              814 drivers/scsi/libfc/fc_exch.c 	struct fc_exch_pool *pool;
pool              825 drivers/scsi/libfc/fc_exch.c 	pool = per_cpu_ptr(mp->pool, cpu);
pool              826 drivers/scsi/libfc/fc_exch.c 	spin_lock_bh(&pool->lock);
pool              830 drivers/scsi/libfc/fc_exch.c 	if (pool->left != FC_XID_UNKNOWN) {
pool              831 drivers/scsi/libfc/fc_exch.c 		if (!WARN_ON(fc_exch_ptr_get(pool, pool->left))) {
pool              832 drivers/scsi/libfc/fc_exch.c 			index = pool->left;
pool              833 drivers/scsi/libfc/fc_exch.c 			pool->left = FC_XID_UNKNOWN;
pool              837 drivers/scsi/libfc/fc_exch.c 	if (pool->right != FC_XID_UNKNOWN) {
pool              838 drivers/scsi/libfc/fc_exch.c 		if (!WARN_ON(fc_exch_ptr_get(pool, pool->right))) {
pool              839 drivers/scsi/libfc/fc_exch.c 			index = pool->right;
pool              840 drivers/scsi/libfc/fc_exch.c 			pool->right = FC_XID_UNKNOWN;
pool              845 drivers/scsi/libfc/fc_exch.c 	index = pool->next_index;
pool              847 drivers/scsi/libfc/fc_exch.c 	while (fc_exch_ptr_get(pool, index)) {
pool              849 drivers/scsi/libfc/fc_exch.c 		if (index == pool->next_index)
pool              852 drivers/scsi/libfc/fc_exch.c 	pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
pool              863 drivers/scsi/libfc/fc_exch.c 	fc_exch_ptr_set(pool, index, ep);
pool              864 drivers/scsi/libfc/fc_exch.c 	list_add_tail(&ep->ex_list, &pool->ex_list);
pool              866 drivers/scsi/libfc/fc_exch.c 	pool->total_exches++;
pool              867 drivers/scsi/libfc/fc_exch.c 	spin_unlock_bh(&pool->lock);
pool              874 drivers/scsi/libfc/fc_exch.c 	ep->pool = pool;
pool              885 drivers/scsi/libfc/fc_exch.c 	spin_unlock_bh(&pool->lock);
pool              926 drivers/scsi/libfc/fc_exch.c 	struct fc_exch_pool *pool;
pool              940 drivers/scsi/libfc/fc_exch.c 		pool = per_cpu_ptr(mp->pool, cpu);
pool              941 drivers/scsi/libfc/fc_exch.c 		spin_lock_bh(&pool->lock);
pool              942 drivers/scsi/libfc/fc_exch.c 		ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
pool              951 drivers/scsi/libfc/fc_exch.c 		spin_unlock_bh(&pool->lock);
pool             1919 drivers/scsi/libfc/fc_exch.c 			       struct fc_exch_pool *pool,
pool             1925 drivers/scsi/libfc/fc_exch.c 	spin_lock_bh(&pool->lock);
pool             1927 drivers/scsi/libfc/fc_exch.c 	list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) {
pool             1932 drivers/scsi/libfc/fc_exch.c 			spin_unlock_bh(&pool->lock);
pool             1937 drivers/scsi/libfc/fc_exch.c 			spin_lock_bh(&pool->lock);
pool             1946 drivers/scsi/libfc/fc_exch.c 	pool->next_index = 0;
pool             1947 drivers/scsi/libfc/fc_exch.c 	pool->left = FC_XID_UNKNOWN;
pool             1948 drivers/scsi/libfc/fc_exch.c 	pool->right = FC_XID_UNKNOWN;
pool             1949 drivers/scsi/libfc/fc_exch.c 	spin_unlock_bh(&pool->lock);
pool             1971 drivers/scsi/libfc/fc_exch.c 					   per_cpu_ptr(ema->mp->pool, cpu),
pool             2400 drivers/scsi/libfc/fc_exch.c 	free_percpu(mp->pool);
pool             2455 drivers/scsi/libfc/fc_exch.c 	struct fc_exch_pool *pool;
pool             2477 drivers/scsi/libfc/fc_exch.c 	pool_exch_range = (PCPU_MIN_UNIT_SIZE - sizeof(*pool)) /
pool             2502 drivers/scsi/libfc/fc_exch.c 	pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *);
pool             2503 drivers/scsi/libfc/fc_exch.c 	mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool));
pool             2504 drivers/scsi/libfc/fc_exch.c 	if (!mp->pool)
pool             2507 drivers/scsi/libfc/fc_exch.c 		pool = per_cpu_ptr(mp->pool, cpu);
pool             2508 drivers/scsi/libfc/fc_exch.c 		pool->next_index = 0;
pool             2509 drivers/scsi/libfc/fc_exch.c 		pool->left = FC_XID_UNKNOWN;
pool             2510 drivers/scsi/libfc/fc_exch.c 		pool->right = FC_XID_UNKNOWN;
pool             2511 drivers/scsi/libfc/fc_exch.c 		spin_lock_init(&pool->lock);
pool             2512 drivers/scsi/libfc/fc_exch.c 		INIT_LIST_HEAD(&pool->ex_list);
pool             2517 drivers/scsi/libfc/fc_exch.c 		free_percpu(mp->pool);
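libfc above shards its exchange table into one pool per CPU, allocated with __alloc_percpu() and each protected by its own lock with per-pool free hints. A hedged sketch of setting up and tearing down per-CPU pools with generic names (not the libfc code):

#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

struct my_pcpu_pool {
	spinlock_t lock;
	struct list_head ex_list;
	unsigned int total;
};

static struct my_pcpu_pool __percpu *pools;

static int pcpu_pools_init(void)
{
	struct my_pcpu_pool *pool;
	int cpu;

	pools = alloc_percpu(struct my_pcpu_pool);
	if (!pools)
		return -ENOMEM;

	/* each CPU gets its own independently locked pool instance */
	for_each_possible_cpu(cpu) {
		pool = per_cpu_ptr(pools, cpu);
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->ex_list);
		pool->total = 0;
	}
	return 0;
}

static void pcpu_pools_exit(void)
{
	free_percpu(pools);
}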
pool             2549 drivers/scsi/libiscsi.c 	q->pool = kvcalloc(num_arrays * max, sizeof(void *), GFP_KERNEL);
pool             2550 drivers/scsi/libiscsi.c 	if (q->pool == NULL)
pool             2553 drivers/scsi/libiscsi.c 	kfifo_init(&q->queue, (void*)q->pool, max * sizeof(void*));
pool             2556 drivers/scsi/libiscsi.c 		q->pool[i] = kzalloc(item_size, GFP_KERNEL);
pool             2557 drivers/scsi/libiscsi.c 		if (q->pool[i] == NULL) {
pool             2561 drivers/scsi/libiscsi.c 		kfifo_in(&q->queue, (void*)&q->pool[i], sizeof(void*));
pool             2565 drivers/scsi/libiscsi.c 		*items = q->pool + max;
pool             2566 drivers/scsi/libiscsi.c 		memcpy(*items, q->pool, max * sizeof(void *));
pool             2582 drivers/scsi/libiscsi.c 		kfree(q->pool[i]);
pool             2583 drivers/scsi/libiscsi.c 	kvfree(q->pool);
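libsrp and libiscsi above both build a pool as an array of preallocated items whose pointers circulate through a kfifo: popping a pointer allocates an item, pushing it back frees it. A minimal sketch of that scheme with generic names; error unwinding is abbreviated:

#include <linux/kfifo.h>
#include <linux/slab.h>

struct item_pool {
	struct kfifo queue;	/* holds pointers to free items */
	void **pool;		/* backing array of pointers */
};

static int item_pool_init(struct item_pool *q, int max, size_t item_size)
{
	int i;

	q->pool = kcalloc(max, sizeof(void *), GFP_KERNEL);
	if (!q->pool)
		return -ENOMEM;

	/* the kfifo storage is the pointer array itself */
	kfifo_init(&q->queue, (void *)q->pool, max * sizeof(void *));

	for (i = 0; i < max; i++) {
		void *item = kzalloc(item_size, GFP_KERNEL);

		if (!item)
			return -ENOMEM;	/* caller unwinds; kept short here */
		kfifo_in(&q->queue, &item, sizeof(void *));
	}
	return 0;
}

static void *item_get(struct item_pool *q)
{
	void *item = NULL;

	if (kfifo_out(&q->queue, &item, sizeof(void *)) != sizeof(void *))
		return NULL;
	return item;
}

static void item_put(struct item_pool *q, void *item)
{
	kfifo_in(&q->queue, &item, sizeof(void *));
}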
pool               89 drivers/scsi/lpfc/lpfc_mem.c 	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
pool               99 drivers/scsi/lpfc/lpfc_mem.c 	pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE,
pool              102 drivers/scsi/lpfc/lpfc_mem.c 	if (!pool->elements)
pool              105 drivers/scsi/lpfc/lpfc_mem.c 	pool->max_count = 0;
pool              106 drivers/scsi/lpfc/lpfc_mem.c 	pool->current_count = 0;
pool              108 drivers/scsi/lpfc/lpfc_mem.c 		pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool,
pool              109 drivers/scsi/lpfc/lpfc_mem.c 				       GFP_KERNEL, &pool->elements[i].phys);
pool              110 drivers/scsi/lpfc/lpfc_mem.c 		if (!pool->elements[i].virt)
pool              112 drivers/scsi/lpfc/lpfc_mem.c 		pool->max_count++;
pool              113 drivers/scsi/lpfc/lpfc_mem.c 		pool->current_count++;
pool              181 drivers/scsi/lpfc/lpfc_mem.c 		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
pool              182 drivers/scsi/lpfc/lpfc_mem.c 						 pool->elements[i].phys);
pool              183 drivers/scsi/lpfc/lpfc_mem.c 	kfree(pool->elements);
pool              219 drivers/scsi/lpfc/lpfc_mem.c 	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
pool              255 drivers/scsi/lpfc/lpfc_mem.c 	for (i = 0; i < pool->current_count; i++)
pool              256 drivers/scsi/lpfc/lpfc_mem.c 		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
pool              257 drivers/scsi/lpfc/lpfc_mem.c 			      pool->elements[i].phys);
pool              258 drivers/scsi/lpfc/lpfc_mem.c 	kfree(pool->elements);
pool              370 drivers/scsi/lpfc/lpfc_mem.c 	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
pool              377 drivers/scsi/lpfc/lpfc_mem.c 	if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
pool              378 drivers/scsi/lpfc/lpfc_mem.c 		pool->current_count--;
pool              379 drivers/scsi/lpfc/lpfc_mem.c 		ret = pool->elements[pool->current_count].virt;
pool              380 drivers/scsi/lpfc/lpfc_mem.c 		*handle = pool->elements[pool->current_count].phys;
pool              403 drivers/scsi/lpfc/lpfc_mem.c 	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
pool              405 drivers/scsi/lpfc/lpfc_mem.c 	if (pool->current_count < pool->max_count) {
pool              406 drivers/scsi/lpfc/lpfc_mem.c 		pool->elements[pool->current_count].virt = virt;
pool              407 drivers/scsi/lpfc/lpfc_mem.c 		pool->elements[pool->current_count].phys = dma;
pool              408 drivers/scsi/lpfc/lpfc_mem.c 		pool->current_count++;
pool              514 drivers/scsi/megaraid/megaraid_mm.c 	mm_dmapool_t	*pool;
pool              531 drivers/scsi/megaraid/megaraid_mm.c 		pool = &adp->dma_pool_list[i];
pool              533 drivers/scsi/megaraid/megaraid_mm.c 		if (xferlen > pool->buf_size)
pool              539 drivers/scsi/megaraid/megaraid_mm.c 		spin_lock_irqsave(&pool->lock, flags);
pool              541 drivers/scsi/megaraid/megaraid_mm.c 		if (!pool->in_use) {
pool              543 drivers/scsi/megaraid/megaraid_mm.c 			pool->in_use		= 1;
pool              545 drivers/scsi/megaraid/megaraid_mm.c 			kioc->buf_vaddr		= pool->vaddr;
pool              546 drivers/scsi/megaraid/megaraid_mm.c 			kioc->buf_paddr		= pool->paddr;
pool              548 drivers/scsi/megaraid/megaraid_mm.c 			spin_unlock_irqrestore(&pool->lock, flags);
pool              552 drivers/scsi/megaraid/megaraid_mm.c 			spin_unlock_irqrestore(&pool->lock, flags);
pool              567 drivers/scsi/megaraid/megaraid_mm.c 	pool = &adp->dma_pool_list[right_pool];
pool              569 drivers/scsi/megaraid/megaraid_mm.c 	spin_lock_irqsave(&pool->lock, flags);
pool              573 drivers/scsi/megaraid/megaraid_mm.c 	kioc->buf_vaddr		= dma_pool_alloc(pool->handle, GFP_ATOMIC,
pool              575 drivers/scsi/megaraid/megaraid_mm.c 	spin_unlock_irqrestore(&pool->lock, flags);
pool              640 drivers/scsi/megaraid/megaraid_mm.c 	mm_dmapool_t	*pool;
pool              644 drivers/scsi/megaraid/megaraid_mm.c 		pool = &adp->dma_pool_list[kioc->pool_index];
pool              647 drivers/scsi/megaraid/megaraid_mm.c 		spin_lock_irqsave(&pool->lock, flags);
pool              657 drivers/scsi/megaraid/megaraid_mm.c 			dma_pool_free(pool->handle, kioc->buf_vaddr, 
pool              660 drivers/scsi/megaraid/megaraid_mm.c 			pool->in_use = 0;
pool              662 drivers/scsi/megaraid/megaraid_mm.c 		spin_unlock_irqrestore(&pool->lock, flags);
pool             1069 drivers/scsi/megaraid/megaraid_mm.c 	mm_dmapool_t	*pool;
pool             1080 drivers/scsi/megaraid/megaraid_mm.c 		pool = &adp->dma_pool_list[i];
pool             1082 drivers/scsi/megaraid/megaraid_mm.c 		pool->buf_size = bufsize;
pool             1083 drivers/scsi/megaraid/megaraid_mm.c 		spin_lock_init(&pool->lock);
pool             1085 drivers/scsi/megaraid/megaraid_mm.c 		pool->handle = dma_pool_create("megaraid mm data buffer",
pool             1089 drivers/scsi/megaraid/megaraid_mm.c 		if (!pool->handle) {
pool             1093 drivers/scsi/megaraid/megaraid_mm.c 		pool->vaddr = dma_pool_alloc(pool->handle, GFP_KERNEL,
pool             1094 drivers/scsi/megaraid/megaraid_mm.c 							&pool->paddr);
pool             1096 drivers/scsi/megaraid/megaraid_mm.c 		if (!pool->vaddr)
pool             1185 drivers/scsi/megaraid/megaraid_mm.c 	mm_dmapool_t	*pool;
pool             1189 drivers/scsi/megaraid/megaraid_mm.c 		pool = &adp->dma_pool_list[i];
pool             1191 drivers/scsi/megaraid/megaraid_mm.c 		if (pool->handle) {
pool             1193 drivers/scsi/megaraid/megaraid_mm.c 			if (pool->vaddr)
pool             1194 drivers/scsi/megaraid/megaraid_mm.c 				dma_pool_free(pool->handle, pool->vaddr,
pool             1195 drivers/scsi/megaraid/megaraid_mm.c 							pool->paddr);
pool             1197 drivers/scsi/megaraid/megaraid_mm.c 			dma_pool_destroy(pool->handle);
pool             1198 drivers/scsi/megaraid/megaraid_mm.c 			pool->handle = NULL;
pool             1768 drivers/scsi/mvumi.c 	struct mvumi_ob_data *pool;
pool             1771 drivers/scsi/mvumi.c 		pool = list_first_entry(&mhba->free_ob_list,
pool             1773 drivers/scsi/mvumi.c 		list_del_init(&pool->list);
pool             1774 drivers/scsi/mvumi.c 		list_add_tail(&pool->list, &mhba->ob_data_list);
pool             1776 drivers/scsi/mvumi.c 		ob_frame = (struct mvumi_rsp_frame *) &pool->data[0];
pool             2219 drivers/scsi/qla2xxx/qla_attr.c 	    ha->dif_bundle_dma_allocs, ha->pool.unusable.count);
pool             4290 drivers/scsi/qla2xxx/qla_def.h 	} pool;
pool             4057 drivers/scsi/qla2xxx/qla_os.c 			INIT_LIST_HEAD(&ha->pool.good.head);
pool             4058 drivers/scsi/qla2xxx/qla_os.c 			INIT_LIST_HEAD(&ha->pool.unusable.head);
pool             4059 drivers/scsi/qla2xxx/qla_os.c 			ha->pool.good.count = 0;
pool             4060 drivers/scsi/qla2xxx/qla_os.c 			ha->pool.unusable.count = 0;
pool             4092 drivers/scsi/qla2xxx/qla_os.c 					    &ha->pool.unusable.head);
pool             4093 drivers/scsi/qla2xxx/qla_os.c 					ha->pool.unusable.count++;
pool             4096 drivers/scsi/qla2xxx/qla_os.c 					    &ha->pool.good.head);
pool             4097 drivers/scsi/qla2xxx/qla_os.c 					ha->pool.good.count++;
pool             4103 drivers/scsi/qla2xxx/qla_os.c 			    &ha->pool.good.head, list) {
pool             4114 drivers/scsi/qla2xxx/qla_os.c 			    __func__, ha->pool.good.count,
pool             4115 drivers/scsi/qla2xxx/qla_os.c 			    ha->pool.unusable.count);
pool             4302 drivers/scsi/qla2xxx/qla_os.c 		list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
pool             4310 drivers/scsi/qla2xxx/qla_os.c 			ha->pool.unusable.count--;
pool             4735 drivers/scsi/qla2xxx/qla_os.c 		list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
pool             4743 drivers/scsi/qla2xxx/qla_os.c 			ha->pool.unusable.count--;
pool             4745 drivers/scsi/qla2xxx/qla_os.c 		list_for_each_entry_safe(dsd, nxt, &ha->pool.good.head, list) {
pool              364 drivers/scsi/snic/snic_main.c 	mempool_t *pool;
pool              563 drivers/scsi/snic/snic_main.c 	pool = mempool_create_slab_pool(2,
pool              565 drivers/scsi/snic/snic_main.c 	if (!pool) {
pool              572 drivers/scsi/snic/snic_main.c 	snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL] = pool;
pool              574 drivers/scsi/snic/snic_main.c 	pool = mempool_create_slab_pool(2,
pool              576 drivers/scsi/snic/snic_main.c 	if (!pool) {
pool              583 drivers/scsi/snic/snic_main.c 	snic->req_pool[SNIC_REQ_CACHE_MAX_SGL] = pool;
pool              585 drivers/scsi/snic/snic_main.c 	pool = mempool_create_slab_pool(2,
pool              587 drivers/scsi/snic/snic_main.c 	if (!pool) {
pool              594 drivers/scsi/snic/snic_main.c 	snic->req_pool[SNIC_REQ_TM_CACHE] = pool;
pool              697 drivers/soc/fsl/qbman/bman.c 	struct bman_pool *pool = NULL;
pool              703 drivers/soc/fsl/qbman/bman.c 	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
pool              704 drivers/soc/fsl/qbman/bman.c 	if (!pool)
pool              707 drivers/soc/fsl/qbman/bman.c 	pool->bpid = bpid;
pool              709 drivers/soc/fsl/qbman/bman.c 	return pool;
pool              712 drivers/soc/fsl/qbman/bman.c 	kfree(pool);
pool              717 drivers/soc/fsl/qbman/bman.c void bman_free_pool(struct bman_pool *pool)
pool              719 drivers/soc/fsl/qbman/bman.c 	bm_release_bpid(pool->bpid);
pool              721 drivers/soc/fsl/qbman/bman.c 	kfree(pool);
pool              725 drivers/soc/fsl/qbman/bman.c int bman_get_bpid(const struct bman_pool *pool)
pool              727 drivers/soc/fsl/qbman/bman.c 	return pool->bpid;
pool              739 drivers/soc/fsl/qbman/bman.c int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
pool              774 drivers/soc/fsl/qbman/bman.c 	bm_buffer_set_bpid(r->bufs, pool->bpid);
pool              787 drivers/soc/fsl/qbman/bman.c int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num)
pool              797 drivers/soc/fsl/qbman/bman.c 	mcc->bpid = pool->bpid;
pool               37 drivers/soc/fsl/qbman/bman_test_api.c static struct bman_pool *pool;
pool              105 drivers/soc/fsl/qbman/bman_test_api.c 	pool = bman_new_pool();
pool              106 drivers/soc/fsl/qbman/bman_test_api.c 	if (!pool) {
pool              119 drivers/soc/fsl/qbman/bman_test_api.c 		if (bman_release(pool, bufs_in + i, num)) {
pool              132 drivers/soc/fsl/qbman/bman_test_api.c 		tmp = bman_acquire(pool, bufs_out + i - num, num);
pool              136 drivers/soc/fsl/qbman/bman_test_api.c 	i = bman_acquire(pool, NULL, 1);
pool              145 drivers/soc/fsl/qbman/bman_test_api.c 	bman_free_pool(pool);
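bman_test_api.c above exercises the buffer-pool API end to end: create a pool, release buffers into hardware, acquire them back, then free the pool. A condensed sketch of that sequence, built only from the calls listed here; the header path and buffer contents are assumptions, and filling bm_buffer entries is omitted:

#include <linux/printk.h>
#include <soc/fsl/bman.h>

static int bman_pool_demo(void)
{
	struct bman_pool *pool;
	struct bm_buffer bufs[8];
	int ret;

	pool = bman_new_pool();		/* dynamically allocates a BPID */
	if (!pool)
		return -ENOMEM;

	pr_info("using buffer pool id %d\n", bman_get_bpid(pool));

	/* ... fill bufs[] with buffer addresses for the hardware ... */

	ret = bman_release(pool, bufs, 8);	/* hand buffers to the pool */
	if (!ret)
		ret = bman_acquire(pool, bufs, 8); /* take them back out */

	bman_free_pool(pool);
	return ret < 0 ? ret : 0;
}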
pool              362 drivers/soc/ti/knav_qmss.h #define for_each_pool(kdev, pool)				\
pool              363 drivers/soc/ti/knav_qmss.h 	list_for_each_entry(pool, &kdev->pools, list)
pool              693 drivers/soc/ti/knav_qmss_queue.c static void kdesc_fill_pool(struct knav_pool *pool)
pool              698 drivers/soc/ti/knav_qmss_queue.c 	region = pool->region;
pool              699 drivers/soc/ti/knav_qmss_queue.c 	pool->desc_size = region->desc_size;
pool              700 drivers/soc/ti/knav_qmss_queue.c 	for (i = 0; i < pool->num_desc; i++) {
pool              701 drivers/soc/ti/knav_qmss_queue.c 		int index = pool->region_offset + i;
pool              705 drivers/soc/ti/knav_qmss_queue.c 		dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
pool              706 drivers/soc/ti/knav_qmss_queue.c 		dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
pool              708 drivers/soc/ti/knav_qmss_queue.c 		knav_queue_push(pool->queue, dma_addr, dma_size, 0);
pool              713 drivers/soc/ti/knav_qmss_queue.c static void kdesc_empty_pool(struct knav_pool *pool)
pool              720 drivers/soc/ti/knav_qmss_queue.c 	if (!pool->queue)
pool              724 drivers/soc/ti/knav_qmss_queue.c 		dma = knav_queue_pop(pool->queue, &size);
pool              727 drivers/soc/ti/knav_qmss_queue.c 		desc = knav_pool_desc_dma_to_virt(pool, dma);
pool              729 drivers/soc/ti/knav_qmss_queue.c 			dev_dbg(pool->kdev->dev,
pool              734 drivers/soc/ti/knav_qmss_queue.c 	WARN_ON(i != pool->num_desc);
pool              735 drivers/soc/ti/knav_qmss_queue.c 	knav_queue_close(pool->queue);
pool              742 drivers/soc/ti/knav_qmss_queue.c 	struct knav_pool *pool = ph;
pool              743 drivers/soc/ti/knav_qmss_queue.c 	return pool->region->dma_start + (virt - pool->region->virt_start);
pool              749 drivers/soc/ti/knav_qmss_queue.c 	struct knav_pool *pool = ph;
pool              750 drivers/soc/ti/knav_qmss_queue.c 	return pool->region->virt_start + (dma - pool->region->dma_start);
pool              768 drivers/soc/ti/knav_qmss_queue.c 	struct knav_pool *pool, *pi;
pool              780 drivers/soc/ti/knav_qmss_queue.c 	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
pool              781 drivers/soc/ti/knav_qmss_queue.c 	if (!pool) {
pool              799 drivers/soc/ti/knav_qmss_queue.c 	pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
pool              800 drivers/soc/ti/knav_qmss_queue.c 	if (IS_ERR_OR_NULL(pool->queue)) {
pool              803 drivers/soc/ti/knav_qmss_queue.c 			name, PTR_ERR(pool->queue));
pool              804 drivers/soc/ti/knav_qmss_queue.c 		ret = PTR_ERR(pool->queue);
pool              808 drivers/soc/ti/knav_qmss_queue.c 	pool->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
pool              809 drivers/soc/ti/knav_qmss_queue.c 	pool->kdev = kdev;
pool              810 drivers/soc/ti/knav_qmss_queue.c 	pool->dev = kdev->dev;
pool              838 drivers/soc/ti/knav_qmss_queue.c 		pool->region = region;
pool              839 drivers/soc/ti/knav_qmss_queue.c 		pool->num_desc = num_desc;
pool              840 drivers/soc/ti/knav_qmss_queue.c 		pool->region_offset = last_offset;
pool              842 drivers/soc/ti/knav_qmss_queue.c 		list_add_tail(&pool->list, &kdev->pools);
pool              843 drivers/soc/ti/knav_qmss_queue.c 		list_add_tail(&pool->region_inst, node);
pool              852 drivers/soc/ti/knav_qmss_queue.c 	kdesc_fill_pool(pool);
pool              853 drivers/soc/ti/knav_qmss_queue.c 	return pool;
pool              858 drivers/soc/ti/knav_qmss_queue.c 	kfree(pool->name);
pool              859 drivers/soc/ti/knav_qmss_queue.c 	devm_kfree(kdev->dev, pool);
pool              870 drivers/soc/ti/knav_qmss_queue.c 	struct knav_pool *pool = ph;
pool              872 drivers/soc/ti/knav_qmss_queue.c 	if (!pool)
pool              875 drivers/soc/ti/knav_qmss_queue.c 	if (!pool->region)
pool              878 drivers/soc/ti/knav_qmss_queue.c 	kdesc_empty_pool(pool);
pool              881 drivers/soc/ti/knav_qmss_queue.c 	pool->region->used_desc -= pool->num_desc;
pool              882 drivers/soc/ti/knav_qmss_queue.c 	list_del(&pool->region_inst);
pool              883 drivers/soc/ti/knav_qmss_queue.c 	list_del(&pool->list);
pool              886 drivers/soc/ti/knav_qmss_queue.c 	kfree(pool->name);
pool              887 drivers/soc/ti/knav_qmss_queue.c 	devm_kfree(kdev->dev, pool);
pool              900 drivers/soc/ti/knav_qmss_queue.c 	struct knav_pool *pool = ph;
pool              905 drivers/soc/ti/knav_qmss_queue.c 	dma = knav_queue_pop(pool->queue, &size);
pool              908 drivers/soc/ti/knav_qmss_queue.c 	data = knav_pool_desc_dma_to_virt(pool, dma);
pool              919 drivers/soc/ti/knav_qmss_queue.c 	struct knav_pool *pool = ph;
pool              921 drivers/soc/ti/knav_qmss_queue.c 	dma = knav_pool_desc_virt_to_dma(pool, desc);
pool              922 drivers/soc/ti/knav_qmss_queue.c 	knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
pool              939 drivers/soc/ti/knav_qmss_queue.c 	struct knav_pool *pool = ph;
pool              940 drivers/soc/ti/knav_qmss_queue.c 	*dma = knav_pool_desc_virt_to_dma(pool, desc);
pool              941 drivers/soc/ti/knav_qmss_queue.c 	size = min(size, pool->region->desc_size);
pool              944 drivers/soc/ti/knav_qmss_queue.c 	dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);
pool              964 drivers/soc/ti/knav_qmss_queue.c 	struct knav_pool *pool = ph;
pool              968 drivers/soc/ti/knav_qmss_queue.c 	desc_sz = min(dma_sz, pool->region->desc_size);
pool              969 drivers/soc/ti/knav_qmss_queue.c 	desc = knav_pool_desc_dma_to_virt(pool, dma);
pool              970 drivers/soc/ti/knav_qmss_queue.c 	dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
pool              983 drivers/soc/ti/knav_qmss_queue.c 	struct knav_pool *pool = ph;
pool              984 drivers/soc/ti/knav_qmss_queue.c 	return knav_queue_get_count(pool->queue);
pool              994 drivers/soc/ti/knav_qmss_queue.c 	struct knav_pool *pool;
pool             1036 drivers/soc/ti/knav_qmss_queue.c 	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
pool             1037 drivers/soc/ti/knav_qmss_queue.c 	if (!pool) {
pool             1041 drivers/soc/ti/knav_qmss_queue.c 	pool->num_desc = 0;
pool             1042 drivers/soc/ti/knav_qmss_queue.c 	pool->region_offset = region->num_desc;
pool             1043 drivers/soc/ti/knav_qmss_queue.c 	list_add(&pool->region_inst, &region->pools);
pool             1358 drivers/soc/ti/knav_qmss_queue.c 	struct knav_pool *pool, *tmp;
pool             1365 drivers/soc/ti/knav_qmss_queue.c 		list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
pool             1366 drivers/soc/ti/knav_qmss_queue.c 			knav_pool_destroy(pool);
pool              289 drivers/staging/android/ion/ion.h void ion_page_pool_destroy(struct ion_page_pool *pool);
pool              290 drivers/staging/android/ion/ion.h struct page *ion_page_pool_alloc(struct ion_page_pool *pool);
pool              291 drivers/staging/android/ion/ion.h void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);
pool              300 drivers/staging/android/ion/ion.h int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
pool               15 drivers/staging/android/ion/ion_page_pool.c static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
pool               19 drivers/staging/android/ion/ion_page_pool.c 	return alloc_pages(pool->gfp_mask, pool->order);
pool               22 drivers/staging/android/ion/ion_page_pool.c static void ion_page_pool_free_pages(struct ion_page_pool *pool,
pool               25 drivers/staging/android/ion/ion_page_pool.c 	__free_pages(page, pool->order);
pool               28 drivers/staging/android/ion/ion_page_pool.c static void ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
pool               30 drivers/staging/android/ion/ion_page_pool.c 	mutex_lock(&pool->mutex);
pool               32 drivers/staging/android/ion/ion_page_pool.c 		list_add_tail(&page->lru, &pool->high_items);
pool               33 drivers/staging/android/ion/ion_page_pool.c 		pool->high_count++;
pool               35 drivers/staging/android/ion/ion_page_pool.c 		list_add_tail(&page->lru, &pool->low_items);
pool               36 drivers/staging/android/ion/ion_page_pool.c 		pool->low_count++;
pool               40 drivers/staging/android/ion/ion_page_pool.c 							1 << pool->order);
pool               41 drivers/staging/android/ion/ion_page_pool.c 	mutex_unlock(&pool->mutex);
pool               44 drivers/staging/android/ion/ion_page_pool.c static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
pool               49 drivers/staging/android/ion/ion_page_pool.c 		BUG_ON(!pool->high_count);
pool               50 drivers/staging/android/ion/ion_page_pool.c 		page = list_first_entry(&pool->high_items, struct page, lru);
pool               51 drivers/staging/android/ion/ion_page_pool.c 		pool->high_count--;
pool               53 drivers/staging/android/ion/ion_page_pool.c 		BUG_ON(!pool->low_count);
pool               54 drivers/staging/android/ion/ion_page_pool.c 		page = list_first_entry(&pool->low_items, struct page, lru);
pool               55 drivers/staging/android/ion/ion_page_pool.c 		pool->low_count--;
pool               60 drivers/staging/android/ion/ion_page_pool.c 							-(1 << pool->order));
pool               64 drivers/staging/android/ion/ion_page_pool.c struct page *ion_page_pool_alloc(struct ion_page_pool *pool)
pool               68 drivers/staging/android/ion/ion_page_pool.c 	BUG_ON(!pool);
pool               70 drivers/staging/android/ion/ion_page_pool.c 	mutex_lock(&pool->mutex);
pool               71 drivers/staging/android/ion/ion_page_pool.c 	if (pool->high_count)
pool               72 drivers/staging/android/ion/ion_page_pool.c 		page = ion_page_pool_remove(pool, true);
pool               73 drivers/staging/android/ion/ion_page_pool.c 	else if (pool->low_count)
pool               74 drivers/staging/android/ion/ion_page_pool.c 		page = ion_page_pool_remove(pool, false);
pool               75 drivers/staging/android/ion/ion_page_pool.c 	mutex_unlock(&pool->mutex);
pool               78 drivers/staging/android/ion/ion_page_pool.c 		page = ion_page_pool_alloc_pages(pool);
pool               83 drivers/staging/android/ion/ion_page_pool.c void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
pool               85 drivers/staging/android/ion/ion_page_pool.c 	BUG_ON(pool->order != compound_order(page));
pool               87 drivers/staging/android/ion/ion_page_pool.c 	ion_page_pool_add(pool, page);
pool               90 drivers/staging/android/ion/ion_page_pool.c static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
pool               92 drivers/staging/android/ion/ion_page_pool.c 	int count = pool->low_count;
pool               95 drivers/staging/android/ion/ion_page_pool.c 		count += pool->high_count;
pool               97 drivers/staging/android/ion/ion_page_pool.c 	return count << pool->order;
pool              100 drivers/staging/android/ion/ion_page_pool.c int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
pool              112 drivers/staging/android/ion/ion_page_pool.c 		return ion_page_pool_total(pool, high);
pool              117 drivers/staging/android/ion/ion_page_pool.c 		mutex_lock(&pool->mutex);
pool              118 drivers/staging/android/ion/ion_page_pool.c 		if (pool->low_count) {
pool              119 drivers/staging/android/ion/ion_page_pool.c 			page = ion_page_pool_remove(pool, false);
pool              120 drivers/staging/android/ion/ion_page_pool.c 		} else if (high && pool->high_count) {
pool              121 drivers/staging/android/ion/ion_page_pool.c 			page = ion_page_pool_remove(pool, true);
pool              123 drivers/staging/android/ion/ion_page_pool.c 			mutex_unlock(&pool->mutex);
pool              126 drivers/staging/android/ion/ion_page_pool.c 		mutex_unlock(&pool->mutex);
pool              127 drivers/staging/android/ion/ion_page_pool.c 		ion_page_pool_free_pages(pool, page);
pool              128 drivers/staging/android/ion/ion_page_pool.c 		freed += (1 << pool->order);
pool              136 drivers/staging/android/ion/ion_page_pool.c 	struct ion_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
pool              138 drivers/staging/android/ion/ion_page_pool.c 	if (!pool)
pool              140 drivers/staging/android/ion/ion_page_pool.c 	pool->high_count = 0;
pool              141 drivers/staging/android/ion/ion_page_pool.c 	pool->low_count = 0;
pool              142 drivers/staging/android/ion/ion_page_pool.c 	INIT_LIST_HEAD(&pool->low_items);
pool              143 drivers/staging/android/ion/ion_page_pool.c 	INIT_LIST_HEAD(&pool->high_items);
pool              144 drivers/staging/android/ion/ion_page_pool.c 	pool->gfp_mask = gfp_mask | __GFP_COMP;
pool              145 drivers/staging/android/ion/ion_page_pool.c 	pool->order = order;
pool              146 drivers/staging/android/ion/ion_page_pool.c 	mutex_init(&pool->mutex);
pool              147 drivers/staging/android/ion/ion_page_pool.c 	plist_node_init(&pool->list, order);
pool              149 drivers/staging/android/ion/ion_page_pool.c 	return pool;
pool              152 drivers/staging/android/ion/ion_page_pool.c void ion_page_pool_destroy(struct ion_page_pool *pool)
pool              154 drivers/staging/android/ion/ion_page_pool.c 	kfree(pool);
pool               51 drivers/staging/android/ion/ion_system_heap.c 	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
pool               53 drivers/staging/android/ion/ion_system_heap.c 	return ion_page_pool_alloc(pool);
pool               59 drivers/staging/android/ion/ion_system_heap.c 	struct ion_page_pool *pool;
pool               68 drivers/staging/android/ion/ion_system_heap.c 	pool = heap->pools[order_to_index(order)];
pool               70 drivers/staging/android/ion/ion_system_heap.c 	ion_page_pool_free(pool, page);
pool              174 drivers/staging/android/ion/ion_system_heap.c 	struct ion_page_pool *pool;
pool              186 drivers/staging/android/ion/ion_system_heap.c 		pool = sys_heap->pools[i];
pool              189 drivers/staging/android/ion/ion_system_heap.c 			nr_total += ion_page_pool_shrink(pool,
pool              194 drivers/staging/android/ion/ion_system_heap.c 			nr_freed = ion_page_pool_shrink(pool,
pool              229 drivers/staging/android/ion/ion_system_heap.c 		struct ion_page_pool *pool;
pool              235 drivers/staging/android/ion/ion_system_heap.c 		pool = ion_page_pool_create(gfp_flags, orders[i]);
pool              236 drivers/staging/android/ion/ion_system_heap.c 		if (!pool)
pool              238 drivers/staging/android/ion/ion_system_heap.c 		pools[i] = pool;
pool               25 drivers/staging/media/ipu3/ipu3-css-pool.c void imgu_css_pool_cleanup(struct imgu_device *imgu, struct imgu_css_pool *pool)
pool               30 drivers/staging/media/ipu3/ipu3-css-pool.c 		imgu_dmamap_free(imgu, &pool->entry[i].param);
pool               33 drivers/staging/media/ipu3/ipu3-css-pool.c int imgu_css_pool_init(struct imgu_device *imgu, struct imgu_css_pool *pool,
pool               39 drivers/staging/media/ipu3/ipu3-css-pool.c 		pool->entry[i].valid = false;
pool               41 drivers/staging/media/ipu3/ipu3-css-pool.c 			pool->entry[i].param.vaddr = NULL;
pool               45 drivers/staging/media/ipu3/ipu3-css-pool.c 		if (!imgu_dmamap_alloc(imgu, &pool->entry[i].param, size))
pool               49 drivers/staging/media/ipu3/ipu3-css-pool.c 	pool->last = IPU3_CSS_POOL_SIZE;
pool               54 drivers/staging/media/ipu3/ipu3-css-pool.c 	imgu_css_pool_cleanup(imgu, pool);
pool               61 drivers/staging/media/ipu3/ipu3-css-pool.c void imgu_css_pool_get(struct imgu_css_pool *pool)
pool               64 drivers/staging/media/ipu3/ipu3-css-pool.c 	u32 n = (pool->last + 1) % IPU3_CSS_POOL_SIZE;
pool               66 drivers/staging/media/ipu3/ipu3-css-pool.c 	pool->entry[n].valid = true;
pool               67 drivers/staging/media/ipu3/ipu3-css-pool.c 	pool->last = n;
pool               73 drivers/staging/media/ipu3/ipu3-css-pool.c void imgu_css_pool_put(struct imgu_css_pool *pool)
pool               75 drivers/staging/media/ipu3/ipu3-css-pool.c 	pool->entry[pool->last].valid = false;
pool               76 drivers/staging/media/ipu3/ipu3-css-pool.c 	pool->last = (pool->last + IPU3_CSS_POOL_SIZE - 1) % IPU3_CSS_POOL_SIZE;
pool               89 drivers/staging/media/ipu3/ipu3-css-pool.c imgu_css_pool_last(struct imgu_css_pool *pool, unsigned int n)
pool               92 drivers/staging/media/ipu3/ipu3-css-pool.c 	int i = (pool->last + IPU3_CSS_POOL_SIZE - n) % IPU3_CSS_POOL_SIZE;
pool               96 drivers/staging/media/ipu3/ipu3-css-pool.c 	if (!pool->entry[i].valid)
pool               99 drivers/staging/media/ipu3/ipu3-css-pool.c 	return &pool->entry[i].param;
pool               47 drivers/staging/media/ipu3/ipu3-css-pool.h 			   struct imgu_css_pool *pool);
pool               48 drivers/staging/media/ipu3/ipu3-css-pool.h int imgu_css_pool_init(struct imgu_device *imgu, struct imgu_css_pool *pool,
pool               50 drivers/staging/media/ipu3/ipu3-css-pool.h void imgu_css_pool_get(struct imgu_css_pool *pool);
pool               51 drivers/staging/media/ipu3/ipu3-css-pool.h void imgu_css_pool_put(struct imgu_css_pool *pool);
pool               52 drivers/staging/media/ipu3/ipu3-css-pool.h const struct imgu_css_map *imgu_css_pool_last(struct imgu_css_pool *pool,
pool              668 drivers/staging/media/ipu3/ipu3-css.c 			      &css->pipes[pipe].pool.parameter_set_info);
pool              669 drivers/staging/media/ipu3/ipu3-css.c 	imgu_css_pool_cleanup(imgu, &css->pipes[pipe].pool.acc);
pool              670 drivers/staging/media/ipu3/ipu3-css.c 	imgu_css_pool_cleanup(imgu, &css->pipes[pipe].pool.gdc);
pool              671 drivers/staging/media/ipu3/ipu3-css.c 	imgu_css_pool_cleanup(imgu, &css->pipes[pipe].pool.obgrid);
pool              675 drivers/staging/media/ipu3/ipu3-css.c 				      &css->pipes[pipe].pool.binary_params_p[i]);
pool             1071 drivers/staging/media/ipu3/ipu3-css.c 	if (imgu_css_pool_init(imgu, &css_pipe->pool.parameter_set_info,
pool             1073 drivers/staging/media/ipu3/ipu3-css.c 	    imgu_css_pool_init(imgu, &css_pipe->pool.acc,
pool             1075 drivers/staging/media/ipu3/ipu3-css.c 	    imgu_css_pool_init(imgu, &css_pipe->pool.gdc,
pool             1079 drivers/staging/media/ipu3/ipu3-css.c 	    imgu_css_pool_init(imgu, &css_pipe->pool.obgrid,
pool             1086 drivers/staging/media/ipu3/ipu3-css.c 				       &css_pipe->pool.binary_params_p[i],
pool             2173 drivers/staging/media/ipu3/ipu3-css.c 	imgu_css_pool_get(&css_pipe->pool.parameter_set_info);
pool             2174 drivers/staging/media/ipu3/ipu3-css.c 	param_set = imgu_css_pool_last(&css_pipe->pool.parameter_set_info,
pool             2178 drivers/staging/media/ipu3/ipu3-css.c 	map = imgu_css_pool_last(&css_pipe->pool.acc, 0);
pool             2180 drivers/staging/media/ipu3/ipu3-css.c 		imgu_css_pool_get(&css_pipe->pool.acc);
pool             2181 drivers/staging/media/ipu3/ipu3-css.c 		map = imgu_css_pool_last(&css_pipe->pool.acc, 0);
pool             2187 drivers/staging/media/ipu3/ipu3-css.c 	map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
pool             2191 drivers/staging/media/ipu3/ipu3-css.c 		imgu_css_pool_get(&css_pipe->pool.binary_params_p[m]);
pool             2192 drivers/staging/media/ipu3/ipu3-css.c 		map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
pool             2198 drivers/staging/media/ipu3/ipu3-css.c 	map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
pool             2201 drivers/staging/media/ipu3/ipu3-css.c 		imgu_css_pool_get(&css_pipe->pool.binary_params_p[m]);
pool             2202 drivers/staging/media/ipu3/ipu3-css.c 		map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
pool             2209 drivers/staging/media/ipu3/ipu3-css.c 		map = imgu_css_pool_last(&css_pipe->pool.acc, 1);
pool             2220 drivers/staging/media/ipu3/ipu3-css.c 		map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 1);
pool             2229 drivers/staging/media/ipu3/ipu3-css.c 		map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 1);
pool             2242 drivers/staging/media/ipu3/ipu3-css.c 		map = imgu_css_pool_last(&css_pipe->pool.gdc, 0);
pool             2244 drivers/staging/media/ipu3/ipu3-css.c 			imgu_css_pool_get(&css_pipe->pool.gdc);
pool             2245 drivers/staging/media/ipu3/ipu3-css.c 			map = imgu_css_pool_last(&css_pipe->pool.gdc, 0);
pool             2259 drivers/staging/media/ipu3/ipu3-css.c 	map = imgu_css_pool_last(&css_pipe->pool.obgrid, 0);
pool             2261 drivers/staging/media/ipu3/ipu3-css.c 		imgu_css_pool_get(&css_pipe->pool.obgrid);
pool             2262 drivers/staging/media/ipu3/ipu3-css.c 		map = imgu_css_pool_last(&css_pipe->pool.obgrid, 0);
pool             2276 drivers/staging/media/ipu3/ipu3-css.c 	map = imgu_css_pool_last(&css_pipe->pool.acc, 0);
pool             2279 drivers/staging/media/ipu3/ipu3-css.c 	map = imgu_css_pool_last(&css_pipe->pool.gdc, 0);
pool             2283 drivers/staging/media/ipu3/ipu3-css.c 		map = imgu_css_pool_last(&css_pipe->pool.obgrid, 0);
pool             2289 drivers/staging/media/ipu3/ipu3-css.c 		map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
pool             2294 drivers/staging/media/ipu3/ipu3-css.c 	map = imgu_css_pool_last(&css_pipe->pool.parameter_set_info, 0);
pool             2333 drivers/staging/media/ipu3/ipu3-css.c 	imgu_css_pool_put(&css_pipe->pool.parameter_set_info);
pool             2335 drivers/staging/media/ipu3/ipu3-css.c 		imgu_css_pool_put(&css_pipe->pool.acc);
pool             2337 drivers/staging/media/ipu3/ipu3-css.c 		imgu_css_pool_put(&css_pipe->pool.gdc);
pool             2339 drivers/staging/media/ipu3/ipu3-css.c 		imgu_css_pool_put(&css_pipe->pool.obgrid);
pool             2342 drivers/staging/media/ipu3/ipu3-css.c 			&css_pipe->pool.binary_params_p
pool             2346 drivers/staging/media/ipu3/ipu3-css.c 			&css_pipe->pool.binary_params_p
pool              143 drivers/staging/media/ipu3/ipu3-css.h 	} pool;
pool               24 drivers/staging/octeon/ethernet-mem.c static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
pool               35 drivers/staging/octeon/ethernet-mem.c 		cvmx_fpa_free(skb->data, pool, size / 128);
pool               47 drivers/staging/octeon/ethernet-mem.c static void cvm_oct_free_hw_skbuff(int pool, int size, int elements)
pool               52 drivers/staging/octeon/ethernet-mem.c 		memory = cvmx_fpa_alloc(pool);
pool               63 drivers/staging/octeon/ethernet-mem.c 			pool, elements);
pool               66 drivers/staging/octeon/ethernet-mem.c 			pool, elements);
pool               77 drivers/staging/octeon/ethernet-mem.c static int cvm_oct_fill_hw_memory(int pool, int size, int elements)
pool               97 drivers/staging/octeon/ethernet-mem.c 				elements * size, pool);
pool              102 drivers/staging/octeon/ethernet-mem.c 		cvmx_fpa_free(fpa, pool, 0);
pool              114 drivers/staging/octeon/ethernet-mem.c static void cvm_oct_free_hw_memory(int pool, int size, int elements)
pool              120 drivers/staging/octeon/ethernet-mem.c 		fpa = cvmx_fpa_alloc(pool);
pool              131 drivers/staging/octeon/ethernet-mem.c 			pool, elements);
pool              134 drivers/staging/octeon/ethernet-mem.c 			pool, elements);
pool              137 drivers/staging/octeon/ethernet-mem.c int cvm_oct_mem_fill_fpa(int pool, int size, int elements)
pool              141 drivers/staging/octeon/ethernet-mem.c 	if (pool == CVMX_FPA_PACKET_POOL)
pool              142 drivers/staging/octeon/ethernet-mem.c 		freed = cvm_oct_fill_hw_skbuff(pool, size, elements);
pool              144 drivers/staging/octeon/ethernet-mem.c 		freed = cvm_oct_fill_hw_memory(pool, size, elements);
pool              148 drivers/staging/octeon/ethernet-mem.c void cvm_oct_mem_empty_fpa(int pool, int size, int elements)
pool              150 drivers/staging/octeon/ethernet-mem.c 	if (pool == CVMX_FPA_PACKET_POOL)
pool              151 drivers/staging/octeon/ethernet-mem.c 		cvm_oct_free_hw_skbuff(pool, size, elements);
pool              153 drivers/staging/octeon/ethernet-mem.c 		cvm_oct_free_hw_memory(pool, size, elements);
pool                8 drivers/staging/octeon/ethernet-mem.h int cvm_oct_mem_fill_fpa(int pool, int size, int elements);
pool                9 drivers/staging/octeon/ethernet-mem.h void cvm_oct_mem_empty_fpa(int pool, int size, int elements);
pool              265 drivers/staging/octeon/ethernet-tx.c 		hw_buffer.s.pool = 0;
pool              269 drivers/staging/octeon/ethernet-tx.c 		hw_buffer.s.pool = 0;
pool              574 drivers/staging/octeon/ethernet-tx.c 	work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
pool              185 drivers/staging/octeon/ethernet.c 				      segment_ptr.s.pool,
pool              180 drivers/staging/octeon/octeon-stubs.h 		uint64_t pool:3;
pool             1242 drivers/staging/octeon/octeon-stubs.h static inline void *cvmx_fpa_alloc(uint64_t pool)
pool             1247 drivers/staging/octeon/octeon-stubs.h static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
pool              560 drivers/tee/optee/core.c 	struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
pool              592 drivers/tee/optee/core.c 		pool = optee_config_dyn_shm();
pool              597 drivers/tee/optee/core.c 	if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
pool              598 drivers/tee/optee/core.c 		pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
pool              600 drivers/tee/optee/core.c 	if (IS_ERR(pool))
pool              601 drivers/tee/optee/core.c 		return (void *)pool;
pool              612 drivers/tee/optee/core.c 	teedev = tee_device_alloc(&optee_desc, NULL, pool, optee);
pool              619 drivers/tee/optee/core.c 	teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
pool              639 drivers/tee/optee/core.c 	optee->pool = pool;
pool              658 drivers/tee/optee/core.c 	if (pool)
pool              659 drivers/tee/optee/core.c 		tee_shm_pool_free(pool);
pool              681 drivers/tee/optee/core.c 	tee_shm_pool_free(optee->pool);
pool               89 drivers/tee/optee/optee_private.h 	struct tee_shm_pool *pool;
pool              707 drivers/tee/tee_core.c 				    struct tee_shm_pool *pool,
pool              717 drivers/tee/tee_core.c 	    !teedesc->ops->release || !pool)
pool              774 drivers/tee/tee_core.c 	teedev->pool = pool;
pool              919 drivers/tee/tee_core.c 	teedev->pool = NULL;
pool               57 drivers/tee/tee_private.h 	struct tee_shm_pool *pool;
pool               28 drivers/tee/tee_shm.c 			poolm = teedev->pool->dma_buf_mgr;
pool               30 drivers/tee/tee_shm.c 			poolm = teedev->pool->private_mgr;
pool              128 drivers/tee/tee_shm.c 	if (!teedev->pool) {
pool              144 drivers/tee/tee_shm.c 		poolm = teedev->pool->dma_buf_mgr;
pool              146 drivers/tee/tee_shm.c 		poolm = teedev->pool->private_mgr;
pool              155 drivers/tee/tee_shm_pool.c 	struct tee_shm_pool *pool;
pool              160 drivers/tee/tee_shm_pool.c 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
pool              161 drivers/tee/tee_shm_pool.c 	if (!pool)
pool              164 drivers/tee/tee_shm_pool.c 	pool->private_mgr = priv_mgr;
pool              165 drivers/tee/tee_shm_pool.c 	pool->dma_buf_mgr = dmabuf_mgr;
pool              167 drivers/tee/tee_shm_pool.c 	return pool;
pool              178 drivers/tee/tee_shm_pool.c void tee_shm_pool_free(struct tee_shm_pool *pool)
pool              180 drivers/tee/tee_shm_pool.c 	if (pool->private_mgr)
pool              181 drivers/tee/tee_shm_pool.c 		tee_shm_pool_mgr_destroy(pool->private_mgr);
pool              182 drivers/tee/tee_shm_pool.c 	if (pool->dma_buf_mgr)
pool              183 drivers/tee/tee_shm_pool.c 		tee_shm_pool_mgr_destroy(pool->dma_buf_mgr);
pool              184 drivers/tee/tee_shm_pool.c 	kfree(pool);
pool               77 drivers/usb/core/buffer.c 		hcd->pool[i] = dma_pool_create(name, hcd->self.sysdev,
pool               79 drivers/usb/core/buffer.c 		if (!hcd->pool[i]) {
pool              103 drivers/usb/core/buffer.c 		dma_pool_destroy(hcd->pool[i]);
pool              104 drivers/usb/core/buffer.c 		hcd->pool[i] = NULL;
pool              137 drivers/usb/core/buffer.c 			return dma_pool_alloc(hcd->pool[i], mem_flags, dma);
pool              167 drivers/usb/core/buffer.c 			dma_pool_free(hcd->pool[i], addr, dma);
pool              229 drivers/usb/gadget/function/u_serial.c 	struct list_head	*pool = &port->write_pool;
pool              239 drivers/usb/gadget/function/u_serial.c 	while (!port->write_busy && !list_empty(pool)) {
pool              246 drivers/usb/gadget/function/u_serial.c 		req = list_entry(pool->next, struct usb_request, list);
pool              278 drivers/usb/gadget/function/u_serial.c 			list_add(&req->list, pool);
pool              303 drivers/usb/gadget/function/u_serial.c 	struct list_head	*pool = &port->read_pool;
pool              306 drivers/usb/gadget/function/u_serial.c 	while (!list_empty(pool)) {
pool              319 drivers/usb/gadget/function/u_serial.c 		req = list_entry(pool->next, struct usb_request, list);
pool              333 drivers/usb/gadget/function/u_serial.c 			list_add(&req->list, pool);
pool               38 drivers/usb/host/xhci-dbgtty.c 	struct list_head	*pool = &port->write_pool;
pool               40 drivers/usb/host/xhci-dbgtty.c 	while (!list_empty(pool)) {
pool               41 drivers/usb/host/xhci-dbgtty.c 		req = list_entry(pool->next, struct dbc_request, list_pool);
pool               55 drivers/usb/host/xhci-dbgtty.c 			list_add(&req->list_pool, pool);
pool               72 drivers/usb/host/xhci-dbgtty.c 	struct list_head	*pool = &port->read_pool;
pool               74 drivers/usb/host/xhci-dbgtty.c 	while (!list_empty(pool)) {
pool               78 drivers/usb/host/xhci-dbgtty.c 		req = list_entry(pool->next, struct dbc_request, list_pool);
pool               87 drivers/usb/host/xhci-dbgtty.c 			list_add(&req->list_pool, pool);
pool              128 drivers/usb/musb/cppi_dma.c 		bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma);
pool              149 drivers/usb/musb/cppi_dma.c 			dma_pool_free(cppi->pool, bd, bd->dma);
pool             1325 drivers/usb/musb/cppi_dma.c 	controller->pool = dma_pool_create("cppi",
pool             1329 drivers/usb/musb/cppi_dma.c 	if (!controller->pool) {
pool             1363 drivers/usb/musb/cppi_dma.c 	dma_pool_destroy(cppi->pool);
pool              119 drivers/usb/musb/cppi_dma.h 	struct dma_pool			*pool;
pool              862 fs/ceph/addr.c 		mempool_t *pool = NULL;	/* Becomes non-null if mempool used */
pool              964 fs/ceph/addr.c 					pool = fsc->wb_pagevec_pool;
pool              965 fs/ceph/addr.c 					pages = mempool_alloc(pool, GFP_NOFS);
pool              972 fs/ceph/addr.c 				if (num_ops >= (pool ?  CEPH_OSD_SLAB_OPS :
pool             1068 fs/ceph/addr.c 							!!pool, false);
pool             1095 fs/ceph/addr.c 						 0, !!pool, false);
pool             1100 fs/ceph/addr.c 		pool = NULL;
pool             1111 fs/ceph/addr.c 				pool = fsc->wb_pagevec_pool;
pool             1112 fs/ceph/addr.c 				pages = mempool_alloc(pool, GFP_NOFS);
pool             1815 fs/ceph/addr.c 				s64 pool, struct ceph_string *pool_ns)
pool             1830 fs/ceph/addr.c 		if (pool < perm->pool)
pool             1832 fs/ceph/addr.c 		else if (pool > perm->pool)
pool             1854 fs/ceph/addr.c 		     pool, (int)pool_ns->len, pool_ns->str);
pool             1856 fs/ceph/addr.c 		dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool);
pool             1864 fs/ceph/addr.c 		if (pool < perm->pool)
pool             1866 fs/ceph/addr.c 		else if (pool > perm->pool)
pool             1896 fs/ceph/addr.c 	rd_req->r_base_oloc.pool = pool;
pool             1964 fs/ceph/addr.c 	perm->pool = pool;
pool             1984 fs/ceph/addr.c 		     pool, (int)pool_ns->len, pool_ns->str, err);
pool             1986 fs/ceph/addr.c 		dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err);
pool             1994 fs/ceph/addr.c 	s64 pool;
pool             2012 fs/ceph/addr.c 	pool = ci->i_layout.pool_id;
pool             2018 fs/ceph/addr.c 			     pool);
pool             2023 fs/ceph/addr.c 			     pool);
pool             2030 fs/ceph/addr.c 	ret = __ceph_pool_perm_get(ci, pool, pool_ns);
pool             2042 fs/ceph/addr.c 	if (pool == ci->i_layout.pool_id &&
pool             2046 fs/ceph/addr.c 		pool = ci->i_layout.pool_id;
pool             2050 fs/ceph/file.c 	src_oloc.pool = src_ci->i_layout.pool_id;
pool             2052 fs/ceph/file.c 	dst_oloc.pool = dst_ci->i_layout.pool_id;
pool              211 fs/ceph/ioctl.c 	oloc.pool = ci->i_layout.pool_id;
pool              317 fs/ceph/mds_client.h 	s64 pool;
pool               62 fs/ceph/xattr.c 	s64 pool = ci->i_layout.pool_id;
pool               73 fs/ceph/xattr.c 	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
pool               84 fs/ceph/xattr.c 		ci->i_layout.object_size, pool);
pool              164 fs/ceph/xattr.c 	s64 pool = ci->i_layout.pool_id;
pool              168 fs/ceph/xattr.c 	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
pool              174 fs/ceph/xattr.c 		ret = ceph_fmt_xattr(val, size, "%lld", pool);
pool              347 fs/ceph/xattr.c 	XATTR_LAYOUT_FIELD(dir, layout, pool),
pool              396 fs/ceph/xattr.c 	XATTR_LAYOUT_FIELD(file, layout, pool),
pool              385 fs/erofs/internal.h struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail);
pool               10 fs/erofs/utils.c struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail)
pool               14 fs/erofs/utils.c 	if (!list_empty(pool)) {
pool               15 fs/erofs/utils.c 		page = lru_to_page(pool);
pool              104 include/linux/agpgart.h 	struct agp_memory *pool;
pool              387 include/linux/bio.h extern int biovec_init_pool(mempool_t *pool, int pool_entries);
pool              240 include/linux/ceph/messenger.h 	struct ceph_msgpool *pool;
pool               13 include/linux/ceph/msgpool.h 	mempool_t *pool;
pool               19 include/linux/ceph/msgpool.h int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
pool               22 include/linux/ceph/msgpool.h extern void ceph_msgpool_destroy(struct ceph_msgpool *pool);
pool               23 include/linux/ceph/msgpool.h struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len,
pool              312 include/linux/ceph/osd_client.h 	s64 pool;
pool               23 include/linux/ceph/osdmap.h 	uint64_t pool;
pool               63 include/linux/ceph/osdmap.h static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool)
pool               65 include/linux/ceph/osdmap.h 	switch (pool->type) {
pool               76 include/linux/ceph/osdmap.h 	s64 pool;
pool               82 include/linux/ceph/osdmap.h 	oloc->pool = -1;
pool               88 include/linux/ceph/osdmap.h 	return oloc->pool == -1;
pool              236 include/linux/ceph/osdmap.h 	pgid->pool = ceph_decode_64(p);
pool               63 include/linux/ceph/rados.h 	__le32 pool;      /* object pool */
pool               24 include/linux/dmapool.h void dma_pool_destroy(struct dma_pool *pool);
pool               26 include/linux/dmapool.h void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
pool               28 include/linux/dmapool.h void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
pool               35 include/linux/dmapool.h void dmam_pool_destroy(struct dma_pool *pool);
pool               41 include/linux/dmapool.h static inline void dma_pool_destroy(struct dma_pool *pool) { }
pool               42 include/linux/dmapool.h static inline void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
pool               44 include/linux/dmapool.h static inline void dma_pool_free(struct dma_pool *pool, void *vaddr,
pool               49 include/linux/dmapool.h static inline void dmam_pool_destroy(struct dma_pool *pool) { }
pool               52 include/linux/dmapool.h static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
pool               55 include/linux/dmapool.h 	return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle);
pool               52 include/linux/genalloc.h 			void *data, struct gen_pool *pool,
pool               97 include/linux/genalloc.h extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
pool              101 include/linux/genalloc.h static inline int gen_pool_add_virt(struct gen_pool *pool, unsigned long addr,
pool              104 include/linux/genalloc.h 	return gen_pool_add_owner(pool, addr, phys, size, nid, NULL);
pool              119 include/linux/genalloc.h static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,
pool              122 include/linux/genalloc.h 	return gen_pool_add_virt(pool, addr, -1, size, nid);
pool              125 include/linux/genalloc.h unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
pool              128 include/linux/genalloc.h static inline unsigned long gen_pool_alloc_owner(struct gen_pool *pool,
pool              131 include/linux/genalloc.h 	return gen_pool_alloc_algo_owner(pool, size, pool->algo, pool->data,
pool              135 include/linux/genalloc.h static inline unsigned long gen_pool_alloc_algo(struct gen_pool *pool,
pool              138 include/linux/genalloc.h 	return gen_pool_alloc_algo_owner(pool, size, algo, data, NULL);
pool              151 include/linux/genalloc.h static inline unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
pool              153 include/linux/genalloc.h 	return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
pool              156 include/linux/genalloc.h extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size,
pool              158 include/linux/genalloc.h extern void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
pool              160 include/linux/genalloc.h extern void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
pool              162 include/linux/genalloc.h extern void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma);
pool              163 include/linux/genalloc.h extern void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
pool              165 include/linux/genalloc.h extern void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
pool              167 include/linux/genalloc.h extern void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr,
pool              169 include/linux/genalloc.h static inline void gen_pool_free(struct gen_pool *pool, unsigned long addr,
pool              172 include/linux/genalloc.h 	gen_pool_free_owner(pool, addr, size, NULL);
pool              180 include/linux/genalloc.h extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
pool              185 include/linux/genalloc.h 		struct gen_pool *pool, unsigned long start_addr);
pool              189 include/linux/genalloc.h 		void *data, struct gen_pool *pool, unsigned long start_addr);
pool              193 include/linux/genalloc.h 		void *data, struct gen_pool *pool, unsigned long start_addr);
pool              198 include/linux/genalloc.h 		void *data, struct gen_pool *pool, unsigned long start_addr);
pool              202 include/linux/genalloc.h 		struct gen_pool *pool, unsigned long start_addr);
pool              209 include/linux/genalloc.h bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
pool              644 include/linux/i3c/master.h void i3c_generic_ibi_free_pool(struct i3c_generic_ibi_pool *pool);
pool              647 include/linux/i3c/master.h i3c_generic_ibi_get_free_slot(struct i3c_generic_ibi_pool *pool);
pool              648 include/linux/i3c/master.h void i3c_generic_ibi_recycle_slot(struct i3c_generic_ibi_pool *pool,
pool               74 include/linux/iio/trigger.h 	unsigned long pool[BITS_TO_LONGS(CONFIG_IIO_CONSUMERS_PER_TRIGGER)];
pool               28 include/linux/mempool.h static inline bool mempool_initialized(mempool_t *pool)
pool               30 include/linux/mempool.h 	return pool->elements != NULL;
pool               33 include/linux/mempool.h void mempool_exit(mempool_t *pool);
pool               34 include/linux/mempool.h int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
pool               37 include/linux/mempool.h int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
pool               46 include/linux/mempool.h extern int mempool_resize(mempool_t *pool, int new_min_nr);
pool               47 include/linux/mempool.h extern void mempool_destroy(mempool_t *pool);
pool               48 include/linux/mempool.h extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
pool               49 include/linux/mempool.h extern void mempool_free(void *element, mempool_t *pool);
pool               60 include/linux/mempool.h mempool_init_slab_pool(mempool_t *pool, int min_nr, struct kmem_cache *kc)
pool               62 include/linux/mempool.h 	return mempool_init(pool, min_nr, mempool_alloc_slab,
pool               80 include/linux/mempool.h static inline int mempool_init_kmalloc_pool(mempool_t *pool, int min_nr, size_t size)
pool               82 include/linux/mempool.h 	return mempool_init(pool, min_nr, mempool_kmalloc,
pool               99 include/linux/mempool.h static inline int mempool_init_page_pool(mempool_t *pool, int min_nr, int order)
pool              101 include/linux/mempool.h 	return mempool_init(pool, min_nr, mempool_alloc_pages,
pool              303 include/linux/mlx5/driver.h 	struct dma_pool *pool;
pool             1421 include/linux/pci.h #define	pci_pool_destroy(pool) dma_pool_destroy(pool)
pool             1422 include/linux/pci.h #define	pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
pool             1423 include/linux/pci.h #define	pci_pool_zalloc(pool, flags, handle) \
pool             1424 include/linux/pci.h 		dma_pool_zalloc(pool, flags, handle)
pool             1425 include/linux/pci.h #define	pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
pool               19 include/linux/sram.h void *sram_exec_copy(struct gen_pool *pool, void *dst, void *src, size_t size);
pool               21 include/linux/sram.h static inline void *sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
pool              496 include/linux/sunrpc/svc.h 					struct svc_pool *pool, int node);
pool              498 include/linux/sunrpc/svc.h 					struct svc_pool *pool, int node);
pool              145 include/linux/tee_drv.h 				    struct tee_shm_pool *pool,
pool              297 include/linux/tee_drv.h void tee_shm_pool_free(struct tee_shm_pool *pool);
pool              203 include/linux/usb/hcd.h 	struct dma_pool		*pool[HCD_BUFFER_POOLS];
pool               10 include/linux/zbud.h 	int (*evict)(struct zbud_pool *pool, unsigned long handle);
pool               14 include/linux/zbud.h void zbud_destroy_pool(struct zbud_pool *pool);
pool               15 include/linux/zbud.h int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
pool               17 include/linux/zbud.h void zbud_free(struct zbud_pool *pool, unsigned long handle);
pool               18 include/linux/zbud.h int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries);
pool               19 include/linux/zbud.h void *zbud_map(struct zbud_pool *pool, unsigned long handle);
pool               20 include/linux/zbud.h void zbud_unmap(struct zbud_pool *pool, unsigned long handle);
pool               21 include/linux/zbud.h u64 zbud_get_pool_size(struct zbud_pool *pool);
pool               18 include/linux/zpool.h 	int (*evict)(struct zpool *pool, unsigned long handle);
pool               45 include/linux/zpool.h const char *zpool_get_type(struct zpool *pool);
pool               47 include/linux/zpool.h void zpool_destroy_pool(struct zpool *pool);
pool               49 include/linux/zpool.h bool zpool_malloc_support_movable(struct zpool *pool);
pool               51 include/linux/zpool.h int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp,
pool               54 include/linux/zpool.h void zpool_free(struct zpool *pool, unsigned long handle);
pool               56 include/linux/zpool.h int zpool_shrink(struct zpool *pool, unsigned int pages,
pool               59 include/linux/zpool.h void *zpool_map_handle(struct zpool *pool, unsigned long handle,
pool               62 include/linux/zpool.h void zpool_unmap_handle(struct zpool *pool, unsigned long handle);
pool               64 include/linux/zpool.h u64 zpool_get_total_size(struct zpool *pool);
pool               93 include/linux/zpool.h 	void (*destroy)(void *pool);
pool               96 include/linux/zpool.h 	int (*malloc)(void *pool, size_t size, gfp_t gfp,
pool               98 include/linux/zpool.h 	void (*free)(void *pool, unsigned long handle);
pool              100 include/linux/zpool.h 	int (*shrink)(void *pool, unsigned int pages,
pool              103 include/linux/zpool.h 	void *(*map)(void *pool, unsigned long handle,
pool              105 include/linux/zpool.h 	void (*unmap)(void *pool, unsigned long handle);
pool              107 include/linux/zpool.h 	u64 (*total_size)(void *pool);
pool              114 include/linux/zpool.h bool zpool_evictable(struct zpool *pool);
pool               45 include/linux/zsmalloc.h void zs_destroy_pool(struct zs_pool *pool);
pool               47 include/linux/zsmalloc.h unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags);
pool               48 include/linux/zsmalloc.h void zs_free(struct zs_pool *pool, unsigned long obj);
pool               50 include/linux/zsmalloc.h size_t zs_huge_class_size(struct zs_pool *pool);
pool               52 include/linux/zsmalloc.h void *zs_map_object(struct zs_pool *pool, unsigned long handle,
pool               54 include/linux/zsmalloc.h void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
pool               56 include/linux/zsmalloc.h unsigned long zs_get_total_pages(struct zs_pool *pool);
pool               57 include/linux/zsmalloc.h unsigned long zs_compact(struct zs_pool *pool);
pool               59 include/linux/zsmalloc.h void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);
pool              117 include/net/page_pool.h struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
pool              119 include/net/page_pool.h static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
pool              123 include/net/page_pool.h 	return page_pool_alloc_pages(pool, gfp);
pool              130 include/net/page_pool.h inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
pool              132 include/net/page_pool.h 	return pool->p.dma_dir;
pool              138 include/net/page_pool.h void page_pool_destroy(struct page_pool *pool);
pool              139 include/net/page_pool.h void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
pool              141 include/net/page_pool.h static inline void page_pool_destroy(struct page_pool *pool)
pool              145 include/net/page_pool.h static inline void page_pool_use_xdp_mem(struct page_pool *pool,
pool              152 include/net/page_pool.h void __page_pool_put_page(struct page_pool *pool,
pool              155 include/net/page_pool.h static inline void page_pool_put_page(struct page_pool *pool,
pool              162 include/net/page_pool.h 	__page_pool_put_page(pool, page, allow_direct);
pool              166 include/net/page_pool.h static inline void page_pool_recycle_direct(struct page_pool *pool,
pool              169 include/net/page_pool.h 	__page_pool_put_page(pool, page, true);
pool              177 include/net/page_pool.h void page_pool_unmap_page(struct page_pool *pool, struct page *page);
pool              178 include/net/page_pool.h static inline void page_pool_release_page(struct page_pool *pool,
pool              182 include/net/page_pool.h 	page_pool_unmap_page(pool, page);
pool              200 include/net/page_pool.h static inline bool page_pool_put(struct page_pool *pool)
pool              202 include/net/page_pool.h 	return refcount_dec_and_test(&pool->user_cnt);
pool               61 include/rdma/ib_fmr_pool.h 	void                  (*flush_function)(struct ib_fmr_pool *pool,
pool               69 include/rdma/ib_fmr_pool.h 	struct ib_fmr_pool *pool;
pool               82 include/rdma/ib_fmr_pool.h void ib_destroy_fmr_pool(struct ib_fmr_pool *pool);
pool               84 include/rdma/ib_fmr_pool.h int ib_flush_fmr_pool(struct ib_fmr_pool *pool);
pool              429 include/scsi/libfc.h 	struct fc_exch_pool *pool;
pool              247 include/scsi/libiscsi.h 	void			**pool;		/* Pool of elements */
pool               94 include/soc/fsl/bman.h void bman_free_pool(struct bman_pool *pool);
pool              103 include/soc/fsl/bman.h int bman_get_bpid(const struct bman_pool *pool);
pool              114 include/soc/fsl/bman.h int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num);
pool              127 include/soc/fsl/bman.h int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num);
pool               15 include/trace/events/page_pool.h 	TP_PROTO(const struct page_pool *pool,
pool               18 include/trace/events/page_pool.h 	TP_ARGS(pool, inflight, hold, release),
pool               21 include/trace/events/page_pool.h 		__field(const struct page_pool *, pool)
pool               28 include/trace/events/page_pool.h 		__entry->pool		= pool;
pool               35 include/trace/events/page_pool.h 	  __entry->pool, __entry->inflight, __entry->hold, __entry->release)
pool               40 include/trace/events/page_pool.h 	TP_PROTO(const struct page_pool *pool,
pool               43 include/trace/events/page_pool.h 	TP_ARGS(pool, page, release),
pool               46 include/trace/events/page_pool.h 		__field(const struct page_pool *,	pool)
pool               52 include/trace/events/page_pool.h 		__entry->pool		= pool;
pool               58 include/trace/events/page_pool.h 		  __entry->pool, __entry->page, __entry->release)
pool               63 include/trace/events/page_pool.h 	TP_PROTO(const struct page_pool *pool,
pool               66 include/trace/events/page_pool.h 	TP_ARGS(pool, page, hold),
pool               69 include/trace/events/page_pool.h 		__field(const struct page_pool *,	pool)
pool               75 include/trace/events/page_pool.h 		__entry->pool	= pool;
pool               81 include/trace/events/page_pool.h 		  __entry->pool, __entry->page, __entry->hold)
pool               60 include/trace/events/workqueue.h 		__entry->cpu		= pwq->pool->cpu;
pool              341 include/uapi/linux/hyperv.h 	__u8 pool;
pool              396 include/uapi/linux/hyperv.h 	__u8 pool;
pool               43 include/uapi/linux/netfilter_bridge/ebt_among.h 	struct ebt_mac_wormhash_tuple pool[0];
pool              117 kernel/cgroup/rdma.c 	struct rdmacg_resource_pool *pool;
pool              121 kernel/cgroup/rdma.c 	list_for_each_entry(pool, &cg->rpools, cg_node)
pool              122 kernel/cgroup/rdma.c 		if (pool->device == device)
pool              123 kernel/cgroup/rdma.c 			return pool;
pool              200 kernel/workqueue.c 	struct worker_pool	*pool;		/* I: the associated pool */
pool              378 kernel/workqueue.c #define for_each_cpu_worker_pool(pool, cpu)				\
pool              379 kernel/workqueue.c 	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
pool              380 kernel/workqueue.c 	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
pool              381 kernel/workqueue.c 	     (pool)++)
pool              395 kernel/workqueue.c #define for_each_pool(pool, pi)						\
pool              396 kernel/workqueue.c 	idr_for_each_entry(&worker_pool_idr, pool, pi)			\
pool              410 kernel/workqueue.c #define for_each_pool_worker(worker, pool)				\
pool              411 kernel/workqueue.c 	list_for_each_entry((worker), &(pool)->workers, node)		\
pool              537 kernel/workqueue.c static int worker_pool_assign_id(struct worker_pool *pool)
pool              543 kernel/workqueue.c 	ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
pool              546 kernel/workqueue.c 		pool->id = ret;
pool              720 kernel/workqueue.c 			(data & WORK_STRUCT_WQ_DATA_MASK))->pool;
pool              742 kernel/workqueue.c 			(data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
pool              768 kernel/workqueue.c static bool __need_more_worker(struct worker_pool *pool)
pool              770 kernel/workqueue.c 	return !atomic_read(&pool->nr_running);
pool              781 kernel/workqueue.c static bool need_more_worker(struct worker_pool *pool)
pool              783 kernel/workqueue.c 	return !list_empty(&pool->worklist) && __need_more_worker(pool);
pool              787 kernel/workqueue.c static bool may_start_working(struct worker_pool *pool)
pool              789 kernel/workqueue.c 	return pool->nr_idle;
pool              793 kernel/workqueue.c static bool keep_working(struct worker_pool *pool)
pool              795 kernel/workqueue.c 	return !list_empty(&pool->worklist) &&
pool              796 kernel/workqueue.c 		atomic_read(&pool->nr_running) <= 1;
pool              800 kernel/workqueue.c static bool need_to_create_worker(struct worker_pool *pool)
pool              802 kernel/workqueue.c 	return need_more_worker(pool) && !may_start_working(pool);
pool              806 kernel/workqueue.c static bool too_many_workers(struct worker_pool *pool)
pool              808 kernel/workqueue.c 	bool managing = pool->flags & POOL_MANAGER_ACTIVE;
pool              809 kernel/workqueue.c 	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
pool              810 kernel/workqueue.c 	int nr_busy = pool->nr_workers - nr_idle;
pool              820 kernel/workqueue.c static struct worker *first_idle_worker(struct worker_pool *pool)
pool              822 kernel/workqueue.c 	if (unlikely(list_empty(&pool->idle_list)))
pool              825 kernel/workqueue.c 	return list_first_entry(&pool->idle_list, struct worker, entry);
pool              837 kernel/workqueue.c static void wake_up_worker(struct worker_pool *pool)
pool              839 kernel/workqueue.c 	struct worker *worker = first_idle_worker(pool);
pool              858 kernel/workqueue.c 		atomic_inc(&worker->pool->nr_running);
pool              872 kernel/workqueue.c 	struct worker_pool *pool;
pool              882 kernel/workqueue.c 	pool = worker->pool;
pool              888 kernel/workqueue.c 	spin_lock_irq(&pool->lock);
pool              901 kernel/workqueue.c 	if (atomic_dec_and_test(&pool->nr_running) &&
pool              902 kernel/workqueue.c 	    !list_empty(&pool->worklist)) {
pool              903 kernel/workqueue.c 		next = first_idle_worker(pool);
pool              907 kernel/workqueue.c 	spin_unlock_irq(&pool->lock);
pool              953 kernel/workqueue.c 	struct worker_pool *pool = worker->pool;
pool              960 kernel/workqueue.c 		atomic_dec(&pool->nr_running);
pool              978 kernel/workqueue.c 	struct worker_pool *pool = worker->pool;
pool              992 kernel/workqueue.c 			atomic_inc(&pool->nr_running);
pool             1028 kernel/workqueue.c static struct worker *find_worker_executing_work(struct worker_pool *pool,
pool             1033 kernel/workqueue.c 	hash_for_each_possible(pool->busy_hash, worker, hentry,
pool             1092 kernel/workqueue.c 	lockdep_assert_held(&pwq->pool->lock);
pool             1106 kernel/workqueue.c 	lockdep_assert_held(&pwq->pool->lock);
pool             1135 kernel/workqueue.c 		spin_lock_irq(&pwq->pool->lock);
pool             1137 kernel/workqueue.c 		spin_unlock_irq(&pwq->pool->lock);
pool             1146 kernel/workqueue.c 	if (list_empty(&pwq->pool->worklist))
pool             1147 kernel/workqueue.c 		pwq->pool->watchdog_ts = jiffies;
pool             1148 kernel/workqueue.c 	move_linked_works(work, &pwq->pool->worklist, NULL);
pool             1238 kernel/workqueue.c 	struct worker_pool *pool;
pool             1265 kernel/workqueue.c 	pool = get_work_pool(work);
pool             1266 kernel/workqueue.c 	if (!pool)
pool             1269 kernel/workqueue.c 	spin_lock(&pool->lock);
pool             1279 kernel/workqueue.c 	if (pwq && pwq->pool == pool) {
pool             1296 kernel/workqueue.c 		set_work_pool_and_keep_pending(work, pool->id);
pool             1298 kernel/workqueue.c 		spin_unlock(&pool->lock);
pool             1302 kernel/workqueue.c 	spin_unlock(&pool->lock);
pool             1328 kernel/workqueue.c 	struct worker_pool *pool = pwq->pool;
pool             1342 kernel/workqueue.c 	if (__need_more_worker(pool))
pool             1343 kernel/workqueue.c 		wake_up_worker(pool);
pool             1437 kernel/workqueue.c 	if (last_pool && last_pool != pwq->pool) {
pool             1449 kernel/workqueue.c 			spin_lock(&pwq->pool->lock);
pool             1452 kernel/workqueue.c 		spin_lock(&pwq->pool->lock);
pool             1465 kernel/workqueue.c 			spin_unlock(&pwq->pool->lock);
pool             1486 kernel/workqueue.c 		worklist = &pwq->pool->worklist;
pool             1488 kernel/workqueue.c 			pwq->pool->watchdog_ts = jiffies;
pool             1497 kernel/workqueue.c 	spin_unlock(&pwq->pool->lock);
pool             1770 kernel/workqueue.c 	struct worker_pool *pool = worker->pool;
pool             1779 kernel/workqueue.c 	pool->nr_idle++;
pool             1783 kernel/workqueue.c 	list_add(&worker->entry, &pool->idle_list);
pool             1785 kernel/workqueue.c 	if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
pool             1786 kernel/workqueue.c 		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
pool             1794 kernel/workqueue.c 	WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
pool             1795 kernel/workqueue.c 		     pool->nr_workers == pool->nr_idle &&
pool             1796 kernel/workqueue.c 		     atomic_read(&pool->nr_running));
pool             1810 kernel/workqueue.c 	struct worker_pool *pool = worker->pool;
pool             1815 kernel/workqueue.c 	pool->nr_idle--;
pool             1844 kernel/workqueue.c 				   struct worker_pool *pool)
pool             1852 kernel/workqueue.c 	set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
pool             1859 kernel/workqueue.c 	if (pool->flags & POOL_DISASSOCIATED)
pool             1862 kernel/workqueue.c 	list_add_tail(&worker->node, &pool->workers);
pool             1863 kernel/workqueue.c 	worker->pool = pool;
pool             1878 kernel/workqueue.c 	struct worker_pool *pool = worker->pool;
pool             1884 kernel/workqueue.c 	worker->pool = NULL;
pool             1886 kernel/workqueue.c 	if (list_empty(&pool->workers))
pool             1887 kernel/workqueue.c 		detach_completion = pool->detach_completion;
pool             1909 kernel/workqueue.c static struct worker *create_worker(struct worker_pool *pool)
pool             1916 kernel/workqueue.c 	id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL);
pool             1920 kernel/workqueue.c 	worker = alloc_worker(pool->node);
pool             1926 kernel/workqueue.c 	if (pool->cpu >= 0)
pool             1927 kernel/workqueue.c 		snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
pool             1928 kernel/workqueue.c 			 pool->attrs->nice < 0  ? "H" : "");
pool             1930 kernel/workqueue.c 		snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
pool             1932 kernel/workqueue.c 	worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
pool             1937 kernel/workqueue.c 	set_user_nice(worker->task, pool->attrs->nice);
pool             1938 kernel/workqueue.c 	kthread_bind_mask(worker->task, pool->attrs->cpumask);
pool             1941 kernel/workqueue.c 	worker_attach_to_pool(worker, pool);
pool             1944 kernel/workqueue.c 	spin_lock_irq(&pool->lock);
pool             1945 kernel/workqueue.c 	worker->pool->nr_workers++;
pool             1948 kernel/workqueue.c 	spin_unlock_irq(&pool->lock);
pool             1954 kernel/workqueue.c 		ida_simple_remove(&pool->worker_ida, id);
pool             1971 kernel/workqueue.c 	struct worker_pool *pool = worker->pool;
pool             1973 kernel/workqueue.c 	lockdep_assert_held(&pool->lock);
pool             1981 kernel/workqueue.c 	pool->nr_workers--;
pool             1982 kernel/workqueue.c 	pool->nr_idle--;
pool             1991 kernel/workqueue.c 	struct worker_pool *pool = from_timer(pool, t, idle_timer);
pool             1993 kernel/workqueue.c 	spin_lock_irq(&pool->lock);
pool             1995 kernel/workqueue.c 	while (too_many_workers(pool)) {
pool             2000 kernel/workqueue.c 		worker = list_entry(pool->idle_list.prev, struct worker, entry);
pool             2004 kernel/workqueue.c 			mod_timer(&pool->idle_timer, expires);
pool             2011 kernel/workqueue.c 	spin_unlock_irq(&pool->lock);
pool             2039 kernel/workqueue.c 	struct worker_pool *pool = from_timer(pool, t, mayday_timer);
pool             2042 kernel/workqueue.c 	spin_lock_irq(&pool->lock);
pool             2045 kernel/workqueue.c 	if (need_to_create_worker(pool)) {
pool             2052 kernel/workqueue.c 		list_for_each_entry(work, &pool->worklist, entry)
pool             2057 kernel/workqueue.c 	spin_unlock_irq(&pool->lock);
pool             2059 kernel/workqueue.c 	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
pool             2080 kernel/workqueue.c static void maybe_create_worker(struct worker_pool *pool)
pool             2081 kernel/workqueue.c __releases(&pool->lock)
pool             2082 kernel/workqueue.c __acquires(&pool->lock)
pool             2085 kernel/workqueue.c 	spin_unlock_irq(&pool->lock);
pool             2088 kernel/workqueue.c 	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
pool             2091 kernel/workqueue.c 		if (create_worker(pool) || !need_to_create_worker(pool))
pool             2096 kernel/workqueue.c 		if (!need_to_create_worker(pool))
pool             2100 kernel/workqueue.c 	del_timer_sync(&pool->mayday_timer);
pool             2101 kernel/workqueue.c 	spin_lock_irq(&pool->lock);
pool             2107 kernel/workqueue.c 	if (need_to_create_worker(pool))
pool             2135 kernel/workqueue.c 	struct worker_pool *pool = worker->pool;
pool             2137 kernel/workqueue.c 	if (pool->flags & POOL_MANAGER_ACTIVE)
pool             2140 kernel/workqueue.c 	pool->flags |= POOL_MANAGER_ACTIVE;
pool             2141 kernel/workqueue.c 	pool->manager = worker;
pool             2143 kernel/workqueue.c 	maybe_create_worker(pool);
pool             2145 kernel/workqueue.c 	pool->manager = NULL;
pool             2146 kernel/workqueue.c 	pool->flags &= ~POOL_MANAGER_ACTIVE;
pool             2166 kernel/workqueue.c __releases(&pool->lock)
pool             2167 kernel/workqueue.c __acquires(&pool->lock)
pool             2170 kernel/workqueue.c 	struct worker_pool *pool = worker->pool;
pool             2187 kernel/workqueue.c 	WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
pool             2188 kernel/workqueue.c 		     raw_smp_processor_id() != pool->cpu);
pool             2196 kernel/workqueue.c 	collision = find_worker_executing_work(pool, work);
pool             2204 kernel/workqueue.c 	hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
pool             2234 kernel/workqueue.c 	if (need_more_worker(pool))
pool             2235 kernel/workqueue.c 		wake_up_worker(pool);
pool             2243 kernel/workqueue.c 	set_work_pool_and_clear_pending(work, pool->id);
pool             2245 kernel/workqueue.c 	spin_unlock_irq(&pool->lock);
pool             2300 kernel/workqueue.c 	spin_lock_irq(&pool->lock);
pool             2363 kernel/workqueue.c 	struct worker_pool *pool = worker->pool;
pool             2368 kernel/workqueue.c 	spin_lock_irq(&pool->lock);
pool             2372 kernel/workqueue.c 		spin_unlock_irq(&pool->lock);
pool             2377 kernel/workqueue.c 		ida_simple_remove(&pool->worker_ida, worker->id);
pool             2386 kernel/workqueue.c 	if (!need_more_worker(pool))
pool             2390 kernel/workqueue.c 	if (unlikely(!may_start_working(pool)) && manage_workers(worker))
pool             2411 kernel/workqueue.c 			list_first_entry(&pool->worklist,
pool             2414 kernel/workqueue.c 		pool->watchdog_ts = jiffies;
pool             2425 kernel/workqueue.c 	} while (keep_working(pool));
pool             2438 kernel/workqueue.c 	spin_unlock_irq(&pool->lock);
pool             2497 kernel/workqueue.c 		struct worker_pool *pool = pwq->pool;
pool             2506 kernel/workqueue.c 		worker_attach_to_pool(rescuer, pool);
pool             2508 kernel/workqueue.c 		spin_lock_irq(&pool->lock);
pool             2515 kernel/workqueue.c 		list_for_each_entry_safe(work, n, &pool->worklist, entry) {
pool             2518 kernel/workqueue.c 					pool->watchdog_ts = jiffies;
pool             2536 kernel/workqueue.c 			if (need_to_create_worker(pool)) {
pool             2561 kernel/workqueue.c 		if (need_more_worker(pool))
pool             2562 kernel/workqueue.c 			wake_up_worker(pool);
pool             2564 kernel/workqueue.c 		spin_unlock_irq(&pool->lock);
pool             2736 kernel/workqueue.c 		struct worker_pool *pool = pwq->pool;
pool             2738 kernel/workqueue.c 		spin_lock_irq(&pool->lock);
pool             2755 kernel/workqueue.c 		spin_unlock_irq(&pool->lock);
pool             2955 kernel/workqueue.c 		spin_lock_irq(&pwq->pool->lock);
pool             2957 kernel/workqueue.c 		spin_unlock_irq(&pwq->pool->lock);
pool             2981 kernel/workqueue.c 	struct worker_pool *pool;
pool             2987 kernel/workqueue.c 	pool = get_work_pool(work);
pool             2988 kernel/workqueue.c 	if (!pool) {
pool             2993 kernel/workqueue.c 	spin_lock_irq(&pool->lock);
pool             2997 kernel/workqueue.c 		if (unlikely(pwq->pool != pool))
pool             3000 kernel/workqueue.c 		worker = find_worker_executing_work(pool, work);
pool             3009 kernel/workqueue.c 	spin_unlock_irq(&pool->lock);
pool             3028 kernel/workqueue.c 	spin_unlock_irq(&pool->lock);
pool             3419 kernel/workqueue.c static int init_worker_pool(struct worker_pool *pool)
pool             3421 kernel/workqueue.c 	spin_lock_init(&pool->lock);
pool             3422 kernel/workqueue.c 	pool->id = -1;
pool             3423 kernel/workqueue.c 	pool->cpu = -1;
pool             3424 kernel/workqueue.c 	pool->node = NUMA_NO_NODE;
pool             3425 kernel/workqueue.c 	pool->flags |= POOL_DISASSOCIATED;
pool             3426 kernel/workqueue.c 	pool->watchdog_ts = jiffies;
pool             3427 kernel/workqueue.c 	INIT_LIST_HEAD(&pool->worklist);
pool             3428 kernel/workqueue.c 	INIT_LIST_HEAD(&pool->idle_list);
pool             3429 kernel/workqueue.c 	hash_init(pool->busy_hash);
pool             3431 kernel/workqueue.c 	timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
pool             3433 kernel/workqueue.c 	timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
pool             3435 kernel/workqueue.c 	INIT_LIST_HEAD(&pool->workers);
pool             3437 kernel/workqueue.c 	ida_init(&pool->worker_ida);
pool             3438 kernel/workqueue.c 	INIT_HLIST_NODE(&pool->hash_node);
pool             3439 kernel/workqueue.c 	pool->refcnt = 1;
pool             3442 kernel/workqueue.c 	pool->attrs = alloc_workqueue_attrs();
pool             3443 kernel/workqueue.c 	if (!pool->attrs)
pool             3504 kernel/workqueue.c 	struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
pool             3506 kernel/workqueue.c 	ida_destroy(&pool->worker_ida);
pool             3507 kernel/workqueue.c 	free_workqueue_attrs(pool->attrs);
pool             3508 kernel/workqueue.c 	kfree(pool);
pool             3522 kernel/workqueue.c static void put_unbound_pool(struct worker_pool *pool)
pool             3529 kernel/workqueue.c 	if (--pool->refcnt)
pool             3533 kernel/workqueue.c 	if (WARN_ON(!(pool->cpu < 0)) ||
pool             3534 kernel/workqueue.c 	    WARN_ON(!list_empty(&pool->worklist)))
pool             3538 kernel/workqueue.c 	if (pool->id >= 0)
pool             3539 kernel/workqueue.c 		idr_remove(&worker_pool_idr, pool->id);
pool             3540 kernel/workqueue.c 	hash_del(&pool->hash_node);
pool             3547 kernel/workqueue.c 	spin_lock_irq(&pool->lock);
pool             3549 kernel/workqueue.c 			    !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
pool             3550 kernel/workqueue.c 	pool->flags |= POOL_MANAGER_ACTIVE;
pool             3552 kernel/workqueue.c 	while ((worker = first_idle_worker(pool)))
pool             3554 kernel/workqueue.c 	WARN_ON(pool->nr_workers || pool->nr_idle);
pool             3555 kernel/workqueue.c 	spin_unlock_irq(&pool->lock);
pool             3558 kernel/workqueue.c 	if (!list_empty(&pool->workers))
pool             3559 kernel/workqueue.c 		pool->detach_completion = &detach_completion;
pool             3562 kernel/workqueue.c 	if (pool->detach_completion)
pool             3563 kernel/workqueue.c 		wait_for_completion(pool->detach_completion);
pool             3566 kernel/workqueue.c 	del_timer_sync(&pool->idle_timer);
pool             3567 kernel/workqueue.c 	del_timer_sync(&pool->mayday_timer);
pool             3570 kernel/workqueue.c 	call_rcu(&pool->rcu, rcu_free_pool);
pool             3590 kernel/workqueue.c 	struct worker_pool *pool;
pool             3597 kernel/workqueue.c 	hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
pool             3598 kernel/workqueue.c 		if (wqattrs_equal(pool->attrs, attrs)) {
pool             3599 kernel/workqueue.c 			pool->refcnt++;
pool             3600 kernel/workqueue.c 			return pool;
pool             3616 kernel/workqueue.c 	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
pool             3617 kernel/workqueue.c 	if (!pool || init_worker_pool(pool) < 0)
pool             3620 kernel/workqueue.c 	lockdep_set_subclass(&pool->lock, 1);	/* see put_pwq() */
pool             3621 kernel/workqueue.c 	copy_workqueue_attrs(pool->attrs, attrs);
pool             3622 kernel/workqueue.c 	pool->node = target_node;
pool             3628 kernel/workqueue.c 	pool->attrs->no_numa = false;
pool             3630 kernel/workqueue.c 	if (worker_pool_assign_id(pool) < 0)
pool             3634 kernel/workqueue.c 	if (wq_online && !create_worker(pool))
pool             3638 kernel/workqueue.c 	hash_add(unbound_pool_hash, &pool->hash_node, hash);
pool             3640 kernel/workqueue.c 	return pool;
pool             3642 kernel/workqueue.c 	if (pool)
pool             3643 kernel/workqueue.c 		put_unbound_pool(pool);
pool             3662 kernel/workqueue.c 	struct worker_pool *pool = pwq->pool;
pool             3674 kernel/workqueue.c 	put_unbound_pool(pool);
pool             3711 kernel/workqueue.c 	spin_lock_irqsave(&pwq->pool->lock, flags);
pool             3729 kernel/workqueue.c 		wake_up_worker(pwq->pool);
pool             3734 kernel/workqueue.c 	spin_unlock_irqrestore(&pwq->pool->lock, flags);
pool             3739 kernel/workqueue.c 		     struct worker_pool *pool)
pool             3745 kernel/workqueue.c 	pwq->pool = pool;
pool             3780 kernel/workqueue.c 	struct worker_pool *pool;
pool             3785 kernel/workqueue.c 	pool = get_unbound_pool(attrs);
pool             3786 kernel/workqueue.c 	if (!pool)
pool             3789 kernel/workqueue.c 	pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
pool             3791 kernel/workqueue.c 		put_unbound_pool(pool);
pool             3795 kernel/workqueue.c 	init_pwq(pwq, wq, pool);
pool             4114 kernel/workqueue.c 	if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
pool             4115 kernel/workqueue.c 		if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
pool             4136 kernel/workqueue.c 	spin_lock_irq(&wq->dfl_pwq->pool->lock);
pool             4138 kernel/workqueue.c 	spin_unlock_irq(&wq->dfl_pwq->pool->lock);
pool             4539 kernel/workqueue.c 	struct worker_pool *pool;
pool             4547 kernel/workqueue.c 	pool = get_work_pool(work);
pool             4548 kernel/workqueue.c 	if (pool) {
pool             4549 kernel/workqueue.c 		spin_lock_irqsave(&pool->lock, flags);
pool             4550 kernel/workqueue.c 		if (find_worker_executing_work(pool, work))
pool             4552 kernel/workqueue.c 		spin_unlock_irqrestore(&pool->lock, flags);
pool             4632 kernel/workqueue.c static void pr_cont_pool_info(struct worker_pool *pool)
pool             4634 kernel/workqueue.c 	pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
pool             4635 kernel/workqueue.c 	if (pool->node != NUMA_NO_NODE)
pool             4636 kernel/workqueue.c 		pr_cont(" node=%d", pool->node);
pool             4637 kernel/workqueue.c 	pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
pool             4656 kernel/workqueue.c 	struct worker_pool *pool = pwq->pool;
pool             4662 kernel/workqueue.c 	pr_info("  pwq %d:", pool->id);
pool             4663 kernel/workqueue.c 	pr_cont_pool_info(pool);
pool             4669 kernel/workqueue.c 	hash_for_each(pool->busy_hash, bkt, worker, hentry) {
pool             4679 kernel/workqueue.c 		hash_for_each(pool->busy_hash, bkt, worker, hentry) {
pool             4694 kernel/workqueue.c 	list_for_each_entry(work, &pool->worklist, entry) {
pool             4704 kernel/workqueue.c 		list_for_each_entry(work, &pool->worklist, entry) {
pool             4735 kernel/workqueue.c 	struct worker_pool *pool;
pool             4759 kernel/workqueue.c 			spin_lock_irqsave(&pwq->pool->lock, flags);
pool             4762 kernel/workqueue.c 			spin_unlock_irqrestore(&pwq->pool->lock, flags);
pool             4772 kernel/workqueue.c 	for_each_pool(pool, pi) {
pool             4776 kernel/workqueue.c 		spin_lock_irqsave(&pool->lock, flags);
pool             4777 kernel/workqueue.c 		if (pool->nr_workers == pool->nr_idle)
pool             4780 kernel/workqueue.c 		pr_info("pool %d:", pool->id);
pool             4781 kernel/workqueue.c 		pr_cont_pool_info(pool);
pool             4783 kernel/workqueue.c 			jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
pool             4784 kernel/workqueue.c 			pool->nr_workers);
pool             4785 kernel/workqueue.c 		if (pool->manager)
pool             4787 kernel/workqueue.c 				task_pid_nr(pool->manager->task));
pool             4788 kernel/workqueue.c 		list_for_each_entry(worker, &pool->idle_list, entry) {
pool             4795 kernel/workqueue.c 		spin_unlock_irqrestore(&pool->lock, flags);
pool             4822 kernel/workqueue.c 		struct worker_pool *pool = worker->pool;
pool             4824 kernel/workqueue.c 		if (pool) {
pool             4825 kernel/workqueue.c 			spin_lock_irq(&pool->lock);
pool             4839 kernel/workqueue.c 			spin_unlock_irq(&pool->lock);
pool             4865 kernel/workqueue.c 	struct worker_pool *pool;
pool             4868 kernel/workqueue.c 	for_each_cpu_worker_pool(pool, cpu) {
pool             4870 kernel/workqueue.c 		spin_lock_irq(&pool->lock);
pool             4879 kernel/workqueue.c 		for_each_pool_worker(worker, pool)
pool             4882 kernel/workqueue.c 		pool->flags |= POOL_DISASSOCIATED;
pool             4884 kernel/workqueue.c 		spin_unlock_irq(&pool->lock);
pool             4903 kernel/workqueue.c 		atomic_set(&pool->nr_running, 0);
pool             4910 kernel/workqueue.c 		spin_lock_irq(&pool->lock);
pool             4911 kernel/workqueue.c 		wake_up_worker(pool);
pool             4912 kernel/workqueue.c 		spin_unlock_irq(&pool->lock);
pool             4922 kernel/workqueue.c static void rebind_workers(struct worker_pool *pool)
pool             4935 kernel/workqueue.c 	for_each_pool_worker(worker, pool)
pool             4937 kernel/workqueue.c 						  pool->attrs->cpumask) < 0);
pool             4939 kernel/workqueue.c 	spin_lock_irq(&pool->lock);
pool             4941 kernel/workqueue.c 	pool->flags &= ~POOL_DISASSOCIATED;
pool             4943 kernel/workqueue.c 	for_each_pool_worker(worker, pool) {
pool             4978 kernel/workqueue.c 	spin_unlock_irq(&pool->lock);
pool             4991 kernel/workqueue.c static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
pool             4999 kernel/workqueue.c 	if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
pool             5002 kernel/workqueue.c 	cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
pool             5005 kernel/workqueue.c 	for_each_pool_worker(worker, pool)
pool             5011 kernel/workqueue.c 	struct worker_pool *pool;
pool             5013 kernel/workqueue.c 	for_each_cpu_worker_pool(pool, cpu) {
pool             5014 kernel/workqueue.c 		if (pool->nr_workers)
pool             5016 kernel/workqueue.c 		if (!create_worker(pool))
pool             5024 kernel/workqueue.c 	struct worker_pool *pool;
pool             5030 kernel/workqueue.c 	for_each_pool(pool, pi) {
pool             5033 kernel/workqueue.c 		if (pool->cpu == cpu)
pool             5034 kernel/workqueue.c 			rebind_workers(pool);
pool             5035 kernel/workqueue.c 		else if (pool->cpu < 0)
pool             5036 kernel/workqueue.c 			restore_unbound_workers_cpumask(pool, cpu);
pool             5400 kernel/workqueue.c 				     unbound_pwq_by_node(wq, node)->pool->id);
pool             5731 kernel/workqueue.c 	struct worker_pool *pool;
pool             5739 kernel/workqueue.c 	for_each_pool(pool, pi) {
pool             5742 kernel/workqueue.c 		if (list_empty(&pool->worklist))
pool             5746 kernel/workqueue.c 		pool_ts = READ_ONCE(pool->watchdog_ts);
pool             5754 kernel/workqueue.c 		if (pool->cpu >= 0) {
pool             5757 kernel/workqueue.c 						  pool->cpu));
pool             5766 kernel/workqueue.c 			pr_cont_pool_info(pool);
pool             5906 kernel/workqueue.c 		struct worker_pool *pool;
pool             5909 kernel/workqueue.c 		for_each_cpu_worker_pool(pool, cpu) {
pool             5910 kernel/workqueue.c 			BUG_ON(init_worker_pool(pool));
pool             5911 kernel/workqueue.c 			pool->cpu = cpu;
pool             5912 kernel/workqueue.c 			cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
pool             5913 kernel/workqueue.c 			pool->attrs->nice = std_nice[i++];
pool             5914 kernel/workqueue.c 			pool->node = cpu_to_node(cpu);
pool             5918 kernel/workqueue.c 			BUG_ON(worker_pool_assign_id(pool));
pool             5974 kernel/workqueue.c 	struct worker_pool *pool;
pool             5991 kernel/workqueue.c 		for_each_cpu_worker_pool(pool, cpu) {
pool             5992 kernel/workqueue.c 			pool->node = cpu_to_node(cpu);
pool             6007 kernel/workqueue.c 		for_each_cpu_worker_pool(pool, cpu) {
pool             6008 kernel/workqueue.c 			pool->flags &= ~POOL_DISASSOCIATED;
pool             6009 kernel/workqueue.c 			BUG_ON(!create_worker(pool));
pool             6013 kernel/workqueue.c 	hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
pool             6014 kernel/workqueue.c 		BUG_ON(!create_worker(pool));
pool               39 kernel/workqueue_internal.h 	struct worker_pool	*pool;		/* A: the associated pool */
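The kernel/workqueue.c and kernel/workqueue_internal.h entries above are the internal worker_pool machinery that backs the public workqueue API. For orientation, a minimal client-side sketch follows; the names my_wq and my_work_fn are illustrative and do not come from the listing.

#include <linux/module.h>
#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	/* Runs in process context on a worker borrowed from a worker_pool. */
	pr_info("hello from a pool worker\n");
}

static DECLARE_WORK(my_work, my_work_fn);
static struct workqueue_struct *my_wq;

static int __init my_init(void)
{
	/* WQ_UNBOUND queues are served by the unbound worker_pools indexed above. */
	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
	if (!my_wq)
		return -ENOMEM;
	queue_work(my_wq, &my_work);
	return 0;
}

static void __exit my_exit(void)
{
	destroy_workqueue(my_wq);	/* drains pending work before freeing */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");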
pool              153 lib/genalloc.c 	struct gen_pool *pool;
pool              155 lib/genalloc.c 	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
pool              156 lib/genalloc.c 	if (pool != NULL) {
pool              157 lib/genalloc.c 		spin_lock_init(&pool->lock);
pool              158 lib/genalloc.c 		INIT_LIST_HEAD(&pool->chunks);
pool              159 lib/genalloc.c 		pool->min_alloc_order = min_alloc_order;
pool              160 lib/genalloc.c 		pool->algo = gen_pool_first_fit;
pool              161 lib/genalloc.c 		pool->data = NULL;
pool              162 lib/genalloc.c 		pool->name = NULL;
pool              164 lib/genalloc.c 	return pool;
pool              182 lib/genalloc.c int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
pool              186 lib/genalloc.c 	int nbits = size >> pool->min_alloc_order;
pool              200 lib/genalloc.c 	spin_lock(&pool->lock);
pool              201 lib/genalloc.c 	list_add_rcu(&chunk->next_chunk, &pool->chunks);
pool              202 lib/genalloc.c 	spin_unlock(&pool->lock);
pool              215 lib/genalloc.c phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
pool              221 lib/genalloc.c 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
pool              240 lib/genalloc.c void gen_pool_destroy(struct gen_pool *pool)
pool              244 lib/genalloc.c 	int order = pool->min_alloc_order;
pool              247 lib/genalloc.c 	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
pool              257 lib/genalloc.c 	kfree_const(pool->name);
pool              258 lib/genalloc.c 	kfree(pool);
pool              275 lib/genalloc.c unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
pool              280 lib/genalloc.c 	int order = pool->min_alloc_order;
pool              295 lib/genalloc.c 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
pool              303 lib/genalloc.c 				 nbits, data, pool, chunk->start_addr);
pool              339 lib/genalloc.c void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
pool              341 lib/genalloc.c 	return gen_pool_dma_alloc_algo(pool, size, dma, pool->algo, pool->data);
pool              360 lib/genalloc.c void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
pool              365 lib/genalloc.c 	if (!pool)
pool              368 lib/genalloc.c 	vaddr = gen_pool_alloc_algo(pool, size, algo, data);
pool              373 lib/genalloc.c 		*dma = gen_pool_virt_to_phys(pool, vaddr);
pool              393 lib/genalloc.c void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
pool              398 lib/genalloc.c 	return gen_pool_dma_alloc_algo(pool, size, dma,
pool              417 lib/genalloc.c void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
pool              419 lib/genalloc.c 	return gen_pool_dma_zalloc_algo(pool, size, dma, pool->algo, pool->data);
pool              438 lib/genalloc.c void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
pool              441 lib/genalloc.c 	void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data);
pool              464 lib/genalloc.c void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
pool              469 lib/genalloc.c 	return gen_pool_dma_zalloc_algo(pool, size, dma,
pool              485 lib/genalloc.c void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
pool              489 lib/genalloc.c 	int order = pool->min_alloc_order;
pool              501 lib/genalloc.c 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
pool              529 lib/genalloc.c void gen_pool_for_each_chunk(struct gen_pool *pool,
pool              530 lib/genalloc.c 	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
pool              536 lib/genalloc.c 	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
pool              537 lib/genalloc.c 		func(pool, chunk, data);
pool              551 lib/genalloc.c bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
pool              559 lib/genalloc.c 	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
pool              577 lib/genalloc.c size_t gen_pool_avail(struct gen_pool *pool)
pool              583 lib/genalloc.c 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
pool              596 lib/genalloc.c size_t gen_pool_size(struct gen_pool *pool)
pool              602 lib/genalloc.c 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
pool              619 lib/genalloc.c void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
pool              623 lib/genalloc.c 	pool->algo = algo;
pool              624 lib/genalloc.c 	if (!pool->algo)
pool              625 lib/genalloc.c 		pool->algo = gen_pool_first_fit;
pool              627 lib/genalloc.c 	pool->data = data;
pool              645 lib/genalloc.c 		struct gen_pool *pool, unsigned long start_addr)
pool              663 lib/genalloc.c 		struct gen_pool *pool, unsigned long start_addr)
pool              670 lib/genalloc.c 	order = pool->min_alloc_order;
pool              690 lib/genalloc.c 		struct gen_pool *pool, unsigned long start_addr)
pool              698 lib/genalloc.c 	order = pool->min_alloc_order;
pool              724 lib/genalloc.c 		unsigned int nr, void *data, struct gen_pool *pool,
pool              748 lib/genalloc.c 		struct gen_pool *pool, unsigned long start_addr)
pool              824 lib/genalloc.c 	struct gen_pool **ptr, *pool;
pool              841 lib/genalloc.c 	pool = gen_pool_create(min_alloc_order, nid);
pool              842 lib/genalloc.c 	if (!pool)
pool              845 lib/genalloc.c 	*ptr = pool;
pool              846 lib/genalloc.c 	pool->name = pool_name;
pool              849 lib/genalloc.c 	return pool;
pool              877 lib/genalloc.c 	struct gen_pool *pool = NULL;
pool              895 lib/genalloc.c 		pool = gen_pool_get(&pdev->dev, name);
pool              898 lib/genalloc.c 	return pool;
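For context on the lib/genalloc.c entries above, a minimal gen_pool usage sketch follows. The allocation order, region and sizes are placeholders, not values taken from the listing.

#include <linux/errno.h>
#include <linux/genalloc.h>

static struct gen_pool *example_pool;

/* Carve a special-purpose allocator out of an already-mapped memory region. */
static int example_pool_setup(void *region, phys_addr_t phys, size_t size, int nid)
{
	int ret;

	example_pool = gen_pool_create(8, nid);	/* smallest unit: 1 << 8 bytes */
	if (!example_pool)
		return -ENOMEM;

	ret = gen_pool_add_virt(example_pool, (unsigned long)region, phys,
				size, nid);
	if (ret)
		gen_pool_destroy(example_pool);
	return ret;
}

static void *example_buf_get(size_t len, dma_addr_t *dma)
{
	/* Returns the virtual address and reports the bus address via *dma. */
	return gen_pool_dma_alloc(example_pool, len, dma);
}

static void example_buf_put(void *vaddr, size_t len)
{
	gen_pool_free(example_pool, (unsigned long)vaddr, len);
}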
pool               14 lib/sg_pool.c  	mempool_t	*pool;
pool               59 lib/sg_pool.c  	mempool_free(sgl, sgp->pool);
pool               67 lib/sg_pool.c  	return mempool_alloc(sgp->pool, gfp_mask);
pool              157 lib/sg_pool.c  		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
pool              159 lib/sg_pool.c  		if (!sgp->pool) {
pool              172 lib/sg_pool.c  		mempool_destroy(sgp->pool);
pool              185 lib/sg_pool.c  		mempool_destroy(sgp->pool);
pool               71 mm/dmapool.c   	struct dma_pool *pool;
pool               81 mm/dmapool.c   	list_for_each_entry(pool, &dev->dma_pools, pools) {
pool               85 mm/dmapool.c   		spin_lock_irq(&pool->lock);
pool               86 mm/dmapool.c   		list_for_each_entry(page, &pool->page_list, page_list) {
pool               90 mm/dmapool.c   		spin_unlock_irq(&pool->lock);
pool               94 mm/dmapool.c   				 pool->name, blocks,
pool               95 mm/dmapool.c   				 pages * (pool->allocation / pool->size),
pool               96 mm/dmapool.c   				 pool->size, pages);
pool              205 mm/dmapool.c   static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
pool              208 mm/dmapool.c   	unsigned int next_boundary = pool->boundary;
pool              211 mm/dmapool.c   		unsigned int next = offset + pool->size;
pool              212 mm/dmapool.c   		if (unlikely((next + pool->size) >= next_boundary)) {
pool              214 mm/dmapool.c   			next_boundary += pool->boundary;
pool              218 mm/dmapool.c   	} while (offset < pool->allocation);
pool              221 mm/dmapool.c   static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
pool              228 mm/dmapool.c   	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
pool              232 mm/dmapool.c   		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
pool              234 mm/dmapool.c   		pool_initialise_page(pool, page);
pool              249 mm/dmapool.c   static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
pool              254 mm/dmapool.c   	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
pool              256 mm/dmapool.c   	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
pool              269 mm/dmapool.c   void dma_pool_destroy(struct dma_pool *pool)
pool              273 mm/dmapool.c   	if (unlikely(!pool))
pool              278 mm/dmapool.c   	list_del(&pool->pools);
pool              279 mm/dmapool.c   	if (pool->dev && list_empty(&pool->dev->dma_pools))
pool              283 mm/dmapool.c   		device_remove_file(pool->dev, &dev_attr_pools);
pool              286 mm/dmapool.c   	while (!list_empty(&pool->page_list)) {
pool              288 mm/dmapool.c   		page = list_entry(pool->page_list.next,
pool              291 mm/dmapool.c   			if (pool->dev)
pool              292 mm/dmapool.c   				dev_err(pool->dev,
pool              294 mm/dmapool.c   					pool->name, page->vaddr);
pool              297 mm/dmapool.c   				       pool->name, page->vaddr);
pool              302 mm/dmapool.c   			pool_free_page(pool, page);
pool              305 mm/dmapool.c   	kfree(pool);
pool              319 mm/dmapool.c   void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
pool              329 mm/dmapool.c   	spin_lock_irqsave(&pool->lock, flags);
pool              330 mm/dmapool.c   	list_for_each_entry(page, &pool->page_list, page_list) {
pool              331 mm/dmapool.c   		if (page->offset < pool->allocation)
pool              336 mm/dmapool.c   	spin_unlock_irqrestore(&pool->lock, flags);
pool              338 mm/dmapool.c   	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
pool              342 mm/dmapool.c   	spin_lock_irqsave(&pool->lock, flags);
pool              344 mm/dmapool.c   	list_add(&page->page_list, &pool->page_list);
pool              356 mm/dmapool.c   		for (i = sizeof(page->offset); i < pool->size; i++) {
pool              359 mm/dmapool.c   			if (pool->dev)
pool              360 mm/dmapool.c   				dev_err(pool->dev,
pool              362 mm/dmapool.c   					pool->name, retval);
pool              365 mm/dmapool.c   					pool->name, retval);
pool              372 mm/dmapool.c   					data, pool->size, 1);
pool              377 mm/dmapool.c   		memset(retval, POOL_POISON_ALLOCATED, pool->size);
pool              379 mm/dmapool.c   	spin_unlock_irqrestore(&pool->lock, flags);
pool              382 mm/dmapool.c   		memset(retval, 0, pool->size);
pool              388 mm/dmapool.c   static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
pool              392 mm/dmapool.c   	list_for_each_entry(page, &pool->page_list, page_list) {
pool              395 mm/dmapool.c   		if ((dma - page->dma) < pool->allocation)
pool              410 mm/dmapool.c   void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
pool              416 mm/dmapool.c   	spin_lock_irqsave(&pool->lock, flags);
pool              417 mm/dmapool.c   	page = pool_find_page(pool, dma);
pool              419 mm/dmapool.c   		spin_unlock_irqrestore(&pool->lock, flags);
pool              420 mm/dmapool.c   		if (pool->dev)
pool              421 mm/dmapool.c   			dev_err(pool->dev,
pool              423 mm/dmapool.c   				pool->name, vaddr, (unsigned long)dma);
pool              426 mm/dmapool.c   			       pool->name, vaddr, (unsigned long)dma);
pool              432 mm/dmapool.c   		memset(vaddr, 0, pool->size);
pool              435 mm/dmapool.c   		spin_unlock_irqrestore(&pool->lock, flags);
pool              436 mm/dmapool.c   		if (pool->dev)
pool              437 mm/dmapool.c   			dev_err(pool->dev,
pool              439 mm/dmapool.c   				pool->name, vaddr, &dma);
pool              442 mm/dmapool.c   			       pool->name, vaddr, &dma);
pool              447 mm/dmapool.c   		while (chain < pool->allocation) {
pool              452 mm/dmapool.c   			spin_unlock_irqrestore(&pool->lock, flags);
pool              453 mm/dmapool.c   			if (pool->dev)
pool              454 mm/dmapool.c   				dev_err(pool->dev, "dma_pool_free %s, dma %pad already free\n",
pool              455 mm/dmapool.c   					pool->name, &dma);
pool              458 mm/dmapool.c   				       pool->name, &dma);
pool              462 mm/dmapool.c   	memset(vaddr, POOL_POISON_FREED, pool->size);
pool              473 mm/dmapool.c   	spin_unlock_irqrestore(&pool->lock, flags);
pool              482 mm/dmapool.c   	struct dma_pool *pool = *(struct dma_pool **)res;
pool              484 mm/dmapool.c   	dma_pool_destroy(pool);
pool              509 mm/dmapool.c   	struct dma_pool **ptr, *pool;
pool              515 mm/dmapool.c   	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
pool              516 mm/dmapool.c   	if (pool)
pool              521 mm/dmapool.c   	return pool;
pool              531 mm/dmapool.c   void dmam_pool_destroy(struct dma_pool *pool)
pool              533 mm/dmapool.c   	struct device *dev = pool->dev;
pool              535 mm/dmapool.c   	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
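A short sketch of how a driver typically uses the mm/dmapool.c interface indexed above; the pool name, block size and alignment below are illustrative.

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/gfp.h>

static struct dma_pool *desc_pool;

static int desc_pool_init(struct device *dev)
{
	/* 64-byte coherent blocks, 64-byte aligned, no boundary constraint. */
	desc_pool = dma_pool_create("descs", dev, 64, 64, 0);
	return desc_pool ? 0 : -ENOMEM;
}

static void *desc_get(dma_addr_t *dma)
{
	/* Returns a CPU pointer and fills *dma with the device-visible address. */
	return dma_pool_alloc(desc_pool, GFP_KERNEL, dma);
}

static void desc_put(void *vaddr, dma_addr_t dma)
{
	dma_pool_free(desc_pool, vaddr, dma);
}

static void desc_pool_fini(void)
{
	dma_pool_destroy(desc_pool);	/* all blocks must have been freed */
}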
pool               25 mm/mempool.c   static void poison_error(mempool_t *pool, void *element, size_t size,
pool               28 mm/mempool.c   	const int nr = pool->curr_nr;
pool               34 mm/mempool.c   	pr_err("Mempool %p size %zu\n", pool, size);
pool               42 mm/mempool.c   static void __check_element(mempool_t *pool, void *element, size_t size)
pool               51 mm/mempool.c   			poison_error(pool, element, size, i);
pool               58 mm/mempool.c   static void check_element(mempool_t *pool, void *element)
pool               61 mm/mempool.c   	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
pool               62 mm/mempool.c   		__check_element(pool, element, ksize(element));
pool               65 mm/mempool.c   	if (pool->free == mempool_free_pages) {
pool               66 mm/mempool.c   		int order = (int)(long)pool->pool_data;
pool               69 mm/mempool.c   		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
pool               82 mm/mempool.c   static void poison_element(mempool_t *pool, void *element)
pool               85 mm/mempool.c   	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
pool               89 mm/mempool.c   	if (pool->alloc == mempool_alloc_pages) {
pool               90 mm/mempool.c   		int order = (int)(long)pool->pool_data;
pool               98 mm/mempool.c   static inline void check_element(mempool_t *pool, void *element)
pool              101 mm/mempool.c   static inline void poison_element(mempool_t *pool, void *element)
pool              106 mm/mempool.c   static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
pool              108 mm/mempool.c   	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
pool              110 mm/mempool.c   	if (pool->alloc == mempool_alloc_pages)
pool              111 mm/mempool.c   		kasan_free_pages(element, (unsigned long)pool->pool_data);
pool              114 mm/mempool.c   static void kasan_unpoison_element(mempool_t *pool, void *element)
pool              116 mm/mempool.c   	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
pool              118 mm/mempool.c   	if (pool->alloc == mempool_alloc_pages)
pool              119 mm/mempool.c   		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
pool              122 mm/mempool.c   static __always_inline void add_element(mempool_t *pool, void *element)
pool              124 mm/mempool.c   	BUG_ON(pool->curr_nr >= pool->min_nr);
pool              125 mm/mempool.c   	poison_element(pool, element);
pool              126 mm/mempool.c   	kasan_poison_element(pool, element);
pool              127 mm/mempool.c   	pool->elements[pool->curr_nr++] = element;
pool              130 mm/mempool.c   static void *remove_element(mempool_t *pool)
pool              132 mm/mempool.c   	void *element = pool->elements[--pool->curr_nr];
pool              134 mm/mempool.c   	BUG_ON(pool->curr_nr < 0);
pool              135 mm/mempool.c   	kasan_unpoison_element(pool, element);
pool              136 mm/mempool.c   	check_element(pool, element);
pool              151 mm/mempool.c   void mempool_exit(mempool_t *pool)
pool              153 mm/mempool.c   	while (pool->curr_nr) {
pool              154 mm/mempool.c   		void *element = remove_element(pool);
pool              155 mm/mempool.c   		pool->free(element, pool->pool_data);
pool              157 mm/mempool.c   	kfree(pool->elements);
pool              158 mm/mempool.c   	pool->elements = NULL;
pool              170 mm/mempool.c   void mempool_destroy(mempool_t *pool)
pool              172 mm/mempool.c   	if (unlikely(!pool))
pool              175 mm/mempool.c   	mempool_exit(pool);
pool              176 mm/mempool.c   	kfree(pool);
pool              180 mm/mempool.c   int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
pool              184 mm/mempool.c   	spin_lock_init(&pool->lock);
pool              185 mm/mempool.c   	pool->min_nr	= min_nr;
pool              186 mm/mempool.c   	pool->pool_data = pool_data;
pool              187 mm/mempool.c   	pool->alloc	= alloc_fn;
pool              188 mm/mempool.c   	pool->free	= free_fn;
pool              189 mm/mempool.c   	init_waitqueue_head(&pool->wait);
pool              191 mm/mempool.c   	pool->elements = kmalloc_array_node(min_nr, sizeof(void *),
pool              193 mm/mempool.c   	if (!pool->elements)
pool              199 mm/mempool.c   	while (pool->curr_nr < pool->min_nr) {
pool              202 mm/mempool.c   		element = pool->alloc(gfp_mask, pool->pool_data);
pool              204 mm/mempool.c   			mempool_exit(pool);
pool              207 mm/mempool.c   		add_element(pool, element);
pool              228 mm/mempool.c   int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
pool              231 mm/mempool.c   	return mempool_init_node(pool, min_nr, alloc_fn, free_fn,
pool              265 mm/mempool.c   	mempool_t *pool;
pool              267 mm/mempool.c   	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
pool              268 mm/mempool.c   	if (!pool)
pool              271 mm/mempool.c   	if (mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data,
pool              273 mm/mempool.c   		kfree(pool);
pool              277 mm/mempool.c   	return pool;
pool              299 mm/mempool.c   int mempool_resize(mempool_t *pool, int new_min_nr)
pool              308 mm/mempool.c   	spin_lock_irqsave(&pool->lock, flags);
pool              309 mm/mempool.c   	if (new_min_nr <= pool->min_nr) {
pool              310 mm/mempool.c   		while (new_min_nr < pool->curr_nr) {
pool              311 mm/mempool.c   			element = remove_element(pool);
pool              312 mm/mempool.c   			spin_unlock_irqrestore(&pool->lock, flags);
pool              313 mm/mempool.c   			pool->free(element, pool->pool_data);
pool              314 mm/mempool.c   			spin_lock_irqsave(&pool->lock, flags);
pool              316 mm/mempool.c   		pool->min_nr = new_min_nr;
pool              319 mm/mempool.c   	spin_unlock_irqrestore(&pool->lock, flags);
pool              327 mm/mempool.c   	spin_lock_irqsave(&pool->lock, flags);
pool              328 mm/mempool.c   	if (unlikely(new_min_nr <= pool->min_nr)) {
pool              330 mm/mempool.c   		spin_unlock_irqrestore(&pool->lock, flags);
pool              334 mm/mempool.c   	memcpy(new_elements, pool->elements,
pool              335 mm/mempool.c   			pool->curr_nr * sizeof(*new_elements));
pool              336 mm/mempool.c   	kfree(pool->elements);
pool              337 mm/mempool.c   	pool->elements = new_elements;
pool              338 mm/mempool.c   	pool->min_nr = new_min_nr;
pool              340 mm/mempool.c   	while (pool->curr_nr < pool->min_nr) {
pool              341 mm/mempool.c   		spin_unlock_irqrestore(&pool->lock, flags);
pool              342 mm/mempool.c   		element = pool->alloc(GFP_KERNEL, pool->pool_data);
pool              345 mm/mempool.c   		spin_lock_irqsave(&pool->lock, flags);
pool              346 mm/mempool.c   		if (pool->curr_nr < pool->min_nr) {
pool              347 mm/mempool.c   			add_element(pool, element);
pool              349 mm/mempool.c   			spin_unlock_irqrestore(&pool->lock, flags);
pool              350 mm/mempool.c   			pool->free(element, pool->pool_data);	/* Raced */
pool              355 mm/mempool.c   	spin_unlock_irqrestore(&pool->lock, flags);
pool              375 mm/mempool.c   void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
pool              393 mm/mempool.c   	element = pool->alloc(gfp_temp, pool->pool_data);
pool              397 mm/mempool.c   	spin_lock_irqsave(&pool->lock, flags);
pool              398 mm/mempool.c   	if (likely(pool->curr_nr)) {
pool              399 mm/mempool.c   		element = remove_element(pool);
pool              400 mm/mempool.c   		spin_unlock_irqrestore(&pool->lock, flags);
pool              416 mm/mempool.c   		spin_unlock_irqrestore(&pool->lock, flags);
pool              423 mm/mempool.c   		spin_unlock_irqrestore(&pool->lock, flags);
pool              429 mm/mempool.c   	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
pool              431 mm/mempool.c   	spin_unlock_irqrestore(&pool->lock, flags);
pool              439 mm/mempool.c   	finish_wait(&pool->wait, &wait);
pool              452 mm/mempool.c   void mempool_free(void *element, mempool_t *pool)
pool              492 mm/mempool.c   	if (unlikely(pool->curr_nr < pool->min_nr)) {
pool              493 mm/mempool.c   		spin_lock_irqsave(&pool->lock, flags);
pool              494 mm/mempool.c   		if (likely(pool->curr_nr < pool->min_nr)) {
pool              495 mm/mempool.c   			add_element(pool, element);
pool              496 mm/mempool.c   			spin_unlock_irqrestore(&pool->lock, flags);
pool              497 mm/mempool.c   			wake_up(&pool->wait);
pool              500 mm/mempool.c   		spin_unlock_irqrestore(&pool->lock, flags);
pool              502 mm/mempool.c   	pool->free(element, pool->pool_data);
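The mm/mempool.c entries above implement the reserve-backed allocator used on I/O paths to guarantee forward progress. A minimal sketch, assuming a caller-defined struct io_request (hypothetical) and a slab-backed pool:

#include <linux/mempool.h>
#include <linux/slab.h>

struct io_request {		/* illustrative payload type */
	void *data;
};

#define IO_POOL_MIN	4	/* elements held in reserve */

static struct kmem_cache *io_cache;
static mempool_t *io_pool;

static int io_pool_init(void)
{
	io_cache = kmem_cache_create("io_request", sizeof(struct io_request),
				     0, 0, NULL);
	if (!io_cache)
		return -ENOMEM;

	/* Backed by mempool_alloc_slab/mempool_free_slab on io_cache. */
	io_pool = mempool_create_slab_pool(IO_POOL_MIN, io_cache);
	if (!io_pool) {
		kmem_cache_destroy(io_cache);
		return -ENOMEM;
	}
	return 0;
}

static struct io_request *io_request_get(void)
{
	/* With a blocking gfp mask this waits for a freed element instead of failing. */
	return mempool_alloc(io_pool, GFP_NOIO);
}

static void io_request_put(struct io_request *req)
{
	mempool_free(req, io_pool);	/* refills the reserve before returning to slab */
}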
pool               75 mm/z3fold.c    	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
pool               92 mm/z3fold.c    	unsigned long pool; /* back link + flags */
pool              119 mm/z3fold.c    	struct z3fold_pool *pool;
pool              196 mm/z3fold.c    static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
pool              201 mm/z3fold.c    	slots = kmem_cache_alloc(pool->c_handle,
pool              206 mm/z3fold.c    		slots->pool = (unsigned long)pool;
pool              214 mm/z3fold.c    	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
pool              243 mm/z3fold.c    		struct z3fold_pool *pool = slots_to_pool(slots);
pool              245 mm/z3fold.c    		kmem_cache_free(pool->c_handle, slots);
pool              278 mm/z3fold.c    static int z3fold_register_migration(struct z3fold_pool *pool)
pool              280 mm/z3fold.c    	pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
pool              281 mm/z3fold.c    	if (IS_ERR(pool->inode)) {
pool              282 mm/z3fold.c    		pool->inode = NULL;
pool              286 mm/z3fold.c    	pool->inode->i_mapping->private_data = pool;
pool              287 mm/z3fold.c    	pool->inode->i_mapping->a_ops = &z3fold_aops;
pool              291 mm/z3fold.c    static void z3fold_unregister_migration(struct z3fold_pool *pool)
pool              293 mm/z3fold.c    	if (pool->inode)
pool              294 mm/z3fold.c    		iput(pool->inode);
pool              299 mm/z3fold.c    					struct z3fold_pool *pool, gfp_t gfp)
pool              313 mm/z3fold.c    	slots = alloc_slots(pool, gfp);
pool              326 mm/z3fold.c    	zhdr->pool = pool;
pool              438 mm/z3fold.c    	return zhdr->pool;
pool              444 mm/z3fold.c    	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
pool              449 mm/z3fold.c    	spin_lock(&pool->lock);
pool              452 mm/z3fold.c    	spin_unlock(&pool->lock);
pool              455 mm/z3fold.c    	spin_lock(&pool->stale_lock);
pool              456 mm/z3fold.c    	list_add(&zhdr->buddy, &pool->stale);
pool              457 mm/z3fold.c    	queue_work(pool->release_wq, &pool->work);
pool              458 mm/z3fold.c    	spin_unlock(&pool->stale_lock);
pool              481 mm/z3fold.c    	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
pool              482 mm/z3fold.c    	spin_lock(&pool->lock);
pool              484 mm/z3fold.c    	spin_unlock(&pool->lock);
pool              492 mm/z3fold.c    	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);
pool              494 mm/z3fold.c    	spin_lock(&pool->stale_lock);
pool              495 mm/z3fold.c    	while (!list_empty(&pool->stale)) {
pool              496 mm/z3fold.c    		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
pool              503 mm/z3fold.c    		spin_unlock(&pool->stale_lock);
pool              507 mm/z3fold.c    		spin_lock(&pool->stale_lock);
pool              509 mm/z3fold.c    	spin_unlock(&pool->stale_lock);
pool              537 mm/z3fold.c    static inline void add_to_unbuddied(struct z3fold_pool *pool,
pool              542 mm/z3fold.c    		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);
pool              545 mm/z3fold.c    		spin_lock(&pool->lock);
pool              547 mm/z3fold.c    		spin_unlock(&pool->lock);
pool              549 mm/z3fold.c    		put_cpu_ptr(pool->unbuddied);
pool              613 mm/z3fold.c    	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
pool              625 mm/z3fold.c    	spin_lock(&pool->lock);
pool              627 mm/z3fold.c    	spin_unlock(&pool->lock);
pool              630 mm/z3fold.c    		atomic64_dec(&pool->pages_nr);
pool              642 mm/z3fold.c    	add_to_unbuddied(pool, zhdr);
pool              655 mm/z3fold.c    static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
pool              665 mm/z3fold.c    	unbuddied = get_cpu_ptr(pool->unbuddied);
pool              676 mm/z3fold.c    		spin_lock(&pool->lock);
pool              681 mm/z3fold.c    			spin_unlock(&pool->lock);
pool              683 mm/z3fold.c    			put_cpu_ptr(pool->unbuddied);
pool              690 mm/z3fold.c    		spin_unlock(&pool->lock);
pool              696 mm/z3fold.c    			put_cpu_ptr(pool->unbuddied);
pool              711 mm/z3fold.c    	put_cpu_ptr(pool->unbuddied);
pool              720 mm/z3fold.c    			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
pool              721 mm/z3fold.c    			spin_lock(&pool->lock);
pool              728 mm/z3fold.c    				spin_unlock(&pool->lock);
pool              734 mm/z3fold.c    			spin_unlock(&pool->lock);
pool              768 mm/z3fold.c    	struct z3fold_pool *pool = NULL;
pool              771 mm/z3fold.c    	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
pool              772 mm/z3fold.c    	if (!pool)
pool              774 mm/z3fold.c    	pool->c_handle = kmem_cache_create("z3fold_handle",
pool              777 mm/z3fold.c    	if (!pool->c_handle)
pool              779 mm/z3fold.c    	spin_lock_init(&pool->lock);
pool              780 mm/z3fold.c    	spin_lock_init(&pool->stale_lock);
pool              781 mm/z3fold.c    	pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
pool              782 mm/z3fold.c    	if (!pool->unbuddied)
pool              786 mm/z3fold.c    				per_cpu_ptr(pool->unbuddied, cpu);
pool              790 mm/z3fold.c    	INIT_LIST_HEAD(&pool->lru);
pool              791 mm/z3fold.c    	INIT_LIST_HEAD(&pool->stale);
pool              792 mm/z3fold.c    	atomic64_set(&pool->pages_nr, 0);
pool              793 mm/z3fold.c    	pool->name = name;
pool              794 mm/z3fold.c    	pool->compact_wq = create_singlethread_workqueue(pool->name);
pool              795 mm/z3fold.c    	if (!pool->compact_wq)
pool              797 mm/z3fold.c    	pool->release_wq = create_singlethread_workqueue(pool->name);
pool              798 mm/z3fold.c    	if (!pool->release_wq)
pool              800 mm/z3fold.c    	if (z3fold_register_migration(pool))
pool              802 mm/z3fold.c    	INIT_WORK(&pool->work, free_pages_work);
pool              803 mm/z3fold.c    	pool->ops = ops;
pool              804 mm/z3fold.c    	return pool;
pool              807 mm/z3fold.c    	destroy_workqueue(pool->release_wq);
pool              809 mm/z3fold.c    	destroy_workqueue(pool->compact_wq);
pool              811 mm/z3fold.c    	free_percpu(pool->unbuddied);
pool              813 mm/z3fold.c    	kmem_cache_destroy(pool->c_handle);
pool              815 mm/z3fold.c    	kfree(pool);
pool              826 mm/z3fold.c    static void z3fold_destroy_pool(struct z3fold_pool *pool)
pool              828 mm/z3fold.c    	kmem_cache_destroy(pool->c_handle);
pool              839 mm/z3fold.c    	destroy_workqueue(pool->compact_wq);
pool              840 mm/z3fold.c    	destroy_workqueue(pool->release_wq);
pool              841 mm/z3fold.c    	z3fold_unregister_migration(pool);
pool              842 mm/z3fold.c    	kfree(pool);
pool              864 mm/z3fold.c    static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
pool              883 mm/z3fold.c    		zhdr = __z3fold_alloc(pool, size, can_sleep);
pool              898 mm/z3fold.c    					atomic64_dec(&pool->pages_nr);
pool              913 mm/z3fold.c    		spin_lock(&pool->stale_lock);
pool              914 mm/z3fold.c    		zhdr = list_first_entry_or_null(&pool->stale,
pool              923 mm/z3fold.c    			spin_unlock(&pool->stale_lock);
pool              927 mm/z3fold.c    			spin_unlock(&pool->stale_lock);
pool              936 mm/z3fold.c    	zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
pool              941 mm/z3fold.c    	atomic64_inc(&pool->pages_nr);
pool              949 mm/z3fold.c    		__SetPageMovable(page, pool->inode->i_mapping);
pool              953 mm/z3fold.c    			__SetPageMovable(page, pool->inode->i_mapping);
pool              968 mm/z3fold.c    	add_to_unbuddied(pool, zhdr);
pool              971 mm/z3fold.c    	spin_lock(&pool->lock);
pool              976 mm/z3fold.c    	list_add(&page->lru, &pool->lru);
pool              979 mm/z3fold.c    	spin_unlock(&pool->lock);
pool              996 mm/z3fold.c    static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
pool             1014 mm/z3fold.c    			spin_lock(&pool->lock);
pool             1016 mm/z3fold.c    			spin_unlock(&pool->lock);
pool             1018 mm/z3fold.c    			atomic64_dec(&pool->pages_nr);
pool             1046 mm/z3fold.c    		atomic64_dec(&pool->pages_nr);
pool             1061 mm/z3fold.c    		spin_lock(&pool->lock);
pool             1063 mm/z3fold.c    		spin_unlock(&pool->lock);
pool             1071 mm/z3fold.c    	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
pool             1112 mm/z3fold.c    static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
pool             1121 mm/z3fold.c    	spin_lock(&pool->lock);
pool             1122 mm/z3fold.c    	if (!pool->ops || !pool->ops->evict || retries == 0) {
pool             1123 mm/z3fold.c    		spin_unlock(&pool->lock);
pool             1127 mm/z3fold.c    		if (list_empty(&pool->lru)) {
pool             1128 mm/z3fold.c    			spin_unlock(&pool->lock);
pool             1131 mm/z3fold.c    		list_for_each_prev(pos, &pool->lru) {
pool             1166 mm/z3fold.c    		spin_unlock(&pool->lock);
pool             1199 mm/z3fold.c    			ret = pool->ops->evict(pool, middle_handle);
pool             1204 mm/z3fold.c    			ret = pool->ops->evict(pool, first_handle);
pool             1209 mm/z3fold.c    			ret = pool->ops->evict(pool, last_handle);
pool             1217 mm/z3fold.c    				atomic64_dec(&pool->pages_nr);
pool             1220 mm/z3fold.c    			spin_lock(&pool->lock);
pool             1221 mm/z3fold.c    			list_add(&page->lru, &pool->lru);
pool             1222 mm/z3fold.c    			spin_unlock(&pool->lock);
pool             1228 mm/z3fold.c    				atomic64_dec(&pool->pages_nr);
pool             1236 mm/z3fold.c    			spin_lock(&pool->lock);
pool             1237 mm/z3fold.c    			list_add(&page->lru, &pool->lru);
pool             1238 mm/z3fold.c    			spin_unlock(&pool->lock);
pool             1244 mm/z3fold.c    		spin_lock(&pool->lock);
pool             1246 mm/z3fold.c    	spin_unlock(&pool->lock);
pool             1260 mm/z3fold.c    static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
pool             1306 mm/z3fold.c    static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
pool             1332 mm/z3fold.c    static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
pool             1334 mm/z3fold.c    	return atomic64_read(&pool->pages_nr);
pool             1340 mm/z3fold.c    	struct z3fold_pool *pool;
pool             1355 mm/z3fold.c    	pool = zhdr_to_pool(zhdr);
pool             1361 mm/z3fold.c    		spin_lock(&pool->lock);
pool             1364 mm/z3fold.c    		spin_unlock(&pool->lock);
pool             1377 mm/z3fold.c    	struct z3fold_pool *pool;
pool             1385 mm/z3fold.c    	pool = zhdr_to_pool(zhdr);
pool             1424 mm/z3fold.c    	spin_lock(&pool->lock);
pool             1425 mm/z3fold.c    	list_add(&newpage->lru, &pool->lru);
pool             1426 mm/z3fold.c    	spin_unlock(&pool->lock);
pool             1430 mm/z3fold.c    	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
pool             1440 mm/z3fold.c    	struct z3fold_pool *pool;
pool             1443 mm/z3fold.c    	pool = zhdr_to_pool(zhdr);
pool             1450 mm/z3fold.c    		atomic64_dec(&pool->pages_nr);
pool             1453 mm/z3fold.c    	spin_lock(&pool->lock);
pool             1454 mm/z3fold.c    	list_add(&page->lru, &pool->lru);
pool             1455 mm/z3fold.c    	spin_unlock(&pool->lock);
pool             1469 mm/z3fold.c    static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
pool             1471 mm/z3fold.c    	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
pool             1472 mm/z3fold.c    		return pool->zpool_ops->evict(pool->zpool, handle);
pool             1485 mm/z3fold.c    	struct z3fold_pool *pool;
pool             1487 mm/z3fold.c    	pool = z3fold_create_pool(name, gfp,
pool             1489 mm/z3fold.c    	if (pool) {
pool             1490 mm/z3fold.c    		pool->zpool = zpool;
pool             1491 mm/z3fold.c    		pool->zpool_ops = zpool_ops;
pool             1493 mm/z3fold.c    	return pool;
pool             1496 mm/z3fold.c    static void z3fold_zpool_destroy(void *pool)
pool             1498 mm/z3fold.c    	z3fold_destroy_pool(pool);
pool             1501 mm/z3fold.c    static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
pool             1504 mm/z3fold.c    	return z3fold_alloc(pool, size, gfp, handle);
pool             1506 mm/z3fold.c    static void z3fold_zpool_free(void *pool, unsigned long handle)
pool             1508 mm/z3fold.c    	z3fold_free(pool, handle);
pool             1511 mm/z3fold.c    static int z3fold_zpool_shrink(void *pool, unsigned int pages,
pool             1518 mm/z3fold.c    		ret = z3fold_reclaim_page(pool, 8);
pool             1530 mm/z3fold.c    static void *z3fold_zpool_map(void *pool, unsigned long handle,
pool             1533 mm/z3fold.c    	return z3fold_map(pool, handle);
pool             1535 mm/z3fold.c    static void z3fold_zpool_unmap(void *pool, unsigned long handle)
pool             1537 mm/z3fold.c    	z3fold_unmap(pool, handle);
pool             1540 mm/z3fold.c    static u64 z3fold_zpool_total_size(void *pool)
pool             1542 mm/z3fold.c    	return z3fold_get_pool_size(pool) * PAGE_SIZE;
pool              129 mm/zbud.c      static int zbud_zpool_evict(struct zbud_pool *pool, unsigned long handle)
pool              131 mm/zbud.c      	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
pool              132 mm/zbud.c      		return pool->zpool_ops->evict(pool->zpool, handle);
pool              145 mm/zbud.c      	struct zbud_pool *pool;
pool              147 mm/zbud.c      	pool = zbud_create_pool(gfp, zpool_ops ? &zbud_zpool_ops : NULL);
pool              148 mm/zbud.c      	if (pool) {
pool              149 mm/zbud.c      		pool->zpool = zpool;
pool              150 mm/zbud.c      		pool->zpool_ops = zpool_ops;
pool              152 mm/zbud.c      	return pool;
pool              155 mm/zbud.c      static void zbud_zpool_destroy(void *pool)
pool              157 mm/zbud.c      	zbud_destroy_pool(pool);
pool              160 mm/zbud.c      static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp,
pool              163 mm/zbud.c      	return zbud_alloc(pool, size, gfp, handle);
pool              165 mm/zbud.c      static void zbud_zpool_free(void *pool, unsigned long handle)
pool              167 mm/zbud.c      	zbud_free(pool, handle);
pool              170 mm/zbud.c      static int zbud_zpool_shrink(void *pool, unsigned int pages,
pool              177 mm/zbud.c      		ret = zbud_reclaim_page(pool, 8);
pool              189 mm/zbud.c      static void *zbud_zpool_map(void *pool, unsigned long handle,
pool              192 mm/zbud.c      	return zbud_map(pool, handle);
pool              194 mm/zbud.c      static void zbud_zpool_unmap(void *pool, unsigned long handle)
pool              196 mm/zbud.c      	zbud_unmap(pool, handle);
pool              199 mm/zbud.c      static u64 zbud_zpool_total_size(void *pool)
pool              201 mm/zbud.c      	return zbud_get_pool_size(pool) * PAGE_SIZE;
pool              308 mm/zbud.c      	struct zbud_pool *pool;
pool              311 mm/zbud.c      	pool = kzalloc(sizeof(struct zbud_pool), gfp);
pool              312 mm/zbud.c      	if (!pool)
pool              314 mm/zbud.c      	spin_lock_init(&pool->lock);
pool              316 mm/zbud.c      		INIT_LIST_HEAD(&pool->unbuddied[i]);
pool              317 mm/zbud.c      	INIT_LIST_HEAD(&pool->buddied);
pool              318 mm/zbud.c      	INIT_LIST_HEAD(&pool->lru);
pool              319 mm/zbud.c      	pool->pages_nr = 0;
pool              320 mm/zbud.c      	pool->ops = ops;
pool              321 mm/zbud.c      	return pool;
pool              330 mm/zbud.c      void zbud_destroy_pool(struct zbud_pool *pool)
pool              332 mm/zbud.c      	kfree(pool);
pool              354 mm/zbud.c      int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
pool              367 mm/zbud.c      	spin_lock(&pool->lock);
pool              372 mm/zbud.c      		if (!list_empty(&pool->unbuddied[i])) {
pool              373 mm/zbud.c      			zhdr = list_first_entry(&pool->unbuddied[i],
pool              385 mm/zbud.c      	spin_unlock(&pool->lock);
pool              389 mm/zbud.c      	spin_lock(&pool->lock);
pool              390 mm/zbud.c      	pool->pages_nr++;
pool              403 mm/zbud.c      		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
pool              406 mm/zbud.c      		list_add(&zhdr->buddy, &pool->buddied);
pool              412 mm/zbud.c      	list_add(&zhdr->lru, &pool->lru);
pool              415 mm/zbud.c      	spin_unlock(&pool->lock);
pool              430 mm/zbud.c      void zbud_free(struct zbud_pool *pool, unsigned long handle)
pool              435 mm/zbud.c      	spin_lock(&pool->lock);
pool              446 mm/zbud.c      		spin_unlock(&pool->lock);
pool              457 mm/zbud.c      		pool->pages_nr--;
pool              461 mm/zbud.c      		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
pool              464 mm/zbud.c      	spin_unlock(&pool->lock);
pool              502 mm/zbud.c      int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries)
pool              508 mm/zbud.c      	spin_lock(&pool->lock);
pool              509 mm/zbud.c      	if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||
pool              511 mm/zbud.c      		spin_unlock(&pool->lock);
pool              515 mm/zbud.c      		zhdr = list_last_entry(&pool->lru, struct zbud_header, lru);
pool              530 mm/zbud.c      		spin_unlock(&pool->lock);
pool              534 mm/zbud.c      			ret = pool->ops->evict(pool, first_handle);
pool              539 mm/zbud.c      			ret = pool->ops->evict(pool, last_handle);
pool              544 mm/zbud.c      		spin_lock(&pool->lock);
pool              552 mm/zbud.c      			pool->pages_nr--;
pool              553 mm/zbud.c      			spin_unlock(&pool->lock);
pool              559 mm/zbud.c      			list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
pool              562 mm/zbud.c      			list_add(&zhdr->buddy, &pool->buddied);
pool              566 mm/zbud.c      		list_add(&zhdr->lru, &pool->lru);
pool              568 mm/zbud.c      	spin_unlock(&pool->lock);
pool              584 mm/zbud.c      void *zbud_map(struct zbud_pool *pool, unsigned long handle)
pool              594 mm/zbud.c      void zbud_unmap(struct zbud_pool *pool, unsigned long handle)
pool              605 mm/zbud.c      u64 zbud_get_pool_size(struct zbud_pool *pool)
pool              607 mm/zbud.c      	return pool->pages_nr;
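Based on the zbud_* signatures visible in the listing, a round-trip sketch of the mm/zbud.c buddy allocator; the allocation size is illustrative and no eviction ops are registered.

#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/zbud.h>

static int zbud_example(void)
{
	struct zbud_pool *pool;
	unsigned long handle;
	void *addr;
	int ret;

	pool = zbud_create_pool(GFP_KERNEL, NULL);	/* NULL: no ->evict callback */
	if (!pool)
		return -ENOMEM;

	ret = zbud_alloc(pool, 100, GFP_KERNEL, &handle);
	if (ret)
		goto out;

	addr = zbud_map(pool, handle);		/* translate the handle to a pointer */
	memset(addr, 0, 100);
	zbud_unmap(pool, handle);

	pr_info("zbud pool holds %llu pages\n",
		(unsigned long long)zbud_get_pool_size(pool));
	zbud_free(pool, handle);
out:
	zbud_destroy_pool(pool);
	return ret;
}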
pool               23 mm/zpool.c     	void *pool;
pool              183 mm/zpool.c     	zpool->pool = driver->create(name, gfp, ops, zpool);
pool              187 mm/zpool.c     	if (!zpool->pool) {
pool              221 mm/zpool.c     	zpool->driver->destroy(zpool->pool);
pool              276 mm/zpool.c     	return zpool->driver->malloc(zpool->pool, size, gfp, handle);
pool              295 mm/zpool.c     	zpool->driver->free(zpool->pool, handle);
pool              319 mm/zpool.c     	       zpool->driver->shrink(zpool->pool, pages, reclaimed) : -EINVAL;
pool              347 mm/zpool.c     	return zpool->driver->map(zpool->pool, handle, mapmode);
pool              362 mm/zpool.c     	zpool->driver->unmap(zpool->pool, handle);
pool              375 mm/zpool.c     	return zpool->driver->total_size(zpool->pool);
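The mm/zpool.c entries above form the dispatch layer over the zbud, z3fold and zsmalloc backends. A minimal zpool round trip follows; the backend name and object size are illustrative.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/zpool.h>

static int zpool_example(void)
{
	struct zpool *zp;
	unsigned long handle;
	void *dst;
	int ret;

	zp = zpool_create_pool("z3fold", "example", GFP_KERNEL, NULL);
	if (!zp)
		return -ENOMEM;

	ret = zpool_malloc(zp, 256, GFP_KERNEL, &handle);
	if (ret)
		goto out;

	dst = zpool_map_handle(zp, handle, ZPOOL_MM_WO);	/* write-only mapping */
	memset(dst, 0, 256);
	zpool_unmap_handle(zp, handle);

	zpool_free(zp, handle);
out:
	zpool_destroy_pool(zp);
	return ret;
}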
pool              306 mm/zsmalloc.c  static int zs_register_migration(struct zs_pool *pool);
pool              307 mm/zsmalloc.c  static void zs_unregister_migration(struct zs_pool *pool);
pool              311 mm/zsmalloc.c  static void kick_deferred_free(struct zs_pool *pool);
pool              312 mm/zsmalloc.c  static void init_deferred_free(struct zs_pool *pool);
pool              313 mm/zsmalloc.c  static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
pool              317 mm/zsmalloc.c  static int zs_register_migration(struct zs_pool *pool) { return 0; }
pool              318 mm/zsmalloc.c  static void zs_unregister_migration(struct zs_pool *pool) {}
pool              322 mm/zsmalloc.c  static void kick_deferred_free(struct zs_pool *pool) {}
pool              323 mm/zsmalloc.c  static void init_deferred_free(struct zs_pool *pool) {}
pool              324 mm/zsmalloc.c  static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
pool              327 mm/zsmalloc.c  static int create_cache(struct zs_pool *pool)
pool              329 mm/zsmalloc.c  	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
pool              331 mm/zsmalloc.c  	if (!pool->handle_cachep)
pool              334 mm/zsmalloc.c  	pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
pool              336 mm/zsmalloc.c  	if (!pool->zspage_cachep) {
pool              337 mm/zsmalloc.c  		kmem_cache_destroy(pool->handle_cachep);
pool              338 mm/zsmalloc.c  		pool->handle_cachep = NULL;
pool              345 mm/zsmalloc.c  static void destroy_cache(struct zs_pool *pool)
pool              347 mm/zsmalloc.c  	kmem_cache_destroy(pool->handle_cachep);
pool              348 mm/zsmalloc.c  	kmem_cache_destroy(pool->zspage_cachep);
pool              351 mm/zsmalloc.c  static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
pool              353 mm/zsmalloc.c  	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
pool              357 mm/zsmalloc.c  static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
pool              359 mm/zsmalloc.c  	kmem_cache_free(pool->handle_cachep, (void *)handle);
pool              362 mm/zsmalloc.c  static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
pool              364 mm/zsmalloc.c  	return kmem_cache_alloc(pool->zspage_cachep,
pool              368 mm/zsmalloc.c  static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
pool              370 mm/zsmalloc.c  	kmem_cache_free(pool->zspage_cachep, zspage);
pool              399 mm/zsmalloc.c  static void zs_zpool_destroy(void *pool)
pool              401 mm/zsmalloc.c  	zs_destroy_pool(pool);
pool              404 mm/zsmalloc.c  static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
pool              407 mm/zsmalloc.c  	*handle = zs_malloc(pool, size, gfp);
pool              410 mm/zsmalloc.c  static void zs_zpool_free(void *pool, unsigned long handle)
pool              412 mm/zsmalloc.c  	zs_free(pool, handle);
pool              415 mm/zsmalloc.c  static void *zs_zpool_map(void *pool, unsigned long handle,
pool              433 mm/zsmalloc.c  	return zs_map_object(pool, handle, zs_mm);
pool              435 mm/zsmalloc.c  static void zs_zpool_unmap(void *pool, unsigned long handle)
pool              437 mm/zsmalloc.c  	zs_unmap_object(pool, handle);
pool              440 mm/zsmalloc.c  static u64 zs_zpool_total_size(void *pool)
pool              442 mm/zsmalloc.c  	return zs_get_total_pages(pool) << PAGE_SHIFT;
pool              593 mm/zsmalloc.c  	struct zs_pool *pool = s->private;
pool              608 mm/zsmalloc.c  		class = pool->size_class[i];
pool              649 mm/zsmalloc.c  static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
pool              656 mm/zsmalloc.c  	pool->stat_dentry = debugfs_create_dir(name, zs_stat_root);
pool              658 mm/zsmalloc.c  	debugfs_create_file("classes", S_IFREG | 0444, pool->stat_dentry, pool,
pool              662 mm/zsmalloc.c  static void zs_pool_stat_destroy(struct zs_pool *pool)
pool              664 mm/zsmalloc.c  	debugfs_remove_recursive(pool->stat_dentry);
pool              676 mm/zsmalloc.c  static inline void zs_pool_stat_create(struct zs_pool *pool, const char *name)
pool              680 mm/zsmalloc.c  static inline void zs_pool_stat_destroy(struct zs_pool *pool)
pool              935 mm/zsmalloc.c  static void __free_zspage(struct zs_pool *pool, struct size_class *class,
pool              960 mm/zsmalloc.c  	cache_free_zspage(pool, zspage);
pool              964 mm/zsmalloc.c  					&pool->pages_allocated);
pool              967 mm/zsmalloc.c  static void free_zspage(struct zs_pool *pool, struct size_class *class,
pool              974 mm/zsmalloc.c  		kick_deferred_free(pool);
pool              979 mm/zsmalloc.c  	__free_zspage(pool, class, zspage);
pool             1063 mm/zsmalloc.c  static struct zspage *alloc_zspage(struct zs_pool *pool,
pool             1069 mm/zsmalloc.c  	struct zspage *zspage = cache_alloc_zspage(pool, gfp);
pool             1087 mm/zsmalloc.c  			cache_free_zspage(pool, zspage);
pool             1270 mm/zsmalloc.c  unsigned long zs_get_total_pages(struct zs_pool *pool)
pool             1272 mm/zsmalloc.c  	return atomic_long_read(&pool->pages_allocated);
pool             1291 mm/zsmalloc.c  void *zs_map_object(struct zs_pool *pool, unsigned long handle,
pool             1324 mm/zsmalloc.c  	class = pool->size_class[class_idx];
pool             1350 mm/zsmalloc.c  void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
pool             1366 mm/zsmalloc.c  	class = pool->size_class[class_idx];
pool             1401 mm/zsmalloc.c  size_t zs_huge_class_size(struct zs_pool *pool)
pool             1459 mm/zsmalloc.c  unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
pool             1469 mm/zsmalloc.c  	handle = cache_alloc_handle(pool, gfp);
pool             1475 mm/zsmalloc.c  	class = pool->size_class[get_size_class_index(size)];
pool             1491 mm/zsmalloc.c  	zspage = alloc_zspage(pool, class, gfp);
pool             1493 mm/zsmalloc.c  		cache_free_handle(pool, handle);
pool             1504 mm/zsmalloc.c  				&pool->pages_allocated);
pool             1508 mm/zsmalloc.c  	SetZsPageMovable(pool, zspage);
pool             1540 mm/zsmalloc.c  void zs_free(struct zs_pool *pool, unsigned long handle)
pool             1562 mm/zsmalloc.c  	class = pool->size_class[class_idx];
pool             1576 mm/zsmalloc.c  		free_zspage(pool, class, zspage);
pool             1581 mm/zsmalloc.c  	cache_free_handle(pool, handle);
pool             1695 mm/zsmalloc.c  static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
pool             1867 mm/zsmalloc.c  static void putback_zspage_deferred(struct zs_pool *pool,
pool             1875 mm/zsmalloc.c  		schedule_work(&pool->free_work);
pool             1879 mm/zsmalloc.c  static inline void zs_pool_dec_isolated(struct zs_pool *pool)
pool             1881 mm/zsmalloc.c  	VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
pool             1882 mm/zsmalloc.c  	atomic_long_dec(&pool->isolated_pages);
pool             1888 mm/zsmalloc.c  	if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
pool             1889 mm/zsmalloc.c  		wake_up_all(&pool->migration_wait);
pool             1917 mm/zsmalloc.c  	struct zs_pool *pool;
pool             1940 mm/zsmalloc.c  	pool = mapping->private_data;
pool             1941 mm/zsmalloc.c  	class = pool->size_class[class_idx];
pool             1961 mm/zsmalloc.c  		atomic_long_inc(&pool->isolated_pages);
pool             1974 mm/zsmalloc.c  	struct zs_pool *pool;
pool             2003 mm/zsmalloc.c  	pool = mapping->private_data;
pool             2004 mm/zsmalloc.c  	class = pool->size_class[class_idx];
pool             2068 mm/zsmalloc.c  		putback_zspage_deferred(pool, class, zspage);
pool             2069 mm/zsmalloc.c  		zs_pool_dec_isolated(pool);
pool             2102 mm/zsmalloc.c  	struct zs_pool *pool;
pool             2115 mm/zsmalloc.c  	pool = mapping->private_data;
pool             2116 mm/zsmalloc.c  	class = pool->size_class[class_idx];
pool             2125 mm/zsmalloc.c  		putback_zspage_deferred(pool, class, zspage);
pool             2126 mm/zsmalloc.c  		zs_pool_dec_isolated(pool);
pool             2137 mm/zsmalloc.c  static int zs_register_migration(struct zs_pool *pool)
pool             2139 mm/zsmalloc.c  	pool->inode = alloc_anon_inode(zsmalloc_mnt->mnt_sb);
pool             2140 mm/zsmalloc.c  	if (IS_ERR(pool->inode)) {
pool             2141 mm/zsmalloc.c  		pool->inode = NULL;
pool             2145 mm/zsmalloc.c  	pool->inode->i_mapping->private_data = pool;
pool             2146 mm/zsmalloc.c  	pool->inode->i_mapping->a_ops = &zsmalloc_aops;
pool             2150 mm/zsmalloc.c  static bool pool_isolated_are_drained(struct zs_pool *pool)
pool             2152 mm/zsmalloc.c  	return atomic_long_read(&pool->isolated_pages) == 0;
pool             2156 mm/zsmalloc.c  static void wait_for_isolated_drain(struct zs_pool *pool)
pool             2165 mm/zsmalloc.c  	wait_event(pool->migration_wait,
pool             2166 mm/zsmalloc.c  		   pool_isolated_are_drained(pool));
pool             2169 mm/zsmalloc.c  static void zs_unregister_migration(struct zs_pool *pool)
pool             2171 mm/zsmalloc.c  	pool->destroying = true;
pool             2179 mm/zsmalloc.c  	wait_for_isolated_drain(pool); /* This can block */
pool             2180 mm/zsmalloc.c  	flush_work(&pool->free_work);
pool             2181 mm/zsmalloc.c  	iput(pool->inode);
pool             2196 mm/zsmalloc.c  	struct zs_pool *pool = container_of(work, struct zs_pool,
pool             2200 mm/zsmalloc.c  		class = pool->size_class[i];
pool             2216 mm/zsmalloc.c  		class = pool->size_class[class_idx];
pool             2218 mm/zsmalloc.c  		__free_zspage(pool, pool->size_class[class_idx], zspage);
pool             2223 mm/zsmalloc.c  static void kick_deferred_free(struct zs_pool *pool)
pool             2225 mm/zsmalloc.c  	schedule_work(&pool->free_work);
pool             2228 mm/zsmalloc.c  static void init_deferred_free(struct zs_pool *pool)
pool             2230 mm/zsmalloc.c  	INIT_WORK(&pool->free_work, async_free_zspage);
pool             2233 mm/zsmalloc.c  static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
pool             2239 mm/zsmalloc.c  		__SetPageMovable(page, pool->inode->i_mapping);
pool             2265 mm/zsmalloc.c  static void __zs_compact(struct zs_pool *pool, struct size_class *class)
pool             2286 mm/zsmalloc.c  			if (!migrate_zspage(pool, class, &cc))
pool             2298 mm/zsmalloc.c  			free_zspage(pool, class, src_zspage);
pool             2299 mm/zsmalloc.c  			pool->stats.pages_compacted += class->pages_per_zspage;
pool             2312 mm/zsmalloc.c  unsigned long zs_compact(struct zs_pool *pool)
pool             2318 mm/zsmalloc.c  		class = pool->size_class[i];
pool             2323 mm/zsmalloc.c  		__zs_compact(pool, class);
pool             2326 mm/zsmalloc.c  	return pool->stats.pages_compacted;
pool             2330 mm/zsmalloc.c  void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats)
pool             2332 mm/zsmalloc.c  	memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats));
pool             2340 mm/zsmalloc.c  	struct zs_pool *pool = container_of(shrinker, struct zs_pool,
pool             2343 mm/zsmalloc.c  	pages_freed = pool->stats.pages_compacted;
pool             2349 mm/zsmalloc.c  	pages_freed = zs_compact(pool) - pages_freed;
pool             2360 mm/zsmalloc.c  	struct zs_pool *pool = container_of(shrinker, struct zs_pool,
pool             2364 mm/zsmalloc.c  		class = pool->size_class[i];
pool             2376 mm/zsmalloc.c  static void zs_unregister_shrinker(struct zs_pool *pool)
pool             2378 mm/zsmalloc.c  	unregister_shrinker(&pool->shrinker);
pool             2381 mm/zsmalloc.c  static int zs_register_shrinker(struct zs_pool *pool)
pool             2383 mm/zsmalloc.c  	pool->shrinker.scan_objects = zs_shrinker_scan;
pool             2384 mm/zsmalloc.c  	pool->shrinker.count_objects = zs_shrinker_count;
pool             2385 mm/zsmalloc.c  	pool->shrinker.batch = 0;
pool             2386 mm/zsmalloc.c  	pool->shrinker.seeks = DEFAULT_SEEKS;
pool             2388 mm/zsmalloc.c  	return register_shrinker(&pool->shrinker);
pool             2404 mm/zsmalloc.c  	struct zs_pool *pool;
pool             2407 mm/zsmalloc.c  	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
pool             2408 mm/zsmalloc.c  	if (!pool)
pool             2411 mm/zsmalloc.c  	init_deferred_free(pool);
pool             2413 mm/zsmalloc.c  	pool->name = kstrdup(name, GFP_KERNEL);
pool             2414 mm/zsmalloc.c  	if (!pool->name)
pool             2418 mm/zsmalloc.c  	init_waitqueue_head(&pool->migration_wait);
pool             2421 mm/zsmalloc.c  	if (create_cache(pool))
pool             2473 mm/zsmalloc.c  				pool->size_class[i] = prev_class;
pool             2487 mm/zsmalloc.c  		pool->size_class[i] = class;
pool             2496 mm/zsmalloc.c  	zs_pool_stat_create(pool, name);
pool             2498 mm/zsmalloc.c  	if (zs_register_migration(pool))
pool             2507 mm/zsmalloc.c  	zs_register_shrinker(pool);
pool             2509 mm/zsmalloc.c  	return pool;
pool             2512 mm/zsmalloc.c  	zs_destroy_pool(pool);
pool             2517 mm/zsmalloc.c  void zs_destroy_pool(struct zs_pool *pool)
pool             2521 mm/zsmalloc.c  	zs_unregister_shrinker(pool);
pool             2522 mm/zsmalloc.c  	zs_unregister_migration(pool);
pool             2523 mm/zsmalloc.c  	zs_pool_stat_destroy(pool);
pool             2527 mm/zsmalloc.c  		struct size_class *class = pool->size_class[i];
pool             2544 mm/zsmalloc.c  	destroy_cache(pool);
pool             2545 mm/zsmalloc.c  	kfree(pool->name);
pool             2546 mm/zsmalloc.c  	kfree(pool);
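The zsmalloc listing above spans the whole pool lifecycle: zs_create_pool()/zs_destroy_pool(), handle allocation with zs_malloc()/zs_free(), and access through zs_map_object()/zs_unmap_object(). A minimal sketch of that handle-based usage, with a hypothetical example_zs_use() and made-up sizes:

    #include <linux/zsmalloc.h>
    #include <linux/gfp.h>
    #include <linux/string.h>
    #include <linux/printk.h>

    static int example_zs_use(void)
    {
        struct zs_pool *pool = zs_create_pool("example");
        unsigned long handle;
        void *obj;

        if (!pool)
            return -ENOMEM;

        /* A zero handle means the allocation failed. */
        handle = zs_malloc(pool, 200, GFP_KERNEL);
        if (!handle) {
            zs_destroy_pool(pool);
            return -ENOMEM;
        }

        /* Objects are addressed by handle and must be mapped before use. */
        obj = zs_map_object(pool, handle, ZS_MM_WO);
        memset(obj, 0xaa, 200);
        zs_unmap_object(pool, handle);

        pr_info("zsmalloc pool uses %lu pages\n", zs_get_total_pages(pool));

        zs_free(pool, handle);
        zs_destroy_pool(pool);
        return 0;
    }

Objects are referenced through opaque handles rather than raw pointers so that zsmalloc can migrate and compact zspages behind the caller's back, which is what the migration and zs_compact() entries above implement.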
pool              156 mm/zswap.c     	struct zswap_pool *pool;
pool              203 mm/zswap.c     static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
pool              204 mm/zswap.c     static int zswap_pool_get(struct zswap_pool *pool);
pool              205 mm/zswap.c     static void zswap_pool_put(struct zswap_pool *pool);
pool              219 mm/zswap.c     	struct zswap_pool *pool;
pool              224 mm/zswap.c     	list_for_each_entry_rcu(pool, &zswap_pools, list)
pool              225 mm/zswap.c     		total += zpool_get_total_size(pool->zpool);
pool              328 mm/zswap.c     		zpool_free(entry->pool->zpool, entry->handle);
pool              329 mm/zswap.c     		zswap_pool_put(entry->pool);
pool              400 mm/zswap.c     	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
pool              403 mm/zswap.c     	if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
pool              406 mm/zswap.c     	tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
pool              409 mm/zswap.c     		       pool->tfm_name, PTR_ERR(tfm));
pool              412 mm/zswap.c     	*per_cpu_ptr(pool->tfm, cpu) = tfm;
pool              418 mm/zswap.c     	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
pool              421 mm/zswap.c     	tfm = *per_cpu_ptr(pool->tfm, cpu);
pool              424 mm/zswap.c     	*per_cpu_ptr(pool->tfm, cpu) = NULL;
pool              434 mm/zswap.c     	struct zswap_pool *pool;
pool              436 mm/zswap.c     	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
pool              437 mm/zswap.c     	WARN_ONCE(!pool && zswap_has_pool,
pool              440 mm/zswap.c     	return pool;
pool              452 mm/zswap.c     	struct zswap_pool *pool;
pool              456 mm/zswap.c     	pool = __zswap_pool_current();
pool              457 mm/zswap.c     	if (!zswap_pool_get(pool))
pool              458 mm/zswap.c     		pool = NULL;
pool              462 mm/zswap.c     	return pool;
pool              467 mm/zswap.c     	struct zswap_pool *pool, *last = NULL;
pool              471 mm/zswap.c     	list_for_each_entry_rcu(pool, &zswap_pools, list)
pool              472 mm/zswap.c     		last = pool;
pool              486 mm/zswap.c     	struct zswap_pool *pool;
pool              490 mm/zswap.c     	list_for_each_entry_rcu(pool, &zswap_pools, list) {
pool              491 mm/zswap.c     		if (strcmp(pool->tfm_name, compressor))
pool              493 mm/zswap.c     		if (strcmp(zpool_get_type(pool->zpool), type))
pool              496 mm/zswap.c     		if (!zswap_pool_get(pool))
pool              498 mm/zswap.c     		return pool;
pool              506 mm/zswap.c     	struct zswap_pool *pool;
pool              522 mm/zswap.c     	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
pool              523 mm/zswap.c     	if (!pool)
pool              529 mm/zswap.c     	pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
pool              530 mm/zswap.c     	if (!pool->zpool) {
pool              534 mm/zswap.c     	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));
pool              536 mm/zswap.c     	strlcpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
pool              537 mm/zswap.c     	pool->tfm = alloc_percpu(struct crypto_comp *);
pool              538 mm/zswap.c     	if (!pool->tfm) {
pool              544 mm/zswap.c     				       &pool->node);
pool              547 mm/zswap.c     	pr_debug("using %s compressor\n", pool->tfm_name);
pool              552 mm/zswap.c     	kref_init(&pool->kref);
pool              553 mm/zswap.c     	INIT_LIST_HEAD(&pool->list);
pool              555 mm/zswap.c     	zswap_pool_debug("created", pool);
pool              557 mm/zswap.c     	return pool;
pool              560 mm/zswap.c     	free_percpu(pool->tfm);
pool              561 mm/zswap.c     	if (pool->zpool)
pool              562 mm/zswap.c     		zpool_destroy_pool(pool->zpool);
pool              563 mm/zswap.c     	kfree(pool);
pool              607 mm/zswap.c     static void zswap_pool_destroy(struct zswap_pool *pool)
pool              609 mm/zswap.c     	zswap_pool_debug("destroying", pool);
pool              611 mm/zswap.c     	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
pool              612 mm/zswap.c     	free_percpu(pool->tfm);
pool              613 mm/zswap.c     	zpool_destroy_pool(pool->zpool);
pool              614 mm/zswap.c     	kfree(pool);
pool              617 mm/zswap.c     static int __must_check zswap_pool_get(struct zswap_pool *pool)
pool              619 mm/zswap.c     	if (!pool)
pool              622 mm/zswap.c     	return kref_get_unless_zero(&pool->kref);
pool              627 mm/zswap.c     	struct zswap_pool *pool = container_of(work, typeof(*pool), work);
pool              632 mm/zswap.c     	WARN_ON(kref_get_unless_zero(&pool->kref));
pool              635 mm/zswap.c     	zswap_pool_destroy(pool);
pool              640 mm/zswap.c     	struct zswap_pool *pool;
pool              642 mm/zswap.c     	pool = container_of(kref, typeof(*pool), kref);
pool              646 mm/zswap.c     	WARN_ON(pool == zswap_pool_current());
pool              648 mm/zswap.c     	list_del_rcu(&pool->list);
pool              650 mm/zswap.c     	INIT_WORK(&pool->work, __zswap_pool_release);
pool              651 mm/zswap.c     	schedule_work(&pool->work);
pool              656 mm/zswap.c     static void zswap_pool_put(struct zswap_pool *pool)
pool              658 mm/zswap.c     	kref_put(&pool->kref, __zswap_pool_empty);
pool              669 mm/zswap.c     	struct zswap_pool *pool, *put_pool = NULL;
pool              707 mm/zswap.c     	pool = zswap_pool_find_get(type, compressor);
pool              708 mm/zswap.c     	if (pool) {
pool              709 mm/zswap.c     		zswap_pool_debug("using existing", pool);
pool              710 mm/zswap.c     		WARN_ON(pool == zswap_pool_current());
pool              711 mm/zswap.c     		list_del_rcu(&pool->list);
pool              716 mm/zswap.c     	if (!pool)
pool              717 mm/zswap.c     		pool = zswap_pool_create(type, compressor);
pool              719 mm/zswap.c     	if (pool)
pool              728 mm/zswap.c     		list_add_rcu(&pool->list, &zswap_pools);
pool              730 mm/zswap.c     	} else if (pool) {
pool              735 mm/zswap.c     		list_add_tail_rcu(&pool->list, &zswap_pools);
pool              736 mm/zswap.c     		put_pool = pool;
pool              741 mm/zswap.c     	if (!zswap_has_pool && !pool) {
pool              840 mm/zswap.c     static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
pool              857 mm/zswap.c     	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
pool              868 mm/zswap.c     		zpool_unmap_handle(pool, handle);
pool              891 mm/zswap.c     		tfm = *get_cpu_ptr(entry->pool->tfm);
pool              894 mm/zswap.c     		put_cpu_ptr(entry->pool->tfm);
pool              941 mm/zswap.c     	zpool_unmap_handle(pool, handle);
pool              947 mm/zswap.c     	struct zswap_pool *pool;
pool              950 mm/zswap.c     	pool = zswap_pool_last_get();
pool              951 mm/zswap.c     	if (!pool)
pool              954 mm/zswap.c     	ret = zpool_shrink(pool->zpool, 1, NULL);
pool              956 mm/zswap.c     	zswap_pool_put(pool);
pool             1053 mm/zswap.c     	entry->pool = zswap_pool_current_get();
pool             1054 mm/zswap.c     	if (!entry->pool) {
pool             1061 mm/zswap.c     	tfm = *get_cpu_ptr(entry->pool->tfm);
pool             1065 mm/zswap.c     	put_cpu_ptr(entry->pool->tfm);
pool             1072 mm/zswap.c     	hlen = zpool_evictable(entry->pool->zpool) ? sizeof(zhdr) : 0;
pool             1074 mm/zswap.c     	if (zpool_malloc_support_movable(entry->pool->zpool))
pool             1076 mm/zswap.c     	ret = zpool_malloc(entry->pool->zpool, hlen + dlen, gfp, &handle);
pool             1085 mm/zswap.c     	buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
pool             1088 mm/zswap.c     	zpool_unmap_handle(entry->pool->zpool, handle);
pool             1118 mm/zswap.c     	zswap_pool_put(entry->pool);
pool             1158 mm/zswap.c     	src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO);
pool             1159 mm/zswap.c     	if (zpool_evictable(entry->pool->zpool))
pool             1162 mm/zswap.c     	tfm = *get_cpu_ptr(entry->pool->tfm);
pool             1164 mm/zswap.c     	put_cpu_ptr(entry->pool->tfm);
pool             1166 mm/zswap.c     	zpool_unmap_handle(entry->pool->zpool, entry->handle);
pool             1300 mm/zswap.c     	struct zswap_pool *pool;
pool             1324 mm/zswap.c     	pool = __zswap_pool_create_fallback();
pool             1325 mm/zswap.c     	if (pool) {
pool             1326 mm/zswap.c     		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
pool             1327 mm/zswap.c     			zpool_get_type(pool->zpool));
pool             1328 mm/zswap.c     		list_add(&pool->list, &zswap_pools);
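zswap's pool management above is built on a kref plus deferred destruction: __zswap_pool_empty() unlinks the pool and schedules __zswap_pool_release() instead of tearing the pool down in the caller's (possibly atomic) context. A generic sketch of that pattern with hypothetical example_* names; only the kref/workqueue shape mirrors the listing:

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct example_pool {
        struct kref kref;
        struct work_struct work;
    };

    /* Heavy teardown runs later, in process context. */
    static void example_pool_release(struct work_struct *work)
    {
        struct example_pool *pool = container_of(work, typeof(*pool), work);

        kfree(pool);
    }

    /* Called when the last reference drops, possibly in atomic context. */
    static void example_pool_empty(struct kref *kref)
    {
        struct example_pool *pool = container_of(kref, typeof(*pool), kref);

        INIT_WORK(&pool->work, example_pool_release);
        schedule_work(&pool->work);
    }

    static struct example_pool *example_pool_create(void)
    {
        struct example_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);

        if (pool)
            kref_init(&pool->kref);
        return pool;
    }

    /* Lookup paths take a reference only if the pool is still alive. */
    static bool example_pool_get(struct example_pool *pool)
    {
        return kref_get_unless_zero(&pool->kref);
    }

    static void example_pool_put(struct example_pool *pool)
    {
        kref_put(&pool->kref, example_pool_empty);
    }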
pool               37 net/bridge/netfilter/ebt_among.c 			p = &wh->pool[i];
pool               44 net/bridge/netfilter/ebt_among.c 			p = &wh->pool[i];
pool               94 net/ceph/debugfs.c 		seq_printf(s, "pg_temp %llu.%x [", pg->pgid.pool,
pool              105 net/ceph/debugfs.c 		seq_printf(s, "primary_temp %llu.%x %d\n", pg->pgid.pool,
pool              112 net/ceph/debugfs.c 		seq_printf(s, "pg_upmap %llu.%x [", pg->pgid.pool,
pool              123 net/ceph/debugfs.c 		seq_printf(s, "pg_upmap_items %llu.%x [", pg->pgid.pool,
pool              176 net/ceph/debugfs.c 	seq_printf(s, "%llu.%x", spgid->pgid.pool, spgid->pgid.seed);
pool              185 net/ceph/debugfs.c 	seq_printf(s, "osd%d\t%llu.%x\t", t->osd, t->pgid.pool, t->pgid.seed);
pool              298 net/ceph/debugfs.c 	    hoid->pool == S64_MIN) {
pool              306 net/ceph/debugfs.c 	seq_printf(s, "%lld:%08x:", hoid->pool, hoid->hash_reverse_bits);
pool             3509 net/ceph/messenger.c 	if (m->pool)
pool             3510 net/ceph/messenger.c 		ceph_msgpool_put(m->pool, m);
pool               14 net/ceph/msgpool.c 	struct ceph_msgpool *pool = arg;
pool               17 net/ceph/msgpool.c 	msg = ceph_msg_new2(pool->type, pool->front_len, pool->max_data_items,
pool               20 net/ceph/msgpool.c 		dout("msgpool_alloc %s failed\n", pool->name);
pool               22 net/ceph/msgpool.c 		dout("msgpool_alloc %s %p\n", pool->name, msg);
pool               23 net/ceph/msgpool.c 		msg->pool = pool;
pool               30 net/ceph/msgpool.c 	struct ceph_msgpool *pool = arg;
pool               33 net/ceph/msgpool.c 	dout("msgpool_release %s %p\n", pool->name, msg);
pool               34 net/ceph/msgpool.c 	msg->pool = NULL;
pool               38 net/ceph/msgpool.c int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
pool               43 net/ceph/msgpool.c 	pool->type = type;
pool               44 net/ceph/msgpool.c 	pool->front_len = front_len;
pool               45 net/ceph/msgpool.c 	pool->max_data_items = max_data_items;
pool               46 net/ceph/msgpool.c 	pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool);
pool               47 net/ceph/msgpool.c 	if (!pool->pool)
pool               49 net/ceph/msgpool.c 	pool->name = name;
pool               53 net/ceph/msgpool.c void ceph_msgpool_destroy(struct ceph_msgpool *pool)
pool               55 net/ceph/msgpool.c 	dout("msgpool %s destroy\n", pool->name);
pool               56 net/ceph/msgpool.c 	mempool_destroy(pool->pool);
pool               59 net/ceph/msgpool.c struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len,
pool               64 net/ceph/msgpool.c 	if (front_len > pool->front_len ||
pool               65 net/ceph/msgpool.c 	    max_data_items > pool->max_data_items) {
pool               67 net/ceph/msgpool.c 		    __func__, front_len, max_data_items, pool->name,
pool               68 net/ceph/msgpool.c 		    pool->front_len, pool->max_data_items);
pool               72 net/ceph/msgpool.c 		return ceph_msg_new2(pool->type, front_len, max_data_items,
pool               76 net/ceph/msgpool.c 	msg = mempool_alloc(pool->pool, GFP_NOFS);
pool               77 net/ceph/msgpool.c 	dout("msgpool_get %s %p\n", pool->name, msg);
pool               81 net/ceph/msgpool.c void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg)
pool               83 net/ceph/msgpool.c 	dout("msgpool_put %s %p\n", pool->name, msg);
pool               86 net/ceph/msgpool.c 	msg->front.iov_len = pool->front_len;
pool               87 net/ceph/msgpool.c 	msg->hdr.front_len = cpu_to_le32(pool->front_len);
pool               93 net/ceph/msgpool.c 	mempool_free(msg, pool->pool);
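net/ceph/msgpool.c above wraps a mempool of preallocated ceph_msg structures; ceph_msgpool_get() falls back to ceph_msg_new2() when a request exceeds the pool's front_len or max_data_items. A hedged sketch of one possible caller; the sizes, the pool name, and the choice of CEPH_MSG_OSD_OPREPLY as the message type are illustrative only:

    #include <linux/ceph/ceph_fs.h>
    #include <linux/ceph/messenger.h>
    #include <linux/ceph/msgpool.h>

    static struct ceph_msgpool example_msgpool;

    static int example_msgpool_use(void)
    {
        struct ceph_msg *msg;
        int ret;

        /* 512-byte front section, one data item, 8 preallocated messages. */
        ret = ceph_msgpool_init(&example_msgpool, CEPH_MSG_OSD_OPREPLY,
                                512, 1, 8, "example");
        if (ret)
            return ret;

        msg = ceph_msgpool_get(&example_msgpool, 512, 1);
        if (msg)
            ceph_msg_put(msg);   /* release routes pooled msgs back via ceph_msgpool_put() */

        ceph_msgpool_destroy(&example_msgpool);
        return 0;
    }

Dropping the last reference with ceph_msg_put() is what triggers the msg->pool check in the messenger lines above, so pooled messages return to the mempool instead of being freed.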
pool             1115 net/ceph/osd_client.c 	req->r_base_oloc.pool = layout->pool_id;
pool             1494 net/ceph/osd_client.c 	WARN_ON(pi->id != t->target_oloc.pool);
pool             1523 net/ceph/osd_client.c 	pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
pool             1544 net/ceph/osd_client.c 			t->target_oloc.pool = pi->read_tier;
pool             1546 net/ceph/osd_client.c 			t->target_oloc.pool = pi->write_tier;
pool             1548 net/ceph/osd_client.c 		pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
pool             1557 net/ceph/osd_client.c 	last_pgid.pool = pgid.pool;
pool             1688 net/ceph/osd_client.c 	if (lhs->pool < rhs->pool)
pool             1690 net/ceph/osd_client.c 	if (lhs->pool > rhs->pool)
pool             1772 net/ceph/osd_client.c 	ceph_decode_64_safe(p, end, hoid->pool, e_inval);
pool             1796 net/ceph/osd_client.c 	ceph_encode_64(p, hoid->pool);
pool             1910 net/ceph/osd_client.c 	hoid->pool = t->target_oloc.pool;
pool             1931 net/ceph/osd_client.c 	     __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool,
pool             2015 net/ceph/osd_client.c 	ceph_encode_64(p, pgid->pool);
pool             2031 net/ceph/osd_client.c 	ceph_encode_64(p, oloc->pool);
pool             2157 net/ceph/osd_client.c 		pgid.pool = ceph_decode_64(&p);
pool             2240 net/ceph/osd_client.c 	     __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
pool             2241 net/ceph/osd_client.c 	     req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed,
pool             2323 net/ceph/osd_client.c 		    pool_full(osdc, req->r_t.base_oloc.pool))) {
pool             2542 net/ceph/osd_client.c 	     pool_full(osdc, req->r_t.base_oloc.pool))) {
pool             3408 net/ceph/osd_client.c 	oloc->pool = ceph_decode_64(p);
pool             3629 net/ceph/osd_client.c 	     __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
pool             3646 net/ceph/osd_client.c 		     m.redirect.oloc.pool);
pool             3654 net/ceph/osd_client.c 		req->r_t.target_oloc.pool = m.redirect.oloc.pool;
pool             3775 net/ceph/osd_client.c 			     pool_cleared_full(osdc, lreq->t.base_oloc.pool));
pool             3810 net/ceph/osd_client.c 			     pool_cleared_full(osdc, req->r_t.base_oloc.pool));
pool             4223 net/ceph/osd_client.c 	     m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
pool             4283 net/ceph/osd_client.c 	     m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
pool             4288 net/ceph/osd_client.c 		       __func__, osd->o_osd, m->spgid.pgid.pool,
pool             4296 net/ceph/osd_client.c 		       __func__, osd->o_osd, m->spgid.pgid.pool,
pool              582 net/ceph/osdmap.c 	if (lhs->pool < rhs->pool)
pool              584 net/ceph/osdmap.c 	if (lhs->pool > rhs->pool)
pool              897 net/ceph/osdmap.c 	u64 pool;
pool              902 net/ceph/osdmap.c 		ceph_decode_64_safe(p, end, pool, bad);
pool              904 net/ceph/osdmap.c 		dout("  pool %llu len %d\n", pool, len);
pool              906 net/ceph/osdmap.c 		pi = __lookup_pg_pool(&map->pg_pools, pool);
pool             1152 net/ceph/osdmap.c 		u64 pool;
pool             1155 net/ceph/osdmap.c 		ceph_decode_64_safe(p, end, pool, e_inval);
pool             1157 net/ceph/osdmap.c 		pi = __lookup_pg_pool(&map->pg_pools, pool);
pool             1163 net/ceph/osdmap.c 			pi->id = pool;
pool             1758 net/ceph/osdmap.c 	u64 pool;
pool             1831 net/ceph/osdmap.c 		ceph_decode_64_safe(p, end, pool, e_inval);
pool             1832 net/ceph/osdmap.c 		pi = __lookup_pg_pool(&map->pg_pools, pool);
pool             1909 net/ceph/osdmap.c 	dest->pool = src->pool;
pool             2184 net/ceph/osdmap.c 	WARN_ON(pi->id != oloc->pool);
pool             2187 net/ceph/osdmap.c 		raw_pgid->pool = oloc->pool;
pool             2191 net/ceph/osdmap.c 		     raw_pgid->pool, raw_pgid->seed);
pool             2203 net/ceph/osdmap.c 		raw_pgid->pool = oloc->pool;
pool             2209 net/ceph/osdmap.c 		     raw_pgid->pool, raw_pgid->seed);
pool             2220 net/ceph/osdmap.c 	pi = ceph_pg_pool_by_id(osdmap, oloc->pool);
pool             2236 net/ceph/osdmap.c 	pgid->pool = raw_pgid->pool;
pool             2255 net/ceph/osdmap.c 				      raw_pgid->pool);
pool             2265 net/ceph/osdmap.c 		       (unsigned)raw_pgid->pool;
pool             2611 net/ceph/osdmap.c 	WARN_ON(pi->id != raw_pgid->pool);
pool             2637 net/ceph/osdmap.c 	WARN_ON(pi->id != raw_pgid->pool);
pool             2667 net/ceph/osdmap.c 	pi = ceph_pg_pool_by_id(osdmap, raw_pgid->pool);
pool               24 net/core/page_pool.c static int page_pool_init(struct page_pool *pool,
pool               29 net/core/page_pool.c 	memcpy(&pool->p, params, sizeof(pool->p));
pool               32 net/core/page_pool.c 	if (pool->p.flags & ~(PP_FLAG_ALL))
pool               35 net/core/page_pool.c 	if (pool->p.pool_size)
pool               36 net/core/page_pool.c 		ring_qsize = pool->p.pool_size;
pool               46 net/core/page_pool.c 	if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
pool               47 net/core/page_pool.c 	    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
pool               50 net/core/page_pool.c 	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
pool               53 net/core/page_pool.c 	atomic_set(&pool->pages_state_release_cnt, 0);
pool               56 net/core/page_pool.c 	refcount_set(&pool->user_cnt, 1);
pool               58 net/core/page_pool.c 	if (pool->p.flags & PP_FLAG_DMA_MAP)
pool               59 net/core/page_pool.c 		get_device(pool->p.dev);
pool               66 net/core/page_pool.c 	struct page_pool *pool;
pool               69 net/core/page_pool.c 	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
pool               70 net/core/page_pool.c 	if (!pool)
pool               73 net/core/page_pool.c 	err = page_pool_init(pool, params);
pool               76 net/core/page_pool.c 		kfree(pool);
pool               80 net/core/page_pool.c 	return pool;
pool               85 net/core/page_pool.c static struct page *__page_pool_get_cached(struct page_pool *pool)
pool               87 net/core/page_pool.c 	struct ptr_ring *r = &pool->ring;
pool               93 net/core/page_pool.c 		if (likely(pool->alloc.count)) {
pool               95 net/core/page_pool.c 			page = pool->alloc.cache[--pool->alloc.count];
pool              111 net/core/page_pool.c 		pool->alloc.count = __ptr_ring_consume_batched(r,
pool              112 net/core/page_pool.c 							pool->alloc.cache,
pool              120 net/core/page_pool.c static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
pool              130 net/core/page_pool.c 	if (pool->p.order)
pool              141 net/core/page_pool.c 	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
pool              145 net/core/page_pool.c 	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
pool              153 net/core/page_pool.c 	dma = dma_map_page_attrs(pool->p.dev, page, 0,
pool              154 net/core/page_pool.c 				 (PAGE_SIZE << pool->p.order),
pool              155 net/core/page_pool.c 				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
pool              156 net/core/page_pool.c 	if (dma_mapping_error(pool->p.dev, dma)) {
pool              164 net/core/page_pool.c 	pool->pages_state_hold_cnt++;
pool              166 net/core/page_pool.c 	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
pool              175 net/core/page_pool.c struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
pool              180 net/core/page_pool.c 	page = __page_pool_get_cached(pool);
pool              185 net/core/page_pool.c 	page = __page_pool_alloc_pages_slow(pool, gfp);
pool              195 net/core/page_pool.c static s32 page_pool_inflight(struct page_pool *pool)
pool              197 net/core/page_pool.c 	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
pool              198 net/core/page_pool.c 	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
pool              203 net/core/page_pool.c 	trace_page_pool_inflight(pool, inflight, hold_cnt, release_cnt);
pool              210 net/core/page_pool.c static void __page_pool_clean_page(struct page_pool *pool,
pool              216 net/core/page_pool.c 	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
pool              221 net/core/page_pool.c 	dma_unmap_page_attrs(pool->p.dev, dma,
pool              222 net/core/page_pool.c 			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
pool              229 net/core/page_pool.c 	count = atomic_inc_return(&pool->pages_state_release_cnt);
pool              230 net/core/page_pool.c 	trace_page_pool_state_release(pool, page, count);
pool              234 net/core/page_pool.c void page_pool_unmap_page(struct page_pool *pool, struct page *page)
pool              239 net/core/page_pool.c 	__page_pool_clean_page(pool, page);
pool              244 net/core/page_pool.c static void __page_pool_return_page(struct page_pool *pool, struct page *page)
pool              246 net/core/page_pool.c 	__page_pool_clean_page(pool, page);
pool              255 net/core/page_pool.c static bool __page_pool_recycle_into_ring(struct page_pool *pool,
pool              261 net/core/page_pool.c 		ret = ptr_ring_produce(&pool->ring, page);
pool              263 net/core/page_pool.c 		ret = ptr_ring_produce_bh(&pool->ring, page);
pool              274 net/core/page_pool.c 				       struct page_pool *pool)
pool              276 net/core/page_pool.c 	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
pool              280 net/core/page_pool.c 	pool->alloc.cache[pool->alloc.count++] = page;
pool              284 net/core/page_pool.c void __page_pool_put_page(struct page_pool *pool,
pool              297 net/core/page_pool.c 			if (__page_pool_recycle_direct(page, pool))
pool              300 net/core/page_pool.c 		if (!__page_pool_recycle_into_ring(pool, page)) {
pool              302 net/core/page_pool.c 			__page_pool_return_page(pool, page);
pool              319 net/core/page_pool.c 	__page_pool_clean_page(pool, page);
pool              324 net/core/page_pool.c static void __page_pool_empty_ring(struct page_pool *pool)
pool              329 net/core/page_pool.c 	while ((page = ptr_ring_consume_bh(&pool->ring))) {
pool              335 net/core/page_pool.c 		__page_pool_return_page(pool, page);
pool              339 net/core/page_pool.c static void page_pool_free(struct page_pool *pool)
pool              341 net/core/page_pool.c 	if (pool->disconnect)
pool              342 net/core/page_pool.c 		pool->disconnect(pool);
pool              344 net/core/page_pool.c 	ptr_ring_cleanup(&pool->ring, NULL);
pool              346 net/core/page_pool.c 	if (pool->p.flags & PP_FLAG_DMA_MAP)
pool              347 net/core/page_pool.c 		put_device(pool->p.dev);
pool              349 net/core/page_pool.c 	kfree(pool);
pool              352 net/core/page_pool.c static void page_pool_scrub(struct page_pool *pool)
pool              360 net/core/page_pool.c 	while (pool->alloc.count) {
pool              361 net/core/page_pool.c 		page = pool->alloc.cache[--pool->alloc.count];
pool              362 net/core/page_pool.c 		__page_pool_return_page(pool, page);
pool              368 net/core/page_pool.c 	__page_pool_empty_ring(pool);
pool              371 net/core/page_pool.c static int page_pool_release(struct page_pool *pool)
pool              375 net/core/page_pool.c 	page_pool_scrub(pool);
pool              376 net/core/page_pool.c 	inflight = page_pool_inflight(pool);
pool              378 net/core/page_pool.c 		page_pool_free(pool);
pool              386 net/core/page_pool.c 	struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
pool              389 net/core/page_pool.c 	inflight = page_pool_release(pool);
pool              394 net/core/page_pool.c 	if (time_after_eq(jiffies, pool->defer_warn)) {
pool              395 net/core/page_pool.c 		int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;
pool              399 net/core/page_pool.c 		pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
pool              403 net/core/page_pool.c 	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
pool              406 net/core/page_pool.c void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *))
pool              408 net/core/page_pool.c 	refcount_inc(&pool->user_cnt);
pool              409 net/core/page_pool.c 	pool->disconnect = disconnect;
pool              412 net/core/page_pool.c void page_pool_destroy(struct page_pool *pool)
pool              414 net/core/page_pool.c 	if (!pool)
pool              417 net/core/page_pool.c 	if (!page_pool_put(pool))
pool              420 net/core/page_pool.c 	if (!page_pool_release(pool))
pool              423 net/core/page_pool.c 	pool->defer_start = jiffies;
pool              424 net/core/page_pool.c 	pool->defer_warn  = jiffies + DEFER_WARN_INTERVAL;
pool              426 net/core/page_pool.c 	INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
pool              427 net/core/page_pool.c 	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
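The page_pool listing above covers creation (page_pool_init()/page_pool_create()), the cached and slow allocation paths, DMA mapping, recycling into the ptr_ring, and the deferred teardown in page_pool_release_retry(). A sketch of how a driver might set up such a pool; the device pointer, ring size, and order are placeholders for a real rx ring configuration:

    #include <net/page_pool.h>
    #include <linux/dma-direction.h>
    #include <linux/numa.h>

    static struct page_pool *example_page_pool_create(struct device *dev)
    {
        struct page_pool_params pp = {
            .flags     = PP_FLAG_DMA_MAP,   /* let the pool DMA-map pages */
            .order     = 0,                 /* single pages */
            .pool_size = 256,               /* ptr_ring size */
            .nid       = NUMA_NO_NODE,
            .dev       = dev,
            .dma_dir   = DMA_FROM_DEVICE,
        };

        /* Returns an ERR_PTR() on failure. */
        return page_pool_create(&pp);
    }

Pages would then come from page_pool_alloc_pages(pool, GFP_ATOMIC) on the receive path, and page_pool_destroy(pool) defers the final free through the release_retry worker above until the in-flight count drops to zero.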
pool               37 net/rds/ib_fmr.c 	struct rds_ib_mr_pool *pool;
pool               43 net/rds/ib_fmr.c 		pool = rds_ibdev->mr_8k_pool;
pool               45 net/rds/ib_fmr.c 		pool = rds_ibdev->mr_1m_pool;
pool               47 net/rds/ib_fmr.c 	if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
pool               48 net/rds/ib_fmr.c 		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
pool               51 net/rds/ib_fmr.c 	if (atomic_read(&pool->dirty_count) >=  pool->max_items * 9 / 10) {
pool               52 net/rds/ib_fmr.c 		if (pool->pool_type == RDS_IB_MR_8K_POOL)
pool               53 net/rds/ib_fmr.c 			pool = rds_ibdev->mr_1m_pool;
pool               55 net/rds/ib_fmr.c 			pool = rds_ibdev->mr_8k_pool;
pool               58 net/rds/ib_fmr.c 	ibmr = rds_ib_try_reuse_ibmr(pool);
pool               75 net/rds/ib_fmr.c 			&pool->fmr_attr);
pool               83 net/rds/ib_fmr.c 	ibmr->pool = pool;
pool               84 net/rds/ib_fmr.c 	if (pool->pool_type == RDS_IB_MR_8K_POOL)
pool               93 net/rds/ib_fmr.c 	atomic_dec(&pool->item_count);
pool              148 net/rds/ib_fmr.c 	if (page_cnt > ibmr->pool->fmr_attr.max_pages) {
pool              186 net/rds/ib_fmr.c 	if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
pool              247 net/rds/ib_fmr.c 		    ibmr->remap_count >= ibmr->pool->fmr_attr.max_maps) {
pool              248 net/rds/ib_fmr.c 			if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
pool              263 net/rds/ib_fmr.c 	struct rds_ib_mr_pool *pool = ibmr->pool;
pool              265 net/rds/ib_fmr.c 	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
pool              266 net/rds/ib_fmr.c 		llist_add(&ibmr->llnode, &pool->drop_list);
pool              268 net/rds/ib_fmr.c 		llist_add(&ibmr->llnode, &pool->free_list);
pool               56 net/rds/ib_frmr.c 	struct rds_ib_mr_pool *pool;
pool               62 net/rds/ib_frmr.c 		pool = rds_ibdev->mr_8k_pool;
pool               64 net/rds/ib_frmr.c 		pool = rds_ibdev->mr_1m_pool;
pool               66 net/rds/ib_frmr.c 	ibmr = rds_ib_try_reuse_ibmr(pool);
pool               79 net/rds/ib_frmr.c 			 pool->fmr_attr.max_pages);
pool               86 net/rds/ib_frmr.c 	ibmr->pool = pool;
pool               87 net/rds/ib_frmr.c 	if (pool->pool_type == RDS_IB_MR_8K_POOL)
pool               92 net/rds/ib_frmr.c 	if (atomic_read(&pool->item_count) > pool->max_items_soft)
pool               93 net/rds/ib_frmr.c 		pool->max_items_soft = pool->max_items;
pool              102 net/rds/ib_frmr.c 	atomic_dec(&pool->item_count);
pool              108 net/rds/ib_frmr.c 	struct rds_ib_mr_pool *pool = ibmr->pool;
pool              111 net/rds/ib_frmr.c 		llist_add(&ibmr->llnode, &pool->drop_list);
pool              113 net/rds/ib_frmr.c 		llist_add(&ibmr->llnode, &pool->free_list);
pool              114 net/rds/ib_frmr.c 	atomic_add(ibmr->sg_len, &pool->free_pinned);
pool              115 net/rds/ib_frmr.c 	atomic_inc(&pool->dirty_count);
pool              118 net/rds/ib_frmr.c 	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
pool              119 net/rds/ib_frmr.c 	    atomic_read(&pool->dirty_count) >= pool->max_items / 5)
pool              120 net/rds/ib_frmr.c 		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
pool              188 net/rds/ib_frmr.c 			   struct rds_ib_mr_pool *pool,
pool              243 net/rds/ib_frmr.c 	if (frmr->dma_npages > ibmr->pool->fmr_attr.max_pages) {
pool              252 net/rds/ib_frmr.c 	if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
pool              387 net/rds/ib_frmr.c 			if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
pool              426 net/rds/ib_frmr.c 	ret = rds_ib_map_frmr(rds_ibdev, ibmr->pool, ibmr, sg, nents);
pool              439 net/rds/ib_frmr.c 	struct rds_ib_mr_pool *pool = ibmr->pool;
pool              443 net/rds/ib_frmr.c 		llist_add(&ibmr->llnode, &pool->drop_list);
pool              445 net/rds/ib_frmr.c 		llist_add(&ibmr->llnode, &pool->free_list);
pool               71 net/rds/ib_mr.h 	struct rds_ib_mr_pool		*pool;
pool              191 net/rds/ib_rdma.c struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
pool              197 net/rds/ib_rdma.c 	spin_lock_irqsave(&pool->clean_lock, flags);
pool              198 net/rds/ib_rdma.c 	ret = llist_del_first(&pool->clean_list);
pool              199 net/rds/ib_rdma.c 	spin_unlock_irqrestore(&pool->clean_lock, flags);
pool              202 net/rds/ib_rdma.c 		if (pool->pool_type == RDS_IB_MR_8K_POOL)
pool              265 net/rds/ib_rdma.c 		struct rds_ib_mr_pool *pool = ibmr->pool;
pool              267 net/rds/ib_rdma.c 		atomic_sub(pinned, &pool->free_pinned);
pool              271 net/rds/ib_rdma.c static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
pool              275 net/rds/ib_rdma.c 	item_count = atomic_read(&pool->item_count);
pool              332 net/rds/ib_rdma.c int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
pool              342 net/rds/ib_rdma.c 	if (pool->pool_type == RDS_IB_MR_8K_POOL)
pool              349 net/rds/ib_rdma.c 		while (!mutex_trylock(&pool->flush_lock)) {
pool              350 net/rds/ib_rdma.c 			ibmr = rds_ib_reuse_mr(pool);
pool              353 net/rds/ib_rdma.c 				finish_wait(&pool->flush_wait, &wait);
pool              357 net/rds/ib_rdma.c 			prepare_to_wait(&pool->flush_wait, &wait,
pool              359 net/rds/ib_rdma.c 			if (llist_empty(&pool->clean_list))
pool              362 net/rds/ib_rdma.c 			ibmr = rds_ib_reuse_mr(pool);
pool              365 net/rds/ib_rdma.c 				finish_wait(&pool->flush_wait, &wait);
pool              369 net/rds/ib_rdma.c 		finish_wait(&pool->flush_wait, &wait);
pool              371 net/rds/ib_rdma.c 		mutex_lock(&pool->flush_lock);
pool              374 net/rds/ib_rdma.c 		ibmr = rds_ib_reuse_mr(pool);
pool              384 net/rds/ib_rdma.c 	dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
pool              385 net/rds/ib_rdma.c 	dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
pool              389 net/rds/ib_rdma.c 		spin_lock_irqsave(&pool->clean_lock, flags);
pool              390 net/rds/ib_rdma.c 		llist_append_to_list(&pool->clean_list, &unmap_list);
pool              391 net/rds/ib_rdma.c 		spin_unlock_irqrestore(&pool->clean_lock, flags);
pool              394 net/rds/ib_rdma.c 	free_goal = rds_ib_flush_goal(pool, free_all);
pool              399 net/rds/ib_rdma.c 	if (pool->use_fastreg)
pool              414 net/rds/ib_rdma.c 			spin_lock_irqsave(&pool->clean_lock, flags);
pool              416 net/rds/ib_rdma.c 					&pool->clean_list);
pool              417 net/rds/ib_rdma.c 			spin_unlock_irqrestore(&pool->clean_lock, flags);
pool              421 net/rds/ib_rdma.c 	atomic_sub(unpinned, &pool->free_pinned);
pool              422 net/rds/ib_rdma.c 	atomic_sub(dirty_to_clean, &pool->dirty_count);
pool              423 net/rds/ib_rdma.c 	atomic_sub(nfreed, &pool->item_count);
pool              426 net/rds/ib_rdma.c 	mutex_unlock(&pool->flush_lock);
pool              427 net/rds/ib_rdma.c 	if (waitqueue_active(&pool->flush_wait))
pool              428 net/rds/ib_rdma.c 		wake_up(&pool->flush_wait);
pool              433 net/rds/ib_rdma.c struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
pool              439 net/rds/ib_rdma.c 		ibmr = rds_ib_reuse_mr(pool);
pool              443 net/rds/ib_rdma.c 		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
pool              446 net/rds/ib_rdma.c 		atomic_dec(&pool->item_count);
pool              449 net/rds/ib_rdma.c 			if (pool->pool_type == RDS_IB_MR_8K_POOL)
pool              457 net/rds/ib_rdma.c 		if (pool->pool_type == RDS_IB_MR_8K_POOL)
pool              462 net/rds/ib_rdma.c 		rds_ib_flush_mr_pool(pool, 0, &ibmr);
pool              472 net/rds/ib_rdma.c 	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);
pool              474 net/rds/ib_rdma.c 	rds_ib_flush_mr_pool(pool, 0, NULL);
pool              480 net/rds/ib_rdma.c 	struct rds_ib_mr_pool *pool = ibmr->pool;
pool              491 net/rds/ib_rdma.c 	atomic_add(ibmr->sg_len, &pool->free_pinned);
pool              492 net/rds/ib_rdma.c 	atomic_inc(&pool->dirty_count);
pool              495 net/rds/ib_rdma.c 	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
pool              496 net/rds/ib_rdma.c 	    atomic_read(&pool->dirty_count) >= pool->max_items / 5)
pool              497 net/rds/ib_rdma.c 		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
pool              501 net/rds/ib_rdma.c 			rds_ib_flush_mr_pool(pool, 0, NULL);
pool              507 net/rds/ib_rdma.c 					   &pool->flush_worker, 10);
pool              570 net/rds/ib_rdma.c void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
pool              572 net/rds/ib_rdma.c 	cancel_delayed_work_sync(&pool->flush_worker);
pool              573 net/rds/ib_rdma.c 	rds_ib_flush_mr_pool(pool, 1, NULL);
pool              574 net/rds/ib_rdma.c 	WARN_ON(atomic_read(&pool->item_count));
pool              575 net/rds/ib_rdma.c 	WARN_ON(atomic_read(&pool->free_pinned));
pool              576 net/rds/ib_rdma.c 	kfree(pool);
pool              582 net/rds/ib_rdma.c 	struct rds_ib_mr_pool *pool;
pool              584 net/rds/ib_rdma.c 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
pool              585 net/rds/ib_rdma.c 	if (!pool)
pool              588 net/rds/ib_rdma.c 	pool->pool_type = pool_type;
pool              589 net/rds/ib_rdma.c 	init_llist_head(&pool->free_list);
pool              590 net/rds/ib_rdma.c 	init_llist_head(&pool->drop_list);
pool              591 net/rds/ib_rdma.c 	init_llist_head(&pool->clean_list);
pool              592 net/rds/ib_rdma.c 	spin_lock_init(&pool->clean_lock);
pool              593 net/rds/ib_rdma.c 	mutex_init(&pool->flush_lock);
pool              594 net/rds/ib_rdma.c 	init_waitqueue_head(&pool->flush_wait);
pool              595 net/rds/ib_rdma.c 	INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
pool              599 net/rds/ib_rdma.c 		pool->fmr_attr.max_pages = RDS_MR_1M_MSG_SIZE + 1;
pool              600 net/rds/ib_rdma.c 		pool->max_items = rds_ibdev->max_1m_mrs;
pool              603 net/rds/ib_rdma.c 		pool->fmr_attr.max_pages = RDS_MR_8K_MSG_SIZE + 1;
pool              604 net/rds/ib_rdma.c 		pool->max_items = rds_ibdev->max_8k_mrs;
pool              607 net/rds/ib_rdma.c 	pool->max_free_pinned = pool->max_items * pool->fmr_attr.max_pages / 4;
pool              608 net/rds/ib_rdma.c 	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
pool              609 net/rds/ib_rdma.c 	pool->fmr_attr.page_shift = PAGE_SHIFT;
pool              610 net/rds/ib_rdma.c 	pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;
pool              611 net/rds/ib_rdma.c 	pool->use_fastreg = rds_ibdev->use_fastreg;
pool              613 net/rds/ib_rdma.c 	return pool;
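The RDS MR pool above hands finished MRs back through lock-free llists (free_list and drop_list) and lets the flush worker move them to clean_list, so allocation can usually pop a clean MR without entering the flush path. A stripped-down sketch of that reuse scheme with hypothetical example_* types; only the llist-plus-clean_lock shape mirrors the listing:

    #include <linux/kernel.h>
    #include <linux/llist.h>
    #include <linux/spinlock.h>

    struct example_item {
        struct llist_node llnode;
    };

    struct example_pool {
        struct llist_head free_list;   /* returned, waiting for the flush worker */
        struct llist_head clean_list;  /* ready for immediate reuse */
        spinlock_t clean_lock;
    };

    static struct example_item *example_reuse(struct example_pool *pool)
    {
        struct llist_node *node;
        unsigned long flags;

        /* llist_del_first() must be serialized against other consumers. */
        spin_lock_irqsave(&pool->clean_lock, flags);
        node = llist_del_first(&pool->clean_list);
        spin_unlock_irqrestore(&pool->clean_lock, flags);

        return node ? container_of(node, struct example_item, llnode) : NULL;
    }

    /* Producers can push concurrently without taking any lock. */
    static void example_put(struct example_pool *pool, struct example_item *item)
    {
        llist_add(&item->llnode, &pool->free_list);
    }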
pool              474 net/sunrpc/svc.c 		struct svc_pool *pool = &serv->sv_pools[i];
pool              479 net/sunrpc/svc.c 		pool->sp_id = i;
pool              480 net/sunrpc/svc.c 		INIT_LIST_HEAD(&pool->sp_sockets);
pool              481 net/sunrpc/svc.c 		INIT_LIST_HEAD(&pool->sp_all_threads);
pool              482 net/sunrpc/svc.c 		spin_lock_init(&pool->sp_lock);
pool              604 net/sunrpc/svc.c svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
pool              615 net/sunrpc/svc.c 	rqstp->rq_pool = pool;
pool              636 net/sunrpc/svc.c svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
pool              640 net/sunrpc/svc.c 	rqstp = svc_rqst_alloc(serv, pool, node);
pool              645 net/sunrpc/svc.c 	spin_lock_bh(&pool->sp_lock);
pool              646 net/sunrpc/svc.c 	pool->sp_nrthreads++;
pool              647 net/sunrpc/svc.c 	list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
pool              648 net/sunrpc/svc.c 	spin_unlock_bh(&pool->sp_lock);
pool              657 net/sunrpc/svc.c choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
pool              659 net/sunrpc/svc.c 	if (pool != NULL)
pool              660 net/sunrpc/svc.c 		return pool;
pool              669 net/sunrpc/svc.c choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
pool              674 net/sunrpc/svc.c 	if (pool != NULL) {
pool              675 net/sunrpc/svc.c 		spin_lock_bh(&pool->sp_lock);
pool              679 net/sunrpc/svc.c 			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
pool              680 net/sunrpc/svc.c 			spin_lock_bh(&pool->sp_lock);
pool              681 net/sunrpc/svc.c 			if (!list_empty(&pool->sp_all_threads))
pool              683 net/sunrpc/svc.c 			spin_unlock_bh(&pool->sp_lock);
pool              689 net/sunrpc/svc.c 	if (!list_empty(&pool->sp_all_threads)) {
pool              696 net/sunrpc/svc.c 		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
pool              701 net/sunrpc/svc.c 	spin_unlock_bh(&pool->sp_lock);
pool              708 net/sunrpc/svc.c svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
pool              718 net/sunrpc/svc.c 		chosen_pool = choose_pool(serv, pool, &state);
pool              748 net/sunrpc/svc.c svc_signal_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
pool              755 net/sunrpc/svc.c 		task = choose_victim(serv, pool, &state);
pool              780 net/sunrpc/svc.c svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
pool              782 net/sunrpc/svc.c 	if (pool == NULL) {
pool              786 net/sunrpc/svc.c 		spin_lock_bh(&pool->sp_lock);
pool              787 net/sunrpc/svc.c 		nrservs -= pool->sp_nrthreads;
pool              788 net/sunrpc/svc.c 		spin_unlock_bh(&pool->sp_lock);
pool              792 net/sunrpc/svc.c 		return svc_start_kthreads(serv, pool, nrservs);
pool              794 net/sunrpc/svc.c 		return svc_signal_kthreads(serv, pool, nrservs);
pool              801 net/sunrpc/svc.c svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
pool              808 net/sunrpc/svc.c 		task = choose_victim(serv, pool, &state);
pool              818 net/sunrpc/svc.c svc_set_num_threads_sync(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
pool              820 net/sunrpc/svc.c 	if (pool == NULL) {
pool              824 net/sunrpc/svc.c 		spin_lock_bh(&pool->sp_lock);
pool              825 net/sunrpc/svc.c 		nrservs -= pool->sp_nrthreads;
pool              826 net/sunrpc/svc.c 		spin_unlock_bh(&pool->sp_lock);
pool              830 net/sunrpc/svc.c 		return svc_start_kthreads(serv, pool, nrservs);
pool              832 net/sunrpc/svc.c 		return svc_stop_kthreads(serv, pool, nrservs);
pool              856 net/sunrpc/svc.c 	struct svc_pool	*pool = rqstp->rq_pool;
pool              858 net/sunrpc/svc.c 	spin_lock_bh(&pool->sp_lock);
pool              859 net/sunrpc/svc.c 	pool->sp_nrthreads--;
pool              862 net/sunrpc/svc.c 	spin_unlock_bh(&pool->sp_lock);
pool              399 net/sunrpc/svc_xprt.c 	struct svc_pool *pool;
pool              415 net/sunrpc/svc_xprt.c 	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
pool              417 net/sunrpc/svc_xprt.c 	atomic_long_inc(&pool->sp_stats.packets);
pool              419 net/sunrpc/svc_xprt.c 	spin_lock_bh(&pool->sp_lock);
pool              420 net/sunrpc/svc_xprt.c 	list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
pool              421 net/sunrpc/svc_xprt.c 	pool->sp_stats.sockets_queued++;
pool              422 net/sunrpc/svc_xprt.c 	spin_unlock_bh(&pool->sp_lock);
pool              426 net/sunrpc/svc_xprt.c 	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
pool              429 net/sunrpc/svc_xprt.c 		atomic_long_inc(&pool->sp_stats.threads_woken);
pool              434 net/sunrpc/svc_xprt.c 	set_bit(SP_CONGESTED, &pool->sp_flags);
pool              459 net/sunrpc/svc_xprt.c static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
pool              463 net/sunrpc/svc_xprt.c 	if (list_empty(&pool->sp_sockets))
pool              466 net/sunrpc/svc_xprt.c 	spin_lock_bh(&pool->sp_lock);
pool              467 net/sunrpc/svc_xprt.c 	if (likely(!list_empty(&pool->sp_sockets))) {
pool              468 net/sunrpc/svc_xprt.c 		xprt = list_first_entry(&pool->sp_sockets,
pool              473 net/sunrpc/svc_xprt.c 	spin_unlock_bh(&pool->sp_lock);
pool              543 net/sunrpc/svc_xprt.c 	struct svc_pool *pool;
pool              545 net/sunrpc/svc_xprt.c 	pool = &serv->sv_pools[0];
pool              548 net/sunrpc/svc_xprt.c 	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
pool              560 net/sunrpc/svc_xprt.c 	set_bit(SP_TASK_PENDING, &pool->sp_flags);
pool              678 net/sunrpc/svc_xprt.c 	struct svc_pool		*pool = rqstp->rq_pool;
pool              681 net/sunrpc/svc_xprt.c 	if (test_and_clear_bit(SP_TASK_PENDING, &pool->sp_flags))
pool              685 net/sunrpc/svc_xprt.c 	if (!list_empty(&pool->sp_sockets))
pool              701 net/sunrpc/svc_xprt.c 	struct svc_pool		*pool = rqstp->rq_pool;
pool              707 net/sunrpc/svc_xprt.c 	rqstp->rq_xprt = svc_xprt_dequeue(pool);
pool              717 net/sunrpc/svc_xprt.c 	clear_bit(SP_CONGESTED, &pool->sp_flags);
pool              730 net/sunrpc/svc_xprt.c 	rqstp->rq_xprt = svc_xprt_dequeue(pool);
pool              735 net/sunrpc/svc_xprt.c 		atomic_long_inc(&pool->sp_stats.threads_timedout);
pool              744 net/sunrpc/svc_xprt.c 	if (!test_bit(SP_CONGESTED, &pool->sp_flags))
pool             1080 net/sunrpc/svc_xprt.c 	struct svc_pool *pool;
pool             1086 net/sunrpc/svc_xprt.c 		pool = &serv->sv_pools[i];
pool             1088 net/sunrpc/svc_xprt.c 		spin_lock_bh(&pool->sp_lock);
pool             1089 net/sunrpc/svc_xprt.c 		list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) {
pool             1093 net/sunrpc/svc_xprt.c 			spin_unlock_bh(&pool->sp_lock);
pool             1096 net/sunrpc/svc_xprt.c 		spin_unlock_bh(&pool->sp_lock);
pool             1377 net/sunrpc/svc_xprt.c 	struct svc_pool *pool = p;
pool             1383 net/sunrpc/svc_xprt.c 		pool = &serv->sv_pools[0];
pool             1385 net/sunrpc/svc_xprt.c 		unsigned int pidx = (pool - &serv->sv_pools[0]);
pool             1387 net/sunrpc/svc_xprt.c 			pool = &serv->sv_pools[pidx+1];
pool             1389 net/sunrpc/svc_xprt.c 			pool = NULL;
pool             1392 net/sunrpc/svc_xprt.c 	return pool;
pool             1401 net/sunrpc/svc_xprt.c 	struct svc_pool *pool = p;
pool             1409 net/sunrpc/svc_xprt.c 		pool->sp_id,
pool             1410 net/sunrpc/svc_xprt.c 		(unsigned long)atomic_long_read(&pool->sp_stats.packets),
pool             1411 net/sunrpc/svc_xprt.c 		pool->sp_stats.sockets_queued,
pool             1412 net/sunrpc/svc_xprt.c 		(unsigned long)atomic_long_read(&pool->sp_stats.threads_woken),
pool             1413 net/sunrpc/svc_xprt.c 		(unsigned long)atomic_long_read(&pool->sp_stats.threads_timedout));
pool               65 sound/core/memalloc.c 	struct gen_pool *pool = NULL;
pool               71 sound/core/memalloc.c 		pool = of_gen_pool_get(dev->of_node, "iram", 0);
pool               73 sound/core/memalloc.c 	if (!pool)
pool               77 sound/core/memalloc.c 	dmab->private_data = pool;
pool               79 sound/core/memalloc.c 	dmab->area = gen_pool_dma_alloc(pool, size, &dmab->addr);
pool               88 sound/core/memalloc.c 	struct gen_pool *pool = dmab->private_data;
pool               90 sound/core/memalloc.c 	if (pool && dmab->area)
pool               91 sound/core/memalloc.c 		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
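sound/core/memalloc.c above carves its buffers out of a genalloc pool described by the device tree "iram" phandle. A small sketch of those calls, assuming a hypothetical example_iram_alloc() helper; freeing goes back through gen_pool_free() with the same size, as the listing shows:

    #include <linux/device.h>
    #include <linux/genalloc.h>
    #include <linux/of.h>

    /* Returns NULL if the device has no usable "iram" pool. */
    static void *example_iram_alloc(struct device *dev, size_t size,
                                    dma_addr_t *addr)
    {
        struct gen_pool *pool;

        if (!dev->of_node)
            return NULL;

        pool = of_gen_pool_get(dev->of_node, "iram", 0);
        if (!pool)
            return NULL;

        /* Returns a CPU pointer and fills *addr with the DMA address. */
        return gen_pool_dma_alloc(pool, size, addr);
    }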
pool               28 sound/core/seq/oss/seq_oss_writeq.c 	struct snd_seq_client_pool pool;
pool               39 sound/core/seq/oss/seq_oss_writeq.c 	memset(&pool, 0, sizeof(pool));
pool               40 sound/core/seq/oss/seq_oss_writeq.c 	pool.client = dp->cseq;
pool               41 sound/core/seq/oss/seq_oss_writeq.c 	pool.output_pool = maxlen;
pool               42 sound/core/seq/oss/seq_oss_writeq.c 	pool.output_room = maxlen / 2;
pool               44 sound/core/seq/oss/seq_oss_writeq.c 	snd_seq_oss_control(dp, SNDRV_SEQ_IOCTL_SET_CLIENT_POOL, &pool);
pool              140 sound/core/seq/oss/seq_oss_writeq.c 	struct snd_seq_client_pool pool;
pool              141 sound/core/seq/oss/seq_oss_writeq.c 	pool.client = q->dp->cseq;
pool              142 sound/core/seq/oss/seq_oss_writeq.c 	snd_seq_oss_control(q->dp, SNDRV_SEQ_IOCTL_GET_CLIENT_POOL, &pool);
pool              143 sound/core/seq/oss/seq_oss_writeq.c 	return pool.output_free;
pool              153 sound/core/seq/oss/seq_oss_writeq.c 	struct snd_seq_client_pool pool;
pool              154 sound/core/seq/oss/seq_oss_writeq.c 	pool.client = q->dp->cseq;
pool              155 sound/core/seq/oss/seq_oss_writeq.c 	snd_seq_oss_control(q->dp, SNDRV_SEQ_IOCTL_GET_CLIENT_POOL, &pool);
pool              156 sound/core/seq/oss/seq_oss_writeq.c 	pool.output_room = val;
pool              157 sound/core/seq/oss/seq_oss_writeq.c 	snd_seq_oss_control(q->dp, SNDRV_SEQ_IOCTL_SET_CLIENT_POOL, &pool);
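The seq_oss_writeq.c excerpts size the client's output pool through SNDRV_SEQ_IOCTL_SET_CLIENT_POOL (pool size, then room = size / 2) and read output_free back through the GET ioctl. From userspace the same knobs are reachable through alsa-lib's wrappers; a minimal sketch, assuming snd_seq_set_client_pool_output() and snd_seq_set_client_pool_output_room(), with an illustrative pool size of 500 events.

#include <alsa/asoundlib.h>

int tune_output_pool(void)
{
	snd_seq_t *seq;
	int err;

	err = snd_seq_open(&seq, "default", SND_SEQ_OPEN_OUTPUT, 0);
	if (err < 0)
		return err;

	/* Follow the kernel-side pattern: pool size, then room = size / 2. */
	err = snd_seq_set_client_pool_output(seq, 500);
	if (err >= 0)
		err = snd_seq_set_client_pool_output_room(seq, 250);

	snd_seq_close(seq);
	return err;
}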
pool               89 sound/core/seq/seq_clientmgr.c 	return snd_seq_total_cells(client->pool) > 0;
pool              233 sound/core/seq/seq_clientmgr.c 	client->pool = snd_seq_pool_new(poolsize);
pool              234 sound/core/seq/seq_clientmgr.c 	if (client->pool == NULL) {
pool              265 sound/core/seq/seq_clientmgr.c 	snd_seq_pool_delete(&client->pool);
pool              283 sound/core/seq/seq_clientmgr.c 	if (client->pool)
pool              284 sound/core/seq/seq_clientmgr.c 		snd_seq_pool_delete(&client->pool);
pool              968 sound/core/seq/seq_clientmgr.c 	err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic,
pool             1034 sound/core/seq/seq_clientmgr.c 	if (!client->accept_output || client->pool == NULL)
pool             1041 sound/core/seq/seq_clientmgr.c 	if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
pool             1042 sound/core/seq/seq_clientmgr.c 		err = snd_seq_pool_init(client->pool);
pool             1143 sound/core/seq/seq_clientmgr.c 		    snd_seq_pool_poll_wait(client->pool, file, wait))
pool             1831 sound/core/seq/seq_clientmgr.c 	info->output_pool = cptr->pool->size;
pool             1832 sound/core/seq/seq_clientmgr.c 	info->output_room = cptr->pool->room;
pool             1834 sound/core/seq/seq_clientmgr.c 	info->output_free = snd_seq_unused_cells(cptr->pool);
pool             1860 sound/core/seq/seq_clientmgr.c 	     info->output_pool != client->pool->size)) {
pool             1863 sound/core/seq/seq_clientmgr.c 			if (atomic_read(&client->pool->counter))
pool             1866 sound/core/seq/seq_clientmgr.c 			snd_seq_pool_mark_closing(client->pool);
pool             1867 sound/core/seq/seq_clientmgr.c 			snd_seq_pool_done(client->pool);
pool             1869 sound/core/seq/seq_clientmgr.c 		client->pool->size = info->output_pool;
pool             1870 sound/core/seq/seq_clientmgr.c 		rc = snd_seq_pool_init(client->pool);
pool             1885 sound/core/seq/seq_clientmgr.c 	    info->output_room <= client->pool->size) {
pool             1886 sound/core/seq/seq_clientmgr.c 		client->pool->room  = info->output_room;
pool             2373 sound/core/seq/seq_clientmgr.c 	if (snd_seq_pool_poll_wait(client->pool, file, wait))
pool             2473 sound/core/seq/seq_clientmgr.c 			snd_seq_info_pool(buffer, client->pool, "    ");
pool             2476 sound/core/seq/seq_clientmgr.c 		    client->data.user.fifo->pool) {
pool             2478 sound/core/seq/seq_clientmgr.c 			snd_seq_info_pool(buffer, client->data.user.fifo->pool, "    ");
pool               53 sound/core/seq/seq_clientmgr.h 	struct snd_seq_pool *pool;		/* memory pool for this client */
pool               26 sound/core/seq/seq_fifo.c 	f->pool = snd_seq_pool_new(poolsize);
pool               27 sound/core/seq/seq_fifo.c 	if (f->pool == NULL) {
pool               31 sound/core/seq/seq_fifo.c 	if (snd_seq_pool_init(f->pool) < 0) {
pool               32 sound/core/seq/seq_fifo.c 		snd_seq_pool_delete(&f->pool);
pool               60 sound/core/seq/seq_fifo.c 	if (f->pool)
pool               61 sound/core/seq/seq_fifo.c 		snd_seq_pool_mark_closing(f->pool);
pool               72 sound/core/seq/seq_fifo.c 	if (f->pool) {
pool               73 sound/core/seq/seq_fifo.c 		snd_seq_pool_done(f->pool);
pool               74 sound/core/seq/seq_fifo.c 		snd_seq_pool_delete(&f->pool);
pool              112 sound/core/seq/seq_fifo.c 	err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */
pool              229 sound/core/seq/seq_fifo.c 	if (snd_BUG_ON(!f || !f->pool))
pool              243 sound/core/seq/seq_fifo.c 	oldpool = f->pool;
pool              246 sound/core/seq/seq_fifo.c 	f->pool = newpool;
pool              278 sound/core/seq/seq_fifo.c 	cells = snd_seq_unused_cells(f->pool);
pool               16 sound/core/seq/seq_fifo.h 	struct snd_seq_pool *pool;		/* FIFO pool */
pool               22 sound/core/seq/seq_memory.c static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
pool               24 sound/core/seq/seq_memory.c 	return pool->total_elements - atomic_read(&pool->counter);
pool               27 sound/core/seq/seq_memory.c static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
pool               29 sound/core/seq/seq_memory.c 	return snd_seq_pool_available(pool) >= pool->room;
pool              163 sound/core/seq/seq_memory.c static inline void free_cell(struct snd_seq_pool *pool,
pool              166 sound/core/seq/seq_memory.c 	cell->next = pool->free;
pool              167 sound/core/seq/seq_memory.c 	pool->free = cell;
pool              168 sound/core/seq/seq_memory.c 	atomic_dec(&pool->counter);
pool              174 sound/core/seq/seq_memory.c 	struct snd_seq_pool *pool;
pool              178 sound/core/seq/seq_memory.c 	pool = cell->pool;
pool              179 sound/core/seq/seq_memory.c 	if (snd_BUG_ON(!pool))
pool              182 sound/core/seq/seq_memory.c 	spin_lock_irqsave(&pool->lock, flags);
pool              183 sound/core/seq/seq_memory.c 	free_cell(pool, cell);
pool              190 sound/core/seq/seq_memory.c 				curp->next = pool->free;
pool              191 sound/core/seq/seq_memory.c 				free_cell(pool, curp);
pool              195 sound/core/seq/seq_memory.c 	if (waitqueue_active(&pool->output_sleep)) {
pool              197 sound/core/seq/seq_memory.c 		if (snd_seq_output_ok(pool))
pool              198 sound/core/seq/seq_memory.c 			wake_up(&pool->output_sleep);
pool              200 sound/core/seq/seq_memory.c 	spin_unlock_irqrestore(&pool->lock, flags);
pool              207 sound/core/seq/seq_memory.c static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
pool              217 sound/core/seq/seq_memory.c 	if (pool == NULL)
pool              223 sound/core/seq/seq_memory.c 	spin_lock_irqsave(&pool->lock, flags);
pool              224 sound/core/seq/seq_memory.c 	if (pool->ptr == NULL) {	/* not initialized */
pool              229 sound/core/seq/seq_memory.c 	while (pool->free == NULL && ! nonblock && ! pool->closing) {
pool              232 sound/core/seq/seq_memory.c 		add_wait_queue(&pool->output_sleep, &wait);
pool              233 sound/core/seq/seq_memory.c 		spin_unlock_irqrestore(&pool->lock, flags);
pool              239 sound/core/seq/seq_memory.c 		spin_lock_irqsave(&pool->lock, flags);
pool              240 sound/core/seq/seq_memory.c 		remove_wait_queue(&pool->output_sleep, &wait);
pool              247 sound/core/seq/seq_memory.c 	if (pool->closing) { /* closing.. */
pool              252 sound/core/seq/seq_memory.c 	cell = pool->free;
pool              255 sound/core/seq/seq_memory.c 		pool->free = cell->next;
pool              256 sound/core/seq/seq_memory.c 		atomic_inc(&pool->counter);
pool              257 sound/core/seq/seq_memory.c 		used = atomic_read(&pool->counter);
pool              258 sound/core/seq/seq_memory.c 		if (pool->max_used < used)
pool              259 sound/core/seq/seq_memory.c 			pool->max_used = used;
pool              260 sound/core/seq/seq_memory.c 		pool->event_alloc_success++;
pool              265 sound/core/seq/seq_memory.c 		pool->event_alloc_failures++;
pool              269 sound/core/seq/seq_memory.c 	spin_unlock_irqrestore(&pool->lock, flags);
pool              279 sound/core/seq/seq_memory.c int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
pool              295 sound/core/seq/seq_memory.c 	if (ncells >= pool->total_elements)
pool              298 sound/core/seq/seq_memory.c 	err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp);
pool              324 sound/core/seq/seq_memory.c 			err = snd_seq_cell_alloc(pool, &tmp, nonblock, file,
pool              360 sound/core/seq/seq_memory.c int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file,
pool              363 sound/core/seq/seq_memory.c 	poll_wait(file, &pool->output_sleep, wait);
pool              364 sound/core/seq/seq_memory.c 	return snd_seq_output_ok(pool);
pool              369 sound/core/seq/seq_memory.c int snd_seq_pool_init(struct snd_seq_pool *pool)
pool              374 sound/core/seq/seq_memory.c 	if (snd_BUG_ON(!pool))
pool              377 sound/core/seq/seq_memory.c 	cellptr = kvmalloc_array(sizeof(struct snd_seq_event_cell), pool->size,
pool              383 sound/core/seq/seq_memory.c 	spin_lock_irq(&pool->lock);
pool              384 sound/core/seq/seq_memory.c 	if (pool->ptr) {
pool              385 sound/core/seq/seq_memory.c 		spin_unlock_irq(&pool->lock);
pool              390 sound/core/seq/seq_memory.c 	pool->ptr = cellptr;
pool              391 sound/core/seq/seq_memory.c 	pool->free = NULL;
pool              393 sound/core/seq/seq_memory.c 	for (cell = 0; cell < pool->size; cell++) {
pool              394 sound/core/seq/seq_memory.c 		cellptr = pool->ptr + cell;
pool              395 sound/core/seq/seq_memory.c 		cellptr->pool = pool;
pool              396 sound/core/seq/seq_memory.c 		cellptr->next = pool->free;
pool              397 sound/core/seq/seq_memory.c 		pool->free = cellptr;
pool              399 sound/core/seq/seq_memory.c 	pool->room = (pool->size + 1) / 2;
pool              402 sound/core/seq/seq_memory.c 	pool->max_used = 0;
pool              403 sound/core/seq/seq_memory.c 	pool->total_elements = pool->size;
pool              404 sound/core/seq/seq_memory.c 	spin_unlock_irq(&pool->lock);
pool              409 sound/core/seq/seq_memory.c void snd_seq_pool_mark_closing(struct snd_seq_pool *pool)
pool              413 sound/core/seq/seq_memory.c 	if (snd_BUG_ON(!pool))
pool              415 sound/core/seq/seq_memory.c 	spin_lock_irqsave(&pool->lock, flags);
pool              416 sound/core/seq/seq_memory.c 	pool->closing = 1;
pool              417 sound/core/seq/seq_memory.c 	spin_unlock_irqrestore(&pool->lock, flags);
pool              421 sound/core/seq/seq_memory.c int snd_seq_pool_done(struct snd_seq_pool *pool)
pool              425 sound/core/seq/seq_memory.c 	if (snd_BUG_ON(!pool))
pool              429 sound/core/seq/seq_memory.c 	if (waitqueue_active(&pool->output_sleep))
pool              430 sound/core/seq/seq_memory.c 		wake_up(&pool->output_sleep);
pool              432 sound/core/seq/seq_memory.c 	while (atomic_read(&pool->counter) > 0)
pool              436 sound/core/seq/seq_memory.c 	spin_lock_irq(&pool->lock);
pool              437 sound/core/seq/seq_memory.c 	ptr = pool->ptr;
pool              438 sound/core/seq/seq_memory.c 	pool->ptr = NULL;
pool              439 sound/core/seq/seq_memory.c 	pool->free = NULL;
pool              440 sound/core/seq/seq_memory.c 	pool->total_elements = 0;
pool              441 sound/core/seq/seq_memory.c 	spin_unlock_irq(&pool->lock);
pool              445 sound/core/seq/seq_memory.c 	spin_lock_irq(&pool->lock);
pool              446 sound/core/seq/seq_memory.c 	pool->closing = 0;
pool              447 sound/core/seq/seq_memory.c 	spin_unlock_irq(&pool->lock);
pool              456 sound/core/seq/seq_memory.c 	struct snd_seq_pool *pool;
pool              459 sound/core/seq/seq_memory.c 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
pool              460 sound/core/seq/seq_memory.c 	if (!pool)
pool              462 sound/core/seq/seq_memory.c 	spin_lock_init(&pool->lock);
pool              463 sound/core/seq/seq_memory.c 	pool->ptr = NULL;
pool              464 sound/core/seq/seq_memory.c 	pool->free = NULL;
pool              465 sound/core/seq/seq_memory.c 	pool->total_elements = 0;
pool              466 sound/core/seq/seq_memory.c 	atomic_set(&pool->counter, 0);
pool              467 sound/core/seq/seq_memory.c 	pool->closing = 0;
pool              468 sound/core/seq/seq_memory.c 	init_waitqueue_head(&pool->output_sleep);
pool              470 sound/core/seq/seq_memory.c 	pool->size = poolsize;
pool              473 sound/core/seq/seq_memory.c 	pool->max_used = 0;
pool              474 sound/core/seq/seq_memory.c 	return pool;
pool              480 sound/core/seq/seq_memory.c 	struct snd_seq_pool *pool = *ppool;
pool              483 sound/core/seq/seq_memory.c 	if (pool == NULL)
pool              485 sound/core/seq/seq_memory.c 	snd_seq_pool_mark_closing(pool);
pool              486 sound/core/seq/seq_memory.c 	snd_seq_pool_done(pool);
pool              487 sound/core/seq/seq_memory.c 	kfree(pool);
pool              493 sound/core/seq/seq_memory.c 		       struct snd_seq_pool *pool, char *space)
pool              495 sound/core/seq/seq_memory.c 	if (pool == NULL)
pool              497 sound/core/seq/seq_memory.c 	snd_iprintf(buffer, "%sPool size          : %d\n", space, pool->total_elements);
pool              498 sound/core/seq/seq_memory.c 	snd_iprintf(buffer, "%sCells in use       : %d\n", space, atomic_read(&pool->counter));
pool              499 sound/core/seq/seq_memory.c 	snd_iprintf(buffer, "%sPeak cells in use  : %d\n", space, pool->max_used);
pool              500 sound/core/seq/seq_memory.c 	snd_iprintf(buffer, "%sAlloc success      : %d\n", space, pool->event_alloc_success);
pool              501 sound/core/seq/seq_memory.c 	snd_iprintf(buffer, "%sAlloc failures     : %d\n", space, pool->event_alloc_failures);
pool               17 sound/core/seq/seq_memory.h 	struct snd_seq_pool *pool;				/* used pool */
pool               53 sound/core/seq/seq_memory.h int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
pool               58 sound/core/seq/seq_memory.h static inline int snd_seq_unused_cells(struct snd_seq_pool *pool)
pool               60 sound/core/seq/seq_memory.h 	return pool ? pool->total_elements - atomic_read(&pool->counter) : 0;
pool               64 sound/core/seq/seq_memory.h static inline int snd_seq_total_cells(struct snd_seq_pool *pool)
pool               66 sound/core/seq/seq_memory.h 	return pool ? pool->total_elements : 0;
pool               70 sound/core/seq/seq_memory.h int snd_seq_pool_init(struct snd_seq_pool *pool);
pool               73 sound/core/seq/seq_memory.h void snd_seq_pool_mark_closing(struct snd_seq_pool *pool);
pool               74 sound/core/seq/seq_memory.h int snd_seq_pool_done(struct snd_seq_pool *pool);
pool               80 sound/core/seq/seq_memory.h int snd_seq_pool_delete(struct snd_seq_pool **pool);
pool               83 sound/core/seq/seq_memory.h int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file, poll_table *wait);
pool               86 sound/core/seq/seq_memory.h 		       struct snd_seq_pool *pool, char *space);
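Taken together, the seq_memory.c lines describe a fixed-size event-cell pool: one array allocation threaded onto a singly linked free list, a counter of cells in use, and a high-water mark, all manipulated under the pool lock. A condensed userspace analogue of that free-list scheme follows; a pthread mutex stands in for the kernel spinlock, and struct cell_pool and the function names are hypothetical.

#include <pthread.h>
#include <stdlib.h>

struct cell {
	struct cell *next;	/* payload would follow in a real event cell */
};

struct cell_pool {
	pthread_mutex_t lock;
	struct cell *ptr;	/* backing array */
	struct cell *free;	/* singly linked free list */
	int size, counter, max_used;
};

static int pool_init(struct cell_pool *p, int size)
{
	int i;

	p->ptr = calloc(size, sizeof(*p->ptr));
	if (!p->ptr)
		return -1;
	pthread_mutex_init(&p->lock, NULL);
	p->size = size;
	p->free = NULL;
	p->counter = 0;
	p->max_used = 0;
	for (i = 0; i < size; i++) {		/* thread every cell onto the free list */
		p->ptr[i].next = p->free;
		p->free = &p->ptr[i];
	}
	return 0;
}

static struct cell *cell_alloc(struct cell_pool *p)
{
	struct cell *c;

	pthread_mutex_lock(&p->lock);
	c = p->free;
	if (c) {
		p->free = c->next;
		if (++p->counter > p->max_used)	/* track the high-water mark */
			p->max_used = p->counter;
	}
	pthread_mutex_unlock(&p->lock);
	return c;				/* NULL when the pool is exhausted */
}

static void cell_free(struct cell_pool *p, struct cell *c)
{
	pthread_mutex_lock(&p->lock);
	c->next = p->free;			/* push back onto the free list */
	p->free = c;
	p->counter--;
	pthread_mutex_unlock(&p->lock);
}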
pool              121 tools/hv/hv_kvp_daemon.c static void kvp_acquire_lock(int pool)
pool              126 tools/hv/hv_kvp_daemon.c 	if (fcntl(kvp_file_info[pool].fd, F_SETLKW, &fl) == -1) {
pool              127 tools/hv/hv_kvp_daemon.c 		syslog(LOG_ERR, "Failed to acquire the lock pool: %d; error: %d %s", pool,
pool              133 tools/hv/hv_kvp_daemon.c static void kvp_release_lock(int pool)
pool              138 tools/hv/hv_kvp_daemon.c 	if (fcntl(kvp_file_info[pool].fd, F_SETLK, &fl) == -1) {
pool              139 tools/hv/hv_kvp_daemon.c 		syslog(LOG_ERR, "Failed to release the lock pool: %d; error: %d %s", pool,
pool              145 tools/hv/hv_kvp_daemon.c static void kvp_update_file(int pool)
pool              153 tools/hv/hv_kvp_daemon.c 	kvp_acquire_lock(pool);
pool              155 tools/hv/hv_kvp_daemon.c 	filep = fopen(kvp_file_info[pool].fname, "we");
pool              157 tools/hv/hv_kvp_daemon.c 		syslog(LOG_ERR, "Failed to open file, pool: %d; error: %d %s", pool,
pool              159 tools/hv/hv_kvp_daemon.c 		kvp_release_lock(pool);
pool              163 tools/hv/hv_kvp_daemon.c 	fwrite(kvp_file_info[pool].records, sizeof(struct kvp_record),
pool              164 tools/hv/hv_kvp_daemon.c 				kvp_file_info[pool].num_records, filep);
pool              167 tools/hv/hv_kvp_daemon.c 		kvp_release_lock(pool);
pool              168 tools/hv/hv_kvp_daemon.c 		syslog(LOG_ERR, "Failed to write file, pool: %d", pool);
pool              172 tools/hv/hv_kvp_daemon.c 	kvp_release_lock(pool);
pool              175 tools/hv/hv_kvp_daemon.c static void kvp_update_mem_state(int pool)
pool              179 tools/hv/hv_kvp_daemon.c 	struct kvp_record *record = kvp_file_info[pool].records;
pool              181 tools/hv/hv_kvp_daemon.c 	int num_blocks = kvp_file_info[pool].num_blocks;
pool              184 tools/hv/hv_kvp_daemon.c 	kvp_acquire_lock(pool);
pool              186 tools/hv/hv_kvp_daemon.c 	filep = fopen(kvp_file_info[pool].fname, "re");
pool              188 tools/hv/hv_kvp_daemon.c 		syslog(LOG_ERR, "Failed to open file, pool: %d; error: %d %s", pool,
pool              190 tools/hv/hv_kvp_daemon.c 		kvp_release_lock(pool);
pool              202 tools/hv/hv_kvp_daemon.c 				 pool, errno, strerror(errno));
pool              203 tools/hv/hv_kvp_daemon.c 			kvp_release_lock(pool);
pool              216 tools/hv/hv_kvp_daemon.c 				kvp_release_lock(pool);
pool              224 tools/hv/hv_kvp_daemon.c 	kvp_file_info[pool].num_blocks = num_blocks;
pool              225 tools/hv/hv_kvp_daemon.c 	kvp_file_info[pool].records = record;
pool              226 tools/hv/hv_kvp_daemon.c 	kvp_file_info[pool].num_records = records_read;
pool              229 tools/hv/hv_kvp_daemon.c 	kvp_release_lock(pool);
pool              267 tools/hv/hv_kvp_daemon.c static int kvp_key_delete(int pool, const __u8 *key, int key_size)
pool              277 tools/hv/hv_kvp_daemon.c 	kvp_update_mem_state(pool);
pool              279 tools/hv/hv_kvp_daemon.c 	num_records = kvp_file_info[pool].num_records;
pool              280 tools/hv/hv_kvp_daemon.c 	record = kvp_file_info[pool].records;
pool              290 tools/hv/hv_kvp_daemon.c 			kvp_file_info[pool].num_records--;
pool              291 tools/hv/hv_kvp_daemon.c 			kvp_update_file(pool);
pool              303 tools/hv/hv_kvp_daemon.c 		kvp_file_info[pool].num_records--;
pool              304 tools/hv/hv_kvp_daemon.c 		kvp_update_file(pool);
pool              310 tools/hv/hv_kvp_daemon.c static int kvp_key_add_or_modify(int pool, const __u8 *key, int key_size,
pool              325 tools/hv/hv_kvp_daemon.c 	kvp_update_mem_state(pool);
pool              327 tools/hv/hv_kvp_daemon.c 	num_records = kvp_file_info[pool].num_records;
pool              328 tools/hv/hv_kvp_daemon.c 	record = kvp_file_info[pool].records;
pool              329 tools/hv/hv_kvp_daemon.c 	num_blocks = kvp_file_info[pool].num_blocks;
pool              339 tools/hv/hv_kvp_daemon.c 		kvp_update_file(pool);
pool              353 tools/hv/hv_kvp_daemon.c 		kvp_file_info[pool].num_blocks++;
pool              358 tools/hv/hv_kvp_daemon.c 	kvp_file_info[pool].records = record;
pool              359 tools/hv/hv_kvp_daemon.c 	kvp_file_info[pool].num_records++;
pool              360 tools/hv/hv_kvp_daemon.c 	kvp_update_file(pool);
pool              364 tools/hv/hv_kvp_daemon.c static int kvp_get_value(int pool, const __u8 *key, int key_size, __u8 *value,
pool              378 tools/hv/hv_kvp_daemon.c 	kvp_update_mem_state(pool);
pool              380 tools/hv/hv_kvp_daemon.c 	num_records = kvp_file_info[pool].num_records;
pool              381 tools/hv/hv_kvp_daemon.c 	record = kvp_file_info[pool].records;
pool              396 tools/hv/hv_kvp_daemon.c static int kvp_pool_enumerate(int pool, int index, __u8 *key, int key_size,
pool              404 tools/hv/hv_kvp_daemon.c 	kvp_update_mem_state(pool);
pool              405 tools/hv/hv_kvp_daemon.c 	record = kvp_file_info[pool].records;
pool              407 tools/hv/hv_kvp_daemon.c 	if (index >= kvp_file_info[pool].num_records) {
pool             1371 tools/hv/hv_kvp_daemon.c 	int	pool;
pool             1470 tools/hv/hv_kvp_daemon.c 		pool = hv_msg->kvp_hdr.pool;
pool             1522 tools/hv/hv_kvp_daemon.c 			if (kvp_key_add_or_modify(pool,
pool             1531 tools/hv/hv_kvp_daemon.c 			if (kvp_get_value(pool,
pool             1540 tools/hv/hv_kvp_daemon.c 			if (kvp_key_delete(pool,
pool             1558 tools/hv/hv_kvp_daemon.c 		if (pool != KVP_POOL_AUTO) {
pool             1559 tools/hv/hv_kvp_daemon.c 			if (kvp_pool_enumerate(pool,
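kvp_acquire_lock() and kvp_release_lock() in the hv_kvp_daemon.c excerpts serialize access to each pool's backing file with POSIX record locks: F_SETLKW blocks until the write lock is granted, F_SETLK drops it again. A standalone sketch of that pairing; the path and helper name are made up for illustration.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Take (type = F_WRLCK) or drop (type = F_UNLCK) a lock on the whole file. */
static int lock_whole_file(int fd, short type, int wait)
{
	struct flock fl;

	memset(&fl, 0, sizeof(fl));
	fl.l_type = type;
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 0;			/* length 0 covers the entire file */

	return fcntl(fd, wait ? F_SETLKW : F_SETLK, &fl);
}

int main(void)
{
	int fd = open("/tmp/example.pool", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || lock_whole_file(fd, F_WRLCK, 1) == -1) {	/* blocking acquire */
		perror("lock");
		return 1;
	}
	/* ... read or rewrite the pool's records while the lock is held ... */
	lock_whole_file(fd, F_UNLCK, 0);			/* release */
	close(fd);
	return 0;
}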
pool              143 tools/usb/usbip/libsrc/names.c 	struct pool *next;
pool              147 tools/usb/usbip/libsrc/names.c static struct pool *pool_head;
pool              151 tools/usb/usbip/libsrc/names.c 	struct pool *p;
pool              153 tools/usb/usbip/libsrc/names.c 	p = calloc(1, sizeof(struct pool));
pool              171 tools/usb/usbip/libsrc/names.c 	struct pool *pool;
pool              176 tools/usb/usbip/libsrc/names.c 	for (pool = pool_head; pool != NULL; ) {
pool              177 tools/usb/usbip/libsrc/names.c 		struct pool *tmp;
pool              179 tools/usb/usbip/libsrc/names.c 		if (pool->mem)
pool              180 tools/usb/usbip/libsrc/names.c 			free(pool->mem);
pool              182 tools/usb/usbip/libsrc/names.c 		tmp = pool;
pool              183 tools/usb/usbip/libsrc/names.c 		pool = pool->next;
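The names.c tail is the classic "remember every allocation, free them in one sweep" pool: each calloc'd node is pushed onto pool_head, and names_free() later walks the list releasing both the stored memory and the node itself. A small self-contained sketch of the same pattern; pool_alloc() and pool_free_all() are illustrative names, not the usbip API.

#include <stdlib.h>
#include <string.h>

struct pool {
	struct pool *next;
	void *mem;
};

static struct pool *pool_head;

/* Copy a buffer into freshly allocated, tracked memory. */
static void *pool_alloc(const void *src, size_t len)
{
	struct pool *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	p->mem = malloc(len);
	if (!p->mem) {
		free(p);
		return NULL;
	}
	memcpy(p->mem, src, len);
	p->next = pool_head;		/* push onto the global list */
	pool_head = p;
	return p->mem;
}

/* Release every tracked allocation in one pass, mirroring names_free(). */
static void pool_free_all(void)
{
	struct pool *p, *tmp;

	for (p = pool_head; p != NULL; ) {
		if (p->mem)
			free(p->mem);
		tmp = p;
		p = p->next;
		free(tmp);
	}
	pool_head = NULL;
}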