Lines matching references to the identifier "ac" (struct array_cache) in the Linux SLAB allocator, mm/slab.c. Each entry gives the source line number, the matching line, and the enclosing definition, plus the role "ac" plays there (member, argument, or local).
207 struct array_cache ac; member
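The line-207 member sits inside struct alien_cache, which pairs a remote-node array with its own lock; the entries below reach it as alc->ac (lines 877, 953, 978, 999) under alc->lock (line 954). For reference, its definition in mm/slab.c of this era, reconstructed from memory and best treated as approximate:

    struct alien_cache {
        spinlock_t lock;        /* taken when remote CPUs drain this array */
        struct array_cache ac;  /* the line-207 member matched above */
    };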
660 static void init_arraycache(struct array_cache *ac, int limit, int batch) in init_arraycache() argument
669 kmemleak_no_scan(ac); in init_arraycache()
670 if (ac) { in init_arraycache()
671 ac->avail = 0; in init_arraycache()
672 ac->limit = limit; in init_arraycache()
673 ac->batchcount = batch; in init_arraycache()
674 ac->touched = 0; in init_arraycache()
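For orientation, here is a user-space model of the structure these lines initialize. The kmemleak_no_scan() call at line 669 is kernel-specific (it stops kmemleak from treating the cached pointers as live references, and tolerates a NULL argument, which is why it precedes the check at line 670); it is omitted here, while the rest mirrors lines 670-674. Later sketches in this listing build on these definitions:

    #include <stdlib.h>
    #include <string.h>

    /* Model of the kernel's struct array_cache: a per-CPU LIFO stack of
     * pointers to free objects, sized at allocation time. */
    struct array_cache {
        unsigned int avail;      /* how many entries are currently stacked */
        unsigned int limit;      /* capacity of entry[] */
        unsigned int batchcount; /* objects moved per refill or flush */
        unsigned int touched;    /* set on use, aged by the cache reaper */
        void *entry[];           /* flexible array of cached objects */
    };

    static void init_arraycache(struct array_cache *ac, int limit, int batch)
    {
        if (ac) {                /* tolerate a failed allocation upstream */
            ac->avail = 0;
            ac->limit = limit;
            ac->batchcount = batch;
            ac->touched = 0;
        }
    }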
682 struct array_cache *ac = NULL; in alloc_arraycache() local
684 ac = kmalloc_node(memsize, gfp, node); in alloc_arraycache()
685 init_arraycache(ac, entries, batchcount); in alloc_arraycache()
686 return ac; in alloc_arraycache()
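The memsize computation at line 683 does not mention "ac", so the search skips it. A user-space equivalent of alloc_arraycache(), with malloc() standing in for kmalloc_node() (which additionally takes a gfp mask and a NUMA node), would be:

    /* The header and the entry[] pointers come from a single allocation. */
    static struct array_cache *alloc_arraycache(int entries, int batchcount)
    {
        size_t memsize = sizeof(struct array_cache) +
                         sizeof(void *) * entries;
        struct array_cache *ac = malloc(memsize);

        init_arraycache(ac, entries, batchcount);
        return ac;
    }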
696 struct array_cache *ac) in recheck_pfmemalloc_active() argument
723 static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac, in __ac_get_obj() argument
727 void *objp = ac->entry[--ac->avail]; in __ac_get_obj()
739 for (i = 0; i < ac->avail; i++) { in __ac_get_obj()
741 if (!is_obj_pfmemalloc(ac->entry[i])) { in __ac_get_obj()
742 objp = ac->entry[i]; in __ac_get_obj()
743 ac->entry[i] = ac->entry[ac->avail]; in __ac_get_obj()
744 ac->entry[ac->avail] = objp; in __ac_get_obj()
758 recheck_pfmemalloc_active(cachep, ac); in __ac_get_obj()
763 ac->avail++; in __ac_get_obj()
771 struct array_cache *ac, gfp_t flags, bool force_refill) in ac_get_obj() argument
776 objp = __ac_get_obj(cachep, ac, flags, force_refill); in ac_get_obj()
778 objp = ac->entry[--ac->avail]; in ac_get_obj()
784 struct array_cache *ac, void *objp) in __ac_put_obj() argument
796 static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac, in ac_put_obj() argument
800 objp = __ac_put_obj(cachep, ac, objp); in ac_put_obj()
802 ac->entry[ac->avail++] = objp; in ac_put_obj()
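Lines 723-763 and 784-802 are the hot-path pop and push, complicated by __GFP_MEMALLOC reserves: objects from pfmemalloc slab pages are tagged (in this era, by setting the low pointer bit) so callers not entitled to the emergency reserves do not consume them. A condensed user-space model, with the tag-bit scheme stated as an assumption:

    #define SLAB_OBJ_PFMEMALLOC 1UL /* assumed tag: low bit marks reserve objects */

    static int is_obj_pfmemalloc(void *objp)
    {
        return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC;
    }

    /* Model of ac_get_obj()/__ac_get_obj(): pop LIFO; if the popped object
     * came from the reserves and the caller may not use them, swap in a
     * normal object from deeper in the array (lines 739-744). */
    static void *ac_get_obj_model(struct array_cache *ac, int may_use_reserves)
    {
        void *objp = ac->entry[--ac->avail];
        unsigned int i;

        if (!is_obj_pfmemalloc(objp))
            return objp;
        if (may_use_reserves)
            return (void *)((unsigned long)objp & ~SLAB_OBJ_PFMEMALLOC);

        for (i = 0; i < ac->avail; i++) {
            if (!is_obj_pfmemalloc(ac->entry[i])) {
                objp = ac->entry[i];
                /* the just-vacated slot still holds the reserve object,
                 * so this exchange keeps it cached for an entitled caller */
                ac->entry[i] = ac->entry[ac->avail];
                ac->entry[ac->avail] = objp;
                return objp;
            }
        }
        ac->avail++;  /* un-pop: only reserve objects left (line 763) */
        return NULL;  /* caller refills; line 758's recheck is omitted */
    }

    /* Model of ac_put_obj() (line 802); __ac_put_obj() would first tag
     * objp here if its slab page is a reserve page. */
    static void ac_put_obj_model(struct array_cache *ac, void *objp)
    {
        ac->entry[ac->avail++] = objp; /* caller ensured avail < limit */
    }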
877 init_arraycache(&alc->ac, entries, batch); in __alloc_alien_cache()
920 struct array_cache *ac, int node, in __drain_alien_cache() argument
925 if (ac->avail) { in __drain_alien_cache()
933 transfer_objects(n->shared, ac, ac->limit); in __drain_alien_cache()
935 free_block(cachep, ac->entry, ac->avail, node, list); in __drain_alien_cache()
936 ac->avail = 0; in __drain_alien_cache()
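transfer_objects() at lines 933 and 2797 moves a bounded batch of pointers between two arrays; only its call sites match the search, so here is a model of the helper itself, consistent with how it is used above:

    /* Move up to 'max' cached pointers from 'from' into 'to', bounded by
     * what 'from' holds and the space left in 'to'. Used to drain an alien
     * array into a node's shared array (line 933) and to refill a CPU
     * array from the shared array (line 2797). */
    static int transfer_objects(struct array_cache *to,
                                struct array_cache *from, unsigned int max)
    {
        unsigned int nr = from->avail;

        if (nr > max)
            nr = max;
        if (nr > to->limit - to->avail)
            nr = to->limit - to->avail;
        if (!nr)
            return 0;

        memcpy(to->entry + to->avail, from->entry + from->avail - nr,
               sizeof(void *) * nr);
        from->avail -= nr;
        to->avail += nr;
        return nr;
    }

Whatever does not fit in the remote node's shared array is handed to free_block() and the alien array is emptied (lines 935-936). Note that reap_alien() at line 954 uses spin_trylock_irq(), so the periodic reaper never blocks on a contended alien lock.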
950 struct array_cache *ac; in reap_alien() local
953 ac = &alc->ac; in reap_alien()
954 if (ac->avail && spin_trylock_irq(&alc->lock)) { in reap_alien()
957 __drain_alien_cache(cachep, ac, node, &list); in reap_alien()
970 struct array_cache *ac; in drain_alien_cache() local
978 ac = &alc->ac; in drain_alien_cache()
980 __drain_alien_cache(cachep, ac, i, &list); in drain_alien_cache()
992 struct array_cache *ac; in __cache_free_alien() local
999 ac = &alien->ac; in __cache_free_alien()
1001 if (unlikely(ac->avail == ac->limit)) { in __cache_free_alien()
1003 __drain_alien_cache(cachep, ac, page_node, &list); in __cache_free_alien()
1005 ac_put_obj(cachep, ac, objp); in __cache_free_alien()
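Lines 992-1005 handle freeing an object on a CPU that is not on the object's home NUMA node: it goes into that node's alien array, which is drained wholesale once full. A sketch of the control flow, with drain_to_home_node() as a hypothetical stand-in for __drain_alien_cache():

    /* Hypothetical stand-in: the kernel returns every cached entry to the
     * slab lists of the object's home node via free_block(). */
    static void drain_to_home_node(struct array_cache *ac)
    {
        ac->avail = 0;
    }

    /* Model of __cache_free_alien() (lines 1001-1005). */
    static void cache_free_alien_model(struct array_cache *ac, void *objp)
    {
        if (ac->avail == ac->limit)    /* full: drain before stashing */
            drain_to_home_node(ac);
        ac->entry[ac->avail++] = objp; /* ac_put_obj(), line 1005 */
    }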
2329 struct array_cache *ac,
2335 struct array_cache *ac; in do_drain() local
2341 ac = cpu_cache_get(cachep); in do_drain()
2344 free_block(cachep, ac->entry, ac->avail, node, &list); in do_drain()
2347 ac->avail = 0; in do_drain()
2773 struct array_cache *ac; in cache_alloc_refill() local
2781 ac = cpu_cache_get(cachep); in cache_alloc_refill()
2782 batchcount = ac->batchcount; in cache_alloc_refill()
2783 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { in cache_alloc_refill()
2793 BUG_ON(ac->avail > 0 || !n); in cache_alloc_refill()
2797 if (n->shared && transfer_objects(ac, n->shared, batchcount)) { in cache_alloc_refill()
2829 ac_put_obj(cachep, ac, slab_get_obj(cachep, page, in cache_alloc_refill()
2842 n->free_objects -= ac->avail; in cache_alloc_refill()
2846 if (unlikely(!ac->avail)) { in cache_alloc_refill()
2852 ac = cpu_cache_get(cachep); in cache_alloc_refill()
2856 if (!x && (ac->avail == 0 || force_refill)) in cache_alloc_refill()
2859 if (!ac->avail) /* objects refilled by interrupt? */ in cache_alloc_refill()
2862 ac->touched = 1; in cache_alloc_refill()
2864 return ac_get_obj(cachep, ac, flags, force_refill); in cache_alloc_refill()
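cache_alloc_refill() (lines 2773-2864) is the slow path: with the per-CPU array empty, it pulls up to batchcount objects from the node's shared array or its partial/free slabs, then retries the pop. A condensed model; take_object_from_node_slabs() is a hypothetical stub for the slab-list walk, and BATCHREFILL_LIMIT mirrors the kernel's constant:

    #define BATCHREFILL_LIMIT 16 /* same cap as mm/slab.c */

    /* Hypothetical stub: the kernel walks the node's slabs_partial and
     * slabs_free lists under n->list_lock (slab_get_obj(), line 2829). */
    static void *take_object_from_node_slabs(void)
    {
        return NULL;
    }

    /* Condensed model of cache_alloc_refill(). */
    static void *cache_alloc_refill_model(struct array_cache *ac)
    {
        unsigned int batchcount = ac->batchcount;

        /* A cold (untouched) array is refilled conservatively, line 2783. */
        if (!ac->touched && batchcount > BATCHREFILL_LIMIT)
            batchcount = BATCHREFILL_LIMIT;

        while (ac->avail < batchcount) {
            void *objp = take_object_from_node_slabs();
            if (!objp)
                break; /* node exhausted: the kernel grows the cache here
                        * and retries (lines 2846-2859) */
            ac->entry[ac->avail++] = objp; /* ac_put_obj(), line 2829 */
        }

        if (!ac->avail)
            return NULL;

        ac->touched = 1;               /* line 2862 */
        return ac->entry[--ac->avail]; /* ac_get_obj(), line 2864 */
    }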
2940 struct array_cache *ac; in ____cache_alloc() local
2945 ac = cpu_cache_get(cachep); in ____cache_alloc()
2946 if (likely(ac->avail)) { in ____cache_alloc()
2947 ac->touched = 1; in ____cache_alloc()
2948 objp = ac_get_obj(cachep, ac, flags, false); in ____cache_alloc()
2967 ac = cpu_cache_get(cachep); in ____cache_alloc()
2976 kmemleak_erase(&ac->entry[ac->avail]); in ____cache_alloc()
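The fast path in ____cache_alloc() (lines 2945-2948) is the point of array caches: a hit costs a decrement and a load, with no locks taken. The kmemleak_erase() at line 2976 then wipes the now-stale array slot so kmemleak does not mistake it for a live reference. A model, reusing the refill sketch above:

    static void *cache_alloc_model(struct array_cache *ac)
    {
        if (ac->avail) {               /* hit: lock-free LIFO pop */
            ac->touched = 1;
            return ac->entry[--ac->avail];
        }
        return cache_alloc_refill_model(ac); /* miss: slow path above */
    }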
3318 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) in cache_flusharray() argument
3325 batchcount = ac->batchcount; in cache_flusharray()
3327 BUG_ON(!batchcount || batchcount > ac->avail); in cache_flusharray()
3339 ac->entry, sizeof(void *) * batchcount); in cache_flusharray()
3345 free_block(cachep, ac->entry, batchcount, node, &list); in cache_flusharray()
3367 ac->avail -= batchcount; in cache_flusharray()
3368 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); in cache_flusharray()
3378 struct array_cache *ac = cpu_cache_get(cachep); in __cache_free() local
3396 if (ac->avail < ac->limit) { in __cache_free()
3400 cache_flusharray(cachep, ac); in __cache_free()
3403 ac_put_obj(cachep, ac, objp); in __cache_free()
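Freeing (lines 3378-3403) mirrors allocation: the common case is a push into the per-CPU array, and only a full array takes the slow path, cache_flusharray(), which frees the oldest batchcount entries and slides the survivors down so the most recently freed (cache-hot) objects stay on top. A model of both:

    /* Model of cache_flusharray() (lines 3325-3368). The caller guarantees
     * batchcount <= avail (BUG_ON, line 3327). */
    static void cache_flusharray_model(struct array_cache *ac)
    {
        unsigned int batchcount = ac->batchcount;

        /* free_block() returns entry[0..batchcount) to the slab lists
         * (line 3345); then compact the array (lines 3367-3368). */
        ac->avail -= batchcount;
        memmove(ac->entry, &ac->entry[batchcount],
                sizeof(void *) * ac->avail);
    }

    /* Model of __cache_free() (lines 3396-3403). */
    static void cache_free_model(struct array_cache *ac, void *objp)
    {
        if (ac->avail >= ac->limit)    /* full: make room first */
            cache_flusharray_model(ac);
        ac->entry[ac->avail++] = objp; /* ac_put_obj(), line 3403 */
    }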
3713 struct array_cache *ac = per_cpu_ptr(prev, cpu); in __do_tune_cpucache() local
3718 free_block(cachep, ac->entry, ac->avail, node, &list); in __do_tune_cpucache()
3824 struct array_cache *ac, int force, int node) in drain_array() argument
3829 if (!ac || !ac->avail) in drain_array()
3831 if (ac->touched && !force) { in drain_array()
3832 ac->touched = 0; in drain_array()
3835 if (ac->avail) { in drain_array()
3836 tofree = force ? ac->avail : (ac->limit + 4) / 5; in drain_array()
3837 if (tofree > ac->avail) in drain_array()
3838 tofree = (ac->avail + 1) / 2; in drain_array()
3839 free_block(cachep, ac->entry, tofree, node, &list); in drain_array()
3840 ac->avail -= tofree; in drain_array()
3841 memmove(ac->entry, &(ac->entry[tofree]), in drain_array()
3842 sizeof(void *) * ac->avail); in drain_array()
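The reaper's aging policy in drain_array() (lines 3829-3842) frees nothing from an array used since the last pass; an idle array gives up roughly a fifth of its capacity per pass, capped at about half of what it actually holds. For example, with limit 120 an idle array holding only 10 objects frees (10 + 1) / 2 = 5, not the nominal (120 + 4) / 5 = 24. A direct model:

    /* Model of drain_array(); 'force' drains everything (cache shrink). */
    static void drain_array_model(struct array_cache *ac, int force)
    {
        unsigned int tofree;

        if (!ac || !ac->avail)
            return;
        if (ac->touched && !force) {   /* recently used: just age it */
            ac->touched = 0;
            return;
        }
        tofree = force ? ac->avail : (ac->limit + 4) / 5; /* ~20% of capacity */
        if (tofree > ac->avail)
            tofree = (ac->avail + 1) / 2; /* at most ~half of what it holds */
        /* free_block() on entry[0..tofree), line 3839, then compact: */
        ac->avail -= tofree;
        memmove(ac->entry, &ac->entry[tofree],
                sizeof(void *) * ac->avail);
    }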