Lines matching refs: n

246 			struct kmem_cache_node *n, int tofree);
698 struct kmem_cache_node *n = get_node(cachep, numa_mem_id()); in recheck_pfmemalloc_active() local
705 spin_lock_irqsave(&n->list_lock, flags); in recheck_pfmemalloc_active()
706 list_for_each_entry(page, &n->slabs_full, lru) in recheck_pfmemalloc_active()
710 list_for_each_entry(page, &n->slabs_partial, lru) in recheck_pfmemalloc_active()
714 list_for_each_entry(page, &n->slabs_free, lru) in recheck_pfmemalloc_active()
720 spin_unlock_irqrestore(&n->list_lock, flags); in recheck_pfmemalloc_active()
731 struct kmem_cache_node *n; in __ac_get_obj() local
753 n = get_node(cachep, numa_mem_id()); in __ac_get_obj()
754 if (!list_empty(&n->slabs_free) && force_refill) { in __ac_get_obj()
831 #define reap_alien(cachep, n) do { } while (0) argument
923 struct kmem_cache_node *n = get_node(cachep, node); in __drain_alien_cache() local
926 spin_lock(&n->list_lock); in __drain_alien_cache()
932 if (n->shared) in __drain_alien_cache()
933 transfer_objects(n->shared, ac, ac->limit); in __drain_alien_cache()
937 spin_unlock(&n->list_lock); in __drain_alien_cache()
944 static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n) in reap_alien() argument
948 if (n->alien) { in reap_alien()
949 struct alien_cache *alc = n->alien[node]; in reap_alien()
990 struct kmem_cache_node *n; in __cache_free_alien() local
995 n = get_node(cachep, node); in __cache_free_alien()
997 if (n->alien && n->alien[page_node]) { in __cache_free_alien()
998 alien = n->alien[page_node]; in __cache_free_alien()
1009 n = get_node(cachep, page_node); in __cache_free_alien()
1010 spin_lock(&n->list_lock); in __cache_free_alien()
1012 spin_unlock(&n->list_lock); in __cache_free_alien()
1054 struct kmem_cache_node *n; in init_cache_node_node() local
1063 n = get_node(cachep, node); in init_cache_node_node()
1064 if (!n) { in init_cache_node_node()
1065 n = kmalloc_node(memsize, GFP_KERNEL, node); in init_cache_node_node()
1066 if (!n) in init_cache_node_node()
1068 kmem_cache_node_init(n); in init_cache_node_node()
1069 n->next_reap = jiffies + REAPTIMEOUT_NODE + in init_cache_node_node()
1077 cachep->node[node] = n; in init_cache_node_node()
1080 spin_lock_irq(&n->list_lock); in init_cache_node_node()
1081 n->free_limit = in init_cache_node_node()
1084 spin_unlock_irq(&n->list_lock); in init_cache_node_node()
1090 struct kmem_cache_node *n) in slabs_tofree() argument
1092 return (n->free_objects + cachep->num - 1) / cachep->num; in slabs_tofree()
1098 struct kmem_cache_node *n = NULL; in cpuup_canceled() local
1108 n = get_node(cachep, node); in cpuup_canceled()
1109 if (!n) in cpuup_canceled()
1112 spin_lock_irq(&n->list_lock); in cpuup_canceled()
1115 n->free_limit -= cachep->batchcount; in cpuup_canceled()
1125 spin_unlock_irq(&n->list_lock); in cpuup_canceled()
1129 shared = n->shared; in cpuup_canceled()
1133 n->shared = NULL; in cpuup_canceled()
1136 alien = n->alien; in cpuup_canceled()
1137 n->alien = NULL; in cpuup_canceled()
1139 spin_unlock_irq(&n->list_lock); in cpuup_canceled()
1156 n = get_node(cachep, node); in cpuup_canceled()
1157 if (!n) in cpuup_canceled()
1159 drain_freelist(cachep, n, slabs_tofree(cachep, n)); in cpuup_canceled()
1166 struct kmem_cache_node *n = NULL; in cpuup_prepare() local
1202 n = get_node(cachep, node); in cpuup_prepare()
1203 BUG_ON(!n); in cpuup_prepare()
1205 spin_lock_irq(&n->list_lock); in cpuup_prepare()
1206 if (!n->shared) { in cpuup_prepare()
1211 n->shared = shared; in cpuup_prepare()
1215 if (!n->alien) { in cpuup_prepare()
1216 n->alien = alien; in cpuup_prepare()
1220 spin_unlock_irq(&n->list_lock); in cpuup_prepare()
1305 struct kmem_cache_node *n; in drain_cache_node_node() local
1307 n = get_node(cachep, node); in drain_cache_node_node()
1308 if (!n) in drain_cache_node_node()
1311 drain_freelist(cachep, n, slabs_tofree(cachep, n)); in drain_cache_node_node()
1313 if (!list_empty(&n->slabs_full) || in drain_cache_node_node()
1314 !list_empty(&n->slabs_partial)) { in drain_cache_node_node()
1531 struct kmem_cache_node *n; in slab_out_of_memory() local
1547 for_each_kmem_cache_node(cachep, node, n) { in slab_out_of_memory()
1551 spin_lock_irqsave(&n->list_lock, flags); in slab_out_of_memory()
1552 list_for_each_entry(page, &n->slabs_full, lru) { in slab_out_of_memory()
1556 list_for_each_entry(page, &n->slabs_partial, lru) { in slab_out_of_memory()
1560 list_for_each_entry(page, &n->slabs_free, lru) in slab_out_of_memory()
1563 free_objects += n->free_objects; in slab_out_of_memory()
1564 spin_unlock_irqrestore(&n->list_lock, flags); in slab_out_of_memory()
1916 struct page *page, *n; in slabs_destroy() local
1918 list_for_each_entry_safe(page, n, list, lru) { in slabs_destroy()
2328 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
2337 struct kmem_cache_node *n; in do_drain() local
2342 n = get_node(cachep, node); in do_drain()
2343 spin_lock(&n->list_lock); in do_drain()
2345 spin_unlock(&n->list_lock); in do_drain()
2352 struct kmem_cache_node *n; in drain_cpu_caches() local
2357 for_each_kmem_cache_node(cachep, node, n) in drain_cpu_caches()
2358 if (n->alien) in drain_cpu_caches()
2359 drain_alien_cache(cachep, n->alien); in drain_cpu_caches()
2361 for_each_kmem_cache_node(cachep, node, n) in drain_cpu_caches()
2362 drain_array(cachep, n, n->shared, 1, node); in drain_cpu_caches()
2372 struct kmem_cache_node *n, int tofree) in drain_freelist() argument
2379 while (nr_freed < tofree && !list_empty(&n->slabs_free)) { in drain_freelist()
2381 spin_lock_irq(&n->list_lock); in drain_freelist()
2382 p = n->slabs_free.prev; in drain_freelist()
2383 if (p == &n->slabs_free) { in drain_freelist()
2384 spin_unlock_irq(&n->list_lock); in drain_freelist()
2397 n->free_objects -= cache->num; in drain_freelist()
2398 spin_unlock_irq(&n->list_lock); in drain_freelist()
2410 struct kmem_cache_node *n; in __kmem_cache_shrink() local
2415 for_each_kmem_cache_node(cachep, node, n) { in __kmem_cache_shrink()
2416 drain_freelist(cachep, n, slabs_tofree(cachep, n)); in __kmem_cache_shrink()
2418 ret += !list_empty(&n->slabs_full) || in __kmem_cache_shrink()
2419 !list_empty(&n->slabs_partial); in __kmem_cache_shrink()
2427 struct kmem_cache_node *n; in __kmem_cache_shutdown() local
2436 for_each_kmem_cache_node(cachep, i, n) { in __kmem_cache_shutdown()
2437 kfree(n->shared); in __kmem_cache_shutdown()
2438 free_alien_cache(n->alien); in __kmem_cache_shutdown()
2439 kfree(n); in __kmem_cache_shutdown()
2608 struct kmem_cache_node *n; in cache_grow() local
2622 n = get_node(cachep, nodeid); in cache_grow()
2623 spin_lock(&n->list_lock); in cache_grow()
2626 offset = n->colour_next; in cache_grow()
2627 n->colour_next++; in cache_grow()
2628 if (n->colour_next >= cachep->colour) in cache_grow()
2629 n->colour_next = 0; in cache_grow()
2630 spin_unlock(&n->list_lock); in cache_grow()
2667 spin_lock(&n->list_lock); in cache_grow()
2670 list_add_tail(&page->lru, &(n->slabs_free)); in cache_grow()
2672 n->free_objects += cachep->num; in cache_grow()
2673 spin_unlock(&n->list_lock); in cache_grow()
2772 struct kmem_cache_node *n; in cache_alloc_refill() local
2791 n = get_node(cachep, node); in cache_alloc_refill()
2793 BUG_ON(ac->avail > 0 || !n); in cache_alloc_refill()
2794 spin_lock(&n->list_lock); in cache_alloc_refill()
2797 if (n->shared && transfer_objects(ac, n->shared, batchcount)) { in cache_alloc_refill()
2798 n->shared->touched = 1; in cache_alloc_refill()
2806 entry = n->slabs_partial.next; in cache_alloc_refill()
2807 if (entry == &n->slabs_partial) { in cache_alloc_refill()
2808 n->free_touched = 1; in cache_alloc_refill()
2809 entry = n->slabs_free.next; in cache_alloc_refill()
2810 if (entry == &n->slabs_free) in cache_alloc_refill()
2836 list_add(&page->lru, &n->slabs_full); in cache_alloc_refill()
2838 list_add(&page->lru, &n->slabs_partial); in cache_alloc_refill()
2842 n->free_objects -= ac->avail; in cache_alloc_refill()
2844 spin_unlock(&n->list_lock); in cache_alloc_refill()
3099 struct kmem_cache_node *n; in ____cache_alloc_node() local
3104 n = get_node(cachep, nodeid); in ____cache_alloc_node()
3105 BUG_ON(!n); in ____cache_alloc_node()
3109 spin_lock(&n->list_lock); in ____cache_alloc_node()
3110 entry = n->slabs_partial.next; in ____cache_alloc_node()
3111 if (entry == &n->slabs_partial) { in ____cache_alloc_node()
3112 n->free_touched = 1; in ____cache_alloc_node()
3113 entry = n->slabs_free.next; in ____cache_alloc_node()
3114 if (entry == &n->slabs_free) in ____cache_alloc_node()
3128 n->free_objects--; in ____cache_alloc_node()
3133 list_add(&page->lru, &n->slabs_full); in ____cache_alloc_node()
3135 list_add(&page->lru, &n->slabs_partial); in ____cache_alloc_node()
3137 spin_unlock(&n->list_lock); in ____cache_alloc_node()
3141 spin_unlock(&n->list_lock); in ____cache_alloc_node()
3284 struct kmem_cache_node *n = get_node(cachep, node); in free_block() local
3298 n->free_objects++; in free_block()
3302 if (n->free_objects > n->free_limit) { in free_block()
3303 n->free_objects -= cachep->num; in free_block()
3306 list_add(&page->lru, &n->slabs_free); in free_block()
3313 list_add_tail(&page->lru, &n->slabs_partial); in free_block()
3321 struct kmem_cache_node *n; in cache_flusharray() local
3330 n = get_node(cachep, node); in cache_flusharray()
3331 spin_lock(&n->list_lock); in cache_flusharray()
3332 if (n->shared) { in cache_flusharray()
3333 struct array_cache *shared_array = n->shared; in cache_flusharray()
3352 p = n->slabs_free.next; in cache_flusharray()
3353 while (p != &(n->slabs_free)) { in cache_flusharray()
3365 spin_unlock(&n->list_lock); in cache_flusharray()
3603 struct kmem_cache_node *n; in alloc_kmem_cache_node() local
3626 n = get_node(cachep, node); in alloc_kmem_cache_node()
3627 if (n) { in alloc_kmem_cache_node()
3628 struct array_cache *shared = n->shared; in alloc_kmem_cache_node()
3631 spin_lock_irq(&n->list_lock); in alloc_kmem_cache_node()
3637 n->shared = new_shared; in alloc_kmem_cache_node()
3638 if (!n->alien) { in alloc_kmem_cache_node()
3639 n->alien = new_alien; in alloc_kmem_cache_node()
3642 n->free_limit = (1 + nr_cpus_node(node)) * in alloc_kmem_cache_node()
3644 spin_unlock_irq(&n->list_lock); in alloc_kmem_cache_node()
3650 n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node); in alloc_kmem_cache_node()
3651 if (!n) { in alloc_kmem_cache_node()
3657 kmem_cache_node_init(n); in alloc_kmem_cache_node()
3658 n->next_reap = jiffies + REAPTIMEOUT_NODE + in alloc_kmem_cache_node()
3660 n->shared = new_shared; in alloc_kmem_cache_node()
3661 n->alien = new_alien; in alloc_kmem_cache_node()
3662 n->free_limit = (1 + nr_cpus_node(node)) * in alloc_kmem_cache_node()
3664 cachep->node[node] = n; in alloc_kmem_cache_node()
3673 n = get_node(cachep, node); in alloc_kmem_cache_node()
3674 if (n) { in alloc_kmem_cache_node()
3675 kfree(n->shared); in alloc_kmem_cache_node()
3676 free_alien_cache(n->alien); in alloc_kmem_cache_node()
3677 kfree(n); in alloc_kmem_cache_node()
3712 struct kmem_cache_node *n; in __do_tune_cpucache() local
3716 n = get_node(cachep, node); in __do_tune_cpucache()
3717 spin_lock_irq(&n->list_lock); in __do_tune_cpucache()
3719 spin_unlock_irq(&n->list_lock); in __do_tune_cpucache()
3823 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n, in drain_array() argument
3834 spin_lock_irq(&n->list_lock); in drain_array()
3844 spin_unlock_irq(&n->list_lock); in drain_array()
3864 struct kmem_cache_node *n; in cache_reap() local
3880 n = get_node(searchp, node); in cache_reap()
3882 reap_alien(searchp, n); in cache_reap()
3884 drain_array(searchp, n, cpu_cache_get(searchp), 0, node); in cache_reap()
3890 if (time_after(n->next_reap, jiffies)) in cache_reap()
3893 n->next_reap = jiffies + REAPTIMEOUT_NODE; in cache_reap()
3895 drain_array(searchp, n, n->shared, 0, node); in cache_reap()
3897 if (n->free_touched) in cache_reap()
3898 n->free_touched = 0; in cache_reap()
3902 freed = drain_freelist(searchp, n, (n->free_limit + in cache_reap()
3928 struct kmem_cache_node *n; in get_slabinfo() local
3932 for_each_kmem_cache_node(cachep, node, n) { in get_slabinfo()
3935 spin_lock_irq(&n->list_lock); in get_slabinfo()
3937 list_for_each_entry(page, &n->slabs_full, lru) { in get_slabinfo()
3943 list_for_each_entry(page, &n->slabs_partial, lru) { in get_slabinfo()
3951 list_for_each_entry(page, &n->slabs_free, lru) { in get_slabinfo()
3956 free_objects += n->free_objects; in get_slabinfo()
3957 if (n->shared) in get_slabinfo()
3958 shared_avail += n->shared->avail; in get_slabinfo()
3960 spin_unlock_irq(&n->list_lock); in get_slabinfo()
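
Nearly every match above uses n for the per-node bookkeeping structure (struct kmem_cache_node): it is looked up with get_node(cachep, node), and its slabs_full/slabs_partial/slabs_free lists and free_objects counter are only read or modified while n->list_lock is held, with the actual page freeing done after the lock is dropped (see the drain_freelist() matches above). The following user-space C program is a minimal sketch of that pattern, not kernel code: struct node, struct slab, the list helpers and objs_per_slab are my stand-ins for struct kmem_cache_node, struct page, <linux/list.h> and cachep->num.

#include <pthread.h>
#include <stdio.h>

/* Minimal doubly linked list, standing in for <linux/list.h>. */
struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	e->next = e->prev = e;
}

/* Stand-in for struct kmem_cache_node: the three slab lists plus list_lock. */
struct node {
	pthread_mutex_t list_lock;
	struct list_head slabs_full, slabs_partial, slabs_free;
	unsigned long free_objects;
};

/* Stand-in for struct page; lru must stay the first member so the cast below is valid. */
struct slab { struct list_head lru; };

/* Same shape as drain_freelist(): detach free slabs only while holding list_lock. */
static int drain_freelist(struct node *n, int tofree, unsigned int objs_per_slab)
{
	int nr_freed = 0;

	while (nr_freed < tofree) {
		struct slab *s;

		pthread_mutex_lock(&n->list_lock);
		if (list_empty(&n->slabs_free)) {
			/* nothing left to reclaim on this node */
			pthread_mutex_unlock(&n->list_lock);
			break;
		}
		s = (struct slab *)n->slabs_free.prev;	/* oldest fully free slab */
		list_del(&s->lru);
		n->free_objects -= objs_per_slab;
		pthread_mutex_unlock(&n->list_lock);

		/* the kernel returns the slab page to the page allocator here,
		 * after dropping list_lock */
		nr_freed++;
	}
	return nr_freed;
}

int main(void)
{
	struct node n;
	struct slab slabs[3];
	unsigned int objs_per_slab = 8;
	int freed;

	pthread_mutex_init(&n.list_lock, NULL);
	list_init(&n.slabs_full);
	list_init(&n.slabs_partial);
	list_init(&n.slabs_free);
	for (int i = 0; i < 3; i++)
		list_add_tail(&slabs[i].lru, &n.slabs_free);
	n.free_objects = 3 * objs_per_slab;

	freed = drain_freelist(&n, 2, objs_per_slab);
	printf("freed %d slab(s), free_objects now %lu\n", freed, n.free_objects);
	return 0;
}

The point of the shape is visible throughout the matches: every list walk (cache_alloc_refill(), free_block(), get_slabinfo(), slab_out_of_memory()) brackets its work with the same list_lock, and anything expensive, such as freeing pages, happens outside it.
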
4069 static inline int add_caller(unsigned long *n, unsigned long v) in add_caller() argument
4075 l = n[1]; in add_caller()
4076 p = n + 2; in add_caller()
4091 if (++n[1] == n[0]) in add_caller()
4093 memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n)); in add_caller()
4099 static void handle_slab(unsigned long *n, struct kmem_cache *c, in handle_slab() argument
4105 if (n[0] == n[1]) in handle_slab()
4111 if (!add_caller(n, (unsigned long)*dbg_userword(c, p))) in handle_slab()
4136 struct kmem_cache_node *n; in leaks_show() local
4151 for_each_kmem_cache_node(cachep, node, n) { in leaks_show()
4154 spin_lock_irq(&n->list_lock); in leaks_show()
4156 list_for_each_entry(page, &n->slabs_full, lru) in leaks_show()
4158 list_for_each_entry(page, &n->slabs_partial, lru) in leaks_show()
4160 spin_unlock_irq(&n->list_lock); in leaks_show()
4198 unsigned long *n; in slabstats_open() local
4200 n = __seq_open_private(file, &slabstats_op, PAGE_SIZE); in slabstats_open()
4201 if (!n) in slabstats_open()
4204 *n = PAGE_SIZE / (2 * sizeof(unsigned long)); in slabstats_open()
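
The last group of matches (add_caller(), handle_slab(), slabstats_open()) uses n for something different: a page-sized array of unsigned long used by the slab leak/caller accounting. As I read the lines above, n[0] holds the table capacity (slabstats_open() derives it from PAGE_SIZE / (2 * sizeof(unsigned long))), n[1] the number of entries in use, and from n[2] on the array stores (caller address, hit count) pairs that add_caller() searches and updates in place. The user-space program below is a sketch of that layout under those assumptions; the pairs are kept sorted by address so lookup can binary-search, and the insertion and full-table checks are written my own way rather than copied from the kernel's add_caller().

#include <stdio.h>
#include <string.h>

/*
 * Record one hit for caller address v in table n.
 * Assumed layout: n[0] = capacity in pairs, n[1] = pairs in use,
 * n[2..] = (address, count) pairs sorted by address.
 * Returns 0 when the table is full, 1 otherwise.
 */
static int add_caller(unsigned long *n, unsigned long v)
{
	unsigned long *p = n + 2;
	unsigned long l = n[1];

	if (!v)
		return 1;

	/* binary search over the sorted (address, count) pairs */
	while (l) {
		unsigned long i = l / 2;
		unsigned long *q = p + 2 * i;

		if (q[0] == v) {	/* known caller: bump its count */
			q[1]++;
			return 1;
		}
		if (q[0] > v) {
			l = i;
		} else {
			p = q + 2;
			l -= i + 1;
		}
	}

	if (n[1] == n[0])	/* table full */
		return 0;

	/* open a slot at p and insert the new (address, count = 1) pair */
	memmove(p + 2, p,
		(n[1] - (p - (n + 2)) / 2) * 2 * sizeof(unsigned long));
	p[0] = v;
	p[1] = 1;
	n[1]++;
	return 1;
}

int main(void)
{
	unsigned long table[2 + 2 * 8] = { 8, 0 };	/* room for 8 distinct callers */
	unsigned long callers[] = { 0xc0de, 0xbeef, 0xc0de, 0xf00d, 0xbeef, 0xc0de };

	for (size_t i = 0; i < sizeof(callers) / sizeof(callers[0]); i++)
		add_caller(table, callers[i]);

	for (unsigned long i = 0; i < table[1]; i++)
		printf("caller %#lx seen %lu time(s)\n",
		       table[2 + 2 * i], table[3 + 2 * i]);
	return 0;
}

Keeping header and pairs in one flat buffer means the whole table fits in a single page with no per-entry allocation, which matches how slabstats_open() sizes it in the listing above.
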