1#ifndef _LINUX_SLAB_DEF_H
2#define	_LINUX_SLAB_DEF_H
3
4#include <linux/reciprocal_div.h>
5
6/*
7 * Definitions unique to the original Linux SLAB allocator.
8 */
9
struct kmem_cache {
	/*
	 * Per-CPU front-end cache of object pointers; consulted first on
	 * every allocation and free before touching the per-node lists.
	 */
	struct array_cache __percpu *cpu_cache;

/* 1) Cache tunables. Protected by slab_mutex */
	unsigned int batchcount;	/* # of objects transferred to/from a cpu_cache at once */
	unsigned int limit;		/* max # of objects held in a cpu_cache */
	unsigned int shared;		/* size factor for the per-node shared array cache */

	unsigned int size;		/* object size incl. metadata/padding, as allocated */
	/* precomputed reciprocal of 'size' for divide-free obj index math */
	struct reciprocal_value reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	/* cache the freelists themselves are allocated from when kept off-slab */
	struct kmem_cache *freelist_cache;
	unsigned int freelist_size;	/* bytes needed for one slab's freelist */

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;		/* human-readable cache name */
	struct list_head list;		/* link in the global list of caches */
	int refcount;			/* # of users; cache destroyed when it drops to 0 */
	int object_size;		/* original object size requested by the creator */
	int align;			/* object alignment in effect */

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	/* slab-level event counters (debug builds only) */
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	/* cpu_cache hit/miss counters for the alloc and free fast paths */
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. 'size' above contains the
	 * total object size including these internal fields; obj_offset is
	 * the offset from the start of that storage to the user-visible
	 * object (whose size is object_size).
	 */
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
#ifdef CONFIG_MEMCG_KMEM
	struct memcg_cache_params memcg_params;	/* per-memcg child-cache bookkeeping */
#endif

	/* per-NUMA-node slab lists (full/partial/free); backend of the cache */
	struct kmem_cache_node *node[MAX_NUMNODES];
};
78
79#endif	/* _LINUX_SLAB_DEF_H */
80