Searched refs:cache (Results 1 - 200 of 3162) sorted by relevance

/linux-4.1.27/arch/mn10300/mm/
Makefile
5 cache-smp-wback-$(CONFIG_MN10300_CACHE_WBACK) := cache-smp-flush.o
7 cacheflush-y := cache.o
8 cacheflush-$(CONFIG_SMP) += cache-smp.o cache-smp-inv.o $(cache-smp-wback-y)
9 cacheflush-$(CONFIG_MN10300_CACHE_INV_ICACHE) += cache-inv-icache.o
10 cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_ICACHE) += cache-flush-icache.o
11 cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_TAG) += cache-inv-by-tag.o
12 cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_REG) += cache-inv-by-reg.o
13 cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_TAG) += cache-flush-by-tag.o
14 cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_REG) += cache-flush-by-reg.o
17 cache-dbg-flush-by-tag.o cache-dbg-inv-by-tag.o
19 cache-dbg-flush-by-reg.o
21 cache-dbg-inv-by-tag.o cache-dbg-inv.o
23 cache-dbg-inv-by-reg.o cache-dbg-inv.o
25 cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o
cache-smp-flush.c
13 #include "cache-smp.h"
16 * mn10300_dcache_flush - Globally flush data cache
18 * Flush the data cache on all CPUs.
31 * mn10300_dcache_flush_page - Globally flush a page of data cache
34 * Flush a range of addresses in the data cache on all CPUs covering
50 * mn10300_dcache_flush_range - Globally flush range of data cache
54 * Flush a range of addresses in the data cache on all CPUs, between start and
68 * mn10300_dcache_flush_range2 - Globally flush range of data cache
72 * Flush a range of addresses in the data cache on all CPUs, between start and
86 * mn10300_dcache_flush_inv - Globally flush and invalidate data cache
88 * Flush and invalidate the data cache on all CPUs.
102 * cache
105 * Flush and invalidate a range of addresses in the data cache on all CPUs
122 * cache
126 * Flush and invalidate a range of addresses in the data cache on all CPUs,
141 * cache
145 * Flush and invalidate a range of addresses in the data cache on all CPUs,
cache-smp-inv.c
13 #include "cache-smp.h"
16 * mn10300_icache_inv - Globally invalidate instruction cache
18 * Invalidate the instruction cache on all CPUs.
31 * mn10300_icache_inv_page - Globally invalidate a page of instruction cache
34 * Invalidate a range of addresses in the instruction cache on all CPUs
50 * mn10300_icache_inv_range - Globally invalidate range of instruction cache
54 * Invalidate a range of addresses in the instruction cache on all CPUs,
68 * mn10300_icache_inv_range2 - Globally invalidate range of instruction cache
72 * Invalidate a range of addresses in the instruction cache on all CPUs,
86 * mn10300_dcache_inv - Globally invalidate data cache
88 * Invalidate the data cache on all CPUs.
101 * mn10300_dcache_inv_page - Globally invalidate a page of data cache
104 * Invalidate a range of addresses in the data cache on all CPUs covering the
120 * mn10300_dcache_inv_range - Globally invalidate range of data cache
124 * Invalidate a range of addresses in the data cache on all CPUs, between start
138 * mn10300_dcache_inv_range2 - Globally invalidate range of data cache
142 * Invalidate a range of addresses in the data cache on all CPUs, between start
cache-dbg-inv.S
1 /* MN10300 CPU cache invalidation routines
15 #include <asm/cache.h>
18 #include "cache.inc"
36 # we only need to invalidate the icache in this cache mode
cache-dbg-flush-by-tag.S
1 /* MN10300 CPU cache invalidation routines, using direct tag flushing
15 #include <asm/cache.h>
18 #include "cache.inc"
26 # Flush the entire data cache back to RAM and invalidate the icache
43 # read the addresses tagged in the cache's tag RAM and attempt to flush
54 # cache
103 # retain valid entries in the cache
cache-disabled.c
1 /* Handle the cache being disabled
14 * allow userspace to flush the instruction cache
cache-dbg-inv-by-reg.S
1 /* MN10300 CPU cache invalidation routines, using automatic purge registers
13 #include <asm/cache.h>
16 #include "cache.inc"
54 # invalidate the cache line at the given address
cache-dbg-flush-by-reg.S
1 /* MN10300 CPU cache invalidation routines, using automatic purge registers
15 #include <asm/cache.h>
18 #include "cache.inc"
25 # Flush the entire data cache back to RAM and invalidate the icache
117 # retain valid entries in the cache
146 # invalidate the cache line at the given address
cache-dbg-inv-by-tag.S
1 /* MN10300 CPU cache invalidation routines, using direct tag flushing
15 #include <asm/cache.h>
18 #include "cache.inc"
46 # cache line then we invalidate that line
70 # check all the way tags for this cache entry
98 # wait for the cache to finish what it's doing
/linux-4.1.27/arch/mips/include/asm/
r4kcache.h
6 * Inline assembly cache operations.
30 * for indexed cache operations. Two issues here:
46 " cache %0, %1 \n" \
154 "1: cache %0, (%1) \n" \
230 " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" \
231 " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" \
232 " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" \
233 " cache %1, 0x060(%0); cache %1, 0x070(%0) \n" \
234 " cache %1, 0x080(%0); cache %1, 0x090(%0) \n" \
235 " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" \
236 " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" \
237 " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" \
238 " cache %1, 0x100(%0); cache %1, 0x110(%0) \n" \
239 " cache %1, 0x120(%0); cache %1, 0x130(%0) \n" \
240 " cache %1, 0x140(%0); cache %1, 0x150(%0) \n" \
241 " cache %1, 0x160(%0); cache %1, 0x170(%0) \n" \
242 " cache %1, 0x180(%0); cache %1, 0x190(%0) \n" \
243 " cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" \
244 " cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" \
245 " cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" \
256 " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" \
257 " cache %1, 0x040(%0); cache %1, 0x060(%0) \n" \
258 " cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" \
259 " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" \
260 " cache %1, 0x100(%0); cache %1, 0x120(%0) \n" \
261 " cache %1, 0x140(%0); cache %1, 0x160(%0) \n" \
262 " cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" \
263 " cache %1, 0x1c0(%0); cache %1, 0x1e0(%0) \n" \
264 " cache %1, 0x200(%0); cache %1, 0x220(%0) \n" \
265 " cache %1, 0x240(%0); cache %1, 0x260(%0) \n" \
266 " cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" \
267 " cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" \
268 " cache %1, 0x300(%0); cache %1, 0x320(%0) \n" \
269 " cache %1, 0x340(%0); cache %1, 0x360(%0) \n" \
270 " cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" \
271 " cache %1, 0x3c0(%0); cache %1, 0x3e0(%0) \n" \
282 " cache %1, 0x000(%0); cache %1, 0x040(%0) \n" \
283 " cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" \
284 " cache %1, 0x100(%0); cache %1, 0x140(%0) \n" \
285 " cache %1, 0x180(%0); cache %1, 0x1c0(%0) \n" \
286 " cache %1, 0x200(%0); cache %1, 0x240(%0) \n" \
287 " cache %1, 0x280(%0); cache %1, 0x2c0(%0) \n" \
288 " cache %1, 0x300(%0); cache %1, 0x340(%0) \n" \
289 " cache %1, 0x380(%0); cache %1, 0x3c0(%0) \n" \
290 " cache %1, 0x400(%0); cache %1, 0x440(%0) \n" \
291 " cache %1, 0x480(%0); cache %1, 0x4c0(%0) \n" \
292 " cache %1, 0x500(%0); cache %1, 0x540(%0) \n" \
293 " cache %1, 0x580(%0); cache %1, 0x5c0(%0) \n" \
294 " cache %1, 0x600(%0); cache %1, 0x640(%0) \n" \
295 " cache %1, 0x680(%0); cache %1, 0x6c0(%0) \n" \
296 " cache %1, 0x700(%0); cache %1, 0x740(%0) \n" \
297 " cache %1, 0x780(%0); cache %1, 0x7c0(%0) \n" \
308 " cache %1, 0x000(%0); cache %1, 0x080(%0) \n" \
309 " cache %1, 0x100(%0); cache %1, 0x180(%0) \n" \
310 " cache %1, 0x200(%0); cache %1, 0x280(%0) \n" \
311 " cache %1, 0x300(%0); cache %1, 0x380(%0) \n" \
312 " cache %1, 0x400(%0); cache %1, 0x480(%0) \n" \
313 " cache %1, 0x500(%0); cache %1, 0x580(%0) \n" \
314 " cache %1, 0x600(%0); cache %1, 0x680(%0) \n" \
315 " cache %1, 0x700(%0); cache %1, 0x780(%0) \n" \
316 " cache %1, 0x800(%0); cache %1, 0x880(%0) \n" \
317 " cache %1, 0x900(%0); cache %1, 0x980(%0) \n" \
318 " cache %1, 0xa00(%0); cache %1, 0xa80(%0) \n" \
319 " cache %1, 0xb00(%0); cache %1, 0xb80(%0) \n" \
320 " cache %1, 0xc00(%0); cache %1, 0xc80(%0) \n" \
321 " cache %1, 0xd00(%0); cache %1, 0xd80(%0) \n" \
322 " cache %1, 0xe00(%0); cache %1, 0xe80(%0) \n" \
323 " cache %1, 0xf00(%0); cache %1, 0xf80(%0) \n" \
331 * MIPS R6 changed the cache opcode and moved to an 8-bit offset field.
333 * more cache lines
341 " cache %1, 0x000(%0); cache %1, 0x010(%0)\n" \
342 " cache %1, 0x020(%0); cache %1, 0x030(%0)\n" \
343 " cache %1, 0x040(%0); cache %1, 0x050(%0)\n" \
344 " cache %1, 0x060(%0); cache %1, 0x070(%0)\n" \
345 " cache %1, 0x080(%0); cache %1, 0x090(%0)\n" \
346 " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n" \
347 " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n" \
348 " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n" \
350 " cache %1, 0x000($1); cache %1, 0x010($1)\n" \
351 " cache %1, 0x020($1); cache %1, 0x030($1)\n" \
352 " cache %1, 0x040($1); cache %1, 0x050($1)\n" \
353 " cache %1, 0x060($1); cache %1, 0x070($1)\n" \
354 " cache %1, 0x080($1); cache %1, 0x090($1)\n" \
355 " cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n" \
356 " cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n" \
357 " cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n" \
369 " cache %1, 0x000(%0); cache %1, 0x020(%0)\n" \
370 " cache %1, 0x040(%0); cache %1, 0x060(%0)\n" \
371 " cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n" \
372 " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n" \
374 " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
375 " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
376 " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
377 " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
379 " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
380 " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
381 " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
382 " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
384 " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
385 " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
386 " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
387 " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
399 " cache %1, 0x000(%0); cache %1, 0x040(%0)\n" \
400 " cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n" \
402 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
403 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
405 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
406 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
408 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
409 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
411 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
412 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
414 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
415 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
417 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
418 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
420 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
421 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
433 " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
435 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
437 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
439 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
441 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
443 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
445 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
447 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
449 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
451 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
453 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
455 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
457 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
459 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
461 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
463 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
465 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
473 * Perform the cache operation specified by op using a user mode virtual
559 static inline void extra##blast_##pfx##cache##lsize(void) \
572 cache##lsize##_unroll32(addr|ws, indexop); \
577 static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
585 cache##lsize##_unroll32(start, hitop); \
592 static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
606 cache##lsize##_unroll32(addr|ws, indexop); \
633 static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
641 cache##lsize##_unroll32_user(start, hitop); \
660 static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
687 static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
cpu-info.h
17 #include <asm/cache.h>
20 * Descriptor for a cache
27 unsigned char waybit; /* Bits to select in a cache set */
28 unsigned char flags; /* Flags describing cache properties */
35 #define MIPS_CACHE_VTAG 0x00000002 /* Virtually tagged cache */
37 #define MIPS_CACHE_IC_F_DC 0x00000008 /* Ic can refill from D-cache */
39 #define MIPS_CACHE_PINDEX 0x00000020 /* Physically indexed cache */
61 struct cache_desc icache; /* Primary I-cache */
62 struct cache_desc dcache; /* Primary D or combined I/D cache */
63 struct cache_desc scache; /* Secondary cache */
64 struct cache_desc tcache; /* Tertiary/split secondary cache */
/linux-4.1.27/arch/powerpc/kernel/
cacheinfo.c
2 * Processor cache information made available to userspace via sysfs;
27 * - a "cache" kobject for the top-level directory
28 * - a list of "index" objects representing the cpu's local cache hierarchy
31 struct kobject *kobj; /* bare (not embedded) kobject for cache
36 /* "index" object: each cpu's cache directory has an index
37 * subdirectory corresponding to a cache object associated with the
43 struct cache *cache; member in struct:cache_index_dir
47 * cache type */
52 /* Allow for both [di]-cache-line-size and
53 * [di]-cache-block-size properties. According to the PowerPC
55 * differs from the cache block size (that which is operated
56 * on by cache instructions), so we look for -line-size first.
64 #define CACHE_TYPE_UNIFIED 0 /* cache-size, cache-block-size, etc. */
65 #define CACHE_TYPE_UNIFIED_D 1 /* d-cache-size, d-cache-block-size, etc */
71 /* Embedded systems that use cache-size, cache-block-size,
72 * etc. for the Unified (typically L2) cache. */
74 .size_prop = "cache-size",
75 .line_size_props = { "cache-line-size",
76 "cache-block-size", },
77 .nr_sets_prop = "cache-sets",
80 /* PowerPC Processor binding says the [di]-cache-*
82 * d-cache properties. */
84 .size_prop = "d-cache-size",
85 .line_size_props = { "d-cache-line-size",
86 "d-cache-block-size", },
87 .nr_sets_prop = "d-cache-sets",
91 .size_prop = "i-cache-size",
92 .line_size_props = { "i-cache-line-size",
93 "i-cache-block-size", },
94 .nr_sets_prop = "i-cache-sets",
98 .size_prop = "d-cache-size",
99 .line_size_props = { "d-cache-line-size",
100 "d-cache-block-size", },
101 .nr_sets_prop = "d-cache-sets",
105 /* Cache object: each instance of this corresponds to a distinct cache
109 * cache object. A cache object is released when its shared_cpu_map
112 * A cache object is on two lists: an unsorted global list
113 * (cache_list) of cache objects; and a singly-linked list
114 * representing the local cache hierarchy, which is ordered by level
117 struct cache { struct
118 struct device_node *ofnode; /* OF node for this cache, may be cpu */
119 struct cpumask shared_cpu_map; /* online CPUs using this cache */
120 int type; /* split cache disambiguation */
122 struct list_head list; /* global list of cache objects */
123 struct cache *next_local; /* next cache of >= level */
138 static const char *cache_type_string(const struct cache *cache) cache_type_string() argument
140 return cache_type_info[cache->type].name; cache_type_string()
143 static void cache_init(struct cache *cache, int type, int level, cache_init() argument
146 cache->type = type; cache_init()
147 cache->level = level; cache_init()
148 cache->ofnode = of_node_get(ofnode); cache_init()
149 INIT_LIST_HEAD(&cache->list); cache_init()
150 list_add(&cache->list, &cache_list); cache_init()
153 static struct cache *new_cache(int type, int level, struct device_node *ofnode) new_cache()
155 struct cache *cache; new_cache() local
157 cache = kzalloc(sizeof(*cache), GFP_KERNEL); new_cache()
158 if (cache) new_cache()
159 cache_init(cache, type, level, ofnode); new_cache()
161 return cache; new_cache()
164 static void release_cache_debugcheck(struct cache *cache) release_cache_debugcheck() argument
166 struct cache *iter; release_cache_debugcheck()
169 WARN_ONCE(iter->next_local == cache, release_cache_debugcheck()
170 "cache for %s(%s) refers to cache for %s(%s)\n", release_cache_debugcheck()
173 cache->ofnode->full_name, release_cache_debugcheck()
174 cache_type_string(cache)); release_cache_debugcheck()
177 static void release_cache(struct cache *cache) release_cache() argument
179 if (!cache) release_cache()
182 pr_debug("freeing L%d %s cache for %s\n", cache->level, release_cache()
183 cache_type_string(cache), cache->ofnode->full_name); release_cache()
185 release_cache_debugcheck(cache); release_cache()
186 list_del(&cache->list); release_cache()
187 of_node_put(cache->ofnode); release_cache()
188 kfree(cache); release_cache()
191 static void cache_cpu_set(struct cache *cache, int cpu) cache_cpu_set() argument
193 struct cache *next = cache; cache_cpu_set()
205 static int cache_size(const struct cache *cache, unsigned int *ret) cache_size() argument
210 propname = cache_type_info[cache->type].size_prop; cache_size()
212 cache_size = of_get_property(cache->ofnode, propname, NULL); cache_size()
220 static int cache_size_kb(const struct cache *cache, unsigned int *ret) cache_size_kb() argument
224 if (cache_size(cache, &size)) cache_size_kb()
231 /* not cache_line_size() because that's a macro in include/linux/cache.h */ cache_get_line_size()
232 static int cache_get_line_size(const struct cache *cache, unsigned int *ret) cache_get_line_size() argument
237 lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props); cache_get_line_size()
242 propname = cache_type_info[cache->type].line_size_props[i]; cache_get_line_size()
243 line_size = of_get_property(cache->ofnode, propname, NULL); cache_get_line_size()
255 static int cache_nr_sets(const struct cache *cache, unsigned int *ret) cache_nr_sets() argument
260 propname = cache_type_info[cache->type].nr_sets_prop; cache_nr_sets()
262 nr_sets = of_get_property(cache->ofnode, propname, NULL); cache_nr_sets()
270 static int cache_associativity(const struct cache *cache, unsigned int *ret) cache_associativity() argument
276 if (cache_nr_sets(cache, &nr_sets)) cache_associativity()
279 /* If the cache is fully associative, there is no need to cache_associativity()
287 if (cache_get_line_size(cache, &line_size)) cache_associativity()
289 if (cache_size(cache, &size)) cache_associativity()
302 static struct cache *cache_find_first_sibling(struct cache *cache) cache_find_first_sibling() argument
304 struct cache *iter; cache_find_first_sibling()
306 if (cache->type == CACHE_TYPE_UNIFIED || cache_find_first_sibling()
307 cache->type == CACHE_TYPE_UNIFIED_D) cache_find_first_sibling()
308 return cache; cache_find_first_sibling()
311 if (iter->ofnode == cache->ofnode && iter->next_local == cache) cache_find_first_sibling()
314 return cache; cache_find_first_sibling()
317 /* return the first cache on a local list matching node */ cache_lookup_by_node()
318 static struct cache *cache_lookup_by_node(const struct device_node *node) cache_lookup_by_node()
320 struct cache *cache = NULL; cache_lookup_by_node() local
321 struct cache *iter; cache_lookup_by_node()
326 cache = cache_find_first_sibling(iter); cache_lookup_by_node()
330 return cache; cache_lookup_by_node()
335 return of_get_property(np, "cache-unified", NULL); cache_node_is_unified()
340 * use cache-size, etc. for the unified cache size, but open firmware systems
341 * use d-cache-size, etc. Check on initialization for which type we have, and
344 * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
356 static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level) cache_do_one_devnode_unified()
363 static struct cache *cache_do_one_devnode_split(struct device_node *node, cache_do_one_devnode_split()
366 struct cache *dcache, *icache; cache_do_one_devnode_split()
386 static struct cache *cache_do_one_devnode(struct device_node *node, int level) cache_do_one_devnode()
388 struct cache *cache; cache_do_one_devnode() local
391 cache = cache_do_one_devnode_unified(node, level); cache_do_one_devnode()
393 cache = cache_do_one_devnode_split(node, level); cache_do_one_devnode()
395 return cache; cache_do_one_devnode()
398 static struct cache *cache_lookup_or_instantiate(struct device_node *node, cache_lookup_or_instantiate()
401 struct cache *cache; cache_lookup_or_instantiate() local
403 cache = cache_lookup_by_node(node); cache_lookup_or_instantiate()
405 WARN_ONCE(cache && cache->level != level, cache_lookup_or_instantiate()
406 "cache level mismatch on lookup (got %d, expected %d)\n", cache_lookup_or_instantiate()
407 cache->level, level); cache_lookup_or_instantiate()
409 if (!cache) cache_lookup_or_instantiate()
410 cache = cache_do_one_devnode(node, level); cache_lookup_or_instantiate()
412 return cache; cache_lookup_or_instantiate()
415 static void link_cache_lists(struct cache *smaller, struct cache *bigger) link_cache_lists()
426 static void do_subsidiary_caches_debugcheck(struct cache *cache) do_subsidiary_caches_debugcheck() argument
428 WARN_ON_ONCE(cache->level != 1); do_subsidiary_caches_debugcheck()
429 WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu")); do_subsidiary_caches_debugcheck()
432 static void do_subsidiary_caches(struct cache *cache) do_subsidiary_caches() argument
435 int level = cache->level; do_subsidiary_caches()
437 do_subsidiary_caches_debugcheck(cache); do_subsidiary_caches()
439 while ((subcache_node = of_find_next_cache_node(cache->ofnode))) { do_subsidiary_caches()
440 struct cache *subcache; do_subsidiary_caches()
448 link_cache_lists(cache, subcache); do_subsidiary_caches()
449 cache = subcache; do_subsidiary_caches()
453 static struct cache *cache_chain_instantiate(unsigned int cpu_id) cache_chain_instantiate()
456 struct cache *cpu_cache = NULL; cache_chain_instantiate()
458 pr_debug("creating cache object(s) for CPU %i\n", cpu_id); cache_chain_instantiate()
489 kobj = kobject_create_and_add("cache", &dev->kobj); cacheinfo_create_cache_dir()
515 pr_debug("freeing index directory for L%d %s cache\n", cache_index_release()
516 index->cache->level, cache_type_string(index->cache)); cache_index_release()
530 static struct cache *index_kobj_to_cache(struct kobject *k) index_kobj_to_cache()
536 return index->cache; index_kobj_to_cache()
542 struct cache *cache; size_show() local
544 cache = index_kobj_to_cache(k); size_show()
546 if (cache_size_kb(cache, &size_kb)) size_show()
559 struct cache *cache; line_size_show() local
561 cache = index_kobj_to_cache(k); line_size_show()
563 if (cache_get_line_size(cache, &line_size)) line_size_show()
575 struct cache *cache; nr_sets_show() local
577 cache = index_kobj_to_cache(k); nr_sets_show()
579 if (cache_nr_sets(cache, &nr_sets)) nr_sets_show()
591 struct cache *cache; associativity_show() local
593 cache = index_kobj_to_cache(k); associativity_show()
595 if (cache_associativity(cache, &associativity)) associativity_show()
606 struct cache *cache; type_show() local
608 cache = index_kobj_to_cache(k); type_show()
610 return sprintf(buf, "%s\n", cache_type_string(cache)); type_show()
619 struct cache *cache; level_show() local
622 cache = index->cache; level_show()
624 return sprintf(buf, "%d\n", cache->level); level_show()
633 struct cache *cache; shared_cpu_map_show() local
637 cache = index->cache; shared_cpu_map_show()
640 cpumask_pr_args(&cache->shared_cpu_map)); shared_cpu_map_show()
651 * minimum data required to uniquely identify a cache.
660 /* Attributes which should be created if the cache device node has the
684 struct cache *cache; cacheinfo_create_index_opt_attrs() local
692 cache = dir->cache; cacheinfo_create_index_opt_attrs()
693 cache_name = cache->ofnode->full_name; cacheinfo_create_index_opt_attrs()
694 cache_type = cache_type_string(cache); cacheinfo_create_index_opt_attrs()
723 static void cacheinfo_create_index_dir(struct cache *cache, int index, cacheinfo_create_index_dir() argument
733 index_dir->cache = cache; cacheinfo_create_index_dir()
751 struct cache *cache_list) cacheinfo_sysfs_populate()
754 struct cache *cache; cacheinfo_sysfs_populate() local
761 cache = cache_list; cacheinfo_sysfs_populate()
762 while (cache) { cacheinfo_sysfs_populate()
763 cacheinfo_create_index_dir(cache, index, cache_dir); cacheinfo_sysfs_populate()
765 cache = cache->next_local; cacheinfo_sysfs_populate()
771 struct cache *cache; cacheinfo_cpu_online() local
773 cache = cache_chain_instantiate(cpu_id); cacheinfo_cpu_online()
774 if (!cache) cacheinfo_cpu_online()
777 cacheinfo_sysfs_populate(cpu_id, cache); cacheinfo_cpu_online()
780 /* functions needed to remove cache entry for cpu offline or suspend/resume */
785 static struct cache *cache_lookup_by_cpu(unsigned int cpu_id) cache_lookup_by_cpu()
788 struct cache *cache; cache_lookup_by_cpu() local
795 cache = cache_lookup_by_node(cpu_node); cache_lookup_by_cpu()
798 return cache; cache_lookup_by_cpu()
820 /* Remove cache dir from sysfs */ remove_cache_dir()
828 static void cache_cpu_clear(struct cache *cache, int cpu) cache_cpu_clear() argument
830 while (cache) { cache_cpu_clear()
831 struct cache *next = cache->next_local; cache_cpu_clear()
833 WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map), cache_cpu_clear()
835 cpu, cache->ofnode->full_name, cache_cpu_clear()
836 cache_type_string(cache)); cache_cpu_clear()
838 cpumask_clear_cpu(cpu, &cache->shared_cpu_map); cache_cpu_clear()
840 /* Release the cache object if all the cpus using it cache_cpu_clear()
842 if (cpumask_empty(&cache->shared_cpu_map)) cache_cpu_clear()
843 release_cache(cache); cache_cpu_clear()
845 cache = next; cache_cpu_clear()
852 struct cache *cache; cacheinfo_cpu_offline() local
864 /* clear the CPU's bit in its cache chain, possibly freeing cacheinfo_cpu_offline()
865 * cache objects */ cacheinfo_cpu_offline()
866 cache = cache_lookup_by_cpu(cpu_id); cacheinfo_cpu_offline()
867 if (cache) cacheinfo_cpu_offline()
868 cache_cpu_clear(cache, cpu_id); cacheinfo_cpu_offline()
l2cr_6xx.S
46 #include <asm/cache.h>
52 things. If you are enabling the cache, you must perform a
53 global invalidate. If you are disabling the cache, you must
54 flush the cache contents first. This routine takes care of
55 doing these things. When first enabling the cache, make sure
59 the cache, you should also set the L2E bit in applyThis. If
60 you want to modify the L2CR contents after the cache has been
62 __setL2CR(0) to disable the cache and then call it again with
65 _setL2CR(0) - disables the cache
67 - L2E set to turn on the cache
80 causes cache pushes from the L1 cache to go to the L2 cache
88 cache, and the invalidate is a control as well as status.
94 * flushes the cache if it was already enabled, always invalidates the
95 * cache, then enables the cache if the L2E bit is set in the value
125 * that invalidates the L2 cache tags.
145 /* Flush the cache. First, read the first 4MB of memory (physical) to
146 * put new data in the cache. (Actually we only need
147 * the size of the L2 cache plus the size of the L1 cache, but 4MB will
152 from getting into the cache. But since we invalidate
153 the next time we enable the cache it doesn't really matter.
185 addi r4,r4,32 /* Go to start of next cache line */
196 addi r4,r4,32 /* Go to start of next cache line */
244 /* See if we need to enable the cache */
248 /* Enable the cache */
287 * Here is a similar routine for dealing with the L3 cache
322 /* Flush the cache.
333 addi r4,r4,32 /* Go to start of next cache line */
376 /* See if we need to enable the cache */
380 /* Enable the cache */
408 /* flush_disable_L1() - Flush and disable L1 cache
420 /* Load counter to 0x4000 cache lines (512k) and
421 * load cache with datas
428 addi r3,r3,0x0020 /* Go to start of next cache line */
433 /* Now flush those cache lines */
439 addi r3,r3,0x0020 /* Go to start of next cache line */
443 /* We can now disable the L1 cache (HID0:DCE, HID0:ICE) */
451 /* inval_enable_L1 - Invalidate and enable L1 cache
458 /* Enable and then Flash inval the instruction & data cache */
eeh_cache.c
2 * PCI address cache; allows the lookup of PCI devices based on I/O address
33 * The pci address cache subsystem. This subsystem places
42 * In particular, the cache does *not* hold the addresses of devices
47 * with the penalty of slow pointer chases for d-cache misses).
106 * than print out the contents of our addr cache.
108 static void eeh_addr_cache_print(struct pci_io_addr_cache *cache) eeh_addr_cache_print() argument
113 n = rb_first(&cache->rb_root); eeh_addr_cache_print()
194 dev_dbg(&dev->dev, "EEH: Skip building address cache\n"); __eeh_addr_cache_insert_dev()
214 * eeh_addr_cache_insert_dev - Add a device to the address cache
218 * we maintain a cache of devices that can be quickly searched.
219 * This routine adds a device to that cache.
254 * eeh_addr_cache_rmv_dev - remove pci device from addr cache
257 * Remove a device from the addr-cache tree.
272 * eeh_addr_cache_build - Build a cache of I/O addresses
274 * Build a cache of pci i/o addresses. This cache will be used to
276 * This routine scans all pci busses to build the cache.
/linux-4.1.27/fs/cachefiles/
bind.c
1 /* Bind and unbind a cache from the filesystem backing it
28 * bind a directory as a cache
30 int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args) cachefiles_daemon_bind() argument
33 cache->frun_percent, cachefiles_daemon_bind()
34 cache->fcull_percent, cachefiles_daemon_bind()
35 cache->fstop_percent, cachefiles_daemon_bind()
36 cache->brun_percent, cachefiles_daemon_bind()
37 cache->bcull_percent, cachefiles_daemon_bind()
38 cache->bstop_percent, cachefiles_daemon_bind()
42 ASSERT(cache->fstop_percent >= 0 && cachefiles_daemon_bind()
43 cache->fstop_percent < cache->fcull_percent && cachefiles_daemon_bind()
44 cache->fcull_percent < cache->frun_percent && cachefiles_daemon_bind()
45 cache->frun_percent < 100); cachefiles_daemon_bind()
47 ASSERT(cache->bstop_percent >= 0 && cachefiles_daemon_bind()
48 cache->bstop_percent < cache->bcull_percent && cachefiles_daemon_bind()
49 cache->bcull_percent < cache->brun_percent && cachefiles_daemon_bind()
50 cache->brun_percent < 100); cachefiles_daemon_bind()
57 if (!cache->rootdirname) { cachefiles_daemon_bind()
58 pr_err("No cache directory specified\n"); cachefiles_daemon_bind()
63 if (test_bit(CACHEFILES_READY, &cache->flags)) { cachefiles_daemon_bind()
69 if (!cache->tag) { cachefiles_daemon_bind()
72 cache->tag = kstrdup("CacheFiles", GFP_KERNEL); cachefiles_daemon_bind()
73 if (!cache->tag) cachefiles_daemon_bind()
77 /* add the cache */ cachefiles_daemon_bind()
78 return cachefiles_daemon_add_cache(cache); cachefiles_daemon_bind()
82 * add a cache
84 static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache) cachefiles_daemon_add_cache() argument
96 ret = cachefiles_get_security_ID(cache); cachefiles_daemon_add_cache()
100 cachefiles_begin_secure(cache, &saved_cred); cachefiles_daemon_add_cache()
116 /* look up the directory at the root of the cache */ cachefiles_daemon_add_cache()
117 ret = kern_path(cache->rootdirname, LOOKUP_DIRECTORY, &path); cachefiles_daemon_add_cache()
121 cache->mnt = path.mnt; cachefiles_daemon_add_cache()
139 /* determine the security of the on-disk cache as this governs cachefiles_daemon_add_cache()
141 ret = cachefiles_determine_cache_security(cache, root, &saved_cred); cachefiles_daemon_add_cache()
145 /* get the cache size and blocksize */ cachefiles_daemon_add_cache()
158 cache->bsize = stats.f_bsize; cachefiles_daemon_add_cache()
159 cache->bshift = 0; cachefiles_daemon_add_cache()
161 cache->bshift = PAGE_SHIFT - ilog2(stats.f_bsize); cachefiles_daemon_add_cache()
164 cache->bsize, cache->bshift); cachefiles_daemon_add_cache()
172 cache->fstop = stats.f_files * cache->fstop_percent; cachefiles_daemon_add_cache()
173 cache->fcull = stats.f_files * cache->fcull_percent; cachefiles_daemon_add_cache()
174 cache->frun = stats.f_files * cache->frun_percent; cachefiles_daemon_add_cache()
177 (unsigned long long) cache->frun, cachefiles_daemon_add_cache()
178 (unsigned long long) cache->fcull, cachefiles_daemon_add_cache()
179 (unsigned long long) cache->fstop); cachefiles_daemon_add_cache()
181 stats.f_blocks >>= cache->bshift; cachefiles_daemon_add_cache()
183 cache->bstop = stats.f_blocks * cache->bstop_percent; cachefiles_daemon_add_cache()
184 cache->bcull = stats.f_blocks * cache->bcull_percent; cachefiles_daemon_add_cache()
185 cache->brun = stats.f_blocks * cache->brun_percent; cachefiles_daemon_add_cache()
188 (unsigned long long) cache->brun, cachefiles_daemon_add_cache()
189 (unsigned long long) cache->bcull, cachefiles_daemon_add_cache()
190 (unsigned long long) cache->bstop); cachefiles_daemon_add_cache()
192 /* get the cache directory and check its type */ cachefiles_daemon_add_cache()
193 cachedir = cachefiles_get_directory(cache, root, "cache"); cachefiles_daemon_add_cache()
207 graveyard = cachefiles_get_directory(cache, root, "graveyard"); cachefiles_daemon_add_cache()
213 cache->graveyard = graveyard; cachefiles_daemon_add_cache()
215 /* publish the cache */ cachefiles_daemon_add_cache()
216 fscache_init_cache(&cache->cache, cachefiles_daemon_add_cache()
221 fscache_object_init(&fsdef->fscache, NULL, &cache->cache); cachefiles_daemon_add_cache()
223 ret = fscache_add_cache(&cache->cache, &fsdef->fscache, cache->tag); cachefiles_daemon_add_cache()
228 set_bit(CACHEFILES_READY, &cache->flags); cachefiles_daemon_add_cache()
231 pr_info("File cache on %s registered\n", cache->cache.identifier); cachefiles_daemon_add_cache()
233 /* check how much space the cache has */ cachefiles_daemon_add_cache()
234 cachefiles_has_space(cache, 0, 0); cachefiles_daemon_add_cache()
235 cachefiles_end_secure(cache, saved_cred); cachefiles_daemon_add_cache()
239 dput(cache->graveyard); cachefiles_daemon_add_cache()
240 cache->graveyard = NULL; cachefiles_daemon_add_cache()
242 mntput(cache->mnt); cachefiles_daemon_add_cache()
243 cache->mnt = NULL; cachefiles_daemon_add_cache()
250 cachefiles_end_secure(cache, saved_cred); cachefiles_daemon_add_cache()
256 * unbind a cache on fd release
258 void cachefiles_daemon_unbind(struct cachefiles_cache *cache) cachefiles_daemon_unbind() argument
262 if (test_bit(CACHEFILES_READY, &cache->flags)) { cachefiles_daemon_unbind()
263 pr_info("File cache on %s unregistering\n", cachefiles_daemon_unbind()
264 cache->cache.identifier); cachefiles_daemon_unbind()
266 fscache_withdraw_cache(&cache->cache); cachefiles_daemon_unbind()
269 dput(cache->graveyard); cachefiles_daemon_unbind()
270 mntput(cache->mnt); cachefiles_daemon_unbind()
272 kfree(cache->rootdirname); cachefiles_daemon_unbind()
273 kfree(cache->secctx); cachefiles_daemon_unbind()
274 kfree(cache->tag); cachefiles_daemon_unbind()
daemon.c
63 int (*handler)(struct cachefiles_cache *cache, char *args);
89 struct cachefiles_cache *cache; cachefiles_daemon_open() local
101 /* allocate a cache record */ cachefiles_daemon_open()
102 cache = kzalloc(sizeof(struct cachefiles_cache), GFP_KERNEL); cachefiles_daemon_open()
103 if (!cache) { cachefiles_daemon_open()
108 mutex_init(&cache->daemon_mutex); cachefiles_daemon_open()
109 cache->active_nodes = RB_ROOT; cachefiles_daemon_open()
110 rwlock_init(&cache->active_lock); cachefiles_daemon_open()
111 init_waitqueue_head(&cache->daemon_pollwq); cachefiles_daemon_open()
118 cache->frun_percent = 7; cachefiles_daemon_open()
119 cache->fcull_percent = 5; cachefiles_daemon_open()
120 cache->fstop_percent = 1; cachefiles_daemon_open()
121 cache->brun_percent = 7; cachefiles_daemon_open()
122 cache->bcull_percent = 5; cachefiles_daemon_open()
123 cache->bstop_percent = 1; cachefiles_daemon_open()
125 file->private_data = cache; cachefiles_daemon_open()
126 cache->cachefilesd = file; cachefiles_daemon_open()
131 * release a cache
135 struct cachefiles_cache *cache = file->private_data; cachefiles_daemon_release() local
139 ASSERT(cache); cachefiles_daemon_release()
141 set_bit(CACHEFILES_DEAD, &cache->flags); cachefiles_daemon_release()
143 cachefiles_daemon_unbind(cache); cachefiles_daemon_release()
145 ASSERT(!cache->active_nodes.rb_node); cachefiles_daemon_release()
148 cache->cachefilesd = NULL; cachefiles_daemon_release()
152 kfree(cache); cachefiles_daemon_release()
159 * read the cache state
164 struct cachefiles_cache *cache = file->private_data; cachefiles_daemon_read() local
170 if (!test_bit(CACHEFILES_READY, &cache->flags)) cachefiles_daemon_read()
173 /* check how much space the cache has */ cachefiles_daemon_read()
174 cachefiles_has_space(cache, 0, 0); cachefiles_daemon_read()
177 clear_bit(CACHEFILES_STATE_CHANGED, &cache->flags); cachefiles_daemon_read()
187 test_bit(CACHEFILES_CULLING, &cache->flags) ? '1' : '0', cachefiles_daemon_read()
188 (unsigned long long) cache->frun, cachefiles_daemon_read()
189 (unsigned long long) cache->fcull, cachefiles_daemon_read()
190 (unsigned long long) cache->fstop, cachefiles_daemon_read()
191 (unsigned long long) cache->brun, cachefiles_daemon_read()
192 (unsigned long long) cache->bcull, cachefiles_daemon_read()
193 (unsigned long long) cache->bstop cachefiles_daemon_read()
206 * command the cache
214 struct cachefiles_cache *cache = file->private_data; cachefiles_daemon_write() local
220 ASSERT(cache); cachefiles_daemon_write()
222 if (test_bit(CACHEFILES_DEAD, &cache->flags)) cachefiles_daemon_write()
276 mutex_lock(&cache->daemon_mutex); cachefiles_daemon_write()
279 if (!test_bit(CACHEFILES_DEAD, &cache->flags)) cachefiles_daemon_write()
280 ret = cmd->handler(cache, args); cachefiles_daemon_write()
282 mutex_unlock(&cache->daemon_mutex); cachefiles_daemon_write()
296 struct cachefiles_cache *cache = file->private_data; cachefiles_daemon_poll() local
299 poll_wait(file, &cache->daemon_pollwq, poll); cachefiles_daemon_poll()
302 if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags)) cachefiles_daemon_poll()
305 if (test_bit(CACHEFILES_CULLING, &cache->flags)) cachefiles_daemon_poll()
312 * give a range error for cache space constraints
315 static int cachefiles_daemon_range_error(struct cachefiles_cache *cache, cachefiles_daemon_range_error() argument
327 static int cachefiles_daemon_frun(struct cachefiles_cache *cache, char *args) cachefiles_daemon_frun() argument
340 if (frun <= cache->fcull_percent || frun >= 100) cachefiles_daemon_frun()
341 return cachefiles_daemon_range_error(cache, args); cachefiles_daemon_frun()
343 cache->frun_percent = frun; cachefiles_daemon_frun()
351 static int cachefiles_daemon_fcull(struct cachefiles_cache *cache, char *args) cachefiles_daemon_fcull() argument
364 if (fcull <= cache->fstop_percent || fcull >= cache->frun_percent) cachefiles_daemon_fcull()
365 return cachefiles_daemon_range_error(cache, args); cachefiles_daemon_fcull()
367 cache->fcull_percent = fcull; cachefiles_daemon_fcull()
375 static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args) cachefiles_daemon_fstop() argument
388 if (fstop < 0 || fstop >= cache->fcull_percent) cachefiles_daemon_fstop()
389 return cachefiles_daemon_range_error(cache, args); cachefiles_daemon_fstop()
391 cache->fstop_percent = fstop; cachefiles_daemon_fstop()
399 static int cachefiles_daemon_brun(struct cachefiles_cache *cache, char *args) cachefiles_daemon_brun() argument
412 if (brun <= cache->bcull_percent || brun >= 100) cachefiles_daemon_brun()
413 return cachefiles_daemon_range_error(cache, args); cachefiles_daemon_brun()
415 cache->brun_percent = brun; cachefiles_daemon_brun()
423 static int cachefiles_daemon_bcull(struct cachefiles_cache *cache, char *args) cachefiles_daemon_bcull() argument
436 if (bcull <= cache->bstop_percent || bcull >= cache->brun_percent) cachefiles_daemon_bcull()
437 return cachefiles_daemon_range_error(cache, args); cachefiles_daemon_bcull()
439 cache->bcull_percent = bcull; cachefiles_daemon_bcull()
447 static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args) cachefiles_daemon_bstop() argument
460 if (bstop < 0 || bstop >= cache->bcull_percent) cachefiles_daemon_bstop()
461 return cachefiles_daemon_range_error(cache, args); cachefiles_daemon_bstop()
463 cache->bstop_percent = bstop; cachefiles_daemon_bstop()
468 * set the cache directory
471 static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args) cachefiles_daemon_dir() argument
482 if (cache->rootdirname) { cachefiles_daemon_dir()
483 pr_err("Second cache directory specified\n"); cachefiles_daemon_dir()
491 cache->rootdirname = dir; cachefiles_daemon_dir()
496 * set the cache security context
499 static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args) cachefiles_daemon_secctx() argument
510 if (cache->secctx) { cachefiles_daemon_secctx()
519 cache->secctx = secctx; cachefiles_daemon_secctx()
524 * set the cache tag
527 static int cachefiles_daemon_tag(struct cachefiles_cache *cache, char *args) cachefiles_daemon_tag() argument
538 if (cache->tag) cachefiles_daemon_tag()
545 cache->tag = tag; cachefiles_daemon_tag()
550 * request a node in the cache be culled from the current working directory
553 static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args) cachefiles_daemon_cull() argument
564 if (!test_bit(CACHEFILES_READY, &cache->flags)) { cachefiles_daemon_cull()
565 pr_err("cull applied to unready cache\n"); cachefiles_daemon_cull()
569 if (test_bit(CACHEFILES_DEAD, &cache->flags)) { cachefiles_daemon_cull()
570 pr_err("cull applied to dead cache\n"); cachefiles_daemon_cull()
580 cachefiles_begin_secure(cache, &saved_cred); cachefiles_daemon_cull()
581 ret = cachefiles_cull(cache, path.dentry, args); cachefiles_daemon_cull()
582 cachefiles_end_secure(cache, saved_cred); cachefiles_daemon_cull()
602 static int cachefiles_daemon_debug(struct cachefiles_cache *cache, char *args) cachefiles_daemon_debug() argument
625 static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args) cachefiles_daemon_inuse() argument
636 if (!test_bit(CACHEFILES_READY, &cache->flags)) { cachefiles_daemon_inuse()
637 pr_err("inuse applied to unready cache\n"); cachefiles_daemon_inuse()
641 if (test_bit(CACHEFILES_DEAD, &cache->flags)) { cachefiles_daemon_inuse()
642 pr_err("inuse applied to dead cache\n"); cachefiles_daemon_inuse()
652 cachefiles_begin_secure(cache, &saved_cred); cachefiles_daemon_inuse()
653 ret = cachefiles_check_in_use(cache, path.dentry, args); cachefiles_daemon_inuse()
654 cachefiles_end_secure(cache, saved_cred); cachefiles_daemon_inuse()
672 * cache
674 int cachefiles_has_space(struct cachefiles_cache *cache, cachefiles_has_space() argument
679 .mnt = cache->mnt, cachefiles_has_space()
680 .dentry = cache->mnt->mnt_root, cachefiles_has_space()
685 // (unsigned long long) cache->frun, cachefiles_has_space()
686 // (unsigned long long) cache->fcull, cachefiles_has_space()
687 // (unsigned long long) cache->fstop, cachefiles_has_space()
688 // (unsigned long long) cache->brun, cachefiles_has_space()
689 // (unsigned long long) cache->bcull, cachefiles_has_space()
690 // (unsigned long long) cache->bstop, cachefiles_has_space()
699 cachefiles_io_error(cache, "statfs failed"); cachefiles_has_space()
704 stats.f_bavail >>= cache->bshift; cachefiles_has_space()
722 if (stats.f_ffree < cache->fstop || cachefiles_has_space()
723 stats.f_bavail < cache->bstop) cachefiles_has_space()
727 if (stats.f_ffree < cache->fcull || cachefiles_has_space()
728 stats.f_bavail < cache->bcull) cachefiles_has_space()
731 if (test_bit(CACHEFILES_CULLING, &cache->flags) && cachefiles_has_space()
732 stats.f_ffree >= cache->frun && cachefiles_has_space()
733 stats.f_bavail >= cache->brun && cachefiles_has_space()
734 test_and_clear_bit(CACHEFILES_CULLING, &cache->flags) cachefiles_has_space()
737 cachefiles_state_changed(cache); cachefiles_has_space()
744 if (!test_and_set_bit(CACHEFILES_CULLING, &cache->flags)) { cachefiles_has_space()
746 cachefiles_state_changed(cache); cachefiles_has_space()
security.c
17 * determine the security context within which we access the cache from within
20 int cachefiles_get_security_ID(struct cachefiles_cache *cache) cachefiles_get_security_ID() argument
25 _enter("{%s}", cache->secctx); cachefiles_get_security_ID()
33 if (cache->secctx) { cachefiles_get_security_ID()
34 ret = set_security_override_from_ctx(new, cache->secctx); cachefiles_get_security_ID()
43 cache->cache_cred = new; cachefiles_get_security_ID()
53 static int cachefiles_check_cache_dir(struct cachefiles_cache *cache, cachefiles_check_cache_dir() argument
74 * check the security details of the on-disk cache
79 int cachefiles_determine_cache_security(struct cachefiles_cache *cache, cachefiles_determine_cache_security() argument
88 /* duplicate the cache creds for COW (the override is currently in cachefiles_determine_cache_security()
94 cachefiles_end_secure(cache, *_saved_cred); cachefiles_determine_cache_security()
96 /* use the cache root dir's security context as the basis with cachefiles_determine_cache_security()
101 cachefiles_begin_secure(cache, _saved_cred); cachefiles_determine_cache_security()
106 put_cred(cache->cache_cred); cachefiles_determine_cache_security()
107 cache->cache_cred = new; cachefiles_determine_cache_security()
109 cachefiles_begin_secure(cache, _saved_cred); cachefiles_determine_cache_security()
110 ret = cachefiles_check_cache_dir(cache, root); cachefiles_determine_cache_security()
interface.c
32 struct cachefiles_cache *cache; cachefiles_alloc_object() local
38 cache = container_of(_cache, struct cachefiles_cache, cache); cachefiles_alloc_object()
40 _enter("{%s},%p,", cache->cache.identifier, cookie); cachefiles_alloc_object()
56 fscache_object_init(&object->fscache, cookie, &cache->cache); cachefiles_alloc_object()
105 fscache_object_destroyed(&cache->cache); cachefiles_alloc_object()
114 * attempt to look up the nominated node in this cache
121 struct cachefiles_cache *cache; cachefiles_lookup_object() local
127 cache = container_of(_object->cache, struct cachefiles_cache, cache); cachefiles_lookup_object()
136 cachefiles_begin_secure(cache, &saved_cred); cachefiles_lookup_object()
140 cachefiles_end_secure(cache, saved_cred); cachefiles_lookup_object()
202 struct cachefiles_cache *cache; cachefiles_update_object() local
210 cache = container_of(object->fscache.cache, struct cachefiles_cache, cachefiles_update_object()
211 cache); cachefiles_update_object()
240 cachefiles_begin_secure(cache, &saved_cred); cachefiles_update_object()
242 cachefiles_end_secure(cache, saved_cred); cachefiles_update_object()
254 struct cachefiles_cache *cache; cachefiles_drop_object() local
264 cache = container_of(object->fscache.cache, cachefiles_drop_object()
265 struct cachefiles_cache, cache); cachefiles_drop_object()
279 _object != cache->cache.fsdef cachefiles_drop_object()
282 cachefiles_begin_secure(cache, &saved_cred); cachefiles_drop_object()
283 cachefiles_delete_object(cache, object); cachefiles_drop_object()
284 cachefiles_end_secure(cache, saved_cred); cachefiles_drop_object()
295 write_lock(&cache->active_lock); cachefiles_drop_object()
299 rb_erase(&object->active_node, &cache->active_nodes); cachefiles_drop_object()
301 write_unlock(&cache->active_lock); cachefiles_drop_object()
316 struct fscache_cache *cache; cachefiles_put_object() local
349 cache = object->fscache.cache; cachefiles_put_object()
352 fscache_object_destroyed(cache); cachefiles_put_object()
359 * sync a cache
363 struct cachefiles_cache *cache; cachefiles_sync_cache() local
369 cache = container_of(_cache, struct cachefiles_cache, cache); cachefiles_sync_cache()
373 cachefiles_begin_secure(cache, &saved_cred); cachefiles_sync_cache()
374 down_read(&cache->mnt->mnt_sb->s_umount); cachefiles_sync_cache()
375 ret = sync_filesystem(cache->mnt->mnt_sb); cachefiles_sync_cache()
376 up_read(&cache->mnt->mnt_sb->s_umount); cachefiles_sync_cache()
377 cachefiles_end_secure(cache, saved_cred); cachefiles_sync_cache()
380 cachefiles_io_error(cache, cachefiles_sync_cache()
387 * check if the backing cache is updated to FS-Cache
388 * - called by FS-Cache when evaluates if need to invalidate the cache
393 struct cachefiles_cache *cache; cachefiles_check_consistency() local
400 cache = container_of(object->fscache.cache, cachefiles_check_consistency()
401 struct cachefiles_cache, cache); cachefiles_check_consistency()
403 cachefiles_begin_secure(cache, &saved_cred); cachefiles_check_consistency()
405 cachefiles_end_secure(cache, saved_cred); cachefiles_check_consistency()
418 struct cachefiles_cache *cache; cachefiles_attr_changed() local
431 cache = container_of(object->fscache.cache, cachefiles_attr_changed()
432 struct cachefiles_cache, cache); cachefiles_attr_changed()
448 cachefiles_begin_secure(cache, &saved_cred); cachefiles_attr_changed()
469 cachefiles_end_secure(cache, saved_cred); cachefiles_attr_changed()
487 struct cachefiles_cache *cache; cachefiles_invalidate_object() local
494 cache = container_of(object->fscache.cache, cachefiles_invalidate_object()
495 struct cachefiles_cache, cache); cachefiles_invalidate_object()
509 path.mnt = cache->mnt; cachefiles_invalidate_object()
511 cachefiles_begin_secure(cache, &saved_cred); cachefiles_invalidate_object()
515 cachefiles_end_secure(cache, saved_cred); cachefiles_invalidate_object()
530 * dissociate a cache from all the pages it was backing
532 static void cachefiles_dissociate_pages(struct fscache_cache *cache) cachefiles_dissociate_pages() argument
internal.h
1 /* General netfs cache on cache files internal defs
19 #include <linux/fscache-cache.h>
57 * Cache files cache definition
60 struct fscache_cache cache; /* FS-Cache record */ member in struct:cachefiles_cache
61 struct vfsmount *mnt; /* mountpoint holding the cache */
64 const struct cred *cache_cred; /* security override for accessing cache */
76 unsigned bsize; /* cache's block size */
85 #define CACHEFILES_READY 0 /* T if cache prepared */
86 #define CACHEFILES_DEAD 1 /* T if cache dead */
89 char *rootdirname; /* name of cache root directory */
91 char *tag; /* cache binding tag */
128 static inline void cachefiles_state_changed(struct cachefiles_cache *cache) cachefiles_state_changed() argument
130 set_bit(CACHEFILES_STATE_CHANGED, &cache->flags); cachefiles_state_changed()
131 wake_up_all(&cache->daemon_pollwq); cachefiles_state_changed()
137 extern int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args);
138 extern void cachefiles_daemon_unbind(struct cachefiles_cache *cache);
145 extern int cachefiles_has_space(struct cachefiles_cache *cache,
161 extern int cachefiles_delete_object(struct cachefiles_cache *cache,
167 extern struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
171 extern int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
174 extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
220 extern int cachefiles_get_security_ID(struct cachefiles_cache *cache);
221 extern int cachefiles_determine_cache_security(struct cachefiles_cache *cache,
225 static inline void cachefiles_begin_secure(struct cachefiles_cache *cache, cachefiles_begin_secure() argument
228 *_saved_cred = override_creds(cache->cache_cred); cachefiles_begin_secure()
231 static inline void cachefiles_end_secure(struct cachefiles_cache *cache, cachefiles_end_secure() argument
248 extern int cachefiles_remove_object_xattr(struct cachefiles_cache *cache,
259 fscache_io_error(&(___cache)->cache); \
267 ___cache = container_of((object)->fscache.cache, \
268 struct cachefiles_cache, cache); \
namei.c
99 static void cachefiles_mark_object_buried(struct cachefiles_cache *cache, cachefiles_mark_object_buried() argument
107 write_lock(&cache->active_lock); cachefiles_mark_object_buried()
109 p = cache->active_nodes.rb_node; cachefiles_mark_object_buried()
120 write_unlock(&cache->active_lock); cachefiles_mark_object_buried()
139 write_unlock(&cache->active_lock); cachefiles_mark_object_buried()
146 static int cachefiles_mark_object_active(struct cachefiles_cache *cache, cachefiles_mark_object_active() argument
156 write_lock(&cache->active_lock); cachefiles_mark_object_active()
165 _p = &cache->active_nodes.rb_node; cachefiles_mark_object_active()
182 rb_insert_color(&object->active_node, &cache->active_nodes); cachefiles_mark_object_active()
184 write_unlock(&cache->active_lock); cachefiles_mark_object_active()
198 write_unlock(&cache->active_lock); cachefiles_mark_object_active()
248 cache->cache.ops->put_object(&xobject->fscache); cachefiles_mark_object_active()
253 cache->cache.ops->put_object(&xobject->fscache); cachefiles_mark_object_active()
259 * delete an object representation from the cache
265 static int cachefiles_bury_object(struct cachefiles_cache *cache, cachefiles_bury_object() argument
283 path.mnt = cache->mnt; cachefiles_bury_object()
287 cachefiles_io_error(cache, "Unlink security error"); cachefiles_bury_object()
292 cachefiles_mark_object_buried(cache, rep); cachefiles_bury_object()
298 cachefiles_io_error(cache, "Unlink failed"); cachefiles_bury_object()
312 (uint32_t) atomic_inc_return(&cache->gravecounter)); cachefiles_bury_object()
315 trap = lock_rename(cache->graveyard, dir); cachefiles_bury_object()
321 unlock_rename(cache->graveyard, dir); cachefiles_bury_object()
326 if (!d_can_lookup(cache->graveyard)) { cachefiles_bury_object()
327 unlock_rename(cache->graveyard, dir); cachefiles_bury_object()
328 cachefiles_io_error(cache, "Graveyard no longer a directory"); cachefiles_bury_object()
333 unlock_rename(cache->graveyard, dir); cachefiles_bury_object()
334 cachefiles_io_error(cache, "May not make directory loop"); cachefiles_bury_object()
339 unlock_rename(cache->graveyard, dir); cachefiles_bury_object()
340 cachefiles_io_error(cache, "Mountpoint in cache"); cachefiles_bury_object()
344 grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer)); cachefiles_bury_object()
346 unlock_rename(cache->graveyard, dir); cachefiles_bury_object()
353 cachefiles_io_error(cache, "Lookup error %ld", cachefiles_bury_object()
359 unlock_rename(cache->graveyard, dir); cachefiles_bury_object()
367 unlock_rename(cache->graveyard, dir); cachefiles_bury_object()
369 cachefiles_io_error(cache, "Mountpoint in graveyard"); cachefiles_bury_object()
375 unlock_rename(cache->graveyard, dir); cachefiles_bury_object()
377 cachefiles_io_error(cache, "May not make directory loop"); cachefiles_bury_object()
382 path.mnt = cache->mnt; cachefiles_bury_object()
384 path_to_graveyard.mnt = cache->mnt; cachefiles_bury_object()
385 path_to_graveyard.dentry = cache->graveyard; cachefiles_bury_object()
388 cachefiles_io_error(cache, "Rename security error %d", ret); cachefiles_bury_object()
391 d_inode(cache->graveyard), grave, NULL, 0); cachefiles_bury_object()
393 cachefiles_io_error(cache, cachefiles_bury_object()
397 cachefiles_mark_object_buried(cache, rep); cachefiles_bury_object()
400 unlock_rename(cache->graveyard, dir); cachefiles_bury_object()
407 * delete an object representation from the cache
409 int cachefiles_delete_object(struct cachefiles_cache *cache, cachefiles_delete_object() argument
435 ret = cachefiles_bury_object(cache, dir, cachefiles_delete_object()
460 struct cachefiles_cache *cache; cachefiles_walk_to_object() local
471 cache = container_of(parent->fscache.cache, cachefiles_walk_to_object()
472 struct cachefiles_cache, cache); cachefiles_walk_to_object()
473 path.mnt = cache->mnt; cachefiles_walk_to_object()
523 ret = cachefiles_has_space(cache, 1, 0); cachefiles_walk_to_object()
552 ret = cachefiles_has_space(cache, 1, 0); cachefiles_walk_to_object()
605 ret = cachefiles_bury_object(cache, dir, next, true); cachefiles_walk_to_object()
618 ret = cachefiles_mark_object_active(cache, object); cachefiles_walk_to_object()
668 cachefiles_io_error(cache, "Create/mkdir failed"); cachefiles_walk_to_object()
677 write_lock(&cache->active_lock); cachefiles_walk_to_object()
678 rb_erase(&object->active_node, &cache->active_nodes); cachefiles_walk_to_object()
681 write_unlock(&cache->active_lock); cachefiles_walk_to_object()
695 cachefiles_io_error(cache, "Lookup failed"); cachefiles_walk_to_object()
710 struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache, cachefiles_get_directory() argument
738 ret = cachefiles_has_space(cache, 1, 0); cachefiles_get_directory()
744 path.mnt = cache->mnt; cachefiles_get_directory()
816 static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache, cachefiles_check_active() argument
852 read_lock(&cache->active_lock); cachefiles_check_active()
854 _n = cache->active_nodes.rb_node; cachefiles_check_active()
867 read_unlock(&cache->active_lock); cachefiles_check_active()
873 read_unlock(&cache->active_lock); cachefiles_check_active()
889 cachefiles_io_error(cache, "Lookup failed"); cachefiles_check_active()
901 * - called only by cache manager daemon
903 int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir, cachefiles_cull() argument
911 victim = cachefiles_check_active(cache, dir, filename); cachefiles_cull()
923 ret = cachefiles_remove_object_xattr(cache, victim); cachefiles_cull()
930 ret = cachefiles_bury_object(cache, dir, victim, false); cachefiles_cull()
959 * - called only by cache manager daemon
962 int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir, cachefiles_check_in_use() argument
970 victim = cachefiles_check_active(cache, dir, filename); cachefiles_check_in_use()
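
The cachefiles_bury_object() hits above show the kernel's standard pattern for moving a dentry between two directories: take both parents with lock_rename(), refuse the operation if the returned trap dentry equals the source or the target (that would create a directory loop), look up the destination name, call vfs_rename(), then unlock_rename(). Below is a stripped-down, kernel-style sketch of that pattern; it is illustrative only (no mountpoint or re-lookup checks, hypothetical function name) and is not the cachefiles implementation.

        #include <linux/fs.h>
        #include <linux/namei.h>
        #include <linux/dcache.h>
        #include <linux/err.h>
        #include <linux/string.h>

        static int example_move_to_graveyard(struct dentry *graveyard, struct dentry *dir,
                                             struct dentry *victim, const char *gravename)
        {
                struct dentry *trap, *grave;
                int ret;

                /* lock both parent directories; the returned "trap" is the one
                 * that is an ancestor of the other, if any */
                trap = lock_rename(graveyard, dir);
                if (trap == victim) {
                        unlock_rename(graveyard, dir);
                        return -EIO;            /* would create a directory loop */
                }

                grave = lookup_one_len(gravename, graveyard, strlen(gravename));
                if (IS_ERR(grave)) {
                        unlock_rename(graveyard, dir);
                        return PTR_ERR(grave);
                }
                if (trap == grave) {
                        dput(grave);
                        unlock_rename(graveyard, dir);
                        return -EIO;            /* same loop check for the target */
                }

                /* same trailing-argument convention as the vfs_rename() hit above */
                ret = vfs_rename(d_inode(dir), victim, d_inode(graveyard), grave, NULL, 0);

                dput(grave);
                unlock_rename(graveyard, dir);
                return ret;
        }
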
/linux-4.1.27/arch/cris/include/asm/
H A Dcache.h4 #include <arch/cache.h>
/linux-4.1.27/drivers/md/
H A Ddm-cache-target.c10 #include "dm-cache-metadata.h"
21 #define DM_MSG_PREFIX "cache"
24 "A percentage of time allocated for copying to and/or from cache");
32 * cblock: index of a cache block
33 * promotion: movement of a block from origin to cache
34 * demotion: movement of a block from cache to origin
35 * migration: movement of a block between the origin and cache device,
104 * The block size of the device holding cache data must be
111 * FIXME: the cache is read/write for the time being.
121 * dirty. If you lose the cache device you will lose data.
127 * Data is written to both cache and origin. Blocks are never
133 * A degraded mode useful for various cache coherency situations
135 * origin. If a write goes to a cached oblock, then the cache
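
A tiny, self-contained C illustration (not dm-cache code; the names here are invented) of what the three io modes described above imply for a write that hits a block already held in the cache: writethrough updates both copies and never leaves the block dirty, writeback updates only the cache copy and marks it dirty, and passthrough bypasses the cache and invalidates the stale copy.

        #include <stdbool.h>
        #include <stdio.h>

        enum io_mode { WRITETHROUGH, WRITEBACK, PASSTHROUGH };

        static void write_hit(enum io_mode mode, bool *dirty, bool *still_cached)
        {
                switch (mode) {
                case WRITETHROUGH:      /* data goes to cache and origin; never dirty */
                        *dirty = false; *still_cached = true; break;
                case WRITEBACK:         /* data goes to the cache only; block becomes dirty */
                        *dirty = true; *still_cached = true; break;
                case PASSTHROUGH:       /* cache is bypassed; the cached copy is invalidated */
                        *dirty = false; *still_cached = false; break;
                }
        }

        int main(void)
        {
                bool dirty, cached;

                write_hit(WRITEBACK, &dirty, &cached);
                printf("writeback hit: dirty=%d cached=%d\n", dirty, cached);
                return 0;
        }
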
178 struct cache { struct
206 * Size of the cache device in blocks.
301 * structure and the 'cache' member must be the first as it
304 struct cache *cache; member in struct:per_bio_data
311 struct cache *cache; member in struct:dm_cache_migration
341 static void wake_worker(struct cache *cache) wake_worker() argument
343 queue_work(cache->wq, &cache->worker); wake_worker()
348 static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache) alloc_prison_cell() argument
351 return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT); alloc_prison_cell()
354 static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell) free_prison_cell() argument
356 dm_bio_prison_free_cell(cache->prison, cell); free_prison_cell()
359 static struct dm_cache_migration *alloc_migration(struct cache *cache) alloc_migration() argument
363 mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT); alloc_migration()
365 mg->cache = cache; alloc_migration()
366 atomic_inc(&mg->cache->nr_allocated_migrations); alloc_migration()
374 if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations)) free_migration()
375 wake_up(&mg->cache->migration_wait); free_migration()
377 mempool_free(mg, mg->cache->migration_pool); free_migration()
380 static int prealloc_data_structs(struct cache *cache, struct prealloc *p) prealloc_data_structs() argument
383 p->mg = alloc_migration(cache); prealloc_data_structs()
389 p->cell1 = alloc_prison_cell(cache); prealloc_data_structs()
395 p->cell2 = alloc_prison_cell(cache); prealloc_data_structs()
403 static void prealloc_free_structs(struct cache *cache, struct prealloc *p) prealloc_free_structs() argument
406 free_prison_cell(cache, p->cell2); prealloc_free_structs()
409 free_prison_cell(cache, p->cell1); prealloc_free_structs()
479 static int bio_detain_range(struct cache *cache, dm_oblock_t oblock_begin, dm_oblock_t oblock_end, bio_detain_range() argument
488 r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result); bio_detain_range()
495 static int bio_detain(struct cache *cache, dm_oblock_t oblock, bio_detain() argument
501 return bio_detain_range(cache, oblock, end, bio, bio_detain()
505 static int get_cell(struct cache *cache, get_cell() argument
517 r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result); get_cell()
526 static bool is_dirty(struct cache *cache, dm_cblock_t b) is_dirty() argument
528 return test_bit(from_cblock(b), cache->dirty_bitset); is_dirty()
531 static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock) set_dirty() argument
533 if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) { set_dirty()
534 atomic_inc(&cache->nr_dirty); set_dirty()
535 policy_set_dirty(cache->policy, oblock); set_dirty()
539 static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock) clear_dirty() argument
541 if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) { clear_dirty()
542 policy_clear_dirty(cache->policy, oblock); clear_dirty()
543 if (atomic_dec_return(&cache->nr_dirty) == 0) clear_dirty()
544 dm_table_event(cache->ti->table); clear_dirty()
550 static bool block_size_is_power_of_two(struct cache *cache) block_size_is_power_of_two() argument
552 return cache->sectors_per_block_shift >= 0; block_size_is_power_of_two()
566 static dm_block_t oblocks_per_dblock(struct cache *cache) oblocks_per_dblock() argument
568 dm_block_t oblocks = cache->discard_block_size; oblocks_per_dblock()
570 if (block_size_is_power_of_two(cache)) oblocks_per_dblock()
571 oblocks >>= cache->sectors_per_block_shift; oblocks_per_dblock()
573 oblocks = block_div(oblocks, cache->sectors_per_block); oblocks_per_dblock()
578 static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock) oblock_to_dblock() argument
581 oblocks_per_dblock(cache))); oblock_to_dblock()
584 static dm_oblock_t dblock_to_oblock(struct cache *cache, dm_dblock_t dblock) dblock_to_oblock() argument
586 return to_oblock(from_dblock(dblock) * oblocks_per_dblock(cache)); dblock_to_oblock()
589 static void set_discard(struct cache *cache, dm_dblock_t b) set_discard() argument
593 BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks)); set_discard()
594 atomic_inc(&cache->stats.discard_count); set_discard()
596 spin_lock_irqsave(&cache->lock, flags); set_discard()
597 set_bit(from_dblock(b), cache->discard_bitset); set_discard()
598 spin_unlock_irqrestore(&cache->lock, flags); set_discard()
601 static void clear_discard(struct cache *cache, dm_dblock_t b) clear_discard() argument
605 spin_lock_irqsave(&cache->lock, flags); clear_discard()
606 clear_bit(from_dblock(b), cache->discard_bitset); clear_discard()
607 spin_unlock_irqrestore(&cache->lock, flags); clear_discard()
610 static bool is_discarded(struct cache *cache, dm_dblock_t b) is_discarded() argument
615 spin_lock_irqsave(&cache->lock, flags); is_discarded()
616 r = test_bit(from_dblock(b), cache->discard_bitset); is_discarded()
617 spin_unlock_irqrestore(&cache->lock, flags); is_discarded()
622 static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b) is_discarded_oblock() argument
627 spin_lock_irqsave(&cache->lock, flags); is_discarded_oblock()
628 r = test_bit(from_dblock(oblock_to_dblock(cache, b)), is_discarded_oblock()
629 cache->discard_bitset); is_discarded_oblock()
630 spin_unlock_irqrestore(&cache->lock, flags); is_discarded_oblock()
637 static void load_stats(struct cache *cache) load_stats() argument
641 dm_cache_metadata_get_stats(cache->cmd, &stats); load_stats()
642 atomic_set(&cache->stats.read_hit, stats.read_hits); load_stats()
643 atomic_set(&cache->stats.read_miss, stats.read_misses); load_stats()
644 atomic_set(&cache->stats.write_hit, stats.write_hits); load_stats()
645 atomic_set(&cache->stats.write_miss, stats.write_misses); load_stats()
648 static void save_stats(struct cache *cache) save_stats() argument
652 stats.read_hits = atomic_read(&cache->stats.read_hit); save_stats()
653 stats.read_misses = atomic_read(&cache->stats.read_miss); save_stats()
654 stats.write_hits = atomic_read(&cache->stats.write_hit); save_stats()
655 stats.write_misses = atomic_read(&cache->stats.write_miss); save_stats()
657 dm_cache_metadata_set_stats(cache->cmd, &stats); save_stats()
667 #define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
685 static size_t get_per_bio_data_size(struct cache *cache) get_per_bio_data_size() argument
687 return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB; get_per_bio_data_size()
711 static void remap_to_origin(struct cache *cache, struct bio *bio) remap_to_origin() argument
713 bio->bi_bdev = cache->origin_dev->bdev; remap_to_origin()
716 static void remap_to_cache(struct cache *cache, struct bio *bio, remap_to_cache() argument
722 bio->bi_bdev = cache->cache_dev->bdev; remap_to_cache()
723 if (!block_size_is_power_of_two(cache)) remap_to_cache()
725 (block * cache->sectors_per_block) + remap_to_cache()
726 sector_div(bi_sector, cache->sectors_per_block); remap_to_cache()
729 (block << cache->sectors_per_block_shift) | remap_to_cache()
730 (bi_sector & (cache->sectors_per_block - 1)); remap_to_cache()
733 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) check_if_tick_bio_needed() argument
736 size_t pb_data_size = get_per_bio_data_size(cache); check_if_tick_bio_needed()
739 spin_lock_irqsave(&cache->lock, flags); check_if_tick_bio_needed()
740 if (cache->need_tick_bio && check_if_tick_bio_needed()
743 cache->need_tick_bio = false; check_if_tick_bio_needed()
745 spin_unlock_irqrestore(&cache->lock, flags); check_if_tick_bio_needed()
748 static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio, remap_to_origin_clear_discard() argument
751 check_if_tick_bio_needed(cache, bio); remap_to_origin_clear_discard()
752 remap_to_origin(cache, bio); remap_to_origin_clear_discard()
754 clear_discard(cache, oblock_to_dblock(cache, oblock)); remap_to_origin_clear_discard()
757 static void remap_to_cache_dirty(struct cache *cache, struct bio *bio, remap_to_cache_dirty() argument
760 check_if_tick_bio_needed(cache, bio); remap_to_cache_dirty()
761 remap_to_cache(cache, bio, cblock); remap_to_cache_dirty()
763 set_dirty(cache, oblock, cblock); remap_to_cache_dirty()
764 clear_discard(cache, oblock_to_dblock(cache, oblock)); remap_to_cache_dirty()
768 static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) get_bio_block() argument
772 if (!block_size_is_power_of_two(cache)) get_bio_block()
773 (void) sector_div(block_nr, cache->sectors_per_block); get_bio_block()
775 block_nr >>= cache->sectors_per_block_shift; get_bio_block()
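
remap_to_cache() and get_bio_block() above do the same sector/block arithmetic two ways: a shift and mask when the cache block size is a power of two, and a division otherwise. A standalone userspace sketch of that arithmetic, with invented names:

        #include <stdint.h>
        #include <stdio.h>

        struct geom {
                uint32_t sectors_per_block;
                int      sectors_per_block_shift;       /* -1 if not a power of two */
        };

        static uint64_t sector_to_block(const struct geom *g, uint64_t sector)
        {
                if (g->sectors_per_block_shift < 0)
                        return sector / g->sectors_per_block;
                return sector >> g->sectors_per_block_shift;
        }

        static uint64_t block_to_cache_sector(const struct geom *g, uint64_t cblock,
                                              uint64_t bio_sector)
        {
                if (g->sectors_per_block_shift < 0)
                        return cblock * g->sectors_per_block +
                               bio_sector % g->sectors_per_block;
                return (cblock << g->sectors_per_block_shift) |
                       (bio_sector & (g->sectors_per_block - 1));
        }

        int main(void)
        {
                struct geom g = { .sectors_per_block = 512, .sectors_per_block_shift = 9 };

                printf("sector 1048 -> origin block %llu\n",
                       (unsigned long long)sector_to_block(&g, 1048));
                printf("cache block 3, sector 1048 -> cache sector %llu\n",
                       (unsigned long long)block_to_cache_sector(&g, 3, 1048));
                return 0;
        }
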
780 static int bio_triggers_commit(struct cache *cache, struct bio *bio) bio_triggers_commit() argument
789 static void inc_ds(struct cache *cache, struct bio *bio, inc_ds() argument
792 size_t pb_data_size = get_per_bio_data_size(cache); inc_ds()
798 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); inc_ds()
801 static void issue(struct cache *cache, struct bio *bio) issue() argument
805 if (!bio_triggers_commit(cache, bio)) { issue()
814 spin_lock_irqsave(&cache->lock, flags); issue()
815 cache->commit_requested = true; issue()
816 bio_list_add(&cache->deferred_flush_bios, bio); issue()
817 spin_unlock_irqrestore(&cache->lock, flags); issue()
820 static void inc_and_issue(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell *cell) inc_and_issue() argument
822 inc_ds(cache, bio, cell); inc_and_issue()
823 issue(cache, bio); inc_and_issue()
826 static void defer_writethrough_bio(struct cache *cache, struct bio *bio) defer_writethrough_bio() argument
830 spin_lock_irqsave(&cache->lock, flags); defer_writethrough_bio()
831 bio_list_add(&cache->deferred_writethrough_bios, bio); defer_writethrough_bio()
832 spin_unlock_irqrestore(&cache->lock, flags); defer_writethrough_bio()
834 wake_worker(cache); defer_writethrough_bio()
849 remap_to_cache(pb->cache, bio, pb->cblock); writethrough_endio()
856 defer_writethrough_bio(pb->cache, bio); writethrough_endio()
861 * to both the cache and origin devices. In future we'd like to clone the
865 static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, remap_to_origin_then_cache() argument
870 pb->cache = cache; remap_to_origin_then_cache()
875 remap_to_origin_clear_discard(pb->cache, bio, oblock); remap_to_origin_then_cache()
881 * Migration covers moving data from the origin device to the cache, or
884 static void inc_io_migrations(struct cache *cache) inc_io_migrations() argument
886 atomic_inc(&cache->nr_io_migrations); inc_io_migrations()
889 static void dec_io_migrations(struct cache *cache) dec_io_migrations() argument
891 atomic_dec(&cache->nr_io_migrations); dec_io_migrations()
894 static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, __cell_defer() argument
898 (cache->prison, cell, &cache->deferred_bios); __cell_defer()
899 free_prison_cell(cache, cell); __cell_defer()
902 static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, cell_defer() argument
907 spin_lock_irqsave(&cache->lock, flags); cell_defer()
908 __cell_defer(cache, cell, holder); cell_defer()
909 spin_unlock_irqrestore(&cache->lock, flags); cell_defer()
911 wake_worker(cache); cell_defer()
916 dec_io_migrations(mg->cache); free_io_migration()
922 struct cache *cache = mg->cache; migration_failure() local
926 set_dirty(cache, mg->old_oblock, mg->cblock); migration_failure()
927 cell_defer(cache, mg->old_ocell, false); migration_failure()
931 policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock); migration_failure()
933 cell_defer(cache, mg->old_ocell, mg->promote ? false : true); migration_failure()
935 cell_defer(cache, mg->new_ocell, true); migration_failure()
938 policy_remove_mapping(cache->policy, mg->new_oblock); migration_failure()
939 cell_defer(cache, mg->new_ocell, true); migration_failure()
948 struct cache *cache = mg->cache; migration_success_pre_commit() local
951 clear_dirty(cache, mg->old_oblock, mg->cblock); migration_success_pre_commit()
952 cell_defer(cache, mg->old_ocell, false); migration_success_pre_commit()
957 if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) { migration_success_pre_commit()
959 policy_force_mapping(cache->policy, mg->new_oblock, migration_success_pre_commit()
962 cell_defer(cache, mg->new_ocell, true); migration_success_pre_commit()
967 if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) { migration_success_pre_commit()
969 policy_remove_mapping(cache->policy, mg->new_oblock); migration_success_pre_commit()
975 spin_lock_irqsave(&cache->lock, flags); migration_success_pre_commit()
976 list_add_tail(&mg->list, &cache->need_commit_migrations); migration_success_pre_commit()
977 cache->commit_requested = true; migration_success_pre_commit()
978 spin_unlock_irqrestore(&cache->lock, flags); migration_success_pre_commit()
984 struct cache *cache = mg->cache; migration_success_post_commit() local
991 cell_defer(cache, mg->old_ocell, mg->promote ? false : true); migration_success_post_commit()
996 spin_lock_irqsave(&cache->lock, flags); migration_success_post_commit()
997 list_add_tail(&mg->list, &cache->quiesced_migrations); migration_success_post_commit()
998 spin_unlock_irqrestore(&cache->lock, flags); migration_success_post_commit()
1002 policy_remove_mapping(cache->policy, mg->old_oblock); migration_success_post_commit()
1008 clear_dirty(cache, mg->new_oblock, mg->cblock); migration_success_post_commit()
1009 cell_defer(cache, mg->new_ocell, true); migration_success_post_commit()
1014 set_dirty(cache, mg->new_oblock, mg->cblock); migration_success_post_commit()
1016 cell_defer(cache, mg->new_ocell, false); migration_success_post_commit()
1026 struct cache *cache = mg->cache; copy_complete() local
1031 spin_lock_irqsave(&cache->lock, flags); copy_complete()
1032 list_add_tail(&mg->list, &cache->completed_migrations); copy_complete()
1033 spin_unlock_irqrestore(&cache->lock, flags); copy_complete()
1035 wake_worker(cache); copy_complete()
1042 struct cache *cache = mg->cache; issue_copy() local
1045 o_region.bdev = cache->origin_dev->bdev; issue_copy()
1046 o_region.count = cache->sectors_per_block; issue_copy()
1048 c_region.bdev = cache->cache_dev->bdev; issue_copy()
1049 c_region.sector = cblock * cache->sectors_per_block; issue_copy()
1050 c_region.count = cache->sectors_per_block; issue_copy()
1054 o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block; issue_copy()
1055 r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg); issue_copy()
1058 o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block; issue_copy()
1059 r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg); issue_copy()
1071 struct cache *cache = mg->cache; overwrite_endio() local
1072 size_t pb_data_size = get_per_bio_data_size(cache); overwrite_endio()
1083 spin_lock_irqsave(&cache->lock, flags); overwrite_endio()
1084 list_add_tail(&mg->list, &cache->completed_migrations); overwrite_endio()
1085 spin_unlock_irqrestore(&cache->lock, flags); overwrite_endio()
1087 wake_worker(cache); overwrite_endio()
1092 size_t pb_data_size = get_per_bio_data_size(mg->cache); issue_overwrite()
1096 remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock); issue_overwrite()
1105 static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) bio_writes_complete_block() argument
1108 (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); bio_writes_complete_block()
1113 atomic_inc(&mg->cache->stats.copies_avoided); avoid_copy()
1117 static void calc_discard_block_range(struct cache *cache, struct bio *bio, calc_discard_block_range() argument
1123 *b = to_dblock(dm_sector_div_up(sb, cache->discard_block_size)); calc_discard_block_range()
1125 if (se - sb < cache->discard_block_size) calc_discard_block_range()
1128 *e = to_dblock(block_div(se, cache->discard_block_size)); calc_discard_block_range()
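
calc_discard_block_range() rounds the start of the discarded region up to a discard-block boundary and the end down, so only discard blocks that are completely covered get marked; a discard smaller than one discard block yields an empty range. A minimal userspace rendering of that rounding:

        #include <stdint.h>
        #include <stdio.h>

        static void discard_block_range(uint64_t discard_block_size,
                                        uint64_t start_sector, uint64_t len_sectors,
                                        uint64_t *b, uint64_t *e)
        {
                uint64_t end_sector = start_sector + len_sectors;

                /* round the start up to the next discard-block boundary */
                *b = (start_sector + discard_block_size - 1) / discard_block_size;

                if (len_sectors < discard_block_size)
                        *e = *b;                        /* too small: empty range */
                else
                        *e = end_sector / discard_block_size;   /* round the end down */
        }

        int main(void)
        {
                uint64_t b, e;

                discard_block_range(128, 100, 1000, &b, &e);
                printf("discard blocks [%llu, %llu)\n",
                       (unsigned long long)b, (unsigned long long)e);
                return 0;
        }
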
1136 calc_discard_block_range(mg->cache, bio, &b, &e); issue_discard()
1138 set_discard(mg->cache, b); issue_discard()
1143 cell_defer(mg->cache, mg->new_ocell, false); issue_discard()
1150 struct cache *cache = mg->cache; issue_copy_or_discard() local
1158 avoid = !is_dirty(cache, mg->cblock) || issue_copy_or_discard()
1159 is_discarded_oblock(cache, mg->old_oblock); issue_copy_or_discard()
1163 avoid = is_discarded_oblock(cache, mg->new_oblock); issue_copy_or_discard()
1165 if (writeback_mode(&cache->features) && issue_copy_or_discard()
1166 !avoid && bio_writes_complete_block(cache, bio)) { issue_copy_or_discard()
1183 static void process_migrations(struct cache *cache, struct list_head *head, process_migrations() argument
1191 spin_lock_irqsave(&cache->lock, flags); process_migrations()
1193 spin_unlock_irqrestore(&cache->lock, flags); process_migrations()
1201 list_add_tail(&mg->list, &mg->cache->quiesced_migrations); __queue_quiesced_migration()
1207 struct cache *cache = mg->cache; queue_quiesced_migration() local
1209 spin_lock_irqsave(&cache->lock, flags); queue_quiesced_migration()
1211 spin_unlock_irqrestore(&cache->lock, flags); queue_quiesced_migration()
1213 wake_worker(cache); queue_quiesced_migration()
1216 static void queue_quiesced_migrations(struct cache *cache, struct list_head *work) queue_quiesced_migrations() argument
1221 spin_lock_irqsave(&cache->lock, flags); queue_quiesced_migrations()
1224 spin_unlock_irqrestore(&cache->lock, flags); queue_quiesced_migrations()
1226 wake_worker(cache); queue_quiesced_migrations()
1229 static void check_for_quiesced_migrations(struct cache *cache, check_for_quiesced_migrations() argument
1241 queue_quiesced_migrations(cache, &work); check_for_quiesced_migrations()
1246 if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list)) quiesce_migration()
1250 static void promote(struct cache *cache, struct prealloc *structs, promote() argument
1263 mg->cache = cache; promote()
1270 inc_io_migrations(cache); promote()
1274 static void writeback(struct cache *cache, struct prealloc *structs, writeback() argument
1287 mg->cache = cache; writeback()
1294 inc_io_migrations(cache); writeback()
1298 static void demote_then_promote(struct cache *cache, struct prealloc *structs, demote_then_promote() argument
1313 mg->cache = cache; demote_then_promote()
1321 inc_io_migrations(cache); demote_then_promote()
1326 * Invalidate a cache entry. No writeback occurs; any changes in the cache
1329 static void invalidate(struct cache *cache, struct prealloc *structs, invalidate() argument
1342 mg->cache = cache; invalidate()
1349 inc_io_migrations(cache); invalidate()
1353 static void discard(struct cache *cache, struct prealloc *structs, discard() argument
1365 mg->cache = cache; discard()
1376 static void defer_bio(struct cache *cache, struct bio *bio) defer_bio() argument
1380 spin_lock_irqsave(&cache->lock, flags); defer_bio()
1381 bio_list_add(&cache->deferred_bios, bio); defer_bio()
1382 spin_unlock_irqrestore(&cache->lock, flags); defer_bio()
1384 wake_worker(cache); defer_bio()
1387 static void process_flush_bio(struct cache *cache, struct bio *bio) process_flush_bio() argument
1389 size_t pb_data_size = get_per_bio_data_size(cache); process_flush_bio()
1394 remap_to_origin(cache, bio); process_flush_bio()
1396 remap_to_cache(cache, bio, 0); process_flush_bio()
1403 issue(cache, bio); process_flush_bio()
1406 static void process_discard_bio(struct cache *cache, struct prealloc *structs, process_discard_bio() argument
1413 calc_discard_block_range(cache, bio, &b, &e); process_discard_bio()
1420 r = bio_detain_range(cache, dblock_to_oblock(cache, b), dblock_to_oblock(cache, e), bio, cell_prealloc, process_discard_bio()
1426 discard(cache, structs, new_ocell); process_discard_bio()
1429 static bool spare_migration_bandwidth(struct cache *cache) spare_migration_bandwidth() argument
1431 sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) * spare_migration_bandwidth()
1432 cache->sectors_per_block; spare_migration_bandwidth()
1433 return current_volume < cache->migration_threshold; spare_migration_bandwidth()
1436 static void inc_hit_counter(struct cache *cache, struct bio *bio) inc_hit_counter() argument
1439 &cache->stats.read_hit : &cache->stats.write_hit); inc_hit_counter()
1442 static void inc_miss_counter(struct cache *cache, struct bio *bio) inc_miss_counter() argument
1445 &cache->stats.read_miss : &cache->stats.write_miss); inc_miss_counter()
1452 struct cache *cache; member in struct:old_oblock_lock
1469 return bio_detain(l->cache, b, NULL, cell_prealloc, cell_locker()
1474 static void process_bio(struct cache *cache, struct prealloc *structs, process_bio() argument
1479 dm_oblock_t block = get_bio_block(cache, bio); process_bio()
1482 bool passthrough = passthrough_mode(&cache->features); process_bio()
1490 r = bio_detain(cache, block, bio, cell_prealloc, process_bio()
1496 discarded_block = is_discarded_oblock(cache, block); process_bio()
1497 can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache)); process_bio()
1500 ool.cache = cache; process_bio()
1503 r = policy_map(cache->policy, block, true, can_migrate, discarded_block, process_bio()
1513 inc_miss_counter(cache, bio); process_bio()
1517 * invalidating any cache blocks that are written process_bio()
1522 atomic_inc(&cache->stats.demotion); process_bio()
1523 invalidate(cache, structs, block, lookup_result.cblock, new_ocell); process_bio()
1528 remap_to_origin_clear_discard(cache, bio, block); process_bio()
1529 inc_and_issue(cache, bio, new_ocell); process_bio()
1532 inc_hit_counter(cache, bio); process_bio()
1535 writethrough_mode(&cache->features) && process_bio()
1536 !is_dirty(cache, lookup_result.cblock)) { process_bio()
1537 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); process_bio()
1538 inc_and_issue(cache, bio, new_ocell); process_bio()
1541 remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); process_bio()
1542 inc_and_issue(cache, bio, new_ocell); process_bio()
1549 inc_miss_counter(cache, bio); process_bio()
1550 remap_to_origin_clear_discard(cache, bio, block); process_bio()
1551 inc_and_issue(cache, bio, new_ocell); process_bio()
1555 atomic_inc(&cache->stats.promotion); process_bio()
1556 promote(cache, structs, block, lookup_result.cblock, new_ocell); process_bio()
1561 atomic_inc(&cache->stats.demotion); process_bio()
1562 atomic_inc(&cache->stats.promotion); process_bio()
1563 demote_then_promote(cache, structs, lookup_result.old_oblock, process_bio()
1576 cell_defer(cache, new_ocell, false); process_bio()
1579 static int need_commit_due_to_time(struct cache *cache) need_commit_due_to_time() argument
1581 return !time_in_range(jiffies, cache->last_commit_jiffies, need_commit_due_to_time()
1582 cache->last_commit_jiffies + COMMIT_PERIOD); need_commit_due_to_time()
1585 static int commit_if_needed(struct cache *cache) commit_if_needed() argument
1589 if ((cache->commit_requested || need_commit_due_to_time(cache)) && commit_if_needed()
1590 dm_cache_changed_this_transaction(cache->cmd)) { commit_if_needed()
1591 atomic_inc(&cache->stats.commit_count); commit_if_needed()
1592 cache->commit_requested = false; commit_if_needed()
1593 r = dm_cache_commit(cache->cmd, false); commit_if_needed()
1594 cache->last_commit_jiffies = jiffies; commit_if_needed()
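
need_commit_due_to_time() and commit_if_needed() above implement a simple rule: commit the metadata if a commit was explicitly requested, or if more than COMMIT_PERIOD has elapsed since the last one (time_in_range() also catches a "now" that appears to lie before the last commit). A userspace sketch of the time check, using seconds instead of jiffies:

        #include <stdbool.h>
        #include <stdio.h>
        #include <time.h>

        #define COMMIT_PERIOD_SEC 1

        static bool need_commit_due_to_time(time_t last_commit, time_t now)
        {
                /* equivalent of !time_in_range(now, last, last + COMMIT_PERIOD) */
                return now < last_commit || now > last_commit + COMMIT_PERIOD_SEC;
        }

        int main(void)
        {
                time_t last = time(NULL) - 5;

                if (need_commit_due_to_time(last, time(NULL)))
                        printf("commit metadata and reset last_commit\n");
                return 0;
        }
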
1600 static void process_deferred_bios(struct cache *cache) process_deferred_bios() argument
1610 spin_lock_irqsave(&cache->lock, flags); process_deferred_bios()
1611 bio_list_merge(&bios, &cache->deferred_bios); process_deferred_bios()
1612 bio_list_init(&cache->deferred_bios); process_deferred_bios()
1613 spin_unlock_irqrestore(&cache->lock, flags); process_deferred_bios()
1621 if (prealloc_data_structs(cache, &structs)) { process_deferred_bios()
1622 spin_lock_irqsave(&cache->lock, flags); process_deferred_bios()
1623 bio_list_merge(&cache->deferred_bios, &bios); process_deferred_bios()
1624 spin_unlock_irqrestore(&cache->lock, flags); process_deferred_bios()
1631 process_flush_bio(cache, bio); process_deferred_bios()
1633 process_discard_bio(cache, &structs, bio); process_deferred_bios()
1635 process_bio(cache, &structs, bio); process_deferred_bios()
1638 prealloc_free_structs(cache, &structs); process_deferred_bios()
1641 static void process_deferred_flush_bios(struct cache *cache, bool submit_bios) process_deferred_flush_bios() argument
1649 spin_lock_irqsave(&cache->lock, flags); process_deferred_flush_bios()
1650 bio_list_merge(&bios, &cache->deferred_flush_bios); process_deferred_flush_bios()
1651 bio_list_init(&cache->deferred_flush_bios); process_deferred_flush_bios()
1652 spin_unlock_irqrestore(&cache->lock, flags); process_deferred_flush_bios()
1661 static void process_deferred_writethrough_bios(struct cache *cache) process_deferred_writethrough_bios() argument
1669 spin_lock_irqsave(&cache->lock, flags); process_deferred_writethrough_bios()
1670 bio_list_merge(&bios, &cache->deferred_writethrough_bios); process_deferred_writethrough_bios()
1671 bio_list_init(&cache->deferred_writethrough_bios); process_deferred_writethrough_bios()
1672 spin_unlock_irqrestore(&cache->lock, flags); process_deferred_writethrough_bios()
1681 static void writeback_some_dirty_blocks(struct cache *cache) writeback_some_dirty_blocks() argument
1691 while (spare_migration_bandwidth(cache)) { writeback_some_dirty_blocks()
1692 if (prealloc_data_structs(cache, &structs)) writeback_some_dirty_blocks()
1695 r = policy_writeback_work(cache->policy, &oblock, &cblock); writeback_some_dirty_blocks()
1699 r = get_cell(cache, oblock, &structs, &old_ocell); writeback_some_dirty_blocks()
1701 policy_set_dirty(cache->policy, oblock); writeback_some_dirty_blocks()
1705 writeback(cache, &structs, oblock, cblock, old_ocell); writeback_some_dirty_blocks()
1708 prealloc_free_structs(cache, &structs); writeback_some_dirty_blocks()
1713 * Dropping something from the cache *without* writing back.
1716 static void process_invalidation_request(struct cache *cache, struct invalidation_request *req) process_invalidation_request() argument
1723 r = policy_remove_cblock(cache->policy, to_cblock(begin)); process_invalidation_request()
1725 r = dm_cache_remove_mapping(cache->cmd, to_cblock(begin)); process_invalidation_request()
1741 cache->commit_requested = true; process_invalidation_request()
1749 static void process_invalidation_requests(struct cache *cache) process_invalidation_requests() argument
1755 spin_lock(&cache->invalidation_lock); process_invalidation_requests()
1756 list_splice_init(&cache->invalidation_requests, &list); process_invalidation_requests()
1757 spin_unlock(&cache->invalidation_lock); process_invalidation_requests()
1760 process_invalidation_request(cache, req); process_invalidation_requests()
1766 static bool is_quiescing(struct cache *cache) is_quiescing() argument
1768 return atomic_read(&cache->quiescing); is_quiescing()
1771 static void ack_quiescing(struct cache *cache) ack_quiescing() argument
1773 if (is_quiescing(cache)) { ack_quiescing()
1774 atomic_inc(&cache->quiescing_ack); ack_quiescing()
1775 wake_up(&cache->quiescing_wait); ack_quiescing()
1779 static void wait_for_quiescing_ack(struct cache *cache) wait_for_quiescing_ack() argument
1781 wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack)); wait_for_quiescing_ack()
1784 static void start_quiescing(struct cache *cache) start_quiescing() argument
1786 atomic_inc(&cache->quiescing); start_quiescing()
1787 wait_for_quiescing_ack(cache); start_quiescing()
1790 static void stop_quiescing(struct cache *cache) stop_quiescing() argument
1792 atomic_set(&cache->quiescing, 0); stop_quiescing()
1793 atomic_set(&cache->quiescing_ack, 0); stop_quiescing()
1796 static void wait_for_migrations(struct cache *cache) wait_for_migrations() argument
1798 wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations)); wait_for_migrations()
1801 static void stop_worker(struct cache *cache) stop_worker() argument
1803 cancel_delayed_work(&cache->waker); stop_worker()
1804 flush_workqueue(cache->wq); stop_worker()
1807 static void requeue_deferred_io(struct cache *cache) requeue_deferred_io() argument
1813 bio_list_merge(&bios, &cache->deferred_bios); requeue_deferred_io()
1814 bio_list_init(&cache->deferred_bios); requeue_deferred_io()
1820 static int more_work(struct cache *cache) more_work() argument
1822 if (is_quiescing(cache)) more_work()
1823 return !list_empty(&cache->quiesced_migrations) || more_work()
1824 !list_empty(&cache->completed_migrations) || more_work()
1825 !list_empty(&cache->need_commit_migrations); more_work()
1827 return !bio_list_empty(&cache->deferred_bios) || more_work()
1828 !bio_list_empty(&cache->deferred_flush_bios) || more_work()
1829 !bio_list_empty(&cache->deferred_writethrough_bios) || more_work()
1830 !list_empty(&cache->quiesced_migrations) || more_work()
1831 !list_empty(&cache->completed_migrations) || more_work()
1832 !list_empty(&cache->need_commit_migrations) || more_work()
1833 cache->invalidate; more_work()
1838 struct cache *cache = container_of(ws, struct cache, worker); do_worker() local
1841 if (!is_quiescing(cache)) { do_worker()
1842 writeback_some_dirty_blocks(cache); do_worker()
1843 process_deferred_writethrough_bios(cache); do_worker()
1844 process_deferred_bios(cache); do_worker()
1845 process_invalidation_requests(cache); do_worker()
1848 process_migrations(cache, &cache->quiesced_migrations, issue_copy_or_discard); do_worker()
1849 process_migrations(cache, &cache->completed_migrations, complete_migration); do_worker()
1851 if (commit_if_needed(cache)) { do_worker()
1852 process_deferred_flush_bios(cache, false); do_worker()
1853 process_migrations(cache, &cache->need_commit_migrations, migration_failure); do_worker()
1860 process_deferred_flush_bios(cache, true); do_worker()
1861 process_migrations(cache, &cache->need_commit_migrations, do_worker()
1865 ack_quiescing(cache); do_worker()
1867 } while (more_work(cache)); do_worker()
1876 struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker); do_waker() local
1877 policy_tick(cache->policy); do_waker()
1878 wake_worker(cache); do_waker()
1879 queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD); do_waker()
1892 struct cache *cache = container_of(cb, struct cache, callbacks); cache_is_congested() local
1894 return is_congested(cache->origin_dev, bdi_bits) || cache_is_congested()
1895 is_congested(cache->cache_dev, bdi_bits); cache_is_congested()
1906 static void destroy(struct cache *cache) destroy() argument
1910 if (cache->migration_pool) destroy()
1911 mempool_destroy(cache->migration_pool); destroy()
1913 if (cache->all_io_ds) destroy()
1914 dm_deferred_set_destroy(cache->all_io_ds); destroy()
1916 if (cache->prison) destroy()
1917 dm_bio_prison_destroy(cache->prison); destroy()
1919 if (cache->wq) destroy()
1920 destroy_workqueue(cache->wq); destroy()
1922 if (cache->dirty_bitset) destroy()
1923 free_bitset(cache->dirty_bitset); destroy()
1925 if (cache->discard_bitset) destroy()
1926 free_bitset(cache->discard_bitset); destroy()
1928 if (cache->copier) destroy()
1929 dm_kcopyd_client_destroy(cache->copier); destroy()
1931 if (cache->cmd) destroy()
1932 dm_cache_metadata_close(cache->cmd); destroy()
1934 if (cache->metadata_dev) destroy()
1935 dm_put_device(cache->ti, cache->metadata_dev); destroy()
1937 if (cache->origin_dev) destroy()
1938 dm_put_device(cache->ti, cache->origin_dev); destroy()
1940 if (cache->cache_dev) destroy()
1941 dm_put_device(cache->ti, cache->cache_dev); destroy()
1943 if (cache->policy) destroy()
1944 dm_cache_policy_destroy(cache->policy); destroy()
1946 for (i = 0; i < cache->nr_ctr_args ; i++) destroy()
1947 kfree(cache->ctr_args[i]); destroy()
1948 kfree(cache->ctr_args); destroy()
1950 kfree(cache); destroy()
1955 struct cache *cache = ti->private; cache_dtr() local
1957 destroy(cache); cache_dtr()
1968 * Construct a cache device mapping.
1970 * cache <metadata dev> <cache dev> <origin dev> <block size>
1975 * cache dev : fast device holding cached data blocks
1977 * block size : cache unit size in sectors
1987 * See cache-policies.txt for details.
1990 * writethrough : write through caching that prohibits cache block
1993 * back cache block contents later for performance reasons,
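
For context, the constructor line sketched above continues with a feature-argument count, the feature names (writethrough or writeback, for example), then the policy name and its arguments. Assuming the syntax documented for this kernel in Documentation/device-mapper/cache.txt, a full table line passed to dmsetup might look roughly like the following (device names and sizes are hypothetical):

        0 1638400 cache /dev/mapper/ssd-metadata /dev/mapper/ssd-blocks /dev/mapper/origin 512 1 writeback default 0

Here 1638400 is the origin length in 512-byte sectors, 512 is the cache block size in sectors, "1 writeback" supplies one feature argument, and "default 0" selects the default policy with no policy arguments.
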
2076 *error = "Error opening cache device"; parse_cache_dev()
2125 *error = "Data block size is larger than the cache device"; parse_block_size()
2144 {0, 1, "Invalid number of cache feature arguments"}, parse_features()
2171 *error = "Unrecognised cache feature requested"; parse_features()
2245 static int process_config_option(struct cache *cache, const char *key, const char *value) process_config_option() argument
2253 cache->migration_threshold = tmp; process_config_option()
2260 static int set_config_value(struct cache *cache, const char *key, const char *value) set_config_value() argument
2262 int r = process_config_option(cache, key, value); set_config_value()
2265 r = policy_set_config_value(cache->policy, key, value); set_config_value()
2273 static int set_config_values(struct cache *cache, int argc, const char **argv) set_config_values() argument
2283 r = set_config_value(cache, argv[0], argv[1]); set_config_values()
2294 static int create_cache_policy(struct cache *cache, struct cache_args *ca, create_cache_policy() argument
2298 cache->cache_size, create_cache_policy()
2299 cache->origin_sectors, create_cache_policy()
2300 cache->sectors_per_block); create_cache_policy()
2302 *error = "Error creating cache's policy"; create_cache_policy()
2305 cache->policy = p; create_cache_policy()
2311 * We want the discard block size to be at least the size of the cache
2336 static void set_cache_size(struct cache *cache, dm_cblock_t size) set_cache_size() argument
2340 if (nr_blocks > (1 << 20) && cache->cache_size != size) set_cache_size()
2341 DMWARN_LIMIT("You have created a cache device with a lot of individual cache blocks (%llu)\n" set_cache_size()
2343 "Please consider increasing the cache block size to reduce the overall cache block count.", set_cache_size()
2346 cache->cache_size = size; set_cache_size()
2351 static int cache_create(struct cache_args *ca, struct cache **result) cache_create()
2355 struct cache *cache; cache_create() local
2361 cache = kzalloc(sizeof(*cache), GFP_KERNEL); cache_create()
2362 if (!cache) cache_create()
2365 cache->ti = ca->ti; cache_create()
2366 ti->private = cache; cache_create()
2375 cache->features = ca->features; cache_create()
2376 ti->per_bio_data_size = get_per_bio_data_size(cache); cache_create()
2378 cache->callbacks.congested_fn = cache_is_congested; cache_create()
2379 dm_table_add_target_callbacks(ti->table, &cache->callbacks); cache_create()
2381 cache->metadata_dev = ca->metadata_dev; cache_create()
2382 cache->origin_dev = ca->origin_dev; cache_create()
2383 cache->cache_dev = ca->cache_dev; cache_create()
2388 origin_blocks = cache->origin_sectors = ca->origin_sectors; cache_create()
2390 cache->origin_blocks = to_oblock(origin_blocks); cache_create()
2392 cache->sectors_per_block = ca->block_size; cache_create()
2393 if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) { cache_create()
2401 cache->sectors_per_block_shift = -1; cache_create()
2403 set_cache_size(cache, to_cblock(cache_size)); cache_create()
2405 cache->sectors_per_block_shift = __ffs(ca->block_size); cache_create()
2406 set_cache_size(cache, to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift)); cache_create()
2409 r = create_cache_policy(cache, ca, error); cache_create()
2413 cache->policy_nr_args = ca->policy_argc; cache_create()
2414 cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD; cache_create()
2416 r = set_config_values(cache, ca->policy_argc, ca->policy_argv); cache_create()
2418 *error = "Error setting cache policy's config values"; cache_create()
2422 cmd = dm_cache_metadata_open(cache->metadata_dev->bdev, cache_create()
2424 dm_cache_policy_get_hint_size(cache->policy)); cache_create()
2430 cache->cmd = cmd; cache_create()
2432 if (passthrough_mode(&cache->features)) { cache_create()
2435 r = dm_cache_metadata_all_clean(cache->cmd, &all_clean); cache_create()
2448 spin_lock_init(&cache->lock); cache_create()
2449 bio_list_init(&cache->deferred_bios); cache_create()
2450 bio_list_init(&cache->deferred_flush_bios); cache_create()
2451 bio_list_init(&cache->deferred_writethrough_bios); cache_create()
2452 INIT_LIST_HEAD(&cache->quiesced_migrations); cache_create()
2453 INIT_LIST_HEAD(&cache->completed_migrations); cache_create()
2454 INIT_LIST_HEAD(&cache->need_commit_migrations); cache_create()
2455 atomic_set(&cache->nr_allocated_migrations, 0); cache_create()
2456 atomic_set(&cache->nr_io_migrations, 0); cache_create()
2457 init_waitqueue_head(&cache->migration_wait); cache_create()
2459 init_waitqueue_head(&cache->quiescing_wait); cache_create()
2460 atomic_set(&cache->quiescing, 0); cache_create()
2461 atomic_set(&cache->quiescing_ack, 0); cache_create()
2464 atomic_set(&cache->nr_dirty, 0); cache_create()
2465 cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size)); cache_create()
2466 if (!cache->dirty_bitset) { cache_create()
2470 clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size)); cache_create()
2472 cache->discard_block_size = cache_create()
2473 calculate_discard_block_size(cache->sectors_per_block, cache_create()
2474 cache->origin_sectors); cache_create()
2475 cache->discard_nr_blocks = to_dblock(dm_sector_div_up(cache->origin_sectors, cache_create()
2476 cache->discard_block_size)); cache_create()
2477 cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks)); cache_create()
2478 if (!cache->discard_bitset) { cache_create()
2482 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks)); cache_create()
2484 cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle); cache_create()
2485 if (IS_ERR(cache->copier)) { cache_create()
2487 r = PTR_ERR(cache->copier); cache_create()
2491 cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); cache_create()
2492 if (!cache->wq) { cache_create()
2496 INIT_WORK(&cache->worker, do_worker); cache_create()
2497 INIT_DELAYED_WORK(&cache->waker, do_waker); cache_create()
2498 cache->last_commit_jiffies = jiffies; cache_create()
2500 cache->prison = dm_bio_prison_create(); cache_create()
2501 if (!cache->prison) { cache_create()
2506 cache->all_io_ds = dm_deferred_set_create(); cache_create()
2507 if (!cache->all_io_ds) { cache_create()
2512 cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE, cache_create()
2514 if (!cache->migration_pool) { cache_create()
2515 *error = "Error creating cache's migration mempool"; cache_create()
2519 cache->need_tick_bio = true; cache_create()
2520 cache->sized = false; cache_create()
2521 cache->invalidate = false; cache_create()
2522 cache->commit_requested = false; cache_create()
2523 cache->loaded_mappings = false; cache_create()
2524 cache->loaded_discards = false; cache_create()
2526 load_stats(cache); cache_create()
2528 atomic_set(&cache->stats.demotion, 0); cache_create()
2529 atomic_set(&cache->stats.promotion, 0); cache_create()
2530 atomic_set(&cache->stats.copies_avoided, 0); cache_create()
2531 atomic_set(&cache->stats.cache_cell_clash, 0); cache_create()
2532 atomic_set(&cache->stats.commit_count, 0); cache_create()
2533 atomic_set(&cache->stats.discard_count, 0); cache_create()
2535 spin_lock_init(&cache->invalidation_lock); cache_create()
2536 INIT_LIST_HEAD(&cache->invalidation_requests); cache_create()
2538 *result = cache; cache_create()
2542 destroy(cache); cache_create()
2546 static int copy_ctr_args(struct cache *cache, int argc, const char **argv) copy_ctr_args() argument
2564 cache->nr_ctr_args = argc; copy_ctr_args()
2565 cache->ctr_args = copy; copy_ctr_args()
2574 struct cache *cache = NULL; cache_ctr() local
2578 ti->error = "Error allocating memory for cache"; cache_ctr()
2587 r = cache_create(ca, &cache); cache_ctr()
2591 r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3); cache_ctr()
2593 destroy(cache); cache_ctr()
2597 ti->private = cache; cache_ctr()
2604 static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell **cell) __cache_map() argument
2607 dm_oblock_t block = get_bio_block(cache, bio); __cache_map()
2608 size_t pb_data_size = get_per_bio_data_size(cache); __cache_map()
2617 if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) { __cache_map()
2620 * the end of the origin device. We don't cache these. __cache_map()
2623 remap_to_origin(cache, bio); __cache_map()
2628 defer_bio(cache, bio); __cache_map()
2635 *cell = alloc_prison_cell(cache); __cache_map()
2637 defer_bio(cache, bio); __cache_map()
2641 r = bio_detain(cache, block, bio, *cell, __cache_map()
2643 cache, cell); __cache_map()
2646 defer_bio(cache, bio); __cache_map()
2651 discarded_block = is_discarded_oblock(cache, block); __cache_map()
2653 r = policy_map(cache->policy, block, false, can_migrate, discarded_block, __cache_map()
2656 cell_defer(cache, *cell, true); __cache_map()
2660 DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r); __cache_map()
2661 cell_defer(cache, *cell, false); __cache_map()
2669 if (passthrough_mode(&cache->features)) { __cache_map()
2675 cell_defer(cache, *cell, true); __cache_map()
2679 inc_miss_counter(cache, bio); __cache_map()
2680 remap_to_origin_clear_discard(cache, bio, block); __cache_map()
2684 inc_hit_counter(cache, bio); __cache_map()
2685 if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) && __cache_map()
2686 !is_dirty(cache, lookup_result.cblock)) __cache_map()
2687 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); __cache_map()
2689 remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); __cache_map()
2694 inc_miss_counter(cache, bio); __cache_map()
2701 cell_defer(cache, *cell, false); __cache_map()
2705 remap_to_origin_clear_discard(cache, bio, block); __cache_map()
2712 cell_defer(cache, *cell, false); __cache_map()
2724 struct cache *cache = ti->private; cache_map() local
2726 r = __cache_map(cache, bio, &cell); cache_map()
2728 inc_ds(cache, bio, cell); cache_map()
2729 cell_defer(cache, cell, false); cache_map()
2737 struct cache *cache = ti->private; cache_end_io() local
2739 size_t pb_data_size = get_per_bio_data_size(cache); cache_end_io()
2743 policy_tick(cache->policy); cache_end_io()
2745 spin_lock_irqsave(&cache->lock, flags); cache_end_io()
2746 cache->need_tick_bio = true; cache_end_io()
2747 spin_unlock_irqrestore(&cache->lock, flags); cache_end_io()
2750 check_for_quiesced_migrations(cache, pb); cache_end_io()
2755 static int write_dirty_bitset(struct cache *cache) write_dirty_bitset() argument
2759 for (i = 0; i < from_cblock(cache->cache_size); i++) { write_dirty_bitset()
2760 r = dm_cache_set_dirty(cache->cmd, to_cblock(i), write_dirty_bitset()
2761 is_dirty(cache, to_cblock(i))); write_dirty_bitset()
2769 static int write_discard_bitset(struct cache *cache) write_discard_bitset() argument
2773 r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size, write_discard_bitset()
2774 cache->discard_nr_blocks); write_discard_bitset()
2780 for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) { write_discard_bitset()
2781 r = dm_cache_set_discard(cache->cmd, to_dblock(i), write_discard_bitset()
2782 is_discarded(cache, to_dblock(i))); write_discard_bitset()
2793 static bool sync_metadata(struct cache *cache) sync_metadata() argument
2797 r1 = write_dirty_bitset(cache); sync_metadata()
2801 r2 = write_discard_bitset(cache); sync_metadata()
2805 save_stats(cache); sync_metadata()
2807 r3 = dm_cache_write_hints(cache->cmd, cache->policy); sync_metadata()
2816 r4 = dm_cache_commit(cache->cmd, !r1 && !r2 && !r3); sync_metadata()
2818 DMERR("could not write cache metadata. Data loss may occur."); sync_metadata()
2825 struct cache *cache = ti->private; cache_postsuspend() local
2827 start_quiescing(cache); cache_postsuspend()
2828 wait_for_migrations(cache); cache_postsuspend()
2829 stop_worker(cache); cache_postsuspend()
2830 requeue_deferred_io(cache); cache_postsuspend()
2831 stop_quiescing(cache); cache_postsuspend()
2833 (void) sync_metadata(cache); cache_postsuspend()
2840 struct cache *cache = context; load_mapping() local
2842 r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid); load_mapping()
2847 set_dirty(cache, oblock, cblock); load_mapping()
2849 clear_dirty(cache, oblock, cblock); load_mapping()
2861 struct cache *cache; member in struct:discard_load_info
2871 static void discard_load_info_init(struct cache *cache, discard_load_info_init() argument
2874 li->cache = cache; discard_load_info_init()
2894 b = dm_sector_div_up(b, li->cache->discard_block_size); set_discard_range()
2895 sector_div(e, li->cache->discard_block_size); set_discard_range()
2901 if (e > from_dblock(li->cache->discard_nr_blocks)) set_discard_range()
2902 e = from_dblock(li->cache->discard_nr_blocks); set_discard_range()
2905 set_discard(li->cache, to_dblock(b)); set_discard_range()
2938 static dm_cblock_t get_cache_dev_size(struct cache *cache) get_cache_dev_size() argument
2940 sector_t size = get_dev_size(cache->cache_dev); get_cache_dev_size()
2941 (void) sector_div(size, cache->sectors_per_block); get_cache_dev_size()
2945 static bool can_resize(struct cache *cache, dm_cblock_t new_size) can_resize() argument
2947 if (from_cblock(new_size) > from_cblock(cache->cache_size)) can_resize()
2951 * We can't drop a dirty block when shrinking the cache. can_resize()
2953 while (from_cblock(new_size) < from_cblock(cache->cache_size)) { can_resize()
2955 if (is_dirty(cache, new_size)) { can_resize()
2956 DMERR("unable to shrink cache; cache block %llu is dirty", can_resize()
2965 static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size) resize_cache_dev() argument
2969 r = dm_cache_resize(cache->cmd, new_size); resize_cache_dev()
2971 DMERR("could not resize cache metadata"); resize_cache_dev()
2975 set_cache_size(cache, new_size); resize_cache_dev()
2983 struct cache *cache = ti->private; cache_preresume() local
2984 dm_cblock_t csize = get_cache_dev_size(cache); cache_preresume()
2987 * Check to see if the cache has resized. cache_preresume()
2989 if (!cache->sized) { cache_preresume()
2990 r = resize_cache_dev(cache, csize); cache_preresume()
2994 cache->sized = true; cache_preresume()
2996 } else if (csize != cache->cache_size) { cache_preresume()
2997 if (!can_resize(cache, csize)) cache_preresume()
3000 r = resize_cache_dev(cache, csize); cache_preresume()
3005 if (!cache->loaded_mappings) { cache_preresume()
3006 r = dm_cache_load_mappings(cache->cmd, cache->policy, cache_preresume()
3007 load_mapping, cache); cache_preresume()
3009 DMERR("could not load cache mappings"); cache_preresume()
3013 cache->loaded_mappings = true; cache_preresume()
3016 if (!cache->loaded_discards) { cache_preresume()
3024 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks)); cache_preresume()
3026 discard_load_info_init(cache, &li); cache_preresume()
3027 r = dm_cache_load_discards(cache->cmd, load_discard, &li); cache_preresume()
3034 cache->loaded_discards = true; cache_preresume()
3042 struct cache *cache = ti->private; cache_resume() local
3044 cache->need_tick_bio = true; cache_resume()
3045 do_waker(&cache->waker.work); cache_resume()
3052 * <cache block size> <#used cache blocks>/<#total cache blocks>
3068 struct cache *cache = ti->private; cache_status() local
3075 r = dm_cache_commit(cache->cmd, false); cache_status()
3080 r = dm_cache_get_free_metadata_block_count(cache->cmd, cache_status()
3087 r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata); cache_status()
3093 residency = policy_residency(cache->policy); cache_status()
3099 cache->sectors_per_block, cache_status()
3101 (unsigned long long) from_cblock(cache->cache_size), cache_status()
3102 (unsigned) atomic_read(&cache->stats.read_hit), cache_status()
3103 (unsigned) atomic_read(&cache->stats.read_miss), cache_status()
3104 (unsigned) atomic_read(&cache->stats.write_hit), cache_status()
3105 (unsigned) atomic_read(&cache->stats.write_miss), cache_status()
3106 (unsigned) atomic_read(&cache->stats.demotion), cache_status()
3107 (unsigned) atomic_read(&cache->stats.promotion), cache_status()
3108 (unsigned long) atomic_read(&cache->nr_dirty)); cache_status()
3110 if (writethrough_mode(&cache->features)) cache_status()
3113 else if (passthrough_mode(&cache->features)) cache_status()
3116 else if (writeback_mode(&cache->features)) cache_status()
3120 DMERR("internal error: unknown io mode: %d", (int) cache->features.io_mode); cache_status()
3124 DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold); cache_status()
3126 DMEMIT("%s ", dm_cache_policy_get_name(cache->policy)); cache_status()
3128 r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz); cache_status()
3136 format_dev_t(buf, cache->metadata_dev->bdev->bd_dev); cache_status()
3138 format_dev_t(buf, cache->cache_dev->bdev->bd_dev); cache_status()
3140 format_dev_t(buf, cache->origin_dev->bdev->bd_dev); cache_status()
3143 for (i = 0; i < cache->nr_ctr_args - 1; i++) cache_status()
3144 DMEMIT(" %s", cache->ctr_args[i]); cache_status()
3145 if (cache->nr_ctr_args) cache_status()
3146 DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]); cache_status()
3156 * A cache block range can take two forms:
3161 static int parse_cblock_range(struct cache *cache, const char *str, parse_cblock_range() argument
3198 static int validate_cblock_range(struct cache *cache, struct cblock_range *range) validate_cblock_range() argument
3202 uint64_t n = from_cblock(cache->cache_size); validate_cblock_range()
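
parse_cblock_range() and validate_cblock_range() above accept either a single cache block or a half-open span. A standalone userspace sketch of that parsing follows; the exact "N" and "B-E" forms are assumed from this kernel's sources, and a real caller must still check that end > begin and end <= cache_size, as validate_cblock_range() does.

        #include <stdio.h>

        struct cblock_range { unsigned long long begin, end; };        /* [begin, end) */

        static int parse_cblock_range(const char *str, struct cblock_range *r)
        {
                unsigned long long b, e;
                char dummy;

                /* form 1: "B-E", a half-open span of cache blocks */
                if (sscanf(str, "%llu-%llu%c", &b, &e, &dummy) == 2) {
                        r->begin = b;
                        r->end = e;
                        return 0;
                }

                /* form 2: "N", a single cache block */
                if (sscanf(str, "%llu%c", &b, &dummy) == 1) {
                        r->begin = b;
                        r->end = b + 1;
                        return 0;
                }
                return -1;
        }

        int main(void)
        {
                struct cblock_range r;

                if (!parse_cblock_range("100-200", &r))
                        printf("invalidate cblocks [%llu, %llu)\n", r.begin, r.end);
                return 0;
        }
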
3222 static int request_invalidation(struct cache *cache, struct cblock_range *range) request_invalidation() argument
3232 spin_lock(&cache->invalidation_lock); request_invalidation()
3233 list_add(&req.list, &cache->invalidation_requests); request_invalidation()
3234 spin_unlock(&cache->invalidation_lock); request_invalidation()
3235 wake_worker(cache); request_invalidation()
3241 static int process_invalidate_cblocks_message(struct cache *cache, unsigned count, process_invalidate_cblocks_message() argument
3248 if (!passthrough_mode(&cache->features)) { process_invalidate_cblocks_message()
3249 DMERR("cache has to be in passthrough mode for invalidation"); process_invalidate_cblocks_message()
3254 r = parse_cblock_range(cache, cblock_ranges[i], &range); process_invalidate_cblocks_message()
3258 r = validate_cblock_range(cache, &range); process_invalidate_cblocks_message()
3265 r = request_invalidation(cache, &range); process_invalidate_cblocks_message()
3279 * The key migration_threshold is supported by the cache target core.
3283 struct cache *cache = ti->private; cache_message() local
3289 return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1); cache_message()
3294 return set_config_value(cache, argv[0], argv[1]); cache_message()
3301 struct cache *cache = ti->private; cache_iterate_devices() local
3303 r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data); cache_iterate_devices()
3305 r = fn(ti, cache->origin_dev, 0, ti->len, data); cache_iterate_devices()
3320 struct cache *cache = ti->private; cache_bvec_merge() local
3321 struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev); cache_bvec_merge()
3326 bvm->bi_bdev = cache->origin_dev->bdev; cache_bvec_merge()
3330 static void set_discard_limits(struct cache *cache, struct queue_limits *limits) set_discard_limits() argument
3333 * FIXME: these limits may be incompatible with the cache device set_discard_limits()
3335 limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024, set_discard_limits()
3336 cache->origin_sectors); set_discard_limits()
3337 limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT; set_discard_limits()
3342 struct cache *cache = ti->private; cache_io_hints() local
3347 * cache's blocksize (io_opt is a factor) do not override them. cache_io_hints()
3349 if (io_opt_sectors < cache->sectors_per_block || cache_io_hints()
3350 do_div(io_opt_sectors, cache->sectors_per_block)) { cache_io_hints()
3351 blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT); cache_io_hints()
3352 blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT); cache_io_hints()
3354 set_discard_limits(cache, limits); cache_io_hints()
3360 .name = "cache",
3383 DMERR("cache target registration failed: %d", r); dm_cache_init()
3405 MODULE_DESCRIPTION(DM_NAME " cache target");
H A Ddm-cache-policy.h10 #include "dm-cache-block-types.h"
21 * The cache policy makes the important decisions about which blocks get to
22 * live on the faster cache device.
28 * That block is in the cache. Remap to the cache and carry on.
38 * - copy the origin to the given cache block
40 * - remap the original block to the cache
44 * move it to the cache, with the added complication that the destination
45 * cache block needs a writeback first. The core should:
50 * - copy new block to cache
52 * - remap bio to cache and reissue.
75 * writeback operation since the block remains in the cache.
97 * The cache policy object. Just a bunch of methods. It is envisaged that
129 * cache block if it wants.)
141 * Sometimes we want to see if a block is in the cache, without
146 * Returns 0 if in cache, -ENOENT if not, < 0 for other errors
155 * Called when a cache target is first created. Used to load a
192 * How full is the cache?
245 * Policies may store a hint for each cache block.
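
The comments above describe four possible answers from the policy's map call: the block is already cached, it should stay on the origin, it should be promoted into a free cache block, or an existing mapping must be demoted first. A hypothetical caller-side sketch (the enum and field names are assumptions based on this kernel's dm-cache-policy.h, not a definitive API listing):

        #include <stdio.h>

        enum policy_operation { POLICY_HIT, POLICY_MISS, POLICY_NEW, POLICY_REPLACE };

        struct lookup_result {
                enum policy_operation op;
                unsigned long long old_oblock;  /* only meaningful for POLICY_REPLACE */
                unsigned long long cblock;      /* HIT, NEW and REPLACE */
        };

        /* what the core target has to do for each answer */
        static const char *action_for(const struct lookup_result *lr)
        {
                switch (lr->op) {
                case POLICY_HIT:
                        return "remap the bio to the cache device at lr->cblock";
                case POLICY_MISS:
                        return "remap the bio to the origin device";
                case POLICY_NEW:
                        return "promote: copy the origin block into lr->cblock, then remap";
                case POLICY_REPLACE:
                        return "demote lr->old_oblock (write it back) first, then promote and remap";
                }
                return "unknown";
        }

        int main(void)
        {
                struct lookup_result lr = { .op = POLICY_REPLACE, .old_oblock = 7, .cblock = 3 };

                printf("%s\n", action_for(&lr));
                return 0;
        }
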
H A Ddm-bufio.h21 * Create a buffered IO cache on a given device
30 * Release a buffered IO cache.
52 * Like dm_bufio_read, but return buffer from cache, don't read
53 * it. If the buffer is not in the cache, return NULL.
66 * Prefetch the specified blocks to the cache.
101 * Send an empty write barrier to the device to flush hardware disk cache.
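
A minimal kernel-style sketch of the dm-bufio usage the header comments above describe: create a client over a block device, read a block through the cache, release the buffer, and tear the client down. The dm_bufio_client_create() argument list here is an assumption taken from this era of dm-bufio.h and should not be treated as authoritative.

        #include <linux/blkdev.h>
        #include <linux/err.h>
        #include "dm-bufio.h"

        static int example_read_block(struct block_device *bdev, sector_t block)
        {
                struct dm_bufio_client *c;
                struct dm_buffer *b;
                void *data;

                c = dm_bufio_client_create(bdev, 4096 /* block size */,
                                           1 /* reserved buffers */, 0, NULL, NULL);
                if (IS_ERR(c))
                        return PTR_ERR(c);

                data = dm_bufio_read(c, block, &b);     /* reads and caches the block */
                if (!IS_ERR(data)) {
                        /* ... use data[0..4095] ... */
                        dm_bufio_release(b);            /* drop our hold; block stays cached */
                }

                dm_bufio_client_destroy(c);
                return IS_ERR(data) ? PTR_ERR(data) : 0;
        }
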
/linux-4.1.27/fs/fscache/
H A Dcache.c1 /* FS-Cache cache handling
25 * look up a cache tag
71 * release a reference to a cache tag
90 * select a cache in which to store an object
91 * - the cache addremove semaphore must be at least read-locked by the caller
99 struct fscache_cache *cache; fscache_select_cache_for_object() local
104 _leave(" = NULL [no cache]"); fscache_select_cache_for_object()
108 /* we check the parent to determine the cache to use */ fscache_select_cache_for_object()
112 * cache */ fscache_select_cache_for_object()
117 cache = object->cache; fscache_select_cache_for_object()
119 test_bit(FSCACHE_IOERROR, &cache->flags)) fscache_select_cache_for_object()
120 cache = NULL; fscache_select_cache_for_object()
123 _leave(" = %p [parent]", cache); fscache_select_cache_for_object()
124 return cache; fscache_select_cache_for_object()
151 if (!tag->cache) { fscache_select_cache_for_object()
156 if (test_bit(FSCACHE_IOERROR, &tag->cache->flags)) fscache_select_cache_for_object()
159 _leave(" = %p [specific]", tag->cache); fscache_select_cache_for_object()
160 return tag->cache; fscache_select_cache_for_object()
163 /* netfs has no preference - just select first cache */ fscache_select_cache_for_object()
164 cache = list_entry(fscache_cache_list.next, fscache_select_cache_for_object()
166 _leave(" = %p [first]", cache); fscache_select_cache_for_object()
167 return cache; fscache_select_cache_for_object()
171 * fscache_init_cache - Initialise a cache record
172 * @cache: The cache record to be initialised
173 * @ops: The cache operations to be installed in that record
177 * Initialise a record of a cache and fill in the name.
182 void fscache_init_cache(struct fscache_cache *cache, fscache_init_cache() argument
189 memset(cache, 0, sizeof(*cache)); fscache_init_cache()
191 cache->ops = ops; fscache_init_cache()
194 vsnprintf(cache->identifier, sizeof(cache->identifier), idfmt, va); fscache_init_cache()
197 INIT_WORK(&cache->op_gc, fscache_operation_gc); fscache_init_cache()
198 INIT_LIST_HEAD(&cache->link); fscache_init_cache()
199 INIT_LIST_HEAD(&cache->object_list); fscache_init_cache()
200 INIT_LIST_HEAD(&cache->op_gc_list); fscache_init_cache()
201 spin_lock_init(&cache->object_list_lock); fscache_init_cache()
202 spin_lock_init(&cache->op_gc_list_lock); fscache_init_cache()
207 * fscache_add_cache - Declare a cache as being open for business
208 * @cache: The record describing the cache
209 * @ifsdef: The record of the cache object describing the top-level index
210 * @tagname: The tag describing this cache
212 * Add a cache to the system, making it available for netfs's to use.
217 int fscache_add_cache(struct fscache_cache *cache, fscache_add_cache() argument
223 BUG_ON(!cache->ops); fscache_add_cache()
226 cache->flags = 0; fscache_add_cache()
233 tagname = cache->identifier; fscache_add_cache()
237 _enter("{%s.%s},,%s", cache->ops->name, cache->identifier, tagname); fscache_add_cache()
239 /* we use the cache tag to uniquely identify caches */ fscache_add_cache()
247 cache->kobj = kobject_create_and_add(tagname, fscache_root); fscache_add_cache()
248 if (!cache->kobj) fscache_add_cache()
252 ifsdef->cache = cache; fscache_add_cache()
253 cache->fsdef = ifsdef; fscache_add_cache()
257 tag->cache = cache; fscache_add_cache()
258 cache->tag = tag; fscache_add_cache()
260 /* add the cache to the list */ fscache_add_cache()
261 list_add(&cache->link, &fscache_cache_list); fscache_add_cache()
263 /* add the cache's netfs definition index object to the cache's fscache_add_cache()
265 spin_lock(&cache->object_list_lock); fscache_add_cache()
266 list_add_tail(&ifsdef->cache_link, &cache->object_list); fscache_add_cache()
267 spin_unlock(&cache->object_list_lock); fscache_add_cache()
270 /* add the cache's netfs definition index object to the top level index fscache_add_cache()
284 cache->tag->name, cache->ops->name); fscache_add_cache()
285 kobject_uevent(cache->kobj, KOBJ_ADD); fscache_add_cache()
287 _leave(" = 0 [%s]", cache->identifier); fscache_add_cache()
308 * fscache_io_error - Note a cache I/O error
309 * @cache: The record describing the cache
311 * Note that an I/O error occurred in a cache and that it should no longer be
317 void fscache_io_error(struct fscache_cache *cache) fscache_io_error() argument
319 if (!test_and_set_bit(FSCACHE_IOERROR, &cache->flags)) fscache_io_error()
321 cache->ops->name); fscache_io_error()
326 * request withdrawal of all the objects in a cache
329 static void fscache_withdraw_all_objects(struct fscache_cache *cache, fscache_withdraw_all_objects() argument
334 while (!list_empty(&cache->object_list)) { fscache_withdraw_all_objects()
335 spin_lock(&cache->object_list_lock); fscache_withdraw_all_objects()
337 if (!list_empty(&cache->object_list)) { fscache_withdraw_all_objects()
338 object = list_entry(cache->object_list.next, fscache_withdraw_all_objects()
350 spin_unlock(&cache->object_list_lock); fscache_withdraw_all_objects()
356 * fscache_withdraw_cache - Withdraw a cache from the active service
357 * @cache: The record describing the cache
359 * Withdraw a cache from service, unbinding all its cache objects from the
365 void fscache_withdraw_cache(struct fscache_cache *cache) fscache_withdraw_cache() argument
371 pr_notice("Withdrawing cache \"%s\"\n", fscache_withdraw_cache()
372 cache->tag->name); fscache_withdraw_cache()
374 /* make the cache unavailable for cookie acquisition */ fscache_withdraw_cache()
375 if (test_and_set_bit(FSCACHE_CACHE_WITHDRAWN, &cache->flags)) fscache_withdraw_cache()
379 list_del_init(&cache->link); fscache_withdraw_cache()
380 cache->tag->cache = NULL; fscache_withdraw_cache()
386 cache->ops->sync_cache(cache); fscache_withdraw_cache()
389 /* dissociate all the netfs pages backed by this cache from the block fscache_withdraw_cache()
390 * mappings in the cache */ fscache_withdraw_cache()
392 cache->ops->dissociate_pages(cache); fscache_withdraw_cache()
396 * cache - which we do by passing them off to thread pool to be fscache_withdraw_cache()
400 fscache_withdraw_all_objects(cache, &dying_objects); fscache_withdraw_cache()
406 atomic_read(&cache->object_count) == 0); fscache_withdraw_cache()
409 list_empty(&cache->object_list)); fscache_withdraw_cache()
413 kobject_put(cache->kobj); fscache_withdraw_cache()
415 clear_bit(FSCACHE_TAG_RESERVED, &cache->tag->flags); fscache_withdraw_cache()
416 fscache_release_cache_tag(cache->tag); fscache_withdraw_cache()
417 cache->tag = NULL; fscache_withdraw_cache()
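Taken together, the functions above are what a cache backend (cachefiles, for example) calls to come online and go away again: fscache_init_cache() fills in the record, fscache_add_cache() publishes it with its top-level index object, and fscache_io_error()/fscache_withdraw_cache() take it out of service. A hedged sketch, assuming the backend already provides its fscache_cache_ops table and an fsdef index object; "my_*" names are illustrative:

#include <linux/fscache-cache.h>

/* Assumed to be defined by the backend elsewhere. */
extern const struct fscache_cache_ops my_cache_ops;

static struct fscache_cache my_cache;

static int my_backend_bring_online(struct fscache_object *fsdef)
{
        fscache_init_cache(&my_cache, &my_cache_ops, "mybackend");
        return fscache_add_cache(&my_cache, fsdef, "mytag");
}

static void my_backend_note_io_error(void)
{
        fscache_io_error(&my_cache);    /* stop further use of this cache */
}

static void my_backend_take_offline(void)
{
        fscache_withdraw_cache(&my_cache);
}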
H A DMakefile6 cache.o \
H A Dcookie.c25 static int fscache_alloc_object(struct fscache_cache *cache,
53 * - other objects are stored in a selected cache immediately, and all the
194 struct fscache_cache *cache; fscache_acquire_non_index_cookie() local
212 /* select a cache in which to store the object */ fscache_acquire_non_index_cookie()
213 cache = fscache_select_cache_for_object(cookie->parent); fscache_acquire_non_index_cookie()
214 if (!cache) { fscache_acquire_non_index_cookie()
217 _leave(" = -ENOMEDIUM [no cache]"); fscache_acquire_non_index_cookie()
221 _debug("cache %s", cache->tag->name); fscache_acquire_non_index_cookie()
225 /* ask the cache to allocate objects for this cookie and its parent fscache_acquire_non_index_cookie()
227 ret = fscache_alloc_object(cache, cookie); fscache_acquire_non_index_cookie()
275 * recursively allocate cache object records for a cookie/cache combination
278 static int fscache_alloc_object(struct fscache_cache *cache, fscache_alloc_object() argument
284 _enter("%p,%p{%s}", cache, cookie, cookie->def->name); fscache_alloc_object()
289 if (object->cache == cache) fscache_alloc_object()
294 /* ask the cache to allocate an object (we may end up with duplicate fscache_alloc_object()
297 object = cache->ops->alloc_object(cache, cookie); fscache_alloc_object()
312 ret = fscache_alloc_object(cache, cookie->parent); fscache_alloc_object()
321 cache->ops->put_object(object); fscache_alloc_object()
340 cache->ops->put_object(object); fscache_alloc_object()
348 * attach a cache object to a cookie
354 struct fscache_cache *cache = object->cache; fscache_attach_object() local
365 if (p->cache == object->cache) { fscache_attach_object()
376 if (p->cache == object->cache) { fscache_attach_object()
391 /* attach to the cache's object list */ fscache_attach_object()
393 spin_lock(&cache->object_list_lock); fscache_attach_object()
394 list_add(&object->cache_link, &cache->object_list); fscache_attach_object()
395 spin_unlock(&cache->object_list_lock); fscache_attach_object()
494 /* update the index entry on disk in each cache backing this __fscache_update_cookie()
576 * release a cookie back to the cache
649 * check the consistency between the netfs inode and the backing cache
686 if (test_bit(FSCACHE_IOERROR, &object->cache->flags)) __fscache_check_consistency()
701 /* ask the cache to honour the operation */ __fscache_check_consistency()
702 ret = object->cache->ops->check_consistency(op); __fscache_check_consistency()
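From the netfs side, the path above is driven by cookie acquisition: acquiring a non-index cookie selects a cache and allocates backing objects for it. A minimal sketch of the 4.1-era netfs calls, assuming the filesystem already declares its cookie definition and holds an index cookie for the parent; "my_*" names are illustrative:

#include <linux/fscache.h>

/* Assumed netfs definition, declared elsewhere by the filesystem. */
extern const struct fscache_cookie_def my_file_cookie_def;

static struct fscache_cookie *my_get_cookie(struct fscache_cookie *parent_index,
                                            void *netfs_priv)
{
        /* May return NULL if no cache is available; callers must cope. */
        return fscache_acquire_cookie(parent_index, &my_file_cookie_def,
                                      netfs_priv, true /* enable */);
}

static void my_put_cookie(struct fscache_cookie *cookie)
{
        /* retire == false: keep the cached data around for next time. */
        fscache_relinquish_cookie(cookie, false);
}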
/linux-4.1.27/arch/m32r/mm/
H A Dcache.c2 * linux/arch/m32r/mm/cache.c
16 #define MCCR_IIV (1UL << 6) /* I-cache invalidate */
17 #define MCCR_DIV (1UL << 5) /* D-cache invalidate */
18 #define MCCR_DCB (1UL << 4) /* D-cache copy back */
19 #define MCCR_ICM (1UL << 1) /* I-cache mode [0:off,1:on] */
20 #define MCCR_DCM (1UL << 0) /* D-cache mode [0:off,1:on] */
27 #define MCCR_IIV (1UL << 0) /* I-cache invalidate */
31 #define MCCR_IIV (1UL << 8) /* I-cache invalidate */
32 #define MCCR_DIV (1UL << 9) /* D-cache invalidate */
33 #define MCCR_DCB (1UL << 10) /* D-cache copy back */
34 #define MCCR_ICM (1UL << 0) /* I-cache mode [0:off,1:on] */
35 #define MCCR_DCM (1UL << 1) /* D-cache mode [0:off,1:on] */
42 #error Unknown cache type.
46 /* Copy back and invalidate D-cache and invalidate I-cache all */ _flush_cache_all()
55 /* Copyback and invalidate D-cache */ _flush_cache_all()
56 /* Invalidate I-cache */ _flush_cache_all()
61 /* Copyback and invalidate D-cache */ _flush_cache_all()
62 /* Invalidate I-cache */ _flush_cache_all()
68 /* Copy back D-cache and invalidate I-cache all */ _flush_cache_copyback_all()
77 /* Copyback and invalidate D-cache */ _flush_cache_copyback_all()
78 /* Invalidate I-cache */ _flush_cache_copyback_all()
83 /* Copyback D-cache */ _flush_cache_copyback_all()
84 /* Invalidate I-cache */ _flush_cache_copyback_all()
H A DMakefile6 obj-y := init.o fault.o mmu.o extable.o ioremap.o cache.o page.o
8 obj-y := init.o fault-nommu.o mmu.o extable.o ioremap-nommu.o cache.o page.o
H A Dpage.S27 ld r3, @r0 /* cache line allocate */
43 ld r3, @r0 /* cache line allocate */
66 ld r3, @r0 /* cache line allocate */
74 ld r3, @r0 /* cache line allocate */
/linux-4.1.27/drivers/acpi/acpica/
H A Dutcache.c3 * Module Name: utcache - local cache allocation routines
55 * PARAMETERS: cache_name - Ascii name for the cache
57 * max_depth - Maximum depth of the cache (in objects)
58 * return_cache - Where the new cache object is returned
62 * DESCRIPTION: Create a cache object
70 struct acpi_memory_list *cache; acpi_os_create_cache() local
78 /* Create the cache object */ acpi_os_create_cache()
80 cache = acpi_os_allocate(sizeof(struct acpi_memory_list)); acpi_os_create_cache()
81 if (!cache) { acpi_os_create_cache()
85 /* Populate the cache object and return it */ acpi_os_create_cache()
87 ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list)); acpi_os_create_cache()
88 cache->list_name = cache_name; acpi_os_create_cache()
89 cache->object_size = object_size; acpi_os_create_cache()
90 cache->max_depth = max_depth; acpi_os_create_cache()
92 *return_cache = cache; acpi_os_create_cache()
100 * PARAMETERS: cache - Handle to cache object
104 * DESCRIPTION: Free all objects within the requested cache.
108 acpi_status acpi_os_purge_cache(struct acpi_memory_list * cache) acpi_os_purge_cache() argument
115 if (!cache) { acpi_os_purge_cache()
124 /* Walk the list of objects in this cache */ acpi_os_purge_cache()
126 while (cache->list_head) { acpi_os_purge_cache()
130 next = ACPI_GET_DESCRIPTOR_PTR(cache->list_head); acpi_os_purge_cache()
131 ACPI_FREE(cache->list_head); acpi_os_purge_cache()
133 cache->list_head = next; acpi_os_purge_cache()
134 cache->current_depth--; acpi_os_purge_cache()
145 * PARAMETERS: cache - Handle to cache object
149 * DESCRIPTION: Free all objects within the requested cache and delete the
150 * cache object.
154 acpi_status acpi_os_delete_cache(struct acpi_memory_list * cache) acpi_os_delete_cache() argument
160 /* Purge all objects in the cache */ acpi_os_delete_cache()
162 status = acpi_os_purge_cache(cache); acpi_os_delete_cache()
167 /* Now we can delete the cache object */ acpi_os_delete_cache()
169 acpi_os_free(cache); acpi_os_delete_cache()
177 * PARAMETERS: cache - Handle to cache object
182 * DESCRIPTION: Release an object to the specified cache. If cache is full,
188 acpi_os_release_object(struct acpi_memory_list * cache, void *object) acpi_os_release_object() argument
194 if (!cache || !object) { acpi_os_release_object()
198 /* If cache is full, just free this object */ acpi_os_release_object()
200 if (cache->current_depth >= cache->max_depth) { acpi_os_release_object()
202 ACPI_MEM_TRACKING(cache->total_freed++); acpi_os_release_object()
205 /* Otherwise put this object back into the cache */ acpi_os_release_object()
215 ACPI_MEMSET(object, 0xCA, cache->object_size); acpi_os_release_object()
218 /* Put the object at the head of the cache list */ acpi_os_release_object()
220 ACPI_SET_DESCRIPTOR_PTR(object, cache->list_head); acpi_os_release_object()
221 cache->list_head = object; acpi_os_release_object()
222 cache->current_depth++; acpi_os_release_object()
234 * PARAMETERS: cache - Handle to cache object
238 * DESCRIPTION: Get an object from the specified cache. If cache is empty,
243 void *acpi_os_acquire_object(struct acpi_memory_list *cache) acpi_os_acquire_object() argument
250 if (!cache) { acpi_os_acquire_object()
259 ACPI_MEM_TRACKING(cache->requests++); acpi_os_acquire_object()
261 /* Check the cache first */ acpi_os_acquire_object()
263 if (cache->list_head) { acpi_os_acquire_object()
267 object = cache->list_head; acpi_os_acquire_object()
268 cache->list_head = ACPI_GET_DESCRIPTOR_PTR(object); acpi_os_acquire_object()
270 cache->current_depth--; acpi_os_acquire_object()
272 ACPI_MEM_TRACKING(cache->hits++); acpi_os_acquire_object()
274 "Object %p from %s cache\n", object, acpi_os_acquire_object()
275 cache->list_name)); acpi_os_acquire_object()
284 ACPI_MEMSET(object, 0, cache->object_size); acpi_os_acquire_object()
286 /* The cache is empty, create a new object */ acpi_os_acquire_object()
288 ACPI_MEM_TRACKING(cache->total_allocated++); acpi_os_acquire_object()
291 if ((cache->total_allocated - cache->total_freed) > acpi_os_acquire_object()
292 cache->max_occupied) { acpi_os_acquire_object()
293 cache->max_occupied = acpi_os_acquire_object()
294 cache->total_allocated - cache->total_freed; acpi_os_acquire_object()
305 object = ACPI_ALLOCATE_ZEROED(cache->object_size); acpi_os_acquire_object()
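The four entry points above form the whole lifecycle of an ACPICA local object cache. A minimal sketch of that lifecycle using the signatures shown; note these are the local-cache implementations built when ACPICA manages its own cache (rather than mapping onto kmem_cache), and the name, object size and depth below are illustrative:

#include <acpi/acpi.h>

static acpi_status demo_cache_lifecycle(void)
{
        struct acpi_memory_list *cache;
        acpi_status status;
        void *obj;

        status = acpi_os_create_cache("demo-cache", 64 /* object size */,
                                      16 /* max depth */, &cache);
        if (ACPI_FAILURE(status))
                return status;

        obj = acpi_os_acquire_object(cache);    /* from the list, or freshly zeroed */
        if (obj)
                acpi_os_release_object(cache, obj);     /* back to the list, or freed if full */

        return acpi_os_delete_cache(cache);     /* purges remaining objects, then frees */
}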
/linux-4.1.27/include/asm-generic/
H A Dcache.h4 * 32 bytes appears to be the most common cache line size,
6 * cache lines need to provide their own cache.h.
H A Dhardirq.h4 #include <linux/cache.h>
/linux-4.1.27/fs/afs/
H A DMakefile5 afs-cache-$(CONFIG_AFS_FSCACHE) := cache.o
8 $(afs-cache-y) \
H A Dfile.c106 * deal with notification that a page was read from the cache
144 ret = fscache_read_or_alloc_page(vnode->cache, afs_page_filler()
153 /* read BIO submitted (page in cache) */ afs_page_filler()
159 _debug("cache said ENODATA"); afs_page_filler()
164 _debug("cache said ENOBUFS"); afs_page_filler()
182 fscache_uncache_page(vnode->cache, page); afs_page_filler()
190 /* send the page to the cache */ afs_page_filler()
193 fscache_write_page(vnode->cache, page, GFP_KERNEL) != 0) { afs_page_filler()
194 fscache_uncache_page(vnode->cache, page); afs_page_filler()
260 ret = fscache_read_or_alloc_pages(vnode->cache, afs_readpages()
272 /* all pages are being read from the cache */ afs_readpages()
279 /* there were pages that couldn't be read from the cache */ afs_readpages()
326 fscache_wait_on_page_write(vnode->cache, page); afs_invalidatepage()
327 fscache_uncache_page(vnode->cache, page); afs_invalidatepage()
358 /* deny if page is being written to the cache and the caller hasn't afs_releasepage()
361 if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) { afs_releasepage()
362 _leave(" = F [cache busy]"); afs_releasepage()
/linux-4.1.27/arch/m68k/include/asm/
H A Dcache.h2 * include/asm-m68k/cache.h
7 /* bytes per L1 cache line */
H A Dm52xxacr.h4 * m52xxacr.h -- ColdFire version 2 core cache support
16 * cache setup. Although not absolutely identical the cache register
18 * configurable cache memory that can be instruction only, data only,
21 * cache only. Cache size varies from 2k up to 16k.
27 #define CACR_CENB 0x80000000 /* Enable cache */
30 #define CACR_CINV 0x01000000 /* Invalidate cache */
31 #define CACR_DISI 0x00800000 /* Disable instruction cache */
32 #define CACR_DISD 0x00400000 /* Disable data cache */
33 #define CACR_INVI 0x00200000 /* Invalidate instruction cache */
34 #define CACR_INVD 0x00100000 /* Invalidate data cache */
36 #define CACR_DCM 0x00000200 /* Default cache mode */
56 * Set the cache controller settings we will use. On the cores that support
57 * a split cache configuration we allow all the combinations at Kconfig
58 * time. For those cores that only have an instruction cache we just set
72 /* This is the instruction cache only devices (no split cache, no eusp) */
H A Dm54xxacr.h11 #define CACR_DEC 0x80000000 /* Enable data cache */
15 #define CACR_DHCLK 0x08000000 /* Half data cache lock mode */
16 #define CACR_DDCM_WT 0x00000000 /* Write through cache*/
17 #define CACR_DDCM_CP 0x02000000 /* Copyback cache */
18 #define CACR_DDCM_P 0x04000000 /* No cache, precise */
19 #define CACR_DDCM_IMP 0x06000000 /* No cache, imprecise */
20 #define CACR_DCINVA 0x01000000 /* Invalidate data cache */
21 #define CACR_BEC 0x00080000 /* Enable branch cache */
22 #define CACR_BCINVA 0x00040000 /* Invalidate branch cache */
23 #define CACR_IEC 0x00008000 /* Enable instruction cache */
26 #define CACR_IHLCK 0x00000800 /* Instruction cache half lock */
27 #define CACR_IDCM 0x00000400 /* Instruction cache inhibit */
28 #define CACR_ICINVA 0x00000100 /* Invalidate instr cache */
39 #define ACR_CM_OFF_PRE 0x00000040 /* No cache, precise */
40 #define ACR_CM_OFF_IMP 0x00000060 /* No cache, imprecise */
74 * and data cache. Enable data and instruction caches, also enable write
80 /* Enable data cache */
82 /* outside ACRs : No cache, precise */
131 /* Copyback cache mode must push dirty cache lines first */
H A Dm53xxacr.h4 * m53xxacr.h -- ColdFire version 3 core cache support
16 * cache setup. They have a unified instruction and data cache, with
23 #define CACR_EC 0x80000000 /* Enable cache */
26 #define CACR_HLCK 0x08000000 /* Half cache lock mode */
27 #define CACR_CINVA 0x01000000 /* Invalidate cache */
52 * Define the cache type and arrangement (needed for pushes).
55 #define CACHE_SIZE 0x2000 /* 8k of unified cache */
59 #define CACHE_SIZE 0x4000 /* 16k of unified cache */
68 * Set the cache controller settings we will use. This default in the
69 * CACR is cache inhibited; we use the ACR register to set caching
86 * Unified cache means we will never need to flush for coherency of
H A Dhardirq.h5 #include <linux/cache.h>
/linux-4.1.27/arch/cris/include/arch-v10/arch/
H A Dcache.h4 /* Etrax 100LX have 32-byte cache-lines. */
/linux-4.1.27/arch/m32r/include/asm/
H A Dcache.h4 /* L1 cache line size */
H A Dcachectl.h2 * cachectl.h -- defines for M32R cache control system calls
14 #define ICACHE (1<<0) /* flush instruction cache */
15 #define DCACHE (1<<1) /* writeback and flush data cache */
/linux-4.1.27/arch/m68k/include/uapi/asm/
H A Dcachectl.h6 #define FLUSH_SCOPE_LINE 1 /* Flush a cache line */
8 #define FLUSH_SCOPE_ALL 3 /* Flush the whole cache -- superuser only */
10 #define FLUSH_CACHE_DATA 1 /* Writeback and flush data cache */
11 #define FLUSH_CACHE_INSN 2 /* Flush instruction cache */
H A Dbootinfo-hp300.h24 #define HP_320 0 /* 16MHz 68020+HP MMU+16K external cache */
27 #define HP_345 3 /* 50MHz 68030+32K external cache */
28 #define HP_350 4 /* 25MHz 68020+HP MMU+32K external cache */
30 #define HP_370 6 /* 33MHz 68030+64K external cache */
31 #define HP_375 7 /* 50MHz 68030+32K external cache */
35 #define HP_400 10 /* 50MHz 68030+32K external cache */
/linux-4.1.27/arch/powerpc/boot/
H A Dvirtex405-head.S13 * Invalidate the data cache if the data cache is turned off.
14 * - The 405 core does not invalidate the data cache on power-up
15 * or reset but does turn off the data cache. We cannot assume
16 * that the cache contents are valid.
17 * - If the data cache is turned on this must have been done by
18 * a bootloader and we assume that the cache contents are
/linux-4.1.27/drivers/staging/lustre/lustre/fld/
H A Dfld_cache.c62 * create fld cache.
67 struct fld_cache *cache; fld_cache_init() local
72 OBD_ALLOC_PTR(cache); fld_cache_init()
73 if (cache == NULL) fld_cache_init()
76 INIT_LIST_HEAD(&cache->fci_entries_head); fld_cache_init()
77 INIT_LIST_HEAD(&cache->fci_lru); fld_cache_init()
79 cache->fci_cache_count = 0; fld_cache_init()
80 rwlock_init(&cache->fci_lock); fld_cache_init()
82 strlcpy(cache->fci_name, name, fld_cache_init()
83 sizeof(cache->fci_name)); fld_cache_init()
85 cache->fci_cache_size = cache_size; fld_cache_init()
86 cache->fci_threshold = cache_threshold; fld_cache_init()
88 /* Init fld cache info. */ fld_cache_init()
89 memset(&cache->fci_stat, 0, sizeof(cache->fci_stat)); fld_cache_init()
91 CDEBUG(D_INFO, "%s: FLD cache - Size: %d, Threshold: %d\n", fld_cache_init()
92 cache->fci_name, cache_size, cache_threshold); fld_cache_init()
94 return cache; fld_cache_init()
98 * destroy fld cache.
100 void fld_cache_fini(struct fld_cache *cache) fld_cache_fini() argument
104 LASSERT(cache != NULL); fld_cache_fini()
105 fld_cache_flush(cache); fld_cache_fini()
107 if (cache->fci_stat.fst_count > 0) { fld_cache_fini()
108 pct = cache->fci_stat.fst_cache * 100; fld_cache_fini()
109 do_div(pct, cache->fci_stat.fst_count); fld_cache_fini()
114 CDEBUG(D_INFO, "FLD cache statistics (%s):\n", cache->fci_name); fld_cache_fini()
115 CDEBUG(D_INFO, " Total reqs: %llu\n", cache->fci_stat.fst_count); fld_cache_fini()
116 CDEBUG(D_INFO, " Cache reqs: %llu\n", cache->fci_stat.fst_cache); fld_cache_fini()
119 OBD_FREE_PTR(cache); fld_cache_fini()
125 void fld_cache_entry_delete(struct fld_cache *cache, fld_cache_entry_delete() argument
130 cache->fci_cache_count--; fld_cache_entry_delete()
137 static void fld_fix_new_list(struct fld_cache *cache) fld_fix_new_list() argument
143 struct list_head *head = &cache->fci_entries_head; fld_fix_new_list()
167 fld_cache_entry_delete(cache, f_curr); list_for_each_entry_safe()
177 fld_cache_entry_delete(cache, f_curr); list_for_each_entry_safe()
181 fld_cache_entry_delete(cache, f_curr); list_for_each_entry_safe()
194 fld_cache_entry_delete(cache, f_curr); list_for_each_entry_safe()
199 * add node to fld cache
201 static inline void fld_cache_entry_add(struct fld_cache *cache, fld_cache_entry_add() argument
206 list_add(&f_new->fce_lru, &cache->fci_lru); fld_cache_entry_add()
208 cache->fci_cache_count++; fld_cache_entry_add()
209 fld_fix_new_list(cache); fld_cache_entry_add()
213 * Check if cache needs to be shrunk. If so - do it.
214 * Remove one entry in list and so on until cache is shrunk enough.
216 static int fld_cache_shrink(struct fld_cache *cache) fld_cache_shrink() argument
222 LASSERT(cache != NULL); fld_cache_shrink()
224 if (cache->fci_cache_count < cache->fci_cache_size) fld_cache_shrink()
227 curr = cache->fci_lru.prev; fld_cache_shrink()
229 while (cache->fci_cache_count + cache->fci_threshold > fld_cache_shrink()
230 cache->fci_cache_size && curr != &cache->fci_lru) { fld_cache_shrink()
234 fld_cache_entry_delete(cache, flde); fld_cache_shrink()
238 CDEBUG(D_INFO, "%s: FLD cache - Shrunk by %d entries\n", fld_cache_shrink()
239 cache->fci_name, num); fld_cache_shrink()
245 * kill all fld cache entries.
247 void fld_cache_flush(struct fld_cache *cache) fld_cache_flush() argument
249 write_lock(&cache->fci_lock); fld_cache_flush()
250 cache->fci_cache_size = 0; fld_cache_flush()
251 fld_cache_shrink(cache); fld_cache_flush()
252 write_unlock(&cache->fci_lock); fld_cache_flush()
260 static void fld_cache_punch_hole(struct fld_cache *cache, fld_cache_punch_hole() argument
290 fld_cache_entry_add(cache, f_new, &f_curr->fce_list); fld_cache_punch_hole()
291 fld_cache_entry_add(cache, fldt, &f_new->fce_list); fld_cache_punch_hole()
297 * handle range overlap in fld cache.
299 static void fld_cache_overlap_handle(struct fld_cache *cache, fld_cache_overlap_handle() argument
319 fld_fix_new_list(cache); fld_cache_overlap_handle()
324 * e.g. whole range migrated. update fld cache entry */ fld_cache_overlap_handle()
328 fld_fix_new_list(cache); fld_cache_overlap_handle()
334 fld_cache_punch_hole(cache, f_curr, f_new); fld_cache_overlap_handle()
344 fld_cache_entry_add(cache, f_new, f_curr->fce_list.prev); fld_cache_overlap_handle()
354 fld_cache_entry_add(cache, f_new, &f_curr->fce_list); fld_cache_overlap_handle()
376 * Insert FLD entry in FLD cache.
381 int fld_cache_insert_nolock(struct fld_cache *cache, fld_cache_insert_nolock() argument
398 if (!cache->fci_no_shrink) fld_cache_insert_nolock()
399 fld_cache_shrink(cache); fld_cache_insert_nolock()
401 head = &cache->fci_entries_head; fld_cache_insert_nolock()
414 fld_cache_overlap_handle(cache, f_curr, f_new); list_for_each_entry_safe()
423 /* Add new entry to cache and lru list. */
424 fld_cache_entry_add(cache, f_new, prev);
429 int fld_cache_insert(struct fld_cache *cache, fld_cache_insert() argument
439 write_lock(&cache->fci_lock); fld_cache_insert()
440 rc = fld_cache_insert_nolock(cache, flde); fld_cache_insert()
441 write_unlock(&cache->fci_lock); fld_cache_insert()
448 void fld_cache_delete_nolock(struct fld_cache *cache, fld_cache_delete_nolock() argument
455 head = &cache->fci_entries_head; list_for_each_entry_safe()
461 fld_cache_entry_delete(cache, flde); list_for_each_entry_safe()
468 * Delete FLD entry in FLD cache.
471 void fld_cache_delete(struct fld_cache *cache, fld_cache_delete() argument
474 write_lock(&cache->fci_lock); fld_cache_delete()
475 fld_cache_delete_nolock(cache, range); fld_cache_delete()
476 write_unlock(&cache->fci_lock); fld_cache_delete()
480 *fld_cache_entry_lookup_nolock(struct fld_cache *cache, fld_cache_entry_lookup_nolock() argument
487 head = &cache->fci_entries_head; list_for_each_entry()
501 * lookup \a seq sequence for range in fld cache.
504 *fld_cache_entry_lookup(struct fld_cache *cache, struct lu_seq_range *range) fld_cache_entry_lookup() argument
508 read_lock(&cache->fci_lock); fld_cache_entry_lookup()
509 got = fld_cache_entry_lookup_nolock(cache, range); fld_cache_entry_lookup()
510 read_unlock(&cache->fci_lock); fld_cache_entry_lookup()
515 * lookup \a seq sequence for range in fld cache.
517 int fld_cache_lookup(struct fld_cache *cache, fld_cache_lookup() argument
524 read_lock(&cache->fci_lock); fld_cache_lookup()
525 head = &cache->fci_entries_head; fld_cache_lookup()
527 cache->fci_stat.fst_count++; list_for_each_entry()
539 cache->fci_stat.fst_cache++; list_for_each_entry()
540 read_unlock(&cache->fci_lock); list_for_each_entry()
544 read_unlock(&cache->fci_lock);
H A Dfld_internal.h77 * fld cache entries are sorted on range->lsr_start field. */
125 /* 4M of FLD cache will not hurt client a lot. */
128 /* 1M of FLD cache will not hurt client a lot. */
153 void fld_cache_fini(struct fld_cache *cache);
155 void fld_cache_flush(struct fld_cache *cache);
157 int fld_cache_insert(struct fld_cache *cache,
163 int fld_cache_insert_nolock(struct fld_cache *cache,
165 void fld_cache_delete(struct fld_cache *cache,
167 void fld_cache_delete_nolock(struct fld_cache *cache,
169 int fld_cache_lookup(struct fld_cache *cache,
173 fld_cache_entry_lookup(struct fld_cache *cache, struct lu_seq_range *range);
174 void fld_cache_entry_delete(struct fld_cache *cache,
176 void fld_dump_cache_entries(struct fld_cache *cache);
179 *fld_cache_entry_lookup_nolock(struct fld_cache *cache,
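A rough sketch of the client-side FLD cache lifecycle implied by the prototypes above. The exact parameter lists (init sizes/threshold, lookup arguments) and the ERR_PTR-vs-NULL failure convention are assumptions based on this excerpt rather than a checked copy of the staging headers:

#include "fld_internal.h"       /* staging lustre: declares the fld_cache API */
#include <linux/err.h>

static int demo_fld_cache(u64 seq)
{
        struct lu_seq_range range;
        struct fld_cache *cache;
        int rc;

        /* cache size and shrink threshold here are illustrative */
        cache = fld_cache_init("demo", 128, 16);
        if (IS_ERR_OR_NULL(cache))
                return cache ? PTR_ERR(cache) : -ENOMEM;

        rc = fld_cache_lookup(cache, seq, &range);      /* -ENOENT on a miss */

        fld_cache_fini(cache);
        return rc;
}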
/linux-4.1.27/fs/squashfs/
H A Dcache.c21 * cache.c
28 * This file implements a generic cache used for both caches,
29 * plus functions layered on top of the generic cache implementation to
32 * To avoid out of memory and fragmentation issues with vmalloc the cache
35 * It should be noted that the cache is not used for file datablocks, these
36 * are decompressed and cached in the page-cache in the normal way. The
37 * cache is only used to temporarily cache fragment and metadata blocks
62 * Look-up block in cache, and increment usage count. If not in cache, read
66 struct squashfs_cache *cache, u64 block, int length) squashfs_cache_get()
71 spin_lock(&cache->lock); squashfs_cache_get()
74 for (i = cache->curr_blk, n = 0; n < cache->entries; n++) { squashfs_cache_get()
75 if (cache->entry[i].block == block) { squashfs_cache_get()
76 cache->curr_blk = i; squashfs_cache_get()
79 i = (i + 1) % cache->entries; squashfs_cache_get()
82 if (n == cache->entries) { squashfs_cache_get()
84 * Block not in cache, if all cache entries are used squashfs_cache_get()
87 if (cache->unused == 0) { squashfs_cache_get()
88 cache->num_waiters++; squashfs_cache_get()
89 spin_unlock(&cache->lock); squashfs_cache_get()
90 wait_event(cache->wait_queue, cache->unused); squashfs_cache_get()
91 spin_lock(&cache->lock); squashfs_cache_get()
92 cache->num_waiters--; squashfs_cache_get()
97 * At least one unused cache entry. A simple squashfs_cache_get()
99 * be evicted from the cache. squashfs_cache_get()
101 i = cache->next_blk; squashfs_cache_get()
102 for (n = 0; n < cache->entries; n++) { squashfs_cache_get()
103 if (cache->entry[i].refcount == 0) squashfs_cache_get()
105 i = (i + 1) % cache->entries; squashfs_cache_get()
108 cache->next_blk = (i + 1) % cache->entries; squashfs_cache_get()
109 entry = &cache->entry[i]; squashfs_cache_get()
112 * Initialise chosen cache entry, and fill it in from squashfs_cache_get()
115 cache->unused--; squashfs_cache_get()
121 spin_unlock(&cache->lock); squashfs_cache_get()
126 spin_lock(&cache->lock); squashfs_cache_get()
135 * have looked it up in the cache, and have slept squashfs_cache_get()
139 spin_unlock(&cache->lock); squashfs_cache_get()
142 spin_unlock(&cache->lock); squashfs_cache_get()
148 * Block already in cache. Increment refcount so it doesn't squashfs_cache_get()
150 * previously unused there's one less cache entry available squashfs_cache_get()
153 entry = &cache->entry[i]; squashfs_cache_get()
155 cache->unused--; squashfs_cache_get()
164 spin_unlock(&cache->lock); squashfs_cache_get()
167 spin_unlock(&cache->lock); squashfs_cache_get()
174 cache->name, i, entry->block, entry->refcount, entry->error); squashfs_cache_get()
177 ERROR("Unable to read %s cache entry [%llx]\n", cache->name, squashfs_cache_get()
184 * Release cache entry, once usage count is zero it can be reused.
188 struct squashfs_cache *cache = entry->cache; squashfs_cache_put() local
190 spin_lock(&cache->lock); squashfs_cache_put()
193 cache->unused++; squashfs_cache_put()
198 if (cache->num_waiters) { squashfs_cache_put()
199 spin_unlock(&cache->lock); squashfs_cache_put()
200 wake_up(&cache->wait_queue); squashfs_cache_put()
204 spin_unlock(&cache->lock); squashfs_cache_put()
208 * Delete cache reclaiming all kmalloced buffers.
210 void squashfs_cache_delete(struct squashfs_cache *cache) squashfs_cache_delete() argument
214 if (cache == NULL) squashfs_cache_delete()
217 for (i = 0; i < cache->entries; i++) { squashfs_cache_delete()
218 if (cache->entry[i].data) { squashfs_cache_delete()
219 for (j = 0; j < cache->pages; j++) squashfs_cache_delete()
220 kfree(cache->entry[i].data[j]); squashfs_cache_delete()
221 kfree(cache->entry[i].data); squashfs_cache_delete()
223 kfree(cache->entry[i].actor); squashfs_cache_delete()
226 kfree(cache->entry); squashfs_cache_delete()
227 kfree(cache); squashfs_cache_delete()
232 * Initialise cache allocating the specified number of entries, each of
240 struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL); squashfs_cache_init() local
242 if (cache == NULL) { squashfs_cache_init()
243 ERROR("Failed to allocate %s cache\n", name); squashfs_cache_init()
247 cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL); squashfs_cache_init()
248 if (cache->entry == NULL) { squashfs_cache_init()
249 ERROR("Failed to allocate %s cache\n", name); squashfs_cache_init()
253 cache->curr_blk = 0; squashfs_cache_init()
254 cache->next_blk = 0; squashfs_cache_init()
255 cache->unused = entries; squashfs_cache_init()
256 cache->entries = entries; squashfs_cache_init()
257 cache->block_size = block_size; squashfs_cache_init()
258 cache->pages = block_size >> PAGE_CACHE_SHIFT; squashfs_cache_init()
259 cache->pages = cache->pages ? cache->pages : 1; squashfs_cache_init()
260 cache->name = name; squashfs_cache_init()
261 cache->num_waiters = 0; squashfs_cache_init()
262 spin_lock_init(&cache->lock); squashfs_cache_init()
263 init_waitqueue_head(&cache->wait_queue); squashfs_cache_init()
266 struct squashfs_cache_entry *entry = &cache->entry[i]; squashfs_cache_init()
268 init_waitqueue_head(&cache->entry[i].wait_queue); squashfs_cache_init()
269 entry->cache = cache; squashfs_cache_init()
271 entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL); squashfs_cache_init()
273 ERROR("Failed to allocate %s cache entry\n", name); squashfs_cache_init()
277 for (j = 0; j < cache->pages; j++) { squashfs_cache_init()
286 cache->pages, 0); squashfs_cache_init()
288 ERROR("Failed to allocate %s cache entry\n", name); squashfs_cache_init()
293 return cache; squashfs_cache_init()
296 squashfs_cache_delete(cache); squashfs_cache_init()
302 * Copy up to length bytes from cache entry to buffer starting at offset bytes
303 * into the cache entry. If there's not length bytes then copy the number of
386 * Look-up in the fragment cache the fragment located at <start_block> in the
401 * filesystem. The cache is used here to avoid duplicating locking and
65 squashfs_cache_get(struct super_block *sb, struct squashfs_cache *cache, u64 block, int length) squashfs_cache_get() argument
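The squashfs cache is driven through a small get/put API. A minimal sketch of the pattern using the signatures visible above; the init argument order, the cache name, and the entry/block-size values are taken from this excerpt and are illustrative:

#include "squashfs_fs_sb.h"     /* fs/squashfs private headers */
#include "squashfs.h"

static int demo_squashfs_cache(struct super_block *sb, u64 block, int length)
{
        struct squashfs_cache *cache;
        struct squashfs_cache_entry *entry;
        int err;

        cache = squashfs_cache_init("demo", 3 /* entries */, 8192 /* block size */);
        if (cache == NULL)
                return -ENOMEM;

        entry = squashfs_cache_get(sb, cache, block, length);  /* may sleep until an entry is free */
        err = entry->error;
        if (!err) {
                /* ... copy data out of the entry's buffers here ... */
        }

        squashfs_cache_put(entry);      /* refcount drops; entry becomes reusable */
        squashfs_cache_delete(cache);   /* frees all kmalloc'd buffers */
        return err;
}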
/linux-4.1.27/drivers/base/regmap/
H A Dregcache-flat.c2 * Register cache access API - flat caching support
22 unsigned int *cache; regcache_flat_init() local
24 map->cache = kzalloc(sizeof(unsigned int) * (map->max_register + 1), regcache_flat_init()
26 if (!map->cache) regcache_flat_init()
29 cache = map->cache; regcache_flat_init()
32 cache[map->reg_defaults[i].reg] = map->reg_defaults[i].def; regcache_flat_init()
39 kfree(map->cache); regcache_flat_exit()
40 map->cache = NULL; regcache_flat_exit()
48 unsigned int *cache = map->cache; regcache_flat_read() local
50 *value = cache[reg]; regcache_flat_read()
58 unsigned int *cache = map->cache; regcache_flat_write() local
60 cache[reg] = value; regcache_flat_write()
H A Dregcache.c2 * Register cache access API
58 dev_warn(map->dev, "No cache defaults, reading back from HW\n"); regcache_hw_init()
60 /* Bypass the cache access till data read from HW*/ regcache_hw_init()
128 map->cache = NULL; regcache_init()
149 /* Some devices such as PMICs don't have cache defaults, regcache_init()
151 * crafting the cache defaults by hand. regcache_init()
164 dev_dbg(map->dev, "Initializing %s cache\n", regcache_init()
192 dev_dbg(map->dev, "Destroying %s cache\n", regcache_exit()
199 * regcache_read: Fetch the value of a given register from the cache.
230 * regcache_write: Set the value of a given register in the cache.
289 * regcache_sync: Sync the register cache with the hardware.
311 dev_dbg(map->dev, "Syncing %s cache\n", regcache_sync()
356 * regcache_sync_region: Sync part of the register cache with the hardware.
382 dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max); regcache_sync_region()
411 * regcache_drop_region: Discard part of the register cache
417 * Discard part of the register cache.
442 * regcache_cache_only: Put a register map into cache only mode
447 * When a register map is marked as cache only writes to the register
448 * map API will only update the register cache, they will not cause
464 * regcache_mark_dirty: Mark the register cache as dirty
468 * Mark the register cache as dirty, for example due to the device
469 * having been powered down for suspend. If the cache is not marked
470 * as dirty then the cache sync will be suppressed.
481 * regcache_cache_bypass: Put a register map into cache bypass mode
486 * When a register map is marked with the cache bypass option, writes
488 * the cache directly. This is useful when syncing the cache back to
516 u8 *cache = base; regcache_set_val() local
517 cache[idx] = val; regcache_set_val()
521 u16 *cache = base; regcache_set_val() local
522 cache[idx] = val; regcache_set_val()
526 u32 *cache = base; regcache_set_val() local
527 cache[idx] = val; regcache_set_val()
549 const u8 *cache = base; regcache_get_val() local
550 return cache[idx]; regcache_get_val()
553 const u16 *cache = base; regcache_get_val() local
554 return cache[idx]; regcache_get_val()
557 const u32 *cache = base; regcache_get_val() local
558 return cache[idx]; regcache_get_val()
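In driver code the regcache calls above are typically wrapped around a suspend/resume cycle: writes go to the cache only while the device is powered off, the cache is marked dirty, and regcache_sync() replays the non-default values once power returns. A minimal sketch with an assumed struct regmap pointer:

#include <linux/regmap.h>

static void my_chip_suspend(struct regmap *map)
{
        regcache_cache_only(map, true);         /* writes now only touch the cache */
        regcache_mark_dirty(map);               /* force a full sync on resume */
        /* ... power the device down ... */
}

static int my_chip_resume(struct regmap *map)
{
        /* ... power the device back up ... */
        regcache_cache_only(map, false);
        return regcache_sync(map);              /* write back cached, non-default values */
}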
/linux-4.1.27/drivers/infiniband/core/
H A Dcache.c77 struct ib_gid_cache *cache; ib_get_cached_gid() local
84 read_lock_irqsave(&device->cache.lock, flags); ib_get_cached_gid()
86 cache = device->cache.gid_cache[port_num - start_port(device)]; ib_get_cached_gid()
88 if (index < 0 || index >= cache->table_len) ib_get_cached_gid()
91 *gid = cache->table[index]; ib_get_cached_gid()
93 read_unlock_irqrestore(&device->cache.lock, flags); ib_get_cached_gid()
104 struct ib_gid_cache *cache; ib_find_cached_gid() local
113 read_lock_irqsave(&device->cache.lock, flags); ib_find_cached_gid()
116 cache = device->cache.gid_cache[p]; ib_find_cached_gid()
117 for (i = 0; i < cache->table_len; ++i) { ib_find_cached_gid()
118 if (!memcmp(gid, &cache->table[i], sizeof *gid)) { ib_find_cached_gid()
128 read_unlock_irqrestore(&device->cache.lock, flags); ib_find_cached_gid()
139 struct ib_pkey_cache *cache; ib_get_cached_pkey() local
146 read_lock_irqsave(&device->cache.lock, flags); ib_get_cached_pkey()
148 cache = device->cache.pkey_cache[port_num - start_port(device)]; ib_get_cached_pkey()
150 if (index < 0 || index >= cache->table_len) ib_get_cached_pkey()
153 *pkey = cache->table[index]; ib_get_cached_pkey()
155 read_unlock_irqrestore(&device->cache.lock, flags); ib_get_cached_pkey()
166 struct ib_pkey_cache *cache; ib_find_cached_pkey() local
175 read_lock_irqsave(&device->cache.lock, flags); ib_find_cached_pkey()
177 cache = device->cache.pkey_cache[port_num - start_port(device)]; ib_find_cached_pkey()
181 for (i = 0; i < cache->table_len; ++i) ib_find_cached_pkey()
182 if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) { ib_find_cached_pkey()
183 if (cache->table[i] & 0x8000) { ib_find_cached_pkey()
196 read_unlock_irqrestore(&device->cache.lock, flags); ib_find_cached_pkey()
207 struct ib_pkey_cache *cache; ib_find_exact_cached_pkey() local
215 read_lock_irqsave(&device->cache.lock, flags); ib_find_exact_cached_pkey()
217 cache = device->cache.pkey_cache[port_num - start_port(device)]; ib_find_exact_cached_pkey()
221 for (i = 0; i < cache->table_len; ++i) ib_find_exact_cached_pkey()
222 if (cache->table[i] == pkey) { ib_find_exact_cached_pkey()
228 read_unlock_irqrestore(&device->cache.lock, flags); ib_find_exact_cached_pkey()
244 read_lock_irqsave(&device->cache.lock, flags); ib_get_cached_lmc()
245 *lmc = device->cache.lmc_cache[port_num - start_port(device)]; ib_get_cached_lmc()
246 read_unlock_irqrestore(&device->cache.lock, flags); ib_get_cached_lmc()
304 write_lock_irq(&device->cache.lock); ib_cache_update()
306 old_pkey_cache = device->cache.pkey_cache[port - start_port(device)]; ib_cache_update()
307 old_gid_cache = device->cache.gid_cache [port - start_port(device)]; ib_cache_update()
309 device->cache.pkey_cache[port - start_port(device)] = pkey_cache; ib_cache_update()
310 device->cache.gid_cache [port - start_port(device)] = gid_cache; ib_cache_update()
312 device->cache.lmc_cache[port - start_port(device)] = tprops->lmc; ib_cache_update()
314 write_unlock_irq(&device->cache.lock); ib_cache_update()
362 rwlock_init(&device->cache.lock); ib_cache_setup_one()
364 device->cache.pkey_cache = ib_cache_setup_one()
365 kmalloc(sizeof *device->cache.pkey_cache * ib_cache_setup_one()
367 device->cache.gid_cache = ib_cache_setup_one()
368 kmalloc(sizeof *device->cache.gid_cache * ib_cache_setup_one()
371 device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache * ib_cache_setup_one()
376 if (!device->cache.pkey_cache || !device->cache.gid_cache || ib_cache_setup_one()
377 !device->cache.lmc_cache) { ib_cache_setup_one()
378 printk(KERN_WARNING "Couldn't allocate cache " ib_cache_setup_one()
384 device->cache.pkey_cache[p] = NULL; ib_cache_setup_one()
385 device->cache.gid_cache [p] = NULL; ib_cache_setup_one()
389 INIT_IB_EVENT_HANDLER(&device->cache.event_handler, ib_cache_setup_one()
391 if (ib_register_event_handler(&device->cache.event_handler)) ib_cache_setup_one()
398 kfree(device->cache.pkey_cache[p]); ib_cache_setup_one()
399 kfree(device->cache.gid_cache[p]); ib_cache_setup_one()
403 kfree(device->cache.pkey_cache); ib_cache_setup_one()
404 kfree(device->cache.gid_cache); ib_cache_setup_one()
405 kfree(device->cache.lmc_cache); ib_cache_setup_one()
412 ib_unregister_event_handler(&device->cache.event_handler); ib_cache_cleanup_one()
416 kfree(device->cache.pkey_cache[p]); ib_cache_cleanup_one()
417 kfree(device->cache.gid_cache[p]); ib_cache_cleanup_one()
420 kfree(device->cache.pkey_cache); ib_cache_cleanup_one()
421 kfree(device->cache.gid_cache); ib_cache_cleanup_one()
422 kfree(device->cache.lmc_cache); ib_cache_cleanup_one()
426 .name = "cache",
/linux-4.1.27/arch/blackfin/mach-common/
H A DMakefile6 cache.o cache-c.o entry.o head.o \
/linux-4.1.27/arch/hexagon/mm/
H A DMakefile5 obj-y := init.o pgalloc.o ioremap.o uaccess.o vm_fault.o cache.o
/linux-4.1.27/arch/metag/include/asm/
H A Dcache.h4 /* L1 cache line size (64 bytes) */
12 * With an L2 cache, we may invalidate dirty lines, so we need to ensure DMA
13 * buffers have cache line alignment.
H A Dl2cache.h21 * Functions for reading of L2 cache configuration.
49 /* Get the L2 cache size in bytes */ meta_l2c_size()
61 /* Get the number of ways in the L2 cache */ meta_l2c_ways()
72 /* Get the line size of the L2 cache */ meta_l2c_linesize()
88 /* Get the revision ID of the L2 cache */ meta_l2c_revision()
119 /* Set whether the L2 cache is enabled. */ _meta_l2c_enable()
132 /* Set whether the L2 cache prefetch is enabled. */ _meta_l2c_pf_enable()
145 /* Return whether the L2 cache is enabled */ _meta_l2c_is_enabled()
151 /* Return whether the L2 cache prefetch is enabled */ _meta_l2c_pf_is_enabled()
158 /* Return whether the L2 cache is enabled */ meta_l2c_is_enabled()
173 * Ensure the L2 cache is disabled.
179 * Ensure the L2 cache is enabled.
184 /* Return whether the L2 cache prefetch is enabled */ meta_l2c_pf_is_enabled()
191 * Set whether the L2 cache prefetch is enabled.
197 * Flush the L2 cache.
203 * Write back all dirty cache lines in the L2 cache.
211 /* no need to purge if it's not a writeback cache */ meta_l2c_writeback()
H A Dcachepart.h2 * Meta cache partition manipulation.
11 * get_dcache_size() - Get size of data cache.
16 * get_icache_size() - Get size of code cache.
H A Dmetag_isa.h17 /* L1 cache layout */
19 /* Data cache line size as bytes and shift */
23 /* Number of ways in the data cache */
26 /* Instruction cache line size as bytes and shift */
30 /* Number of ways in the instruction cache */
77 #define CRLINPHY1_SINGLE_BIT 0x00000004 /* Set if TLB does not cache entry */
H A Duser_gateway.h30 /* Avoid cache aliases on virtually tagged cache. */ set_gateway_tls()
/linux-4.1.27/arch/sh/include/asm/
H A Dcache.h1 /* $Id: cache.h,v 1.6 2004/03/11 18:08:05 lethal Exp $
3 * include/asm-sh/cache.h
13 #include <cpu/cache.h>
21 unsigned int ways; /* Number of cache ways */
22 unsigned int sets; /* Number of cache sets */
29 * in memory mapped cache array ops.
37 * 1. those used to select the cache set during indexing
/linux-4.1.27/fs/
H A Dmbcache.c13 * There can only be one cache entry in a cache per device and block number.
16 * or specified at cache create time.
18 * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
19 * in the cache. A valid entry is in the main hash tables of the cache,
23 * A valid cache entry is only in the lru list if no handles refer to it.
24 * Invalid cache entries will be freed when the last handle to the cache
106 MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
124 * accessing cache data structures on SMP machines. The lru list is
193 struct mb_cache *cache = ce->e_cache; __mb_cache_entry_forget() local
196 kmem_cache_free(cache->c_entry_cache, ce); __mb_cache_entry_forget()
197 atomic_dec(&cache->c_entry_count); __mb_cache_entry_forget()
205 /* Wake up all processes queuing for this cache entry. */ __mb_cache_entry_release()
211 * Make sure that all cache entries on lru_list have __mb_cache_entry_release()
293 struct mb_cache *cache; mb_cache_shrink_count() local
297 list_for_each_entry(cache, &mb_cache_list, c_cache_list) { mb_cache_shrink_count()
298 mb_debug("cache %s (%d)", cache->c_name, mb_cache_shrink_count()
299 atomic_read(&cache->c_entry_count)); mb_cache_shrink_count()
300 count += atomic_read(&cache->c_entry_count); mb_cache_shrink_count()
314 * mb_cache_create() create a new cache
316 * All entries in one cache are equal size. Cache entries may be from
318 * the cache with kernel memory management. Returns NULL if no more
321 * @name: name of the cache (informal)
328 struct mb_cache *cache = NULL; mb_cache_create() local
338 cache = kmalloc(sizeof(struct mb_cache), GFP_KERNEL); mb_cache_create()
339 if (!cache) mb_cache_create()
341 cache->c_name = name; mb_cache_create()
342 atomic_set(&cache->c_entry_count, 0); mb_cache_create()
343 cache->c_bucket_bits = bucket_bits; mb_cache_create()
344 cache->c_block_hash = kmalloc(bucket_count * mb_cache_create()
346 if (!cache->c_block_hash) mb_cache_create()
349 INIT_HLIST_BL_HEAD(&cache->c_block_hash[n]); mb_cache_create()
350 cache->c_index_hash = kmalloc(bucket_count * mb_cache_create()
352 if (!cache->c_index_hash) mb_cache_create()
355 INIT_HLIST_BL_HEAD(&cache->c_index_hash[n]); mb_cache_create()
363 cache->c_entry_cache = mb_cache_kmem_cache; mb_cache_create()
366 * Set an upper limit on the number of cache entries so that the hash mb_cache_create()
369 cache->c_max_entries = bucket_count << 4; mb_cache_create()
372 list_add(&cache->c_cache_list, &mb_cache_list); mb_cache_create()
374 return cache; mb_cache_create()
377 kfree(cache->c_index_hash); mb_cache_create()
380 kfree(cache->c_block_hash); mb_cache_create()
381 kfree(cache); mb_cache_create()
389 * Removes all cache entries of a device from the cache. All cache entries
390 * currently in use cannot be freed, and thus remain in the cache. All others
393 * @bdev: which device's cache entries to shrink
447 * Shrinks the cache to its minimum possible size (hopefully 0 entries),
452 mb_cache_destroy(struct mb_cache *cache) mb_cache_destroy() argument
459 if (ce->e_cache == cache) mb_cache_destroy()
462 list_del(&cache->c_cache_list); mb_cache_destroy()
478 if (atomic_read(&cache->c_entry_count) > 0) { mb_cache_destroy()
479 mb_error("cache %s: %d orphaned entries", mb_cache_destroy()
480 cache->c_name, mb_cache_destroy()
481 atomic_read(&cache->c_entry_count)); mb_cache_destroy()
488 kfree(cache->c_index_hash); mb_cache_destroy()
489 kfree(cache->c_block_hash); mb_cache_destroy()
490 kfree(cache); mb_cache_destroy()
496 * Allocates a new cache entry. The new entry will not be valid initially,
498 * then inserted into the cache using mb_cache_entry_insert(). Returns NULL
502 mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags) mb_cache_entry_alloc() argument
506 if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) { mb_cache_entry_alloc()
514 if (ce->e_cache == cache) { mb_cache_entry_alloc()
546 ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags); mb_cache_entry_alloc()
549 atomic_inc(&cache->c_entry_count); mb_cache_entry_alloc()
553 ce->e_cache = cache; mb_cache_entry_alloc()
557 ce->e_block_hash_p = &cache->c_block_hash[0]; mb_cache_entry_alloc()
558 ce->e_index_hash_p = &cache->c_index_hash[0]; mb_cache_entry_alloc()
568 * the cache. After this, the cache entry can be looked up, but is not yet
570 * success, or -EBUSY if a cache entry for that device + inode exists
572 * has inserted the same cache entry in the meantime).
574 * @bdev: device the cache entry belongs to
582 struct mb_cache *cache = ce->e_cache; mb_cache_entry_insert() local
591 cache->c_bucket_bits); mb_cache_entry_insert()
592 block_hash_p = &cache->c_block_hash[bucket]; mb_cache_entry_insert()
609 bucket = hash_long(key, cache->c_bucket_bits);
610 index_hash_p = &cache->c_index_hash[bucket];
622 * Release a handle to a cache entry. When the last handle to a cache entry
655 * Get a cache entry by device / block number. (There can only be one entry
656 * in the cache per device and block.) Returns NULL if no such cache entry
657 * exists. The returned cache entry is locked for exclusive access ("single
661 mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev, mb_cache_entry_get() argument
670 cache->c_bucket_bits); mb_cache_entry_get()
671 block_hash_p = &cache->c_block_hash[bucket]; mb_cache_entry_get()
776 * Find the first cache entry on a given device with a certain key in
779 * returned cache entry is locked for shared access ("multiple readers").
781 * @cache: the cache to search
782 * @bdev: the device the cache entry should belong to
786 mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev, mb_cache_entry_find_first() argument
789 unsigned int bucket = hash_long(key, cache->c_bucket_bits); mb_cache_entry_find_first()
794 index_hash_p = &cache->c_index_hash[bucket]; mb_cache_entry_find_first()
808 * Find the next cache entry on a given device with a certain key in an
820 * @bdev: the device the cache entry should belong to
827 struct mb_cache *cache = prev->e_cache; mb_cache_entry_find_next() local
828 unsigned int bucket = hash_long(key, cache->c_bucket_bits); mb_cache_entry_find_next()
833 index_hash_p = &cache->c_index_hash[bucket]; mb_cache_entry_find_next()
/linux-4.1.27/fs/btrfs/tests/
H A Dfree-space-tests.c22 #include "../free-space-cache.h"
27 struct btrfs_block_group_cache *cache; init_test_block_group() local
29 cache = kzalloc(sizeof(*cache), GFP_NOFS); init_test_block_group()
30 if (!cache) init_test_block_group()
32 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), init_test_block_group()
34 if (!cache->free_space_ctl) { init_test_block_group()
35 kfree(cache); init_test_block_group()
39 cache->key.objectid = 0; init_test_block_group()
40 cache->key.offset = 1024 * 1024 * 1024; init_test_block_group()
41 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; init_test_block_group()
42 cache->sectorsize = 4096; init_test_block_group()
43 cache->full_stripe_len = 4096; init_test_block_group()
45 spin_lock_init(&cache->lock); init_test_block_group()
46 INIT_LIST_HEAD(&cache->list); init_test_block_group()
47 INIT_LIST_HEAD(&cache->cluster_list); init_test_block_group()
48 INIT_LIST_HEAD(&cache->bg_list); init_test_block_group()
50 btrfs_init_free_space_ctl(cache); init_test_block_group()
52 return cache; init_test_block_group()
60 static int test_extents(struct btrfs_block_group_cache *cache) test_extents() argument
67 ret = btrfs_add_free_space(cache, 0, 4 * 1024 * 1024); test_extents()
73 ret = btrfs_remove_free_space(cache, 0, 4 * 1024 * 1024); test_extents()
79 if (test_check_exists(cache, 0, 4 * 1024 * 1024)) { test_extents()
85 ret = btrfs_add_free_space(cache, 0, 4 * 1024 * 1024); test_extents()
91 ret = btrfs_remove_free_space(cache, 3 * 1024 * 1024, 1 * 1024 * 1024); test_extents()
97 ret = btrfs_remove_free_space(cache, 0, 1 * 1024 * 1024); test_extents()
103 ret = btrfs_remove_free_space(cache, 2 * 1024 * 1024, 4096); test_extents()
109 if (test_check_exists(cache, 0, 1 * 1024 * 1024)) { test_extents()
114 if (test_check_exists(cache, 2 * 1024 * 1024, 4096)) { test_extents()
119 if (test_check_exists(cache, 3 * 1024 * 1024, 1 * 1024 * 1024)) { test_extents()
125 __btrfs_remove_free_space_cache(cache->free_space_ctl); test_extents()
130 static int test_bitmaps(struct btrfs_block_group_cache *cache) test_bitmaps() argument
137 ret = test_add_free_space_entry(cache, 0, 4 * 1024 * 1024, 1); test_bitmaps()
143 ret = btrfs_remove_free_space(cache, 0, 4 * 1024 * 1024); test_bitmaps()
149 if (test_check_exists(cache, 0, 4 * 1024 * 1024)) { test_bitmaps()
154 ret = test_add_free_space_entry(cache, 0, 4 * 1024 * 1024, 1); test_bitmaps()
160 ret = btrfs_remove_free_space(cache, 1 * 1024 * 1024, 2 * 1024 * 1024); test_bitmaps()
173 ret = test_add_free_space_entry(cache, next_bitmap_offset - test_bitmaps()
181 ret = btrfs_remove_free_space(cache, next_bitmap_offset - test_bitmaps()
188 if (test_check_exists(cache, next_bitmap_offset - (1 * 1024 * 1024), test_bitmaps()
194 __btrfs_remove_free_space_cache(cache->free_space_ctl); test_bitmaps()
200 static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache) test_bitmaps_and_extents() argument
212 ret = test_add_free_space_entry(cache, 4 * 1024 * 1024, 1 * 1024 * 1024, 1); test_bitmaps_and_extents()
218 ret = test_add_free_space_entry(cache, 0, 1 * 1024 * 1024, 0); test_bitmaps_and_extents()
224 ret = btrfs_remove_free_space(cache, 0, 1 * 1024 * 1024); test_bitmaps_and_extents()
230 if (test_check_exists(cache, 0, 1 * 1024 * 1024)) { test_bitmaps_and_extents()
236 ret = test_add_free_space_entry(cache, 0, 1 * 1024 * 1024, 0); test_bitmaps_and_extents()
242 ret = btrfs_remove_free_space(cache, 4 * 1024 * 1024, 1 * 1024 * 1024); test_bitmaps_and_extents()
248 if (test_check_exists(cache, 4 * 1024 * 1024, 1 * 1024 * 1024)) { test_bitmaps_and_extents()
257 ret = test_add_free_space_entry(cache, 1 * 1024 * 1024, 4 * 1024 * 1024, 1); test_bitmaps_and_extents()
263 ret = btrfs_remove_free_space(cache, 512 * 1024, 3 * 1024 * 1024); test_bitmaps_and_extents()
269 if (test_check_exists(cache, 512 * 1024, 3 * 1024 * 1024)) { test_bitmaps_and_extents()
274 __btrfs_remove_free_space_cache(cache->free_space_ctl); test_bitmaps_and_extents()
277 ret = test_add_free_space_entry(cache, 4 * 1024 * 1024, 4 * 1024 * 1024, 1); test_bitmaps_and_extents()
283 ret = test_add_free_space_entry(cache, 2 * 1024 * 1024, 2 * 1024 * 1024, 0); test_bitmaps_and_extents()
285 test_msg("Couldn't add extent to the cache %d\n", ret); test_bitmaps_and_extents()
289 ret = btrfs_remove_free_space(cache, 3 * 1024 * 1024, 4 * 1024 * 1024); test_bitmaps_and_extents()
295 if (test_check_exists(cache, 3 * 1024 * 1024, 4 * 1024 * 1024)) { test_bitmaps_and_extents()
310 __btrfs_remove_free_space_cache(cache->free_space_ctl); test_bitmaps_and_extents()
311 ret = test_add_free_space_entry(cache, bitmap_offset + 4 * 1024 * 1024, test_bitmaps_and_extents()
318 ret = test_add_free_space_entry(cache, bitmap_offset - 1 * 1024 * 1024, test_bitmaps_and_extents()
325 ret = btrfs_remove_free_space(cache, bitmap_offset + 1 * 1024 * 1024, test_bitmaps_and_extents()
332 if (test_check_exists(cache, bitmap_offset + 1 * 1024 * 1024, test_bitmaps_and_extents()
338 __btrfs_remove_free_space_cache(cache->free_space_ctl); test_bitmaps_and_extents()
346 ret = test_add_free_space_entry(cache, 1 * 1024 * 1024, 2 * 1024 * 1024, 1); test_bitmaps_and_extents()
352 ret = test_add_free_space_entry(cache, 3 * 1024 * 1024, 1 * 1024 * 1024, 0); test_bitmaps_and_extents()
358 ret = btrfs_remove_free_space(cache, 1 * 1024 * 1024, 3 * 1024 * 1024); test_bitmaps_and_extents()
364 __btrfs_remove_free_space_cache(cache->free_space_ctl); test_bitmaps_and_extents()
377 check_num_extents_and_bitmaps(const struct btrfs_block_group_cache *cache, check_num_extents_and_bitmaps() argument
381 if (cache->free_space_ctl->free_extents != num_extents) { check_num_extents_and_bitmaps()
382 test_msg("Incorrect # of extent entries in the cache: %d, expected %d\n", check_num_extents_and_bitmaps()
383 cache->free_space_ctl->free_extents, num_extents); check_num_extents_and_bitmaps()
386 if (cache->free_space_ctl->total_bitmaps != num_bitmaps) { check_num_extents_and_bitmaps()
387 test_msg("Incorrect # of extent entries in the cache: %d, expected %d\n", check_num_extents_and_bitmaps()
388 cache->free_space_ctl->total_bitmaps, num_bitmaps); check_num_extents_and_bitmaps()
395 static int check_cache_empty(struct btrfs_block_group_cache *cache) check_cache_empty() argument
404 if (cache->free_space_ctl->free_space != 0) { check_cache_empty()
410 offset = btrfs_find_space_for_alloc(cache, 0, 4096, 0, check_cache_empty()
418 /* And no extent nor bitmap entries in the cache anymore. */ check_cache_empty()
419 return check_num_extents_and_bitmaps(cache, 0, 0); check_cache_empty()
436 test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache) test_steal_space_from_bitmap_to_extent() argument
458 * cache->free_space_ctl->extents_thresh, which currently is test_steal_space_from_bitmap_to_extent()
465 use_bitmap_op = cache->free_space_ctl->op->use_bitmap; test_steal_space_from_bitmap_to_extent()
466 cache->free_space_ctl->op->use_bitmap = test_use_bitmap; test_steal_space_from_bitmap_to_extent()
471 ret = test_add_free_space_entry(cache, 128 * 1024 * 1024 - 256 * 1024, test_steal_space_from_bitmap_to_extent()
479 ret = test_add_free_space_entry(cache, 128 * 1024 * 1024 + 512 * 1024, test_steal_space_from_bitmap_to_extent()
486 ret = check_num_extents_and_bitmaps(cache, 2, 1); test_steal_space_from_bitmap_to_extent()
497 ret = btrfs_remove_free_space(cache, test_steal_space_from_bitmap_to_extent()
506 if (!test_check_exists(cache, 128 * 1024 * 1024 - 256 * 1024, test_steal_space_from_bitmap_to_extent()
511 if (!test_check_exists(cache, 128 * 1024 * 1024 + 512 * 1024, test_steal_space_from_bitmap_to_extent()
521 if (test_check_exists(cache, 128 * 1024 * 1024 + 768 * 1024, test_steal_space_from_bitmap_to_extent()
523 test_msg("Bitmap region not removed from space cache\n"); test_steal_space_from_bitmap_to_extent()
531 if (test_check_exists(cache, 128 * 1024 * 1024 + 256 * 1024, test_steal_space_from_bitmap_to_extent()
541 if (test_check_exists(cache, 128 * 1024 * 1024, test_steal_space_from_bitmap_to_extent()
549 * let's make sure the free space cache marks it as free in the bitmap, test_steal_space_from_bitmap_to_extent()
552 ret = btrfs_add_free_space(cache, 128 * 1024 * 1024, 512 * 1024); test_steal_space_from_bitmap_to_extent()
558 if (!test_check_exists(cache, 128 * 1024 * 1024, 512 * 1024)) { test_steal_space_from_bitmap_to_extent()
565 * the cache after adding that free space region. test_steal_space_from_bitmap_to_extent()
567 ret = check_num_extents_and_bitmaps(cache, 2, 1); test_steal_space_from_bitmap_to_extent()
577 ret = btrfs_add_free_space(cache, 128 * 1024 * 1024 + 16 * 1024 * 1024, test_steal_space_from_bitmap_to_extent()
586 * the cache after adding that free space region. test_steal_space_from_bitmap_to_extent()
588 ret = check_num_extents_and_bitmaps(cache, 2, 1); test_steal_space_from_bitmap_to_extent()
597 ret = btrfs_add_free_space(cache, 128 * 1024 * 1024 - 128 * 1024, test_steal_space_from_bitmap_to_extent()
604 if (!test_check_exists(cache, 128 * 1024 * 1024 - 128 * 1024, test_steal_space_from_bitmap_to_extent()
614 ret = check_num_extents_and_bitmaps(cache, 2, 1); test_steal_space_from_bitmap_to_extent()
622 * cache: test_steal_space_from_bitmap_to_extent()
633 if (!test_check_exists(cache, 128 * 1024 * 1024 - 256 * 1024, test_steal_space_from_bitmap_to_extent()
639 if (cache->free_space_ctl->free_space != (1 * 1024 * 1024 + 4096)) { test_steal_space_from_bitmap_to_extent()
644 offset = btrfs_find_space_for_alloc(cache, test_steal_space_from_bitmap_to_extent()
648 test_msg("Failed to allocate 1Mb from space cache, returned offset is: %llu\n", test_steal_space_from_bitmap_to_extent()
654 ret = check_num_extents_and_bitmaps(cache, 1, 1); test_steal_space_from_bitmap_to_extent()
658 if (cache->free_space_ctl->free_space != 4096) { test_steal_space_from_bitmap_to_extent()
663 offset = btrfs_find_space_for_alloc(cache, test_steal_space_from_bitmap_to_extent()
667 test_msg("Failed to allocate 4Kb from space cache, returned offset is: %llu\n", test_steal_space_from_bitmap_to_extent()
672 ret = check_cache_empty(cache); test_steal_space_from_bitmap_to_extent()
676 __btrfs_remove_free_space_cache(cache->free_space_ctl); test_steal_space_from_bitmap_to_extent()
687 ret = test_add_free_space_entry(cache, 128 * 1024 * 1024 + 128 * 1024, test_steal_space_from_bitmap_to_extent()
695 ret = test_add_free_space_entry(cache, 0, test_steal_space_from_bitmap_to_extent()
702 ret = check_num_extents_and_bitmaps(cache, 2, 1); test_steal_space_from_bitmap_to_extent()
713 ret = btrfs_remove_free_space(cache, test_steal_space_from_bitmap_to_extent()
722 if (!test_check_exists(cache, 128 * 1024 * 1024 + 128 * 1024, test_steal_space_from_bitmap_to_extent()
727 if (!test_check_exists(cache, 128 * 1024 * 1024 - 768 * 1024, test_steal_space_from_bitmap_to_extent()
737 if (test_check_exists(cache, 0, test_steal_space_from_bitmap_to_extent()
739 test_msg("Bitmap region not removed from space cache\n"); test_steal_space_from_bitmap_to_extent()
747 if (test_check_exists(cache, 128 * 1024 * 1024 - 512 * 1024, test_steal_space_from_bitmap_to_extent()
755 * let's make sure the free space cache marks it as free in the bitmap, test_steal_space_from_bitmap_to_extent()
758 ret = btrfs_add_free_space(cache, 128 * 1024 * 1024 - 512 * 1024, test_steal_space_from_bitmap_to_extent()
765 if (!test_check_exists(cache, 128 * 1024 * 1024 - 512 * 1024, test_steal_space_from_bitmap_to_extent()
773 * the cache after adding that free space region. test_steal_space_from_bitmap_to_extent()
775 ret = check_num_extents_and_bitmaps(cache, 2, 1); test_steal_space_from_bitmap_to_extent()
785 ret = btrfs_add_free_space(cache, 32 * 1024 * 1024, 8192); test_steal_space_from_bitmap_to_extent()
796 ret = btrfs_add_free_space(cache, 128 * 1024 * 1024, 128 * 1024); test_steal_space_from_bitmap_to_extent()
802 if (!test_check_exists(cache, 128 * 1024 * 1024, 128 * 1024)) { test_steal_space_from_bitmap_to_extent()
811 ret = check_num_extents_and_bitmaps(cache, 2, 1); test_steal_space_from_bitmap_to_extent()
819 * cache: test_steal_space_from_bitmap_to_extent()
830 if (!test_check_exists(cache, 128 * 1024 * 1024 - 768 * 1024, test_steal_space_from_bitmap_to_extent()
836 if (cache->free_space_ctl->free_space != (1 * 1024 * 1024 + 8192)) { test_steal_space_from_bitmap_to_extent()
841 offset = btrfs_find_space_for_alloc(cache, test_steal_space_from_bitmap_to_extent()
845 test_msg("Failed to allocate 1Mb from space cache, returned offset is: %llu\n", test_steal_space_from_bitmap_to_extent()
851 ret = check_num_extents_and_bitmaps(cache, 1, 1); test_steal_space_from_bitmap_to_extent()
855 if (cache->free_space_ctl->free_space != 8192) { test_steal_space_from_bitmap_to_extent()
860 offset = btrfs_find_space_for_alloc(cache, test_steal_space_from_bitmap_to_extent()
864 test_msg("Failed to allocate 8Kb from space cache, returned offset is: %llu\n", test_steal_space_from_bitmap_to_extent()
869 ret = check_cache_empty(cache); test_steal_space_from_bitmap_to_extent()
873 cache->free_space_ctl->op->use_bitmap = use_bitmap_op; test_steal_space_from_bitmap_to_extent()
874 __btrfs_remove_free_space_cache(cache->free_space_ctl); test_steal_space_from_bitmap_to_extent()
881 struct btrfs_block_group_cache *cache; btrfs_test_free_space_cache() local
884 test_msg("Running btrfs free space cache tests\n"); btrfs_test_free_space_cache()
886 cache = init_test_block_group(); btrfs_test_free_space_cache()
887 if (!cache) { btrfs_test_free_space_cache()
892 ret = test_extents(cache); btrfs_test_free_space_cache()
895 ret = test_bitmaps(cache); btrfs_test_free_space_cache()
898 ret = test_bitmaps_and_extents(cache); btrfs_test_free_space_cache()
902 ret = test_steal_space_from_bitmap_to_extent(cache); btrfs_test_free_space_cache()
904 __btrfs_remove_free_space_cache(cache->free_space_ctl); btrfs_test_free_space_cache()
905 kfree(cache->free_space_ctl); btrfs_test_free_space_cache()
906 kfree(cache); btrfs_test_free_space_cache()
907 test_msg("Free space cache tests finished\n"); btrfs_test_free_space_cache()
/linux-4.1.27/arch/powerpc/sysdev/
H A Dfsl_85xx_l2ctlr.c69 __setup("cache-sram-size=", get_size_from_cmdline);
70 __setup("cache-sram-offset=", get_offset_from_cmdline);
86 prop = of_get_property(dev->dev.of_node, "cache-size", NULL); mpc85xx_l2ctlr_of_probe()
88 dev_err(&dev->dev, "Missing L2 cache-size\n"); mpc85xx_l2ctlr_of_probe()
95 "Entire L2 as cache, provide valid sram offset and size\n"); mpc85xx_l2ctlr_of_probe()
103 dev_err(&dev->dev, "Illegal cache-sram-size in command line\n"); mpc85xx_l2ctlr_of_probe()
176 .compatible = "fsl,p2020-l2-cache-controller",
179 .compatible = "fsl,p2010-l2-cache-controller",
182 .compatible = "fsl,p1020-l2-cache-controller",
185 .compatible = "fsl,p1011-l2-cache-controller",
188 .compatible = "fsl,p1013-l2-cache-controller",
191 .compatible = "fsl,p1022-l2-cache-controller",
194 .compatible = "fsl,mpc8548-l2-cache-controller",
196 { .compatible = "fsl,mpc8544-l2-cache-controller",},
197 { .compatible = "fsl,mpc8572-l2-cache-controller",},
198 { .compatible = "fsl,mpc8536-l2-cache-controller",},
199 { .compatible = "fsl,p1021-l2-cache-controller",},
200 { .compatible = "fsl,p1012-l2-cache-controller",},
201 { .compatible = "fsl,p1025-l2-cache-controller",},
202 { .compatible = "fsl,p1016-l2-cache-controller",},
203 { .compatible = "fsl,p1024-l2-cache-controller",},
204 { .compatible = "fsl,p1015-l2-cache-controller",},
205 { .compatible = "fsl,p1010-l2-cache-controller",},
206 { .compatible = "fsl,bsc9131-l2-cache-controller",},
H A Dppc4xx_soc.c6 * L2 cache routines cloned from arch/ppc/syslib/ibm440gx_common.c which is:
32 * L2-cache
51 /* Read cache trapped address */ l2c_error_handler()
86 np = of_find_compatible_node(NULL, NULL, "ibm,l2-cache"); ppc4xx_l2c_probe()
90 /* Get l2 cache size */ ppc4xx_l2c_probe()
91 prop = of_get_property(np, "cache-size", NULL); ppc4xx_l2c_probe()
93 printk(KERN_ERR "%s: Can't get cache-size!\n", np->full_name); ppc4xx_l2c_probe()
121 ", cache is not enabled\n"); ppc4xx_l2c_probe()
178 if (of_device_is_compatible(np, "ibm,l2-cache-460ex") || ppc4xx_l2c_probe()
179 of_device_is_compatible(np, "ibm,l2-cache-460gt")) ppc4xx_l2c_probe()
187 printk(KERN_INFO "%dk L2-cache enabled\n", l2_size >> 10); ppc4xx_l2c_probe()
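Both L2 controller probes above discover the cache geometry from the device tree: they match a compatible string and then read a "cache-size" property. A minimal sketch of that lookup pattern, detached from either driver, follows; the compatible string "my,l2-cache-controller" and the error handling are illustrative assumptions, not taken from the sources above.

/* Hypothetical helper mirroring the probe routines above: find a node by
 * compatible string and read its "cache-size" property.
 */
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/printk.h>

static int example_l2_cache_size(u32 *size_out)
{
	struct device_node *np;
	int ret;

	np = of_find_compatible_node(NULL, NULL, "my,l2-cache-controller");
	if (!np)
		return -ENODEV;

	ret = of_property_read_u32(np, "cache-size", size_out);
	of_node_put(np);		/* drop the reference taken by the lookup */
	if (ret)
		pr_err("Missing or malformed L2 cache-size property\n");
	return ret;
}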
/linux-4.1.27/arch/alpha/include/uapi/asm/
H A Dauxvec.h10 /* More complete cache descriptions than AT_[DIU]CACHEBSIZE. If the
11 value is -1, then the cache doesn't exist. Otherwise:
15 bit 8-31: Size of the entire cache >> 8.
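The header above documents a packed cache descriptor: a value of -1 means the cache does not exist, and bits 8-31 hold the total cache size shifted right by 8. A small user-space sketch that decodes only those documented fields is given below; the meaning of bits 0-7 is not shown in this excerpt, so they are merely printed raw.

/* Decode the documented parts of an auxv cache-shape value.  Only the
 * fields described above are interpreted; bits 0-7 are left raw.
 * Purely illustrative user-space code.
 */
#include <stdio.h>

static void decode_cache_shape(long val)
{
	if (val == -1) {
		printf("cache not present\n");
		return;
	}
	/* bits 8-31 store (total size >> 8) */
	unsigned long size = (((unsigned long)val >> 8) & 0xffffffUL) << 8;

	printf("total size: %lu bytes, low bits: %#lx\n",
	       size, (unsigned long)val & 0xffUL);
}

int main(void)
{
	decode_cache_shape(-1);           /* no cache */
	decode_cache_shape(0x8000);       /* hypothetical 32 KiB descriptor */
	return 0;
}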
/linux-4.1.27/arch/xtensa/mm/
H A DMakefile6 obj-$(CONFIG_MMU) += cache.o fault.o mmu.o tlb.o
H A Dcache.c2 * arch/xtensa/mm/cache.c
42 * can be used for cache coherency.
46 * The Xtensa architecture doesn't keep the instruction cache coherent with
47 * the data cache. We use the architecture bit to indicate if the caches
49 * page cache. At that time, the caches might not be in sync. We, therefore,
52 * D-cache aliasing.
54 * With cache aliasing, we have to always flush the cache when pages are
128 * Any time the kernel writes to a user page cache page, or it is about to
129 * read from a page cache page this routine is called.
177 /* There shouldn't be an entry in the cache for this page anymore. */ flush_dcache_page()
182 * For now, flush the whole cache. FIXME??
193 * Remove any entry in the cache for this page.
196 * alias versions of the cache flush functions.
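The comments above describe the classic virtually indexed D-cache aliasing problem: when one cache way spans more than a page, two virtual mappings of the same physical page can index different cache lines unless their page "colours" match. The sketch below shows the usual colour computation; the 16 KiB way size and 4 KiB page size are assumed example values, not Xtensa-specific facts.

/* Illustrative cache-colour computation for a virtually indexed cache.
 * WAY_SIZE and PAGE_SZ are assumed example values.
 */
#include <stdio.h>

#define WAY_SIZE   (16 * 1024UL)
#define PAGE_SZ    (4 * 1024UL)
#define N_COLOURS  (WAY_SIZE / PAGE_SZ)      /* pages per way */

static unsigned long cache_colour(unsigned long vaddr)
{
	return (vaddr / PAGE_SZ) % N_COLOURS;
}

int main(void)
{
	/* Two mappings of one physical page alias safely only if colours match */
	printf("%lu %lu\n", cache_colour(0x10000000UL), cache_colour(0x10002000UL));
	return 0;
}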
/linux-4.1.27/arch/microblaze/kernel/cpu/
H A DMakefile12 obj-y += cache.o cpuinfo.o cpuinfo-pvr-full.o cpuinfo-static.o mb.o pvr.o
/linux-4.1.27/arch/arm64/mm/
H A DMakefile2 cache.o copypage.o flush.o \
H A Dcache.S32 * Flush the whole D-cache.
42 mov x10, #0 // start clean at cache level 0
44 add x2, x10, x10, lsr #1 // work out 3x current cache level
45 lsr x1, x0, x2 // extract cache type bits from clidr
46 and x1, x1, #7 // mask of the bits for current cache only
47 cmp x1, #2 // see what cache we have at this level
48 b.lt skip // skip if no cache, or just i-cache
50 msr csselr_el1, x10 // select current cache level in csselr
54 and x2, x1, #7 // extract the length of the cache lines
65 orr x11, x10, x6 // factor way and cache number into x11
74 add x10, x10, #2 // increment cache number
78 mov x10, #0 // switch back to cache level 0
79 msr csselr_el1, x10 // select current cache level in csselr
88 * Flush the entire cache system. The data cache flush is now achieved
89 * using atomic clean / invalidates working outwards from L1 cache. This
90 * is done using Set/Way based cache maintenance instructions. The
91 * instruction cache can still be invalidated back to the point of
98 ic ialluis // I+BTB cache invalidate
192 tst x1, x3 // end cache line aligned?
196 1: tst x0, x3 // start cache line aligned?
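The __flush_dcache_all loop above walks the cache levels reported by CLIDR_EL1 and issues one set/way operation per line, packing level, set and way into a single operand register. The host-side sketch below only enumerates such operand values under the standard ARMv8 set/way encoding (way in the top bits, set above the line-offset bits, level in bits [3:1]); the 64-byte/4-way/256-set geometry is an assumption, and real code reads it from CCSIDR_EL1 after selecting the level in CSSELR_EL1.

/* Enumerate DC CISW-style operands for one cache level, mirroring the
 * loop structure above.  Geometry values are assumed; ways > 1 assumed
 * so the clz of (ways - 1) is well defined.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int level = 0;                 /* 0-based, as in the x10 counter */
	unsigned int line_log2 = 6, ways = 4, sets = 256;
	uint32_t way_shift = (uint32_t)__builtin_clz(ways - 1); /* ways in top bits */
	uint32_t set_shift = line_log2;         /* sets sit above the offset bits */

	for (unsigned int way = 0; way < ways; way++) {
		for (unsigned int set = 0; set < sets; set++) {
			uint32_t op = ((uint32_t)way << way_shift) |
				      ((uint32_t)set << set_shift) |
				      (level << 1);
			if (set == 0)
				printf("way %u first operand: %#x\n", way, op);
		}
	}
	return 0;
}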
/linux-4.1.27/arch/avr32/include/uapi/asm/
H A Dcachectl.h8 /* Clean the data cache, then invalidate the icache */
/linux-4.1.27/arch/c6x/platforms/
H A DMakefile7 obj-y = platform.o cache.o megamod-pic.o pll.o plldata.o timer64.o
/linux-4.1.27/arch/frv/lib/
H A DMakefile8 outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o
/linux-4.1.27/fs/fat/
H A DMakefile9 fat-y := cache.o dir.o fatent.o file.o inode.o misc.o nfs.o
H A Dcache.c2 * linux/fs/fat/cache.c
6 * Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
40 struct fat_cache *cache = (struct fat_cache *)foo; init_once() local
42 INIT_LIST_HEAD(&cache->cache_list); init_once()
66 static inline void fat_cache_free(struct fat_cache *cache) fat_cache_free() argument
68 BUG_ON(!list_empty(&cache->cache_list)); fat_cache_free()
69 kmem_cache_free(fat_cache_cachep, cache); fat_cache_free()
73 struct fat_cache *cache) fat_cache_update_lru()
75 if (MSDOS_I(inode)->cache_lru.next != &cache->cache_list) fat_cache_update_lru()
76 list_move(&cache->cache_list, &MSDOS_I(inode)->cache_lru); fat_cache_update_lru()
90 /* Find the cache of "fclus" or nearest cache. */ fat_cache_lookup()
135 struct fat_cache *cache, *tmp; fat_cache_add() local
137 if (new->fcluster == -1) /* dummy cache */ fat_cache_add()
143 goto out; /* this cache was invalidated */ fat_cache_add()
145 cache = fat_cache_merge(inode, new); fat_cache_add()
146 if (cache == NULL) { fat_cache_add()
160 cache = fat_cache_merge(inode, new); fat_cache_add()
161 if (cache != NULL) { fat_cache_add()
166 cache = tmp; fat_cache_add()
169 cache = list_entry(p, struct fat_cache, cache_list); fat_cache_add()
171 cache->fcluster = new->fcluster; fat_cache_add()
172 cache->dcluster = new->dcluster; fat_cache_add()
173 cache->nr_contig = new->nr_contig; fat_cache_add()
176 fat_cache_update_lru(inode, cache); fat_cache_add()
188 struct fat_cache *cache; __fat_cache_inval_inode() local
191 cache = list_entry(i->cache_lru.next, __fat_cache_inval_inode()
193 list_del_init(&cache->cache_list); __fat_cache_inval_inode()
195 fat_cache_free(cache); __fat_cache_inval_inode()
72 fat_cache_update_lru(struct inode *inode, struct fat_cache *cache) fat_cache_update_lru() argument
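fat_cache_update_lru() above keeps the per-inode cluster cache in LRU order: on a lookup hit the entry is spliced to the head of the list, so the coldest entry drifts to the tail and is recycled first. Below is a user-space sketch of that move-to-front step with a minimal doubly linked list; the types are stand-ins, not FAT's.

/* Minimal stand-in for the LRU update done by fat_cache_update_lru():
 * on a hit, splice the entry to the head of the list.  Types are
 * illustrative only.
 */
#include <stddef.h>

struct entry {
	int fcluster, dcluster;
	struct entry *prev, *next;
};

struct lru {
	struct entry *head, *tail;
};

static void lru_move_front(struct lru *l, struct entry *e)
{
	if (l->head == e)
		return;                         /* already most recently used */
	/* unlink from its current position */
	e->prev->next = e->next;
	if (e->next)
		e->next->prev = e->prev;
	else
		l->tail = e->prev;
	/* relink at the head */
	e->prev = NULL;
	e->next = l->head;
	l->head->prev = e;
	l->head = e;
}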
/linux-4.1.27/fs/9p/
H A DMakefile17 9p-$(CONFIG_9P_FSCACHE) += cache.o
/linux-4.1.27/include/linux/
H A Dcacheinfo.h20 * struct cacheinfo - represent a cache leaf node
21 * @type: type of the cache - data, inst or unified
22 * @level: represents the hierarchy in the multi-level cache
23 * @coherency_line_size: size of each cache line usually representing
25 * @number_of_sets: total number of sets, a set is a collection of cache
28 * block can be placed in the cache
29 * @physical_line_partition: number of physical cache lines sharing the
31 * @size: Total size of the cache
33 * this cache node
34 * @attributes: bitfield representing various cache attributes
36 * case there's no explicit cache node or the cache node itself in the
41 * cache design
44 * keeping, the remaining members form the core properties of the cache
77 * Helpers to make sure "func" is executed on the cpu whose cache
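The fields documented above (level, type, coherency_line_size, size, and so on) are exported per CPU through sysfs under /sys/devices/system/cpu/cpuN/cache/indexM/. A short user-space reader is sketched below; it assumes that layout is present and simply skips attributes that are absent.

/* Print a few cacheinfo attributes for cpu0's first cache leaves.
 * Assumes the usual sysfs layout; missing files are silently skipped.
 */
#include <stdio.h>

int main(void)
{
	const char *attrs[] = { "level", "type", "coherency_line_size", "size" };
	char path[128], buf[64];

	for (int idx = 0; idx < 4; idx++) {
		for (size_t i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
			snprintf(path, sizeof(path),
				 "/sys/devices/system/cpu/cpu0/cache/index%d/%s",
				 idx, attrs[i]);
			FILE *f = fopen(path, "r");

			if (!f)
				continue;       /* leaf or attribute not present */
			if (fgets(buf, sizeof(buf), f))
				printf("index%d %s: %s", idx, attrs[i], buf);
			fclose(f);
		}
	}
	return 0;
}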
H A Dfscache.h37 * a page is currently backed by a local disk cache
76 /* select the cache into which to insert an entry in this index
78 * - should return a cache identifier or NULL to cause the cache to be
79 * inherited from the parent if possible or the first cache picked
138 /* indicate page that now have cache metadata retained
171 * - a file will only appear in one cache
172 * - a request to cache a file may or may not be honoured, subject to
187 #define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
188 #define FSCACHE_COOKIE_STORING_TAG 1 /* pages tag: writing to cache */
291 * fscache_lookup_cache_tag - Look up a cache tag
294 * Acquire a specific cache referral tag that can be used to select a specific
295 * cache in which to cache an index.
310 * fscache_release_cache_tag - Release a cache tag
313 * Release a reference to a cache referral tag previously looked up.
326 * fscache_acquire_cookie - Acquire a cookie to represent a cache object
328 * @def: A description of the cache object, including callback operations
330 * represent the cache object to the netfs
355 * fscache_relinquish_cookie - Return the cookie to the cache, maybe discarding
358 * @retire: True if the cache object the cookie represents is to be discarded
360 * This function returns a cookie to the cache, forcibly discarding the
361 * associated cache object if retire is set to true.
374 * fscache_check_consistency - Request that if the cache is updated
375 * @cookie: The cookie representing the cache object
378 * to the backing cache.
393 * fscache_update_cookie - Request that a cache object be updated
394 * @cookie: The cookie representing the cache object
396 * Request an update of the index data for the cache object associated with the
410 * fscache_pin_cookie - Pin a data-storage cache object in its cache
411 * @cookie: The cookie representing the cache object
413 * Permit data-storage cache objects to be pinned in the cache.
425 * fscache_unpin_cookie - Unpin a data-storage cache object in its cache
426 * @cookie: The cookie representing the cache object
428 * Permit data-storage cache objects to be unpinned from the cache.
439 * fscache_attr_changed - Notify cache that an object's attributes changed
440 * @cookie: The cookie representing the cache object
442 * Send a notification to the cache indicating that an object's attributes have
459 * fscache_invalidate - Notify cache that an object needs invalidation
460 * @cookie: The cookie representing the cache object
462 * Notify the cache that an object needs to be invalidated and that it
463 * should abort any retrievals or stores it is doing on the cache. The object
480 * @cookie: The cookie representing the cache object
496 * @cookie: The cookie representing the cache object
499 * Reserve an amount of space in the cache for the cache object attached to a
513 * fscache_read_or_alloc_page - Read a page from the cache or allocate a block
515 * @cookie: The cookie representing the cache object
521 * Read a page from the cache, or if that's not possible make a potential
522 * one-block reservation in the cache into which the page may be stored once
525 * If the page is not backed by the cache object, or if there's some reason
529 * Else, if that page is backed by the cache, a read will be initiated directly
536 * been allocated in the cache.
556 * fscache_read_or_alloc_pages - Read pages from the cache and/or allocate
558 * @cookie: The cookie representing the cache object
566 * Read a set of pages from the cache, or if that's not possible, attempt to
567 * make a potential one-block reservation for each page in the cache into which
570 * If some pages are not backed by the cache object, or if there's some
574 * Else, if some of the pages are backed by the cache, a read will be initiated
581 * been allocated in the cache.
609 * @cookie: The cookie representing the cache object
613 * Request allocation of a block in the cache in which to store a netfs page
614 * without retrieving any contents from the cache.
638 * @cookie: The cookie representing the inode's cache object.
657 * fscache_write_page - Request storage of a page in the cache
658 * @cookie: The cookie representing the cache object
662 * Request the contents of the netfs page be written into the cache. This
663 * request may be ignored if no cache block is currently allocated, in which
666 * If a cache block was already allocated, a write will be initiated and 0 will
687 * @cookie: The cookie representing the cache object
690 * Tell the cache that we no longer want a page to be cached and that it should
694 * page and the cache.
708 * fscache_check_page_write - Ask if a page is being written to the cache
709 * @cookie: The cookie representing the cache object
712 * Ask the cache if a page is being written to the cache.
727 * fscache_wait_on_page_write - Wait for a page to complete writing to the cache
728 * @cookie: The cookie representing the cache object
731 * Ask the cache to wake us up when a page is no longer being written to the
732 * cache.
747 * @cookie: The cookie representing the cache object
772 * @cookie: The cookie representing the inode's cache object.
779 * and will wait whilst the PG_fscache mark is removed by the cache.
791 * @cookie: The cookie representing the cache object
812 * @cookie: The cookie representing the cache object
H A Dfscache-cache.h1 /* General filesystem caching backing cache interface
15 * for a description of the cache backend interface declared here.
33 * cache tag definition
37 struct fscache_cache *cache; /* cache referred to by this tag */ member in struct:fscache_cache_tag
39 #define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */
45 * cache definition
49 struct fscache_cache_tag *tag; /* tag representing this cache */
50 struct kobject *kobj; /* system representation of this cache */
53 char identifier[36]; /* cache label */
61 atomic_t object_count; /* no. of live objects in this cache */
64 #define FSCACHE_IOERROR 0 /* cache stopped on I/O error */
65 #define FSCACHE_CACHE_WITHDRAWN 1 /* cache has been withdrawn */
71 * operation to be applied to a cache object
217 * - batch writes to the cache
218 * - do cache writes asynchronously
219 * - defer writes until cache object lookup completion
227 * cache operations
230 /* name of cache provider */
234 struct fscache_object *(*alloc_object)(struct fscache_cache *cache,
248 /* pin an object in the cache */
251 /* unpin an object in the cache */
254 /* check the consistency between the backing cache and the FS-Cache
271 /* sync a cache */
272 void (*sync_cache)(struct fscache_cache *cache);
282 * cache */
286 * the cache */
289 /* request a backing block for a page to be allocated in the cache so that
293 /* request backing blocks for pages to be allocated in the cache so that
297 /* write a page to its backing block in the cache */
307 /* dissociate a cache from all the pages it was backing */
308 void (*dissociate_pages)(struct fscache_cache *cache);
320 FSCACHE_OBJECT_EV_INVALIDATE, /* T if cache requested object invalidation */
323 FSCACHE_OBJECT_EV_KILL, /* T if netfs relinquished or cache withdrew object */
346 * on-disk cache file or index handle
375 struct list_head cache_link; /* link in cache->object_list */
377 struct fscache_cache *cache; /* cache that supplied this object */ member in struct:fscache_object
417 !test_bit(FSCACHE_IOERROR, &object->cache->flags); fscache_object_is_active()
423 test_bit(FSCACHE_IOERROR, &object->cache->flags); fscache_object_is_dead()
427 * fscache_object_destroyed - Note destruction of an object in a cache
428 * @cache: The cache from which the object came
430 * Note the destruction and deallocation of an object record in a cache.
432 static inline void fscache_object_destroyed(struct fscache_cache *cache) fscache_object_destroyed() argument
434 if (atomic_dec_and_test(&cache->object_count)) fscache_object_destroyed()
528 * out-of-line cache backend functions
531 void fscache_init_cache(struct fscache_cache *cache,
535 extern int fscache_add_cache(struct fscache_cache *cache,
538 extern void fscache_withdraw_cache(struct fscache_cache *cache);
540 extern void fscache_io_error(struct fscache_cache *cache);
/linux-4.1.27/arch/sh/mm/
H A Dcache-debugfs.c2 * debugfs ops for the L1 cache
16 #include <asm/cache.h>
28 struct cache_info *cache; cache_seq_show() local
49 cache = &current_cpu_data.dcache; cache_seq_show()
52 cache = &current_cpu_data.icache; cache_seq_show()
55 waysize = cache->sets; cache_seq_show()
64 waysize <<= cache->entry_shift; cache_seq_show()
66 for (way = 0; way < cache->ways; way++) { cache_seq_show()
76 addr += cache->linesz, line++) { cache_seq_show()
83 /* U: Dirty, cache tag is 10 bits up */ cache_seq_show()
89 addrstart += cache->way_incr; cache_seq_show()
H A Dcache-shx3.c2 * arch/sh/mm/cache-shx3.c - SH-X3 optimized cache ops
13 #include <asm/cache.h>
25 * If we've got cache aliases, resolve them in hardware. shx3_cache_init()
38 * Broadcast I-cache block invalidations by default. shx3_cache_init()
H A DMakefile5 obj-y := alignment.o cache.o init.o consistent.o mmap.o
7 cacheops-$(CONFIG_CPU_SH2) := cache-sh2.o
8 cacheops-$(CONFIG_CPU_SH2A) := cache-sh2a.o
9 cacheops-$(CONFIG_CPU_SH3) := cache-sh3.o
10 cacheops-$(CONFIG_CPU_SH4) := cache-sh4.o flush-sh4.o
11 cacheops-$(CONFIG_CPU_SH5) := cache-sh5.o flush-sh4.o
12 cacheops-$(CONFIG_SH7705_CACHE_32KB) += cache-sh7705.o
13 cacheops-$(CONFIG_CPU_SHX3) += cache-shx3.o
25 debugfs-$(CONFIG_CPU_SH4) += cache-debugfs.o
H A Dcache-sh7705.c2 * arch/sh/mm/cache-sh7705.c
21 #include <asm/cache.h>
29 * The 32KB cache on the SH7705 suffers from the same synonym problem
63 * Write back the range of D-cache, and purge the I-cache.
79 * Writeback&Invalidate the D-cache of the page
90 * tags in the cache for those with the same page number as this page __flush_dcache_page()
95 * Since 2 bits of the cache index are derived from the virtual page __flush_dcache_page()
96 * number, knowing this would reduce the number of cache entries to be __flush_dcache_page()
98 * potential cache aliasing, therefore the optimisation is probably not __flush_dcache_page()
133 * Write back & invalidate the D-cache of the page.
173 * This is called when a page-cache page is about to be mapped into a
175 * port to ensure d-cache/i-cache coherency if necessary.
177 * Not entirely sure why this is necessary on SH3 with 32K cache but
H A Dcache-sh4.c2 * arch/sh/mm/cache-sh4.c
35 * Write back the range of D-cache, and purge the I-cache.
57 * Selectively flush d-cache then invalidate the i-cache. sh4_flush_icache_range()
76 /* Clear i-cache line valid-bit */ sh4_flush_icache_range()
94 * All types of SH-4 require PC to be uncached to operate on the I-cache. flush_cache_one()
95 * Some types of SH-4 require PC to be uncached to operate on the D-cache. flush_cache_one()
107 * Write back & invalidate the D-cache of the page.
135 /* Flush I-cache */ flush_icache_all()
181 * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that
183 * accessed with (hence cache set) is in accord with the physical
273 * Flushing the cache lines for U0 only isn't enough.
290 * If cache is only 4k-per-way, there are never any 'aliases'. Since sh4_flush_cache_range()
291 * the cache is physically tagged, the data can just be left in there. sh4_flush_cache_range()
305 * @addr: address in memory mapped cache array
311 * The offset into the cache array implied by 'addr' selects the
361 * will do since only the cache tag bits need to __flush_cache_one()
376 * SH-4 has virtually indexed and physically tagged cache.
H A Dcache-sh5.c2 * arch/sh/mm/cache-sh5.c
18 #include <asm/cache.h>
25 /* Wired TLB entry for the D-cache */
73 /* Invalidate range of addresses [start,end] from the I-cache, where sh64_icache_inv_kernel_range()
98 /* Check whether we can use the current ASID for the I-cache sh64_icache_inv_user_page()
106 pid->ASID mapping changes. However, the whole cache will get sh64_icache_inv_user_page()
109 cache entries. sh64_icache_inv_user_page()
135 /* Used for invalidating big chunks of I-cache, i.e. assume the range sh64_icache_inv_user_page_range()
141 the choice of algorithm. However, for the I-cache option (2) isn't sh64_icache_inv_user_page_range()
144 mapping. Because icbi is cheaper than ocbp on a cache hit, it sh64_icache_inv_user_page_range()
146 possible with the D-cache. Just assume 64 for now as a working sh64_icache_inv_user_page_range()
207 cache hit on the virtual tag the instruction ends there, without a sh64_icache_inv_current_user_range()
220 invalidate another processes I-cache entries : no worries, just a sh64_icache_inv_current_user_range()
232 /* Buffer used as the target of alloco instructions to purge data from cache
259 * Do one alloco which hits the required set per cache sh64_dcache_purge_sets()
281 * alloco is a NOP if the cache is write-through. sh64_dcache_purge_sets()
298 * memory equal in size to the cache, thereby causing the current
314 /* Purge the physical page 'paddr' from the cache. It's known that any
315 * cache lines requiring attention have the same page colour as the
318 * This relies on the fact that the D-cache matches on physical tags when
439 * 3. walk all the lines in the cache, check the tags, if a match
448 * 4. walk all the lines in the cache, check the tags, if a match
484 * memory any dirty data from the D-cache.
501 * Have to do a purge here, despite the comments re I-cache below.
503 * in the cache - if this gets written out through natural eviction
510 * I-cache. This is similar to the lack of action needed in
540 * Invalidate any entries in either cache for the vma within the user
543 * the I-cache must be searched too in case the page in question is
572 * the I-cache. The corresponding range must be purged from the
573 * D-cache also because the SH-5 doesn't have cache snooping between
575 * mapping, therefore it's guaranteed that there no cache entries for
576 * the range in cache sets of the wrong colour.
593 * D-cache and invalidate the corresponding region of the I-cache for the
/linux-4.1.27/arch/sh/include/uapi/asm/
H A Dcachectl.h15 #define ICACHE CACHEFLUSH_I /* flush instruction cache */
16 #define DCACHE CACHEFLUSH_D_PURGE /* writeback and flush data cache */
H A Dauxvec.h25 * More complete cache descriptions than AT_[DIU]CACHEBSIZE. If the
26 * value is -1, then the cache doesn't exist. Otherwise:
30 * bit 8-31: Size of the entire cache >> 8.
/linux-4.1.27/arch/blackfin/include/uapi/asm/
H A Dcachectl.h16 #define ICACHE (1<<0) /* flush instruction cache */
17 #define DCACHE (1<<1) /* writeback and flush data cache */
/linux-4.1.27/arch/arm/include/asm/
H A Dshmparam.h5 * This should be the size of the virtually indexed cache/ways,
6 * or page size, whichever is greater since the cache aliases
H A Dcache.h2 * arch/arm/include/asm/cache.h
12 * sure that all such allocations are cache aligned. Otherwise,
14 * cache before the transfer is done, causing old data to be seen by
H A Doutercache.h48 * outer_inv_range - invalidate range of outer cache lines
59 * outer_clean_range - clean dirty outer cache lines
70 * outer_flush_range - clean and invalidate outer cache lines
81 * outer_flush_all - clean and invalidate all cache lines in the outer cache
85 * cache masters.
98 * outer_disable - clean, invalidate and disable the outer cache
100 * Disable the outer cache, ensuring that any data contained in the outer
101 * cache is pushed out to lower levels of system memory. The note and
107 * outer_resume - restore the cache configuration and re-enable outer cache
109 * Restore any configuration that the cache had when previously enabled,
110 * and re-enable the outer cache.
134 * outer_sync - perform a sync point for outer cache outer_resume()
136 * Ensure that all outer cache operations are complete and any store outer_resume()
H A Dcacheflush.h15 #include <asm/glue-cache.h>
32 * The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
40 * effects are cache-type (VIVT/VIPT/PIPT) specific.
45 * Currently only needed for cache-v6.S and cache-v7.S, see
50 * Unconditionally clean and invalidate the entire cache.
54 * Flush data cache levels up to the level of unification
55 * inner shareable and invalidate the I-cache.
61 * Clean and invalidate all user space cache entries
66 * Clean and invalidate a range of cache entries in the
139 * Their sole purpose is to ensure that data held in the cache
160 * Their sole purpose is to ensure that data held in the cache
186 /* Invalidate I-cache */
191 /* Invalidate I-cache inner shareable */
275 * Perform necessary cache operations to ensure that data previously
281 * Perform necessary cache operations to ensure that the TLB will
288 * cache page at virtual address page->virtual.
294 * Otherwise we can defer the operation, and clean the cache when we are
335 * duplicate cache flushing elsewhere performed by flush_dcache_page().
343 * data, we need to do a full cache flush to ensure that writebacks
353 * have a DSB after cleaning the cache line. flush_cache_vmap()
368 * cache enabled or disabled depending on the code path. It is crucial
369 * to always ensure proper cache maintenance to update main memory right
372 * Any cached write must be followed by a cache clean operation.
373 * Any cached read must be preceded by a cache invalidate operation.
374 * Yet, in the read case, a cache flush i.e. atomic clean+invalidate
380 * a separate cache line.
384 * This needs to be >= the max cache writeback size of all
386 * This is used to align state variables to their own cache lines.
422 * Ensure dirty data migrated from other CPUs into our cache __sync_cache_range_r()
423 * are cleaned out safely before the outer cache is cleaned: __sync_cache_range_r()
432 /* ... and inner cache: */ __sync_cache_range_r()
440 * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
443 * - Clear the SCTLR.C bit to prevent further cache allocations
444 * - Flush the desired level of cache
462 * fp is preserved to the stack explicitly prior disabling the cache
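The rules spelled out above ("any cached write must be followed by a cache clean operation, any cached read must be preceded by a cache invalidate operation") are what the streaming DMA API enforces on behalf of drivers. The kernel-style sketch below shows that mapping; the device pointer and buffer are placeholders and error handling is trimmed, so treat it as an illustration rather than a complete driver fragment.

/* How the clean-before-device-read / invalidate-before-cpu-read rules
 * above are normally expressed through the streaming DMA API.  'dev'
 * and 'buf' are placeholders.
 */
#include <linux/dma-mapping.h>

static void example_dma_round_trip(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	/* CPU has written buf: mapping TO_DEVICE cleans the cached lines */
	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return;
	/* ... kick off the transfer and wait for it to finish ... */
	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);

	/* Device will write buf next: FROM_DEVICE ensures stale cached lines
	 * are invalidated before the CPU reads the result back */
	dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		return;
	/* ... let the device fill the buffer ... */
	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
}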
/linux-4.1.27/arch/alpha/include/asm/
H A Dcache.h2 * include/asm-alpha/cache.h
8 /* Bytes per L1 (data) cache line. */
/linux-4.1.27/arch/metag/kernel/
H A Dcachepart.c2 * Meta cache partition manipulation.
58 static int get_thread_cache_size(unsigned int cache, int thread_id) get_thread_cache_size() argument
64 isEnabled = (cache == DCACHE ? metag_in32(MMCU_DCACHE_CTRL_ADDR) & 0x1 : get_thread_cache_size()
69 /* Checking for global cache */ get_thread_cache_size()
70 cache_size = (cache == DCACHE ? get_global_dcache_size() : get_thread_cache_size()
74 cache_size = (cache == DCACHE ? get_dcache_size() : get_thread_cache_size()
77 t_cache_part = (cache == DCACHE ? get_thread_cache_size()
103 pr_emerg("Can't read %s cache size\n", check_for_cache_aliasing()
109 pr_emerg("Potential cache aliasing detected in %s on Thread %d\n", check_for_cache_aliasing()
119 panic("Potential cache aliasing detected"); check_for_cache_aliasing()
/linux-4.1.27/arch/metag/mm/
H A Dcache.c2 * arch/metag/mm/cache.c
42 * metag_lnkget_probe() - Probe whether lnkget/lnkset go around the cache
54 * It's conceivable the user has configured a globally coherent cache metag_lnkget_probe()
56 * from executing and causing cache eviction during the test. metag_lnkget_probe()
60 /* read a value to bring it into the cache */ metag_lnkget_probe()
81 /* flush the cache line to fix any incoherency */ metag_lnkget_probe()
85 /* if the cache is right, LNKGET_AROUND_CACHE is unnecessary */ metag_lnkget_probe()
87 pr_info("LNKGET/SET go through cache but CONFIG_METAG_LNKGET_AROUND_CACHE=y\n"); metag_lnkget_probe()
90 * if the cache is wrong, LNKGET_AROUND_CACHE is really necessary metag_lnkget_probe()
94 "LNKGET/SET go around cache but CONFIG_METAG_LNKGET_AROUND_CACHE=n\n" metag_lnkget_probe()
98 * if the cache is wrong, LNKGET_AROUND_CACHE should be used or the metag_lnkget_probe()
102 "LNKGET/SET go around cache but CONFIG_METAG_LNKGET_AROUND_CACHE=n\n" metag_lnkget_probe()
106 * if the cache is wrong, LNKGET_AROUND_CACHE is set wrong, but it metag_lnkget_probe()
111 pr_warn("LNKGET/SET go around cache but CONFIG_METAG_LNKGET_AROUND_CACHE=n\n"); metag_lnkget_probe()
117 * metag_cache_probe() - Probe L1 cache configuration.
119 * Probe the L1 cache configuration to aid the L1 physical cache flushing
151 /* Extract cache sizes from global heap segment */ metag_cache_probe()
162 /* Work out width of I-cache size bit-field */ metag_cache_probe()
175 /* Now calculate I-cache set size */ metag_cache_probe()
180 /* Similarly for D-cache */ metag_cache_probe()
207 /* Use a sequence of writes to flush the cache region requested */ metag_phys_data_cache_flush()
214 /* Move to the base of the physical cache flush region */ metag_phys_data_cache_flush()
237 /* Reduce loops by step of cache line size */ metag_phys_data_cache_flush()
252 /* Clear loops ways in cache */ metag_phys_data_cache_flush()
287 /* No need to flush the data cache it's not actually enabled */ metag_data_cache_flush_all()
299 /* No need to flush the data cache it's not actually enabled */ metag_data_cache_flush()
307 /* Use linear cache flush mechanism on META IP */ metag_data_cache_flush()
352 /* Use a sequence of writes to flush the cache region requested */ metag_phys_code_cache_flush()
357 /* Move to the base of the physical cache flush region */ metag_phys_code_cache_flush()
392 /* Reduce loops by step of cache line size */ metag_phys_code_cache_flush()
415 /* Clear loops ways in cache */ metag_phys_code_cache_flush()
458 /* No need to flush the code cache it's not actually enabled */ metag_code_cache_flush_all()
473 /* No need to flush the code cache it's not actually enabled */ metag_code_cache_flush()
477 /* CACHEWD isn't available on Meta1, so always do full cache flush */ metag_code_cache_flush()
481 /* If large size do full physical cache flush */ metag_code_cache_flush()
487 /* Use linear cache flush mechanism on META IP */ metag_code_cache_flush()
H A DMakefile5 obj-y += cache.o
H A Dl2cache.c8 /* If non-0, then initialise the L2 cache */
10 /* If non-0, then initialise the L2 cache prefetch */
44 * If the L2 cache isn't even present, don't do anything, but say so in meta_l2c_setup()
66 * Enable the L2 cache and print to log whether it was already enabled meta_l2c_setup()
80 * Enable L2 cache prefetch. meta_l2c_setup()
132 * cache if it's already enabled (dirty lines would be discarded), so meta_l2c_enable()
/linux-4.1.27/arch/mn10300/include/asm/
H A Dcache.h1 /* MN10300 cache management registers
16 #include <proc/cache.h>
26 /* data cache purge registers
27 * - read from the register to unconditionally purge that cache line
28 * - write address & 0xffffff00 to conditionally purge that cache line
44 /* instruction cache access registers */
52 /* data cache access registers */
/linux-4.1.27/arch/mn10300/proc-mn103e010/include/proc/
H A Dcache.h14 /* L1 cache */
22 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
23 #define L1_CACHE_TAG_DIRTY 0x00000008 /* data cache tag dirty bit */
24 #define L1_CACHE_TAG_ENTRY 0x00000ff0 /* cache tag entry address mask */
25 #define L1_CACHE_TAG_ADDRESS 0xfffff000 /* cache tag line address mask */
30 * managing the cache with the interrupts disabled
36 * whole cache rather than trying to flush the specified range.
/linux-4.1.27/arch/mn10300/proc-mn2ws0050/include/proc/
H A Dcache.h20 * L1 cache
28 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
29 #define L1_CACHE_TAG_DIRTY 0x00000008 /* data cache tag dirty bit */
30 #define L1_CACHE_TAG_ENTRY 0x00000fe0 /* cache tag entry address mask */
31 #define L1_CACHE_TAG_ADDRESS 0xfffff000 /* cache tag line address mask */
36 * managing the cache with the interrupts disabled
42 * whole cache rather than trying to flush the specified range.
H A Dsmp-regs.h6 * 13-Nov-2006 MEI Add extended cache and atomic operation register
40 /* extended cache control register */
42 #define ECHCTR_IBCM 0x00000001 /* instruction cache broadcast mask */
43 #define ECHCTR_DBCM 0x00000002 /* data cache broadcast mask */
44 #define ECHCTR_ISPM 0x00000004 /* instruction cache snoop mask */
45 #define ECHCTR_DSPM 0x00000008 /* data cache snoop mask */
/linux-4.1.27/arch/unicore32/include/asm/
H A Dcache.h2 * linux/arch/unicore32/include/asm/cache.h
20 * sure that all such allocations are cache aligned. Otherwise,
22 * cache before the transfer is done, causing old data to be seen by
H A Dcacheflush.h31 * The arch/unicore32/mm/cache.S files implement these methods.
38 * effects are cache-type (VIVT/VIPT/PIPT) specific.
43 * Currently only needed for cache-v6.S and cache-v7.S, see
48 * Unconditionally clean and invalidate the entire cache.
52 * Clean and invalidate all user space cache entries
57 * Clean and invalidate a range of cache entries in the
106 * Their sole purpose is to ensure that data held in the cache
128 /* Invalidate I-cache */ __flush_icache_all()
156 * Perform necessary cache operations to ensure that data previously
162 * Perform necessary cache operations to ensure that the TLB will
169 * cache page at virtual address page->virtual.
175 * Otherwise we can defer the operation, and clean the cache when we are
192 * duplicate cache flushing elsewhere performed by flush_dcache_page().
200 * data, we need to do a full cache flush to ensure that writebacks
/linux-4.1.27/arch/frv/include/asm/
H A Dcache.h0 /* cache.h: FRV cache definitions
16 /* bytes per L1 cache line */
/linux-4.1.27/arch/arm/mm/
H A Dcache-v4wt.S2 * linux/arch/arm/mm/cache-v4wt.S
10 * ARMv4 write through cache operations support.
21 * The size of one data cache line.
26 * The number of data cache segments.
31 * The number of lines in a cache segment.
37 * clean the whole cache, rather than using the individual
38 * cache line maintenance instructions.
51 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
58 * Invalidate all cache entries in a particular address
66 * Clean and invalidate the entire cache.
73 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
74 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
80 * Clean and invalidate a range of cache entries in the specified
135 * Ensure no D cache aliasing occurs, either with itself or
136 * the I cache
143 mcr p15, 0, r2, c7, c5, 0 @ invalidate I cache
152 * are not cache line aligned, those lines must be written
H A DMakefile38 obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o
39 obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o
40 obj-$(CONFIG_CPU_CACHE_V4WB) += cache-v4wb.o
41 obj-$(CONFIG_CPU_CACHE_V6) += cache-v6.o
42 obj-$(CONFIG_CPU_CACHE_V7) += cache-v7.o
43 obj-$(CONFIG_CPU_CACHE_FA) += cache-fa.o
44 obj-$(CONFIG_CPU_CACHE_NOP) += cache-nop.o
99 obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o
100 obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o l2c-l2x0-resume.o
101 obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o
102 obj-$(CONFIG_CACHE_TAUROS2) += cache-tauros2.o
H A Dcache-tauros2.c2 * arch/arm/mm/cache-tauros2.c - Tauros2 L2 cache controller support
23 #include <asm/hardware/cache-tauros2.h>
28 * cache operations, the cache handling code in proc-v7.S takes care
31 * So, we only need to register outer cache operations here if we're
33 * outer cache operations into the kernel image if the kernel has been
38 * Low-level cache maintenance operations.
67 * Clean and invalidate partial first cache line. tauros2_inv_range()
75 * Clean and invalidate partial last cache line. tauros2_inv_range()
83 * Invalidate all full cache lines between 'start' and 'end'. tauros2_inv_range()
214 * v5 CPUs with Tauros2 have the L2 cache enable bit tauros2_internal_init()
219 pr_info("Tauros2: Enabling L2 cache.\n"); tauros2_internal_init()
235 * cache ops. (PJ4 is in its v7 personality mode if the MMFR3 tauros2_internal_init()
236 * register indicates support for the v7 hierarchical cache tauros2_internal_init()
240 * implement the v7 cache ops but are only ARMv6 CPUs (due to tauros2_internal_init()
252 * ARMv7 spec to contain fine-grained cache control bits). tauros2_internal_init()
256 pr_info("Tauros2: Enabling L2 cache.\n"); tauros2_internal_init()
269 pr_info("Tauros2: L2 cache support initialised " tauros2_internal_init()
275 { .compatible = "marvell,tauros2-cache"},
289 pr_info("Not found marvell,tauros2-cache, disable it\n"); tauros2_init()
293 ret = of_property_read_u32(node, "marvell,tauros2-cache-features", &f); tauros2_init()
295 pr_info("Not found marvell,tauros-cache-features property, " tauros2_init()
H A Dcache-v7.S2 * linux/arch/arm/mm/cache-v7.S
25 * of cache lines with uninitialized data and uninitialized tags to get
70 * Flush the whole I-cache.
77 ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
78 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
85 * Flush the D-cache up to the Level of Unification Inner Shareable
104 beq start_flush_levels @ start flushing cache levels
112 * Flush the whole D-cache.
125 mov r10, #0 @ start clean at cache level 0
127 add r2, r10, r10, lsr #1 @ work out 3x current cache level
128 mov r1, r0, lsr r2 @ extract cache type bits from clidr
129 and r1, r1, #7 @ mask of the bits for current cache only
130 cmp r1, #2 @ see what cache we have at this level
131 blt skip @ skip if no cache, or just i-cache
135 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
141 and r2, r1, #7 @ extract the length of the cache lines
151 ARM( orr r11, r10, r4, lsl r5 ) @ factor way and cache number into r11
153 THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
163 add r10, r10, #2 @ increment cache number
167 mov r10, #0 @ switch back to cache level 0
168 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
177 * Flush the entire cache system.
178 * The data cache flush is now achieved using atomic clean / invalidates
179 * working outwards from L1 cache. This is done using Set/Way based cache
181 * The instruction cache can still be invalidated back to the point of
190 ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
191 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
200 * Flush the data cache up to Level of Unification Inner Shareable.
201 * Invalidate the I-cache to the point of unification.
208 ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
209 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
235 * - we have a VIPT cache.
302 * Fault handling for the cache operation above. If the virtual address in r0
345 * Invalidate the data cache within the specified region; we will
347 * purge old data in the cache.
H A Dcopypage-v4wt.c10 * This is for CPUs with a writethrough cache and 'flush ID cache' is
11 * the only supported cache operation.
20 * dirty data in the cache. However, we do have to ensure that
40 mcr p15, 0, r2, c7, c7, 0 @ flush ID cache\n\ v4wt_copy_user_page()
78 mcr p15, 0, r2, c7, c7, 0 @ flush ID cache" v4wt_clear_user_highpage()
H A Dcache-feroceon-l2.c2 * arch/arm/mm/cache-feroceon-l2.c - Feroceon L2 cache controller support
22 #include <asm/hardware/cache-feroceon-l2.h>
27 * Low-level cache maintenance operations.
29 * As well as the regular 'clean/invalidate/flush L2 cache line by
30 * MVA' instructions, the Feroceon L2 cache controller also features
35 * cache line whose first byte address lies in the inclusive range
38 * The cache range operations stall the CPU pipeline until completion.
51 * memory mapping afterwards (note: a cache flush may happen l2_get_va()
134 * noninclusive, while the hardware cache range operations use
150 * Try to process all cache lines between 'start' and 'end'. calc_range_end()
155 * Limit the number of cache lines processed at once, calc_range_end()
156 * since cache range operations stall the CPU pipeline calc_range_end()
174 * Clean and invalidate partial first cache line. feroceon_l2_inv_range()
182 * Clean and invalidate partial last cache line. feroceon_l2_inv_range()
190 * Invalidate all full cache lines between 'start' and 'end'. feroceon_l2_inv_range()
237 * Routines to disable and re-enable the D-cache and I-cache at run
238 * time. These are necessary because the L2 cache can only be enabled
341 "Feroceon L2: bootloader left the L2 cache on!\n"); enable_l2()
361 { .compatible = "marvell,kirkwood-cache"},
362 { .compatible = "marvell,feroceon-cache"},
378 if (node && of_device_is_compatible(node, "marvell,kirkwood-cache")) { feroceon_of_init()
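The comments above note two constraints on the Feroceon L2 range operations: they act on whole cache lines, and the driver caps how many lines it touches per operation (see calc_range_end()) because the range ops stall the CPU pipeline until they complete. A generic sketch of that "align, then process in bounded chunks" walk is shown below; the 32-byte line size and the batch limit are illustrative assumptions.

/* Generic bounded range walk in the spirit of calc_range_end() above:
 * align the ends to cache-line boundaries and cap the work per step.
 * CACHE_LINE_SIZE and MAX_RANGE_BYTES are illustrative values.
 */
#include <stdio.h>

#define CACHE_LINE_SIZE  32UL
#define MAX_RANGE_BYTES  (1024UL * CACHE_LINE_SIZE)

static void range_op(unsigned long start, unsigned long end)
{
	printf("op on [%#lx, %#lx)\n", start, end);   /* stand-in for the hardware op */
}

static void walk_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);                             /* align down */
	end = (end + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1);  /* align up */

	while (start < end) {
		unsigned long chunk = end - start;

		if (chunk > MAX_RANGE_BYTES)
			chunk = MAX_RANGE_BYTES;     /* bound the pipeline stall */
		range_op(start, start + chunk);
		start += chunk;
	}
}

int main(void)
{
	walk_range(0x1004, 0x1004 + 3 * MAX_RANGE_BYTES / 2);
	return 0;
}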
H A Dcache-v4wb.S2 * linux/arch/arm/mm/cache-v4wb.S
18 * The size of one data cache line.
23 * The total size of the data cache.
30 # error Unknown cache size
35 * clean the whole cache, rather than using the individual
36 * cache line maintenance instructions.
61 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
68 * Clean and invalidate all cache entries in a particular address
76 * Clean and invalidate the entire cache.
80 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
103 * Invalidate a range of cache entries in the specified
114 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
117 bhs __flush_whole_cache @ flush whole D cache
131 * Ensure no D cache aliasing occurs, either with itself or
132 * the I cache
172 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
182 * are not cache line aligned, those lines must be written
H A Dproc-arm925.S9 * Update for Linux-2.6 and cache flush improvements
29 * These are the low level assembler for performing cache and TLB
36 * NOTE1: The TI925T Configuration Register bit "D-cache clean and flush
41 * NOTE2: Default is the "D-cache clean and flush entry mode". It looks
42 * like the "Transparent mode" must be on for partial cache flushes
46 * NOTE3: Write-back cache flushing seems to be flakey with devices using
48 * write-through cache with CONFIG_CPU_DCACHE_WRITETHROUGH (this is
63 * The size of one data cache line.
68 * The number of data cache segments.
73 * The number of lines in a cache segment.
79 * clean the whole cache, rather than using the individual
80 * cache line maintenance instructions.
145 mcr p15, 0, r2, c1, c0, 0 @ Disable I cache
157 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
164 * Clean and invalidate all cache entries in a particular
173 * Clean and invalidate the entire cache.
180 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
189 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
196 * Clean and invalidate a range of cache entries in the
267 * Ensure no D cache aliasing occurs, either with itself or
268 * the I cache
280 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
289 * are not cache line aligned, those lines must be written
409 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
417 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
445 /* Transparent on, D-cache clean & flush mode. See NOTE2 above */
496 .macro arm925_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
H A Dcache-v6.S2 * linux/arch/arm/mm/cache-v6.S
28 * Flush the whole I-cache.
43 mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache
44 mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache
45 mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache
46 mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache
52 mcr p15, 0, r0, c7, c5, 0 @ invalidate I-cache
60 * Flush the entire cache.
67 mcr p15, 0, r0, c7, c14, 0 @ D cache clean+invalidate
69 mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
98 * - we have a VIPT cache.
146 mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
156 * Fault handling for the cache operation above. If the virtual address in r0
197 * Invalidate the data cache within the specified region; we will
199 * purge old data in the cache.
H A Dproc-arm946.S6 * (Many of cache codes are from proc-arm926.S)
48 bic r0, r0, #0x00001000 @ i-cache
49 bic r0, r0, #0x00000004 @ d-cache
61 mcr p15, 0, ip, c7, c5, 0 @ flush I cache
62 mcr p15, 0, ip, c7, c6, 0 @ flush D cache
66 bic ip, ip, #0x00001000 @ i-cache
87 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
100 * Clean and invalidate the entire cache.
107 mcr p15, 0, ip, c7, c6, 0 @ flush D cache
118 mcrne p15, 0, ip, c7, c5, 0 @ flush I cache
125 * Clean and invalidate a range of cache entries in the
199 * Ensure no D cache aliasing occurs, either with itself or
200 * the I cache
213 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
222 * are not cache line aligned, those lines must be written
332 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
333 mcr p15, 0, r0, c7, c6, 0 @ invalidate D cache
380 orr r0, r0, #0x00001000 @ I-cache
381 orr r0, r0, #0x00000005 @ MPU/D-cache
H A Dproc-xscale.S35 * is larger than this, then we flush the whole cache
40 * the cache line size of the I and D cache
45 * the size of the data cache
50 * Virtual address used to allocate the cache when flushed
59 * Without this the XScale core exhibits cache eviction problems and no one
94 1: mcr p15, 0, \rd, c7, c2, 5 @ allocate D cache line
96 mcr p15, 0, \rd, c7, c2, 5 @ allocate D cache line
98 mcr p15, 0, \rd, c7, c2, 5 @ allocate D cache line
100 mcr p15, 0, \rd, c7, c2, 5 @ allocate D cache line
155 @ *** cache line aligned ***
193 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
200 * Invalidate all cache entries in a particular address
209 * Clean and invalidate the entire cache.
217 mcrne p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB
224 * Invalidate a range of cache entries in the specified
239 mcrne p15, 0, r0, c7, c5, 1 @ Invalidate I cache line
240 mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
241 mcr p15, 0, r0, c7, c6, 1 @ Invalidate D cache line
260 * Note: single I-cache line invalidation isn't used here since
261 * it also trashes the mini I-cache used by JTAG debuggers.
270 mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB
287 mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache entry
299 * Ensure no D cache aliasing occurs, either with itself or
300 * the I cache
313 mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB
322 * are not cache line aligned, those lines must be written
420 * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
424 * The recommended workaround is to always do a clean D-cache line before
425 * doing an invalidate D-cache line, so on the affected processors,
439 * Most of the cache functions are unchanged for these processor revisions.
475 mcr p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB
617 .macro xscale_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
638 .ifb \cache
641 .long \cache
647 cache=xscale_80200_A0_A1_cache_fns
H A Dproc-arm1022.S14 * These are the low level assembler for performing cache and TLB
31 * than this, and we go for the whole cache.
39 * The size of one data cache line.
44 * The number of data cache segments.
49 * The number of lines in a cache segment.
55 * clean the whole cache, rather than using the individual
56 * cache line maintenance instructions.
123 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
131 * Invalidate all cache entries in a particular address
139 * Clean and invalidate the entire cache.
156 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
164 * Invalidate a range of cache entries in the specified
185 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
233 * Ensure no D cache aliasing occurs, either with itself or
234 * the I cache
256 * are not cache line aligned, those lines must be written
386 mcr p15, 0, r1, c7, c5, 0 @ invalidate I cache
H A Dproc-arm1026.S14 * These are the low level assembler for performing cache and TLB
31 * than this, and we go for the whole cache.
39 * The size of one data cache line.
44 * The number of data cache segments.
49 * The number of lines in a cache segment.
55 * clean the whole cache, rather than using the individual
56 * cache line maintenance instructions.
123 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
131 * Invalidate all cache entries in a particular address
139 * Clean and invalidate the entire cache.
151 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
159 * Invalidate a range of cache entries in the specified
180 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
227 * Ensure no D cache aliasing occurs, either with itself or
228 * the I cache
250 * are not cache line aligned, those lines must be written
375 mcr p15, 0, r1, c7, c5, 0 @ invalidate I cache
H A Dproc-arm940.S41 bic r0, r0, #0x00001000 @ i-cache
42 bic r0, r0, #0x00000004 @ d-cache
54 mcr p15, 0, ip, c7, c5, 0 @ flush I cache
55 mcr p15, 0, ip, c7, c6, 0 @ flush D cache
59 bic ip, ip, #0x00001000 @ i-cache
80 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
93 * Clean and invalidate the entire cache.
102 * There is no efficient way to flush a range of cache entries
112 mcr p15, 0, ip, c7, c6, 0 @ flush D cache
123 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
156 * Ensure no D cache aliasing occurs, either with itself or
157 * the I cache
171 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
279 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
280 mcr p15, 0, r0, c7, c6, 0 @ invalidate D cache
327 orr r0, r0, #0x00001000 @ I-cache
328 orr r0, r0, #0x00000005 @ MPU/D-cache
/linux-4.1.27/drivers/video/fbdev/
H A Dsh_mobile_meram.c110 * @regs: Registers cache
114 * @cache_unit: Bytes to cache per ICB
133 struct sh_mobile_meram_icb *cache; member in struct:sh_mobile_meram_fb_plane
145 * @regs: Registers cache
213 * LCDC cache planes allocation, init, cleanup and free
227 plane->cache = &priv->icbs[idx]; meram_plane_alloc()
239 __set_bit(plane->cache->index, &priv->used_icb); meram_plane_alloc()
255 __clear_bit(plane->cache->index, &priv->used_icb); meram_plane_free()
269 struct sh_mobile_meram_fb_cache *cache, meram_set_next_addr()
273 struct sh_mobile_meram_icb *icb = cache->planes[0].marker; meram_set_next_addr()
280 meram_write_icb(priv->base, cache->planes[0].cache->index, target, meram_set_next_addr()
282 meram_write_icb(priv->base, cache->planes[0].marker->index, target, meram_set_next_addr()
283 base_addr_y + cache->planes[0].marker->cache_unit); meram_set_next_addr()
285 if (cache->nplanes == 2) { meram_set_next_addr()
286 meram_write_icb(priv->base, cache->planes[1].cache->index, meram_set_next_addr()
288 meram_write_icb(priv->base, cache->planes[1].marker->index, meram_set_next_addr()
290 cache->planes[1].marker->cache_unit); meram_set_next_addr()
297 struct sh_mobile_meram_fb_cache *cache, meram_get_next_icb_addr()
300 struct sh_mobile_meram_icb *icb = cache->planes[0].marker; meram_get_next_icb_addr()
308 *icb_addr_y = icb_offset | (cache->planes[0].marker->index << 24); meram_get_next_icb_addr()
309 if (cache->nplanes == 2) meram_get_next_icb_addr()
311 | (cache->planes[1].marker->index << 24); meram_get_next_icb_addr()
356 meram_write_icb(priv->base, plane->cache->index, MExxBSIZE, meram_plane_init()
361 meram_write_icb(priv->base, plane->cache->index, MExxMNCF, bnm); meram_plane_init()
364 meram_write_icb(priv->base, plane->cache->index, MExxSBSIZE, xpitch); meram_plane_init()
367 /* save a cache unit size */ meram_plane_init()
368 plane->cache->cache_unit = xres * save_lines; meram_plane_init()
377 meram_write_icb(priv->base, plane->cache->index, MExxCTL, meram_plane_init()
382 MERAM_MExxCTL_VAL(plane->cache->index, marker->offset + meram_plane_init()
394 meram_write_icb(priv->base, plane->cache->index, MExxCTL, meram_plane_cleanup()
399 plane->cache->cache_unit = 0; meram_plane_cleanup()
432 struct sh_mobile_meram_fb_cache *cache; meram_cache_alloc() local
435 cache = kzalloc(sizeof(*cache), GFP_KERNEL); meram_cache_alloc()
436 if (cache == NULL) meram_cache_alloc()
439 cache->nplanes = nplanes; meram_cache_alloc()
441 ret = meram_plane_alloc(priv, &cache->planes[0], meram_cache_alloc()
446 cache->planes[0].marker->current_reg = 1; meram_cache_alloc()
447 cache->planes[0].marker->pixelformat = pixelformat; meram_cache_alloc()
449 if (cache->nplanes == 1) meram_cache_alloc()
450 return cache; meram_cache_alloc()
452 ret = meram_plane_alloc(priv, &cache->planes[1], meram_cache_alloc()
455 meram_plane_free(priv, &cache->planes[0]); meram_cache_alloc()
459 return cache; meram_cache_alloc()
462 kfree(cache); meram_cache_alloc()
471 struct sh_mobile_meram_fb_cache *cache; sh_mobile_meram_cache_alloc() local
503 cache = meram_cache_alloc(priv, cfg, pixelformat); sh_mobile_meram_cache_alloc()
504 if (IS_ERR(cache)) { sh_mobile_meram_cache_alloc()
506 PTR_ERR(cache)); sh_mobile_meram_cache_alloc()
511 meram_plane_init(priv, &cache->planes[0], xres, yres, &out_pitch); sh_mobile_meram_cache_alloc()
514 meram_plane_init(priv, &cache->planes[1], sh_mobile_meram_cache_alloc()
517 meram_plane_init(priv, &cache->planes[1], sh_mobile_meram_cache_alloc()
522 return cache; sh_mobile_meram_cache_alloc()
529 struct sh_mobile_meram_fb_cache *cache = data; sh_mobile_meram_cache_free() local
535 meram_plane_cleanup(priv, &cache->planes[0]); sh_mobile_meram_cache_free()
536 meram_plane_free(priv, &cache->planes[0]); sh_mobile_meram_cache_free()
538 if (cache->nplanes == 2) { sh_mobile_meram_cache_free()
539 meram_plane_cleanup(priv, &cache->planes[1]); sh_mobile_meram_cache_free()
540 meram_plane_free(priv, &cache->planes[1]); sh_mobile_meram_cache_free()
543 kfree(cache); sh_mobile_meram_cache_free()
556 struct sh_mobile_meram_fb_cache *cache = data; sh_mobile_meram_cache_update() local
561 meram_set_next_addr(priv, cache, base_addr_y, base_addr_c); sh_mobile_meram_cache_update()
562 meram_get_next_icb_addr(pdata, cache, icb_addr_y, icb_addr_c); sh_mobile_meram_cache_update()
268 meram_set_next_addr(struct sh_mobile_meram_priv *priv, struct sh_mobile_meram_fb_cache *cache, unsigned long base_addr_y, unsigned long base_addr_c) meram_set_next_addr() argument
296 meram_get_next_icb_addr(struct sh_mobile_meram_info *pdata, struct sh_mobile_meram_fb_cache *cache, unsigned long *icb_addr_y, unsigned long *icb_addr_c) meram_get_next_icb_addr() argument
/linux-4.1.27/arch/unicore32/mm/
H A DMakefile14 obj-$(CONFIG_CPU_UCV2) += cache-ucv2.o tlb-ucv2.o proc-ucv2.o
/linux-4.1.27/include/net/netns/
H A Dhash.h4 #include <asm/cache.h>
/linux-4.1.27/arch/s390/include/asm/
H A Dcache.h5 * Derived from "include/asm-i386/cache.h"
/linux-4.1.27/arch/cris/arch-v32/kernel/
H A DMakefile10 cache.o cacheflush.o
H A Dcache.c3 #include <arch/cache.h>
6 /* This file is used to work around a cache bug, Guinness TR 106. */
/linux-4.1.27/arch/frv/mm/
H A DMakefile8 pgalloc.o highmem.o fault.o extable.o cache-page.o tlb-flush.o tlb-miss.o \
/linux-4.1.27/arch/m68k/mm/
H A DMakefile7 obj-$(CONFIG_MMU) += cache.o fault.o
/linux-4.1.27/arch/arm/mach-realview/include/mach/
H A Dbarriers.h2 * Barriers redefined for RealView ARM11MPCore platforms with L220 cache
/linux-4.1.27/fs/ceph/
H A DMakefile12 ceph-$(CONFIG_CEPH_FSCACHE) += cache.o
/linux-4.1.27/fs/coda/
H A DMakefile7 coda-objs := psdev.o cache.o cnode.o inode.o dir.o file.o upcall.o \
/linux-4.1.27/sound/soc/
H A Dsoc-cache.c2 * soc-cache.c -- ASoC register cache helpers
28 dev_dbg(codec->dev, "ASoC: Initializing cache for %s codec\n", snd_soc_cache_init()
48 dev_dbg(codec->dev, "ASoC: Destroying cache for %s codec\n", snd_soc_cache_exit()
/linux-4.1.27/tools/perf/ui/gtk/
H A Dsetup.c2 #include "../../util/cache.h"
/linux-4.1.27/tools/perf/util/
H A Denvironment.c6 #include "cache.h"
/linux-4.1.27/include/net/netfilter/
H A Dxt_rateest.h5 /* keep lock and bstats on same cache line to speedup xt_rateest_tg() */
8 /* keep rstats and lock on same cache line to speedup xt_rateest_mt() */
/linux-4.1.27/drivers/s390/char/
H A Dhmcdrv_cache.c22 * struct hmcdrv_cache_entry - file cache (only used on read/dir)
25 * @len: size of @content cache (0 if caching disabled)
29 * @timeout: cache timeout in jiffies
45 static int hmcdrv_cache_order; /* cache allocated page order */
55 * hmcdrv_cache_get() - looks for file data/content in read cache
58 * Return: number of bytes read from cache or a negative number if nothing
59 * in content cache (for the file/cmd specified in @ftp)
63 loff_t pos; /* position in cache (signed) */ hmcdrv_cache_get()
85 /* check if the requested chunk falls into our cache (which starts hmcdrv_cache_get()
107 * hmcdrv_cache_do() - do a HMC drive CD/DVD transfer with cache update
118 /* only cache content if the read/dir cache really exists hmcdrv_cache_do()
125 /* because the cache is not located at ftp->buf, we have to hmcdrv_cache_do()
127 * to our cache, and using the increased size) hmcdrv_cache_do()
153 /* cache some file info (FTP command, file name and file hmcdrv_cache_do()
181 if ((ftp->id == HMCDRV_FTP_DIR) || /* read cache */ hmcdrv_cache_cmd()
187 if (len >= 0) /* got it from cache ? */ hmcdrv_cache_cmd()
199 /* invalidate the (read) cache in case there was a write operation hmcdrv_cache_cmd()
210 * hmcdrv_cache_startup() - startup of HMC drive cache
211 * @cachesize: cache size
224 pr_err("Allocating the requested cache size of %zu bytes failed\n", hmcdrv_cache_startup()
229 pr_debug("content cache enabled, size is %zu bytes\n", hmcdrv_cache_startup()
238 * hmcdrv_cache_shutdown() - shutdown of HMC drive cache
251 hmcdrv_cache_file.len = 0; /* no cache */ hmcdrv_cache_shutdown()
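
The hmcdrv_cache.c excerpts above describe a single-slot read cache: it remembers which FTP command and file name filled it, expires after a timeout, and only serves a request whose chunk falls inside the cached window. The following is a minimal userspace sketch of that lookup logic; the struct layout, field names and the wall-clock timeout are illustrative assumptions, not the driver's actual definitions.

#include <stdio.h>
#include <string.h>
#include <time.h>

/* Hypothetical single-slot read cache, loosely modelled on the hmcdrv
 * excerpts above: one cached window of file content plus the identity
 * of the request that filled it. */
struct read_cache {
    int id;                 /* command id that filled the cache */
    char fname[64];         /* file name that filled the cache */
    long ofs;               /* start offset of the cached window */
    size_t len;             /* number of valid bytes (0 = disabled) */
    time_t expires;         /* absolute expiry time */
    char content[4096];     /* cached bytes */
};

/* Return the number of bytes served from the cache, or -1 if the request
 * cannot be satisfied from it (wrong file, expired, or the requested
 * chunk does not fall inside the cached window). */
static long cache_get(const struct read_cache *c, int id, const char *fname,
                      long ofs, char *buf, size_t len)
{
    long pos;

    if (c->len == 0 || c->id != id || strcmp(c->fname, fname) != 0)
        return -1;
    if (time(NULL) > c->expires)
        return -1;                      /* cache timed out */

    pos = ofs - c->ofs;                 /* position inside the window */
    if (pos < 0 || (size_t)pos >= c->len)
        return -1;                      /* chunk starts outside the window */
    if (len > c->len - (size_t)pos)
        len = c->len - (size_t)pos;     /* clamp to what we have */

    memcpy(buf, c->content + pos, len);
    return (long)len;
}

int main(void)
{
    struct read_cache c = { .id = 1, .ofs = 0, .expires = time(NULL) + 30 };
    char buf[32];
    long n;

    strcpy(c.fname, "README");
    c.len = (size_t)sprintf(c.content, "hello from the cache\n");

    n = cache_get(&c, 1, "README", 6, buf, sizeof(buf) - 1);
    if (n >= 0) {
        buf[n] = '\0';
        printf("hit: %s", buf);
    } else {
        printf("miss\n");
    }
    return 0;
}
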
/linux-4.1.27/fs/overlayfs/
H A Dreaddir.c51 struct ovl_dir_cache *cache; member in struct:ovl_dir_file
175 struct ovl_dir_cache *cache = od->cache; ovl_cache_put() local
177 WARN_ON(cache->refcount <= 0); ovl_cache_put()
178 cache->refcount--; ovl_cache_put()
179 if (!cache->refcount) { ovl_cache_put()
180 if (ovl_dir_cache(dentry) == cache) ovl_cache_put()
183 ovl_cache_free(&cache->entries); ovl_cache_put()
184 kfree(cache); ovl_cache_put()
270 struct ovl_dir_cache *cache = od->cache; ovl_dir_reset() local
274 if (cache && ovl_dentry_version_get(dentry) != cache->version) { ovl_dir_reset()
276 od->cache = NULL; ovl_dir_reset()
322 list_for_each(p, &od->cache->entries) { ovl_seek_cursor()
327 /* Cursor is safe since the cache is stable */ ovl_seek_cursor()
334 struct ovl_dir_cache *cache; ovl_cache_get() local
336 cache = ovl_dir_cache(dentry); ovl_cache_get()
337 if (cache && ovl_dentry_version_get(dentry) == cache->version) { ovl_cache_get()
338 cache->refcount++; ovl_cache_get()
339 return cache; ovl_cache_get()
343 cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL); ovl_cache_get()
344 if (!cache) ovl_cache_get()
347 cache->refcount = 1; ovl_cache_get()
348 INIT_LIST_HEAD(&cache->entries); ovl_cache_get()
350 res = ovl_dir_read_merged(dentry, &cache->entries); ovl_cache_get()
352 ovl_cache_free(&cache->entries); ovl_cache_get()
353 kfree(cache); ovl_cache_get()
357 cache->version = ovl_dentry_version_get(dentry); ovl_cache_get()
358 ovl_set_dir_cache(dentry, cache); ovl_cache_get()
360 return cache; ovl_cache_get()
375 if (!od->cache) { ovl_iterate()
376 struct ovl_dir_cache *cache; ovl_iterate() local
378 cache = ovl_cache_get(dentry); ovl_iterate()
379 if (IS_ERR(cache)) ovl_iterate()
380 return PTR_ERR(cache); ovl_iterate()
382 od->cache = cache; ovl_iterate()
386 while (od->cursor != &od->cache->entries) { ovl_iterate()
426 if (od->cache) ovl_dir_llseek()
481 if (od->cache) { ovl_dir_release()
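
Two ideas recur in the readdir.c excerpts above: the directory cache is reference counted, and it is only reused while the version recorded in it still matches the directory's current version (otherwise it is rebuilt). A minimal sketch of that pattern follows; the names and the plain counter standing in for the kernel's dentry version are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical directory-listing cache: refcounted and tagged with the
 * version of the directory it was built from. */
struct dir_cache {
    int refcount;
    unsigned long version;
    /* ... cached entries would live here ... */
};

static unsigned long dir_version;       /* bumped on every modification */
static struct dir_cache *cur_cache;     /* cache attached to the directory */

static struct dir_cache *cache_get(void)
{
    if (cur_cache && cur_cache->version == dir_version) {
        cur_cache->refcount++;          /* still valid: share it */
        return cur_cache;
    }

    /* stale or missing: build a fresh cache for the current version */
    struct dir_cache *c = calloc(1, sizeof(*c));
    if (!c)
        return NULL;
    c->refcount = 1;
    c->version = dir_version;
    cur_cache = c;
    return c;
}

static void cache_put(struct dir_cache *c)
{
    if (--c->refcount == 0) {
        if (cur_cache == c)
            cur_cache = NULL;
        free(c);
    }
}

int main(void)
{
    struct dir_cache *a = cache_get();
    struct dir_cache *b = cache_get();      /* same version: shared */
    printf("shared: %s\n", a == b ? "yes" : "no");

    dir_version++;                          /* directory changed */
    struct dir_cache *c = cache_get();      /* stale cache is replaced */
    printf("rebuilt: %s\n", c != a ? "yes" : "no");

    cache_put(a);
    cache_put(b);
    cache_put(c);
    return 0;
}
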
/linux-4.1.27/arch/s390/kernel/
H A Dcache.c2 * Extract CPU cache information and expose them via sysfs.
70 struct cacheinfo *cache; show_cacheinfo() local
78 cache = this_cpu_ci->info_list + idx; show_cacheinfo()
79 seq_printf(m, "cache%-11d: ", idx); show_cacheinfo()
80 seq_printf(m, "level=%d ", cache->level); show_cacheinfo()
81 seq_printf(m, "type=%s ", cache_type_string[cache->type]); show_cacheinfo()
83 cache->disable_sysfs ? "Shared" : "Private"); show_cacheinfo()
84 seq_printf(m, "size=%dK ", cache->size >> 10); show_cacheinfo()
85 seq_printf(m, "line_size=%u ", cache->coherency_line_size); show_cacheinfo()
86 seq_printf(m, "associativity=%d", cache->ways_of_associativity); show_cacheinfo()
/linux-4.1.27/arch/sh/include/cpu-sh5/cpu/
H A Dcache.h5 * include/asm-sh/cpu-sh5/cache.h
70 * A cache line is identified by a tag + set but OCACHETAG/ICACHETAG
76 /* Instruction cache */
82 /* These declarations relate to cache 'synonyms' in the operand cache. A
84 indexing the cache sets and those passed to the MMU for translation. In the
93 * Instruction cache can't be invalidated based on physical addresses.
/linux-4.1.27/arch/arm64/include/asm/
H A Dcacheflush.h34 * The arch/arm64/mm/cache.S implements these methods.
40 * the implementation assumes non-aliasing VIPT D-cache and (aliasing)
41 * VIPT or ASID-tagged VIVT I-cache.
45 * Unconditionally clean and invalidate the entire cache.
49 * Clean and invalidate all user space cache entries
54 * Ensure coherency between the I-cache and the D-cache in the
61 * Ensure coherency between the I-cache and the D-cache in the
110 * cache page at virtual address page->virtual.
116 * Otherwise we can defer the operation, and clean the cache when we are
136 * duplicate cache flushing elsewhere performed by flush_dcache_page().
141 * Not required on AArch64 (PIPT or VIPT non-aliasing D-cache).
H A Dcachetype.h43 * NumSets, bits[27:13] - (Number of sets in cache) - 1
44 * Associativity, bits[12:3] - (Associativity of cache) - 1
45 * LineSize, bits[2:0] - (Log2(Number of words in cache line)) - 2
68 /* Helpers for Level 1 Instruction cache csselr = 1L */ icache_get_linesize()
81 * permitted in the I-cache.
/linux-4.1.27/arch/score/mm/
H A Dcache.c2 * arch/score/mm/cache.c
40 The addr must be cache aligned.
47 "cache 0x0e, [%0, 0]\n" flush_data_cache_page()
48 "cache 0x1a, [%0, 0]\n" flush_data_cache_page()
126 "cache 0x10, [r8, 0]\n" flush_icache_all()
135 "cache 0x1f, [r8, 0]\n" flush_dcache_all()
137 "cache 0x1a, [r8, 0]\n" flush_dcache_all()
146 "cache 0x10, [r8, 0]\n" flush_cache_all()
148 "cache 0x1f, [r8, 0]\n" flush_cache_all()
150 "cache 0x1a, [r8, 0]\n" flush_cache_all()
170 sized regions from the cache.
226 "cache 0x02, [%0, 0]\n" flush_cache_sigtramp()
228 "cache 0x02, [%0, 0x4]\n" flush_cache_sigtramp()
231 "cache 0x0d, [%0, 0]\n" flush_cache_sigtramp()
233 "cache 0x0d, [%0, 0x4]\n" flush_cache_sigtramp()
236 "cache 0x1a, [%0, 0]\n" flush_cache_sigtramp()
242 1. WB and invalidate a cache line of Dcache
256 "cache 0x0e, [%0, 0]\n" flush_dcache_range()
258 "cache 0x1a, [%0, 0]\n" flush_dcache_range()
275 "cache 0x02, [%0, 0]\n" flush_icache_range()
/linux-4.1.27/arch/powerpc/include/asm/
H A Dcache.h8 /* bytes per L1 cache line */
32 u32 dsize; /* L1 d-cache size */
33 u32 dline_size; /* L1 d-cache line size */
36 u32 isize; /* L1 i-cache size */
37 u32 iline_size; /* L1 i-cache line size */
H A Dreg_8xx.h10 #define SPRN_IC_CST 560 /* Instruction cache control/status */
13 #define SPRN_DC_CST 568 /* Data cache control/status */
17 /* Commands. Only the first few are available to the instruction cache.
26 #define DC_FLINE 0x0e000000 /* Flush data cache line */
39 #define DC_DFWT 0x40000000 /* Data cache is forced write through */
H A Dfsl_pamu_stash.h22 /* cache stash targets */
36 u32 cache; /* cache to stash to: L1,L2,L3 */ member in struct:pamu_stash_attribute
H A Dvdso_datapage.h73 __u32 dcache_size; /* L1 d-cache size 0x60 */
74 __u32 dcache_line_size; /* L1 d-cache line size 0x64 */
75 __u32 icache_size; /* L1 i-cache size 0x68 */
76 __u32 icache_line_size; /* L1 i-cache line size 0x6C */
81 __u32 dcache_block_size; /* L1 d-cache block size */
82 __u32 icache_block_size; /* L1 i-cache block size */
83 __u32 dcache_log_block_size; /* L1 d-cache log block size */
84 __u32 icache_log_block_size; /* L1 i-cache log block size */
111 __u32 dcache_block_size; /* L1 d-cache block size */
112 __u32 icache_block_size; /* L1 i-cache block size */
113 __u32 dcache_log_block_size; /* L1 d-cache log block size */
114 __u32 icache_log_block_size; /* L1 i-cache log block size */
/linux-4.1.27/arch/ia64/include/asm/sn/
H A Dmspec.h41 * half of the cache line. The cache line _MUST NOT_ be used for anything
43 * addresses which reference the same physical cache line. One will
45 * may be loaded into processor cache. The amo will be referenced
47 * cached cache-line is modified, when that line is flushed, it will
/linux-4.1.27/drivers/block/
H A Dps3vram.c83 struct ps3vram_cache cache; member in struct:ps3vram_priv
317 struct ps3vram_cache *cache = &priv->cache; ps3vram_cache_evict() local
319 if (!(cache->tags[entry].flags & CACHE_PAGE_DIRTY)) ps3vram_cache_evict()
323 cache->tags[entry].address); ps3vram_cache_evict()
324 if (ps3vram_upload(dev, CACHE_OFFSET + entry * cache->page_size, ps3vram_cache_evict()
325 cache->tags[entry].address, DMA_PAGE_SIZE, ps3vram_cache_evict()
326 cache->page_size / DMA_PAGE_SIZE) < 0) { ps3vram_cache_evict()
329 entry * cache->page_size, cache->tags[entry].address, ps3vram_cache_evict()
330 cache->page_size); ps3vram_cache_evict()
332 cache->tags[entry].flags &= ~CACHE_PAGE_DIRTY; ps3vram_cache_evict()
339 struct ps3vram_cache *cache = &priv->cache; ps3vram_cache_load() local
343 CACHE_OFFSET + entry * cache->page_size, ps3vram_cache_load()
345 cache->page_size / DMA_PAGE_SIZE) < 0) { ps3vram_cache_load()
348 address, entry * cache->page_size, cache->page_size); ps3vram_cache_load()
351 cache->tags[entry].address = address; ps3vram_cache_load()
352 cache->tags[entry].flags |= CACHE_PAGE_PRESENT; ps3vram_cache_load()
359 struct ps3vram_cache *cache = &priv->cache; ps3vram_cache_flush() local
363 for (i = 0; i < cache->page_count; i++) { ps3vram_cache_flush()
365 cache->tags[i].flags = 0; ps3vram_cache_flush()
373 struct ps3vram_cache *cache = &priv->cache; ps3vram_cache_match() local
379 offset = (unsigned int) (address & (cache->page_size - 1)); ps3vram_cache_match()
383 for (i = 0; i < cache->page_count; i++) { ps3vram_cache_match()
384 if ((cache->tags[i].flags & CACHE_PAGE_PRESENT) && ps3vram_cache_match()
385 cache->tags[i].address == base) { ps3vram_cache_match()
386 cache->hit++; ps3vram_cache_match()
388 cache->tags[i].address); ps3vram_cache_match()
394 i = (jiffies + (counter++)) % cache->page_count; ps3vram_cache_match()
400 cache->miss++; ps3vram_cache_match()
408 priv->cache.page_count = CACHE_PAGE_COUNT; ps3vram_cache_init()
409 priv->cache.page_size = CACHE_PAGE_SIZE; ps3vram_cache_init()
410 priv->cache.tags = kzalloc(sizeof(struct ps3vram_tag) * ps3vram_cache_init()
412 if (priv->cache.tags == NULL) { ps3vram_cache_init()
413 dev_err(&dev->core, "Could not allocate cache tags\n"); ps3vram_cache_init()
417 dev_info(&dev->core, "Created ram cache: %d entries, %d KiB each\n", ps3vram_cache_init()
428 kfree(priv->cache.tags); ps3vram_cache_cleanup()
452 offset = (unsigned int) (from & (priv->cache.page_size - 1)); ps3vram_read()
453 avail = priv->cache.page_size - offset; ps3vram_read()
456 cached = CACHE_OFFSET + entry * priv->cache.page_size + offset; ps3vram_read()
493 offset = (unsigned int) (to & (priv->cache.page_size - 1)); ps3vram_write()
494 avail = priv->cache.page_size - offset; ps3vram_write()
497 cached = CACHE_OFFSET + entry * priv->cache.page_size + offset; ps3vram_write()
507 priv->cache.tags[entry].flags |= CACHE_PAGE_DIRTY; ps3vram_write()
522 seq_printf(m, "hit:%u\nmiss:%u\n", priv->cache.hit, priv->cache.miss); ps3vram_proc_show()
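
The ps3vram.c excerpts above implement a small software cache in front of video RAM: a fixed set of page-sized entries, each tagged with its backing address and PRESENT/DIRTY flags, with hit/miss counters and write-back of dirty entries on eviction. A stripped-down lookup in the same spirit is sketched below; the entry count, page size, flag values and round-robin eviction are assumptions, not the driver's.

#include <stdio.h>
#include <stdint.h>

#define CACHE_PAGE_COUNT 4
#define CACHE_PAGE_SIZE  4096u

#define PAGE_PRESENT 0x1u
#define PAGE_DIRTY   0x2u

struct tag {
    uint32_t flags;
    uint32_t address;   /* backing address this entry caches */
};

struct tag_cache {
    struct tag tags[CACHE_PAGE_COUNT];
    unsigned hit, miss;
    unsigned next_victim;
};

/* Return the entry index caching 'address', loading it on a miss.
 * Eviction is round-robin here; the real driver picks a pseudo-random
 * victim and writes dirty entries back first. */
static int cache_match(struct tag_cache *c, uint32_t address)
{
    uint32_t base = address & ~(CACHE_PAGE_SIZE - 1);

    for (int i = 0; i < CACHE_PAGE_COUNT; i++) {
        if ((c->tags[i].flags & PAGE_PRESENT) && c->tags[i].address == base) {
            c->hit++;
            return i;
        }
    }

    c->miss++;
    int victim = c->next_victim++ % CACHE_PAGE_COUNT;
    if (c->tags[victim].flags & PAGE_DIRTY) {
        /* write-back of the old contents would happen here */
        c->tags[victim].flags &= ~PAGE_DIRTY;
    }
    /* load of the new page would happen here */
    c->tags[victim].address = base;
    c->tags[victim].flags |= PAGE_PRESENT;
    return victim;
}

int main(void)
{
    struct tag_cache c = { 0 };

    cache_match(&c, 0x1000);    /* miss */
    cache_match(&c, 0x1200);    /* hit: same page */
    cache_match(&c, 0x9000);    /* miss */
    printf("hit:%u miss:%u\n", c.hit, c.miss);
    return 0;
}
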
/linux-4.1.27/arch/arm/mach-bcm/
H A Dkona_l2_cache.c17 #include <asm/hardware/cache-l2x0.h>
41 * The aux_val and aux_mask have no effect since L2 cache is already kona_l2_cache_init()
46 pr_err("Couldn't enable L2 cache: %d\n", ret); kona_l2_cache_init()
/linux-4.1.27/arch/arm/boot/compressed/
H A Dhead-xscale.S16 @ Data cache might be active.
17 @ Be sure to flush kernel binary out of the cache,
20 @ memory to be sure we hit the same cache.
/linux-4.1.27/drivers/staging/iio/
H A Diio_simple_dummy.h20 * @dac_val: cache for dac value
21 * @single_ended_adc_val: cache for single ended adc value
22 * @differential_adc_val: cache for differential adc value
23 * @accel_val: cache for acceleration value
24 * @accel_calibbias: cache for acceleration calibbias
25 * @accel_calibscale: cache for acceleration calibscale
28 * @event_val: cache for event threshold value
29 * @event_en: cache of whether event is enabled
/linux-4.1.27/arch/xtensa/include/asm/
H A Dcache.h2 * include/asm-xtensa/cache.h
25 /* Maximum cache size per way. */
H A Dpage.h16 #include <asm/cache.h>
49 * If the cache size for one way is greater than the page size, we have to
50 * deal with cache aliasing. The cache index is wider than the page size:
52 * | |cache| cache index
62 * bit(s) (X) that are part of the cache index are also translated (Y).
63 * If this translation changes bit(s) (X), the cache index is also affected,
64 * thus resulting in a different cache line than before.
144 * If we have cache aliasing and writeback caches, we might have to do
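
The page.h comment above explains when a virtually indexed cache can alias: if one way of the cache is larger than a page, some index bits lie above the page offset and are therefore subject to translation, so two mappings of the same physical page can land on different cache lines. The arithmetic is easy to make concrete; the sketch below uses made-up cache parameters rather than xtensa's.

#include <stdio.h>

/* Decide whether a virtually-indexed cache can alias, given its geometry.
 * Aliasing is possible when one way is larger than a page, because the
 * extra index bits then lie above the page offset and are translated. */
int main(void)
{
    unsigned cache_size = 32 * 1024;   /* illustrative values only */
    unsigned ways       = 2;
    unsigned page_size  = 4096;

    unsigned way_size = cache_size / ways;

    if (way_size > page_size) {
        unsigned colours = way_size / page_size;
        printf("way size %u > page size %u: aliasing possible, "
               "%u cache colours\n", way_size, page_size, colours);
    } else {
        printf("way size %u <= page size %u: no aliasing\n",
               way_size, page_size);
    }
    return 0;
}
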
/linux-4.1.27/arch/sparc/include/asm/
H A Dlsu.h16 #define LSU_CONTROL_DC _AC(0x0000000000000002,UL) /* Data cache enable. */
17 #define LSU_CONTROL_IC _AC(0x0000000000000001,UL) /* Instruction cache enable.*/
H A Dcache.h0 /* cache.h: Cache specific code for the Sparc. These include flushing
H A Dross.h26 * CWR: Cache Wrapping Enabled, if one cache wrapping is on.
27 * SE: Snoop Enable, turns on bus snooping for cache activity if one.
37 * CE: Cache Enable -- 0 = no caching, 1 = cache is on
57 /* The ICCR instruction cache register on the HyperSparc.
70 * instruction cache hit occurs, the corresponding line
71 * for said cache-hit is invalidated. If FTD is zero,
75 * ICE: If set to one, the instruction cache is enabled. If
76 * zero, the cache will not be used for instruction fetches.
126 /* HyperSparc specific cache flushing. */
128 /* This is for the on-chip instruction cache. */ hyper_flush_whole_icache()
H A Dviking.h21 * TC: Tablewalk Cacheable -- 0 = Twalks are not cacheable in E-cache
22 * 1 = Twalks are cacheable in E-cache
24 * GNU/Viking will only cache tablewalks in the E-cache (mxcc) if present
26 * for machines lacking an E-cache (ie. in MBUS mode) this bit must
35 * "Cachable" is only referring to E-cache (if present) and not the
41 * for the hardware cache consistency mechanisms of the GNU/Viking
61 * MBUS mode, the GNU/Viking lacks a GNU/MXCC E-cache. If it is
63 * to a GNU/MXCC cache controller. The GNU/MXCC can be thus connected
88 #define VIKING_DCENABLE 0x00000100 /* Enable data cache */
89 #define VIKING_ICENABLE 0x00000200 /* Enable instruction cache */
94 #define VIKING_SPENABLE 0x00004000 /* Enable bus cache snooping */
109 #define VIKING_PTAG_SHARED 0x00000100 /* Shared with some other cache */
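
The ross.h and viking.h excerpts above document individual enable bits in the cache/MMU control registers, so enabling or disabling a cache is a read-modify-write of a control word. The snippet below shows only that bit arithmetic on an ordinary variable, using the bit values quoted in the viking.h excerpt; it does not touch real hardware.

#include <stdio.h>

/* Bit values as quoted in the viking.h excerpt above. */
#define VIKING_DCENABLE 0x00000100  /* enable data cache */
#define VIKING_ICENABLE 0x00000200  /* enable instruction cache */
#define VIKING_SPENABLE 0x00004000  /* enable bus cache snooping */

int main(void)
{
    unsigned mreg = 0;  /* stand-in for the control register contents */

    /* enable both caches and bus snooping in one read-modify-write */
    mreg |= VIKING_DCENABLE | VIKING_ICENABLE | VIKING_SPENABLE;
    printf("control word: 0x%08x\n", mreg);

    /* disabling the data cache clears only its bit */
    mreg &= ~VIKING_DCENABLE;
    printf("after D-cache off: 0x%08x\n", mreg);
    return 0;
}
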
/linux-4.1.27/arch/mips/include/uapi/asm/
H A Dcachectl.h14 #define ICACHE (1<<0) /* flush instruction cache */
15 #define DCACHE (1<<1) /* writeback and flush data cache */
/linux-4.1.27/arch/mips/mm/
H A Dsc-mips.c19 * MIPS32/MIPS64 L2 cache handling
23 * Writeback and invalidate the secondary cache before DMA.
31 * Invalidate the secondary cache before DMA.
45 /* L2 cache is permanently enabled */ mips_sc_enable()
50 /* L2 cache is permanently enabled */ mips_sc_disable()
61 * Check if the L2 cache controller is activated on a particular platform.
62 * MTI's L2 controller and the L2 cache controller of Broadcom's BMIPS
64 * cache being disabled. However there is no guarantee for this to be
H A Dsc-rm7k.c2 * sc-rm7k.c: RM7000 cache management functions.
21 /* Primary cache parameters. */
25 /* Secondary cache parameters. */
28 /* Tertiary cache parameters */
39 * Writeback and invalidate the primary cache dcache before DMA.
150 pr_info("Enabling secondary cache...\n"); rm7k_sc_enable()
241 printk(KERN_INFO "Secondary cache size %dK, linesize %d bytes.\n", rm7k_sc_init()
250 * While we're at it let's deal with the tertiary cache. rm7k_sc_init()
268 pr_info("Tertiary cache size %ldK.\n", (tcache_size >> 10)); rm7k_sc_init()
/linux-4.1.27/arch/blackfin/mach-bf561/
H A Dhotplug.c23 * turn off the data cache. This causes the CoreB failed to boot. platform_cpu_die()
24 * As a workaround, we invalidate all the data cache before sleep. platform_cpu_die()
/linux-4.1.27/arch/m68k/coldfire/
H A Dcache.c4 * cache.c -- general ColdFire Cache maintenance code
20 * Use cpushl to push all dirty cache lines back to memory.
/linux-4.1.27/arch/tile/include/uapi/arch/
H A Dchip_tilegx.h59 /** Size of the L2 cache, in bytes. */
62 /** Log size of an L2 cache line in bytes. */
65 /** Size of an L2 cache line, in bytes. */
68 /** Associativity of the L2 cache. */
71 /** Size of the L1 data cache, in bytes. */
74 /** Log size of an L1 data cache line in bytes. */
77 /** Size of an L1 data cache line, in bytes. */
80 /** Associativity of the L1 data cache. */
83 /** Size of the L1 instruction cache, in bytes. */
86 /** Log size of an L1 instruction cache line in bytes. */
89 /** Size of an L1 instruction cache line, in bytes. */
92 /** Associativity of the L1 instruction cache. */
104 /** Can the local cache coherently cache data that is homed elsewhere? */
107 /** How many simultaneous outstanding victims can the L2 cache have? */
119 /** Do uncacheable requests miss in the cache regardless of whether
174 /** Size of the L1 static network processor instruction cache, in bytes. */
213 /** Does the L1 instruction cache clear on reset? */
H A Dchip_tilepro.h59 /** Size of the L2 cache, in bytes. */
62 /** Log size of an L2 cache line in bytes. */
65 /** Size of an L2 cache line, in bytes. */
68 /** Associativity of the L2 cache. */
71 /** Size of the L1 data cache, in bytes. */
74 /** Log size of an L1 data cache line in bytes. */
77 /** Size of an L1 data cache line, in bytes. */
80 /** Associativity of the L1 data cache. */
83 /** Size of the L1 instruction cache, in bytes. */
86 /** Log size of an L1 instruction cache line in bytes. */
89 /** Size of an L1 instruction cache line, in bytes. */
92 /** Associativity of the L1 instruction cache. */
104 /** Can the local cache coherently cache data that is homed elsewhere? */
107 /** How many simultaneous outstanding victims can the L2 cache have? */
119 /** Do uncacheable requests miss in the cache regardless of whether
174 /** Size of the L1 static network processor instruction cache, in bytes. */
213 /** Does the L1 instruction cache clear on reset? */
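
The chip_tilegx.h/chip_tilepro.h excerpts expose each cache's geometry as constants: total size, log2 of the line size, and associativity. The number of sets and index bits follow directly from those three, as the sketch below shows; the numbers used are placeholders, not the chip's actual values.

#include <stdio.h>

/* Derive set count and index width from a cache's size, line size and
 * associativity. Example values are placeholders. */
int main(void)
{
    unsigned size_bytes    = 256 * 1024;  /* total cache size */
    unsigned log_line_size = 6;           /* 64-byte lines */
    unsigned assoc         = 8;           /* ways */

    unsigned line_size = 1u << log_line_size;
    unsigned sets      = size_bytes / (line_size * assoc);

    unsigned index_bits = 0;
    while ((1u << index_bits) < sets)
        index_bits++;

    printf("line=%uB ways=%u sets=%u index bits=%u\n",
           line_size, assoc, sets, index_bits);
    return 0;
}
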
/linux-4.1.27/arch/sh/kernel/cpu/sh4/
H A Dperf_event.c45 * 0x04 Operand cache read miss
46 * 0x05 Operand cache write miss
47 * 0x06 Instruction fetch (w/ cache)
49 * 0x08 Instruction cache miss
55 * 0x0f Operand cache miss (r/w)
67 * 0x21 Instruction cache fill
68 * 0x22 Operand cache fill
70 * 0x24 Pipeline freeze by I-cache miss
71 * 0x25 Pipeline freeze by D-cache miss
80 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0006, /* I-cache */
81 [PERF_COUNT_HW_CACHE_MISSES] = 0x0008, /* I-cache */
/linux-4.1.27/fs/btrfs/
H A Dextent-tree.c35 #include "free-space-cache.h"
108 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
117 block_group_cache_done(struct btrfs_block_group_cache *cache) block_group_cache_done() argument
120 return cache->cached == BTRFS_CACHE_FINISHED || block_group_cache_done()
121 cache->cached == BTRFS_CACHE_ERROR; block_group_cache_done()
124 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) block_group_bits() argument
126 return (cache->flags & bits) == bits; block_group_bits()
129 static void btrfs_get_block_group(struct btrfs_block_group_cache *cache) btrfs_get_block_group() argument
131 atomic_inc(&cache->count); btrfs_get_block_group()
134 void btrfs_put_block_group(struct btrfs_block_group_cache *cache) btrfs_put_block_group() argument
136 if (atomic_dec_and_test(&cache->count)) { btrfs_put_block_group()
137 WARN_ON(cache->pinned > 0); btrfs_put_block_group()
138 WARN_ON(cache->reserved > 0); btrfs_put_block_group()
139 kfree(cache->free_space_ctl); btrfs_put_block_group()
140 kfree(cache); btrfs_put_block_group()
146 * cache
153 struct btrfs_block_group_cache *cache; btrfs_add_block_group_cache() local
160 cache = rb_entry(parent, struct btrfs_block_group_cache, btrfs_add_block_group_cache()
162 if (block_group->key.objectid < cache->key.objectid) { btrfs_add_block_group_cache()
164 } else if (block_group->key.objectid > cache->key.objectid) { btrfs_add_block_group_cache()
192 struct btrfs_block_group_cache *cache, *ret = NULL; block_group_cache_tree_search() local
200 cache = rb_entry(n, struct btrfs_block_group_cache, block_group_cache_tree_search()
202 end = cache->key.objectid + cache->key.offset - 1; block_group_cache_tree_search()
203 start = cache->key.objectid; block_group_cache_tree_search()
207 ret = cache; block_group_cache_tree_search()
211 ret = cache; block_group_cache_tree_search()
216 ret = cache; block_group_cache_tree_search()
242 struct btrfs_block_group_cache *cache) free_excluded_extents()
246 start = cache->key.objectid; free_excluded_extents()
247 end = start + cache->key.offset - 1; free_excluded_extents()
256 struct btrfs_block_group_cache *cache) exclude_super_stripes()
263 if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) { exclude_super_stripes()
264 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid; exclude_super_stripes()
265 cache->bytes_super += stripe_len; exclude_super_stripes()
266 ret = add_excluded_extent(root, cache->key.objectid, exclude_super_stripes()
275 cache->key.objectid, bytenr, exclude_super_stripes()
283 if (logical[nr] > cache->key.objectid + exclude_super_stripes()
284 cache->key.offset) exclude_super_stripes()
287 if (logical[nr] + stripe_len <= cache->key.objectid) exclude_super_stripes()
291 if (start < cache->key.objectid) { exclude_super_stripes()
292 start = cache->key.objectid; exclude_super_stripes()
296 cache->key.objectid + exclude_super_stripes()
297 cache->key.offset - start); exclude_super_stripes()
300 cache->bytes_super += len; exclude_super_stripes()
314 get_caching_control(struct btrfs_block_group_cache *cache) get_caching_control() argument
318 spin_lock(&cache->lock); get_caching_control()
319 if (!cache->caching_ctl) { get_caching_control()
320 spin_unlock(&cache->lock); get_caching_control()
324 ctl = cache->caching_ctl; get_caching_control()
326 spin_unlock(&cache->lock); get_caching_control()
532 static int cache_block_group(struct btrfs_block_group_cache *cache, cache_block_group() argument
536 struct btrfs_fs_info *fs_info = cache->fs_info; cache_block_group()
547 caching_ctl->block_group = cache; cache_block_group()
548 caching_ctl->progress = cache->key.objectid; cache_block_group()
553 spin_lock(&cache->lock); cache_block_group()
556 * case where one thread starts to load the space cache info, and then cache_block_group()
558 * allocation while the other thread is still loading the space cache cache_block_group()
563 * from a block group whose cache gets evicted for one reason or cache_block_group()
566 while (cache->cached == BTRFS_CACHE_FAST) { cache_block_group()
569 ctl = cache->caching_ctl; cache_block_group()
572 spin_unlock(&cache->lock); cache_block_group()
578 spin_lock(&cache->lock); cache_block_group()
581 if (cache->cached != BTRFS_CACHE_NO) { cache_block_group()
582 spin_unlock(&cache->lock); cache_block_group()
586 WARN_ON(cache->caching_ctl); cache_block_group()
587 cache->caching_ctl = caching_ctl; cache_block_group()
588 cache->cached = BTRFS_CACHE_FAST; cache_block_group()
589 spin_unlock(&cache->lock); cache_block_group()
593 ret = load_free_space_cache(fs_info, cache); cache_block_group()
595 spin_lock(&cache->lock); cache_block_group()
597 cache->caching_ctl = NULL; cache_block_group()
598 cache->cached = BTRFS_CACHE_FINISHED; cache_block_group()
599 cache->last_byte_to_unpin = (u64)-1; cache_block_group()
603 cache->caching_ctl = NULL; cache_block_group()
604 cache->cached = BTRFS_CACHE_NO; cache_block_group()
606 cache->cached = BTRFS_CACHE_STARTED; cache_block_group()
607 cache->has_caching_ctl = 1; cache_block_group()
610 spin_unlock(&cache->lock); cache_block_group()
616 free_excluded_extents(fs_info->extent_root, cache); cache_block_group()
624 spin_lock(&cache->lock); cache_block_group()
626 cache->caching_ctl = NULL; cache_block_group()
627 cache->cached = BTRFS_CACHE_NO; cache_block_group()
629 cache->cached = BTRFS_CACHE_STARTED; cache_block_group()
630 cache->has_caching_ctl = 1; cache_block_group()
632 spin_unlock(&cache->lock); cache_block_group()
646 btrfs_get_block_group(cache); cache_block_group()
659 struct btrfs_block_group_cache *cache; btrfs_lookup_first_block_group() local
661 cache = block_group_cache_tree_search(info, bytenr, 0); btrfs_lookup_first_block_group()
663 return cache; btrfs_lookup_first_block_group()
673 struct btrfs_block_group_cache *cache; btrfs_lookup_block_group() local
675 cache = block_group_cache_tree_search(info, bytenr, 1); btrfs_lookup_block_group()
677 return cache; btrfs_lookup_block_group()
3163 struct btrfs_block_group_cache *cache) write_one_cache_group()
3170 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1); write_one_cache_group()
3179 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item)); write_one_cache_group()
3189 struct btrfs_block_group_cache *cache) next_block_group()
3196 if (RB_EMPTY_NODE(&cache->cache_node)) { next_block_group()
3197 const u64 next_bytenr = cache->key.objectid + cache->key.offset; next_block_group()
3200 btrfs_put_block_group(cache); next_block_group()
3201 cache = btrfs_lookup_first_block_group(root->fs_info, next_block_group()
3203 return cache; next_block_group()
3205 node = rb_next(&cache->cache_node); next_block_group()
3206 btrfs_put_block_group(cache); next_block_group()
3208 cache = rb_entry(node, struct btrfs_block_group_cache, next_block_group()
3210 btrfs_get_block_group(cache); next_block_group()
3212 cache = NULL; next_block_group()
3214 return cache; next_block_group()
3272 * from here on out we know not to trust this cache when we load up next cache_save_setup()
3280 * super cache generation to 0 so we know to invalidate the cache_save_setup()
3281 * cache, but then we'd have to keep track of the block groups cache_save_setup()
3282 * that fail this way so we know we _have_ to reset this cache cache_save_setup()
3283 * before the next commit or risk reading stale cache. So to cache_save_setup()
3322 * cache. cache_save_setup()
3359 struct btrfs_block_group_cache *cache, *tmp; btrfs_setup_space_cache() local
3372 list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs, btrfs_setup_space_cache()
3374 if (cache->disk_cache_state == BTRFS_DC_CLEAR) btrfs_setup_space_cache()
3375 cache_save_setup(cache, trans, path); btrfs_setup_space_cache()
3383 * transaction commit does final block group cache writeback during a
3385 * required in order for the cache to actually match the block group,
3389 * cache IO. There's a chance we'll have to redo some of it if the
3397 struct btrfs_block_group_cache *cache; btrfs_start_dirty_block_groups() local
3431 * writing out the cache btrfs_start_dirty_block_groups()
3435 cache = list_first_entry(&dirty, btrfs_start_dirty_block_groups()
3443 if (!list_empty(&cache->io_list)) { btrfs_start_dirty_block_groups()
3444 list_del_init(&cache->io_list); btrfs_start_dirty_block_groups()
3445 btrfs_wait_cache_io(root, trans, cache, btrfs_start_dirty_block_groups()
3446 &cache->io_ctl, path, btrfs_start_dirty_block_groups()
3447 cache->key.objectid); btrfs_start_dirty_block_groups()
3448 btrfs_put_block_group(cache); btrfs_start_dirty_block_groups()
3453 * btrfs_wait_cache_io uses the cache->dirty_list to decide btrfs_start_dirty_block_groups()
3461 list_del_init(&cache->dirty_list); btrfs_start_dirty_block_groups()
3466 cache_save_setup(cache, trans, path); btrfs_start_dirty_block_groups()
3468 if (cache->disk_cache_state == BTRFS_DC_SETUP) { btrfs_start_dirty_block_groups()
3469 cache->io_ctl.inode = NULL; btrfs_start_dirty_block_groups()
3470 ret = btrfs_write_out_cache(root, trans, cache, path); btrfs_start_dirty_block_groups()
3471 if (ret == 0 && cache->io_ctl.inode) { btrfs_start_dirty_block_groups()
3479 list_add_tail(&cache->io_list, io); btrfs_start_dirty_block_groups()
3482 * if we failed to write the cache, the btrfs_start_dirty_block_groups()
3489 ret = write_one_cache_group(trans, root, path, cache); btrfs_start_dirty_block_groups()
3502 if (list_empty(&cache->dirty_list)) { btrfs_start_dirty_block_groups()
3503 list_add_tail(&cache->dirty_list, btrfs_start_dirty_block_groups()
3505 btrfs_get_block_group(cache); btrfs_start_dirty_block_groups()
3515 btrfs_put_block_group(cache); btrfs_start_dirty_block_groups()
3557 struct btrfs_block_group_cache *cache; btrfs_write_dirty_block_groups() local
3576 cache = list_first_entry(&cur_trans->dirty_bgs, btrfs_write_dirty_block_groups()
3585 if (!list_empty(&cache->io_list)) { btrfs_write_dirty_block_groups()
3586 list_del_init(&cache->io_list); btrfs_write_dirty_block_groups()
3587 btrfs_wait_cache_io(root, trans, cache, btrfs_write_dirty_block_groups()
3588 &cache->io_ctl, path, btrfs_write_dirty_block_groups()
3589 cache->key.objectid); btrfs_write_dirty_block_groups()
3590 btrfs_put_block_group(cache); btrfs_write_dirty_block_groups()
3597 list_del_init(&cache->dirty_list); btrfs_write_dirty_block_groups()
3600 cache_save_setup(cache, trans, path); btrfs_write_dirty_block_groups()
3605 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) { btrfs_write_dirty_block_groups()
3606 cache->io_ctl.inode = NULL; btrfs_write_dirty_block_groups()
3607 ret = btrfs_write_out_cache(root, trans, cache, path); btrfs_write_dirty_block_groups()
3608 if (ret == 0 && cache->io_ctl.inode) { btrfs_write_dirty_block_groups()
3611 list_add_tail(&cache->io_list, io); btrfs_write_dirty_block_groups()
3614 * if we failed to write the cache, the btrfs_write_dirty_block_groups()
3621 ret = write_one_cache_group(trans, root, path, cache); btrfs_write_dirty_block_groups()
3628 btrfs_put_block_group(cache); btrfs_write_dirty_block_groups()
3632 cache = list_first_entry(io, struct btrfs_block_group_cache, btrfs_write_dirty_block_groups()
3634 list_del_init(&cache->io_list); btrfs_write_dirty_block_groups()
3635 btrfs_wait_cache_io(root, trans, cache, btrfs_write_dirty_block_groups()
3636 &cache->io_ctl, path, cache->key.objectid); btrfs_write_dirty_block_groups()
3637 btrfs_put_block_group(cache); btrfs_write_dirty_block_groups()
3933 * reservation for the free space cache in the btrfs_check_data_free_space()
5619 struct btrfs_block_group_cache *cache = NULL; update_block_group() local
5637 cache = btrfs_lookup_block_group(info, bytenr); update_block_group()
5638 if (!cache) update_block_group()
5640 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP | update_block_group()
5647 * If this block group has free space cache written out, we update_block_group()
5652 if (!alloc && cache->cached == BTRFS_CACHE_NO) update_block_group()
5653 cache_block_group(cache, 1); update_block_group()
5655 byte_in_group = bytenr - cache->key.objectid; update_block_group()
5656 WARN_ON(byte_in_group > cache->key.offset); update_block_group()
5658 spin_lock(&cache->space_info->lock); update_block_group()
5659 spin_lock(&cache->lock); update_block_group()
5662 cache->disk_cache_state < BTRFS_DC_CLEAR) update_block_group()
5663 cache->disk_cache_state = BTRFS_DC_CLEAR; update_block_group()
5665 old_val = btrfs_block_group_used(&cache->item); update_block_group()
5666 num_bytes = min(total, cache->key.offset - byte_in_group); update_block_group()
5669 btrfs_set_block_group_used(&cache->item, old_val); update_block_group()
5670 cache->reserved -= num_bytes; update_block_group()
5671 cache->space_info->bytes_reserved -= num_bytes; update_block_group()
5672 cache->space_info->bytes_used += num_bytes; update_block_group()
5673 cache->space_info->disk_used += num_bytes * factor; update_block_group()
5674 spin_unlock(&cache->lock); update_block_group()
5675 spin_unlock(&cache->space_info->lock); update_block_group()
5678 btrfs_set_block_group_used(&cache->item, old_val); update_block_group()
5679 cache->pinned += num_bytes; update_block_group()
5680 cache->space_info->bytes_pinned += num_bytes; update_block_group()
5681 cache->space_info->bytes_used -= num_bytes; update_block_group()
5682 cache->space_info->disk_used -= num_bytes * factor; update_block_group()
5683 spin_unlock(&cache->lock); update_block_group()
5684 spin_unlock(&cache->space_info->lock); update_block_group()
5695 if (list_empty(&cache->bg_list)) { update_block_group()
5696 btrfs_get_block_group(cache); update_block_group()
5697 list_add_tail(&cache->bg_list, update_block_group()
5705 if (list_empty(&cache->dirty_list)) { update_block_group()
5706 list_add_tail(&cache->dirty_list, update_block_group()
5709 btrfs_get_block_group(cache); update_block_group()
5713 btrfs_put_block_group(cache); update_block_group()
5722 struct btrfs_block_group_cache *cache; first_logical_byte() local
5732 cache = btrfs_lookup_first_block_group(root->fs_info, search_start); first_logical_byte()
5733 if (!cache) first_logical_byte()
5736 bytenr = cache->key.objectid; first_logical_byte()
5737 btrfs_put_block_group(cache); first_logical_byte()
5743 struct btrfs_block_group_cache *cache, pin_down_extent()
5746 spin_lock(&cache->space_info->lock); pin_down_extent()
5747 spin_lock(&cache->lock); pin_down_extent()
5748 cache->pinned += num_bytes; pin_down_extent()
5749 cache->space_info->bytes_pinned += num_bytes; pin_down_extent()
5751 cache->reserved -= num_bytes; pin_down_extent()
5752 cache->space_info->bytes_reserved -= num_bytes; pin_down_extent()
5754 spin_unlock(&cache->lock); pin_down_extent()
5755 spin_unlock(&cache->space_info->lock); pin_down_extent()
5770 struct btrfs_block_group_cache *cache; btrfs_pin_extent() local
5772 cache = btrfs_lookup_block_group(root->fs_info, bytenr); btrfs_pin_extent()
5773 BUG_ON(!cache); /* Logic error */ btrfs_pin_extent()
5775 pin_down_extent(root, cache, bytenr, num_bytes, reserved); btrfs_pin_extent()
5777 btrfs_put_block_group(cache); btrfs_pin_extent()
5787 struct btrfs_block_group_cache *cache; btrfs_pin_extent_for_log_replay() local
5790 cache = btrfs_lookup_block_group(root->fs_info, bytenr); btrfs_pin_extent_for_log_replay()
5791 if (!cache) btrfs_pin_extent_for_log_replay()
5795 * pull in the free space cache (if any) so that our pin btrfs_pin_extent_for_log_replay()
5796 * removes the free space from the cache. We have load_only set btrfs_pin_extent_for_log_replay()
5800 cache_block_group(cache, 1); btrfs_pin_extent_for_log_replay()
5802 pin_down_extent(root, cache, bytenr, num_bytes, 0); btrfs_pin_extent_for_log_replay()
5804 /* remove us from the free space cache (if we're there at all) */ btrfs_pin_extent_for_log_replay()
5805 ret = btrfs_remove_free_space(cache, bytenr, num_bytes); btrfs_pin_extent_for_log_replay()
5806 btrfs_put_block_group(cache); btrfs_pin_extent_for_log_replay()
5886 * @cache: The cache we are manipulating
5907 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache, btrfs_update_reserved_bytes() argument
5910 struct btrfs_space_info *space_info = cache->space_info; btrfs_update_reserved_bytes()
5914 spin_lock(&cache->lock); btrfs_update_reserved_bytes()
5916 if (cache->ro) { btrfs_update_reserved_bytes()
5919 cache->reserved += num_bytes; btrfs_update_reserved_bytes()
5922 trace_btrfs_space_reservation(cache->fs_info, btrfs_update_reserved_bytes()
5929 cache->delalloc_bytes += num_bytes; btrfs_update_reserved_bytes()
5932 if (cache->ro) btrfs_update_reserved_bytes()
5934 cache->reserved -= num_bytes; btrfs_update_reserved_bytes()
5938 cache->delalloc_bytes -= num_bytes; btrfs_update_reserved_bytes()
5940 spin_unlock(&cache->lock); btrfs_update_reserved_bytes()
5951 struct btrfs_block_group_cache *cache; btrfs_prepare_extent_commit() local
5957 cache = caching_ctl->block_group; btrfs_prepare_extent_commit()
5958 if (block_group_cache_done(cache)) { btrfs_prepare_extent_commit()
5959 cache->last_byte_to_unpin = (u64)-1; btrfs_prepare_extent_commit()
5963 cache->last_byte_to_unpin = caching_ctl->progress; btrfs_prepare_extent_commit()
5981 struct btrfs_block_group_cache *cache = NULL; unpin_extent_range() local
5989 if (!cache || unpin_extent_range()
5990 start >= cache->key.objectid + cache->key.offset) { unpin_extent_range()
5991 if (cache) unpin_extent_range()
5992 btrfs_put_block_group(cache); unpin_extent_range()
5993 cache = btrfs_lookup_block_group(fs_info, start); unpin_extent_range()
5994 BUG_ON(!cache); /* Logic error */ unpin_extent_range()
5997 len = cache->key.objectid + cache->key.offset - start; unpin_extent_range()
6000 if (start < cache->last_byte_to_unpin) { unpin_extent_range()
6001 len = min(len, cache->last_byte_to_unpin - start); unpin_extent_range()
6003 btrfs_add_free_space(cache, start, len); unpin_extent_range()
6007 space_info = cache->space_info; unpin_extent_range()
6010 spin_lock(&cache->lock); unpin_extent_range()
6011 cache->pinned -= len; unpin_extent_range()
6014 if (cache->ro) { unpin_extent_range()
6018 spin_unlock(&cache->lock); unpin_extent_range()
6034 if (cache) unpin_extent_range()
6035 btrfs_put_block_group(cache); unpin_extent_range()
6475 struct btrfs_block_group_cache *cache; btrfs_free_tree_block() local
6483 cache = btrfs_lookup_block_group(root->fs_info, buf->start); btrfs_free_tree_block()
6486 pin_down_extent(root, cache, buf->start, buf->len, 1); btrfs_free_tree_block()
6487 btrfs_put_block_group(cache); btrfs_free_tree_block()
6493 btrfs_add_free_space(cache, buf->start, buf->len); btrfs_free_tree_block()
6494 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0); btrfs_free_tree_block()
6495 btrfs_put_block_group(cache); btrfs_free_tree_block()
6560 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6564 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache, wait_block_group_cache_progress() argument
6569 caching_ctl = get_caching_control(cache); wait_block_group_cache_progress()
6573 wait_event(caching_ctl->wait, block_group_cache_done(cache) || wait_block_group_cache_progress()
6574 (cache->free_space_ctl->free_space >= num_bytes)); wait_block_group_cache_progress()
6580 wait_block_group_cache_done(struct btrfs_block_group_cache *cache) wait_block_group_cache_done() argument
6585 caching_ctl = get_caching_control(cache); wait_block_group_cache_done()
6587 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0; wait_block_group_cache_done()
6589 wait_event(caching_ctl->wait, block_group_cache_done(cache)); wait_block_group_cache_done()
6590 if (cache->cached == BTRFS_CACHE_ERROR) wait_block_group_cache_done()
6614 int get_block_group_index(struct btrfs_block_group_cache *cache) get_block_group_index() argument
6616 return __get_raid_index(cache->flags); get_block_group_index()
6645 btrfs_lock_block_group(struct btrfs_block_group_cache *cache, btrfs_lock_block_group() argument
6649 down_read(&cache->data_rwsem); btrfs_lock_block_group()
6653 btrfs_grab_block_group(struct btrfs_block_group_cache *cache, btrfs_grab_block_group() argument
6656 btrfs_get_block_group(cache); btrfs_grab_block_group()
6658 down_read(&cache->data_rwsem); btrfs_grab_block_group()
6700 btrfs_release_block_group(struct btrfs_block_group_cache *cache, btrfs_release_block_group() argument
6704 up_read(&cache->data_rwsem); btrfs_release_block_group()
6705 btrfs_put_block_group(cache); btrfs_release_block_group()
7135 struct btrfs_block_group_cache *cache; dump_space_info() local
7156 list_for_each_entry(cache, &info->block_groups[index], list) { dump_space_info()
7157 spin_lock(&cache->lock); dump_space_info()
7161 cache->key.objectid, cache->key.offset, dump_space_info()
7162 btrfs_block_group_used(&cache->item), cache->pinned, dump_space_info()
7163 cache->reserved, cache->ro ? "[readonly]" : ""); dump_space_info()
7164 btrfs_dump_free_space(cache, bytes); dump_space_info()
7165 spin_unlock(&cache->lock); dump_space_info()
7213 struct btrfs_block_group_cache *cache; __btrfs_free_reserved_extent() local
7216 cache = btrfs_lookup_block_group(root->fs_info, start); __btrfs_free_reserved_extent()
7217 if (!cache) { __btrfs_free_reserved_extent()
7224 pin_down_extent(root, cache, start, len, 1); __btrfs_free_reserved_extent()
7228 btrfs_add_free_space(cache, start, len); __btrfs_free_reserved_extent()
7229 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc); __btrfs_free_reserved_extent()
7232 btrfs_put_block_group(cache); __btrfs_free_reserved_extent()
7439 * space cache bits as well
8760 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force) set_block_group_ro() argument
8762 struct btrfs_space_info *sinfo = cache->space_info; set_block_group_ro()
8781 spin_lock(&cache->lock); set_block_group_ro()
8783 if (cache->ro) { set_block_group_ro()
8788 num_bytes = cache->key.offset - cache->reserved - cache->pinned - set_block_group_ro()
8789 cache->bytes_super - btrfs_block_group_used(&cache->item); set_block_group_ro()
8795 cache->ro = 1; set_block_group_ro()
8796 list_add_tail(&cache->ro_list, &sinfo->ro_bgs); set_block_group_ro()
8800 spin_unlock(&cache->lock); set_block_group_ro()
8806 struct btrfs_block_group_cache *cache) btrfs_set_block_group_ro()
8813 BUG_ON(cache->ro); btrfs_set_block_group_ro()
8822 * block groups cache has started writing. If it already started, btrfs_set_block_group_ro()
8842 alloc_flags = update_block_group_flags(root, cache->flags); btrfs_set_block_group_ro()
8843 if (alloc_flags != cache->flags) { btrfs_set_block_group_ro()
8857 ret = set_block_group_ro(cache, 0); btrfs_set_block_group_ro()
8860 alloc_flags = get_alloc_profile(root, cache->space_info->flags); btrfs_set_block_group_ro()
8865 ret = set_block_group_ro(cache, 0); btrfs_set_block_group_ro()
8867 if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { btrfs_set_block_group_ro()
8868 alloc_flags = update_block_group_flags(root, cache->flags); btrfs_set_block_group_ro()
8929 struct btrfs_block_group_cache *cache) btrfs_set_block_group_rw()
8931 struct btrfs_space_info *sinfo = cache->space_info; btrfs_set_block_group_rw()
8934 BUG_ON(!cache->ro); btrfs_set_block_group_rw()
8937 spin_lock(&cache->lock); btrfs_set_block_group_rw()
8938 num_bytes = cache->key.offset - cache->reserved - cache->pinned - btrfs_set_block_group_rw()
8939 cache->bytes_super - btrfs_block_group_used(&cache->item); btrfs_set_block_group_rw()
8941 cache->ro = 0; btrfs_set_block_group_rw()
8942 list_del_init(&cache->ro_list); btrfs_set_block_group_rw()
8943 spin_unlock(&cache->lock); btrfs_set_block_group_rw()
9248 struct btrfs_block_group_cache *cache) __link_block_group()
9250 int index = get_block_group_index(cache); __link_block_group()
9256 list_add_tail(&cache->list, &space_info->block_groups[index]); __link_block_group()
9279 pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n"); __link_block_group()
9285 struct btrfs_block_group_cache *cache; btrfs_create_block_group_cache() local
9287 cache = kzalloc(sizeof(*cache), GFP_NOFS); btrfs_create_block_group_cache()
9288 if (!cache) btrfs_create_block_group_cache()
9291 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), btrfs_create_block_group_cache()
9293 if (!cache->free_space_ctl) { btrfs_create_block_group_cache()
9294 kfree(cache); btrfs_create_block_group_cache()
9298 cache->key.objectid = start; btrfs_create_block_group_cache()
9299 cache->key.offset = size; btrfs_create_block_group_cache()
9300 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; btrfs_create_block_group_cache()
9302 cache->sectorsize = root->sectorsize; btrfs_create_block_group_cache()
9303 cache->fs_info = root->fs_info; btrfs_create_block_group_cache()
9304 cache->full_stripe_len = btrfs_full_stripe_len(root, btrfs_create_block_group_cache()
9307 atomic_set(&cache->count, 1); btrfs_create_block_group_cache()
9308 spin_lock_init(&cache->lock); btrfs_create_block_group_cache()
9309 init_rwsem(&cache->data_rwsem); btrfs_create_block_group_cache()
9310 INIT_LIST_HEAD(&cache->list); btrfs_create_block_group_cache()
9311 INIT_LIST_HEAD(&cache->cluster_list); btrfs_create_block_group_cache()
9312 INIT_LIST_HEAD(&cache->bg_list); btrfs_create_block_group_cache()
9313 INIT_LIST_HEAD(&cache->ro_list); btrfs_create_block_group_cache()
9314 INIT_LIST_HEAD(&cache->dirty_list); btrfs_create_block_group_cache()
9315 INIT_LIST_HEAD(&cache->io_list); btrfs_create_block_group_cache()
9316 btrfs_init_free_space_ctl(cache); btrfs_create_block_group_cache()
9317 atomic_set(&cache->trimming, 0); btrfs_create_block_group_cache()
9319 return cache; btrfs_create_block_group_cache()
9326 struct btrfs_block_group_cache *cache; btrfs_read_block_groups() local
9361 cache = btrfs_create_block_group_cache(root, found_key.objectid, btrfs_read_block_groups()
9363 if (!cache) { btrfs_read_block_groups()
9370 * When we mount with old space cache, we need to btrfs_read_block_groups()
9374 * truncate the old free space cache inode and btrfs_read_block_groups()
9377 * the new space cache info onto disk. btrfs_read_block_groups()
9380 cache->disk_cache_state = BTRFS_DC_CLEAR; btrfs_read_block_groups()
9383 read_extent_buffer(leaf, &cache->item, btrfs_read_block_groups()
9385 sizeof(cache->item)); btrfs_read_block_groups()
9386 cache->flags = btrfs_block_group_flags(&cache->item); btrfs_read_block_groups()
9396 ret = exclude_super_stripes(root, cache); btrfs_read_block_groups()
9402 free_excluded_extents(root, cache); btrfs_read_block_groups()
9403 btrfs_put_block_group(cache); btrfs_read_block_groups()
9414 if (found_key.offset == btrfs_block_group_used(&cache->item)) { btrfs_read_block_groups()
9415 cache->last_byte_to_unpin = (u64)-1; btrfs_read_block_groups()
9416 cache->cached = BTRFS_CACHE_FINISHED; btrfs_read_block_groups()
9417 free_excluded_extents(root, cache); btrfs_read_block_groups()
9418 } else if (btrfs_block_group_used(&cache->item) == 0) { btrfs_read_block_groups()
9419 cache->last_byte_to_unpin = (u64)-1; btrfs_read_block_groups()
9420 cache->cached = BTRFS_CACHE_FINISHED; btrfs_read_block_groups()
9421 add_new_free_space(cache, root->fs_info, btrfs_read_block_groups()
9425 free_excluded_extents(root, cache); btrfs_read_block_groups()
9428 ret = btrfs_add_block_group_cache(root->fs_info, cache); btrfs_read_block_groups()
9430 btrfs_remove_free_space_cache(cache); btrfs_read_block_groups()
9431 btrfs_put_block_group(cache); btrfs_read_block_groups()
9435 ret = update_space_info(info, cache->flags, found_key.offset, btrfs_read_block_groups()
9436 btrfs_block_group_used(&cache->item), btrfs_read_block_groups()
9439 btrfs_remove_free_space_cache(cache); btrfs_read_block_groups()
9441 rb_erase(&cache->cache_node, btrfs_read_block_groups()
9443 RB_CLEAR_NODE(&cache->cache_node); btrfs_read_block_groups()
9445 btrfs_put_block_group(cache); btrfs_read_block_groups()
9449 cache->space_info = space_info; btrfs_read_block_groups()
9450 spin_lock(&cache->space_info->lock); btrfs_read_block_groups()
9451 cache->space_info->bytes_readonly += cache->bytes_super; btrfs_read_block_groups()
9452 spin_unlock(&cache->space_info->lock); btrfs_read_block_groups()
9454 __link_block_group(space_info, cache); btrfs_read_block_groups()
9456 set_avail_alloc_bits(root->fs_info, cache->flags); btrfs_read_block_groups()
9457 if (btrfs_chunk_readonly(root, cache->key.objectid)) { btrfs_read_block_groups()
9458 set_block_group_ro(cache, 1); btrfs_read_block_groups()
9459 } else if (btrfs_block_group_used(&cache->item) == 0) { btrfs_read_block_groups()
9462 if (list_empty(&cache->bg_list)) { btrfs_read_block_groups()
9463 btrfs_get_block_group(cache); btrfs_read_block_groups()
9464 list_add_tail(&cache->bg_list, btrfs_read_block_groups()
9483 list_for_each_entry(cache, btrfs_read_block_groups()
9486 set_block_group_ro(cache, 1); btrfs_read_block_groups()
9487 list_for_each_entry(cache, btrfs_read_block_groups()
9490 set_block_group_ro(cache, 1); btrfs_read_block_groups()
9538 struct btrfs_block_group_cache *cache; btrfs_make_block_group() local
9544 cache = btrfs_create_block_group_cache(root, chunk_offset, size); btrfs_make_block_group()
9545 if (!cache) btrfs_make_block_group()
9548 btrfs_set_block_group_used(&cache->item, bytes_used); btrfs_make_block_group()
9549 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid); btrfs_make_block_group()
9550 btrfs_set_block_group_flags(&cache->item, type); btrfs_make_block_group()
9552 cache->flags = type; btrfs_make_block_group()
9553 cache->last_byte_to_unpin = (u64)-1; btrfs_make_block_group()
9554 cache->cached = BTRFS_CACHE_FINISHED; btrfs_make_block_group()
9555 ret = exclude_super_stripes(root, cache); btrfs_make_block_group()
9561 free_excluded_extents(root, cache); btrfs_make_block_group()
9562 btrfs_put_block_group(cache); btrfs_make_block_group()
9566 add_new_free_space(cache, root->fs_info, chunk_offset, btrfs_make_block_group()
9569 free_excluded_extents(root, cache); btrfs_make_block_group()
9571 ret = btrfs_add_block_group_cache(root->fs_info, cache); btrfs_make_block_group()
9573 btrfs_remove_free_space_cache(cache); btrfs_make_block_group()
9574 btrfs_put_block_group(cache); btrfs_make_block_group()
9578 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used, btrfs_make_block_group()
9579 &cache->space_info); btrfs_make_block_group()
9581 btrfs_remove_free_space_cache(cache); btrfs_make_block_group()
9583 rb_erase(&cache->cache_node, btrfs_make_block_group()
9585 RB_CLEAR_NODE(&cache->cache_node); btrfs_make_block_group()
9587 btrfs_put_block_group(cache); btrfs_make_block_group()
9592 spin_lock(&cache->space_info->lock); btrfs_make_block_group()
9593 cache->space_info->bytes_readonly += cache->bytes_super; btrfs_make_block_group()
9594 spin_unlock(&cache->space_info->lock); btrfs_make_block_group()
9596 __link_block_group(cache->space_info, cache); btrfs_make_block_group()
9598 list_add_tail(&cache->bg_list, &trans->new_bgs); btrfs_make_block_group()
9687 * make sure our free space cache IO is done before removing the btrfs_remove_block_group()
10086 struct btrfs_block_group_cache *cache = NULL; btrfs_trim_fs() local
10098 cache = btrfs_lookup_first_block_group(fs_info, range->start); btrfs_trim_fs()
10100 cache = btrfs_lookup_block_group(fs_info, range->start); btrfs_trim_fs()
10102 while (cache) { btrfs_trim_fs()
10103 if (cache->key.objectid >= (range->start + range->len)) { btrfs_trim_fs()
10104 btrfs_put_block_group(cache); btrfs_trim_fs()
10108 start = max(range->start, cache->key.objectid); btrfs_trim_fs()
10110 cache->key.objectid + cache->key.offset); btrfs_trim_fs()
10113 if (!block_group_cache_done(cache)) { btrfs_trim_fs()
10114 ret = cache_block_group(cache, 0); btrfs_trim_fs()
10116 btrfs_put_block_group(cache); btrfs_trim_fs()
10119 ret = wait_block_group_cache_done(cache); btrfs_trim_fs()
10121 btrfs_put_block_group(cache); btrfs_trim_fs()
10125 ret = btrfs_trim_block_group(cache, btrfs_trim_fs()
10133 btrfs_put_block_group(cache); btrfs_trim_fs()
10138 cache = next_block_group(fs_info->tree_root, cache); btrfs_trim_fs()
10148 * data into the page cache through nocow before the subvolume is snapshotted,
241 free_excluded_extents(struct btrfs_root *root, struct btrfs_block_group_cache *cache) free_excluded_extents() argument
255 exclude_super_stripes(struct btrfs_root *root, struct btrfs_block_group_cache *cache) exclude_super_stripes() argument
3160 write_one_cache_group(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_block_group_cache *cache) write_one_cache_group() argument
3188 next_block_group(struct btrfs_root *root, struct btrfs_block_group_cache *cache) next_block_group() argument
5742 pin_down_extent(struct btrfs_root *root, struct btrfs_block_group_cache *cache, u64 bytenr, u64 num_bytes, int reserved) pin_down_extent() argument
8805 btrfs_set_block_group_ro(struct btrfs_root *root, struct btrfs_block_group_cache *cache) btrfs_set_block_group_ro() argument
8928 btrfs_set_block_group_rw(struct btrfs_root *root, struct btrfs_block_group_cache *cache) btrfs_set_block_group_rw() argument
9247 __link_block_group(struct btrfs_space_info *space_info, struct btrfs_block_group_cache *cache) __link_block_group() argument
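The btrfs_trim_fs() hits above show a clamp-and-iterate pattern: walk the block groups that overlap the requested range, make sure each one's free-space information is cached, then trim only the overlapping portion. A minimal userspace sketch of that range-clamping loop, with invented types and names rather than the btrfs structures:

/* Clamp-and-iterate over cached block groups, trimming only the part of
 * each group that overlaps the requested range.  Hypothetical names;
 * this illustrates the loop shape, not the btrfs implementation. */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

struct block_group {          /* stand-in for btrfs_block_group_cache */
	u64 objectid;         /* start offset of the group */
	u64 offset;           /* length of the group */
};

static u64 min_u64(u64 a, u64 b) { return a < b ? a : b; }
static u64 max_u64(u64 a, u64 b) { return a > b ? a : b; }

static void trim_one(u64 start, u64 end)
{
	printf("trim [%llu, %llu)\n",
	       (unsigned long long)start, (unsigned long long)end);
}

static void trim_range(const struct block_group *bg, int nr, u64 start, u64 len)
{
	u64 range_end = start + len;

	for (int i = 0; i < nr; i++) {
		u64 bg_end = bg[i].objectid + bg[i].offset;

		if (bg[i].objectid >= range_end)
			break;                    /* group starts past the range */
		if (bg_end <= start)
			continue;                 /* group ends before the range */
		trim_one(max_u64(start, bg[i].objectid),
			 min_u64(range_end, bg_end));
	}
}

int main(void)
{
	struct block_group groups[] = {
		{ 0, 1024 }, { 1024, 2048 }, { 3072, 4096 },
	};

	trim_range(groups, 3, 512, 3000);   /* overlaps all three groups */
	return 0;
}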
/linux-4.1.27/arch/mips/include/asm/octeon/
H A Dcvmx-l2c.h41 #define CVMX_L2C_IDX_ADDR_SHIFT 7 /* based on 128 byte cache line size */
196 * the cache 'ways' that a core can evict from.
202 * Partitions the L2 cache for a core
206 * mask. A 0 bit allows the core to evict cache lines from
213 * those ways will never have any cache lines evicted from them.
223 * the cache 'ways' that a core can evict from.
229 * Partitions the L2 cache for the hardware blocks.
232 * mask. A 0 bit allows the core to evict cache lines from
239 * those ways will never have any cache lines evicted from them.
247 * Locks a line in the L2 cache at the specified physical address
257 * Locks a specified memory region in the L2 cache.
263 * Care should be taken to ensure that enough of the L2 cache is left
275 * Unlock and flush a cache line from the L2 cache.
278 * Note that this function will flush a matching but unlocked cache line.
289 * Unlocks a region of memory that is locked in the L2 cache
318 * Returns the cache index for a given physical address
322 * Returns L2 cache index
327 * Flushes (and unlocks) the entire L2 cache.
335 * Returns the size of the L2 cache in bytes,
348 * Return log base 2 of the number of sets in the L2 cache
360 * Flush a line from the L2 cache
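The cvmx-l2c.h hits above describe an index helper: with 128-byte lines the low 7 bits of a physical address are the line offset (CVMX_L2C_IDX_ADDR_SHIFT is 7), and the set index comes from masking by the number of sets. A small sketch of that arithmetic; the set count below is an arbitrary example, not a real Octeon configuration:

/* Map a physical address to an L2 set index: drop the 7 offset bits of
 * a 128-byte line, then mask by the number of sets.  The set-bit count
 * used in main() is an assumption for illustration only. */
#include <stdio.h>
#include <stdint.h>

#define L2C_IDX_ADDR_SHIFT 7            /* 128-byte cache lines */

static unsigned l2c_address_to_index(uint64_t paddr, unsigned set_bits)
{
	return (unsigned)((paddr >> L2C_IDX_ADDR_SHIFT) & ((1u << set_bits) - 1));
}

int main(void)
{
	unsigned set_bits = 11;             /* pretend log2(#sets) == 11 */

	printf("index = %u\n", l2c_address_to_index(0x12345680ULL, set_bits));
	return 0;
}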
/linux-4.1.27/arch/m68k/kernel/
H A Dsys_m68k.c67 cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len) cache_flush_040() argument
74 switch (cache) cache_flush_040()
127 switch (cache) cache_flush_040()
184 switch (cache) cache_flush_040()
227 cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len) cache_flush_060() argument
240 switch (cache) cache_flush_060()
288 switch (cache) cache_flush_060()
314 * We just want to jump to the first cache line cache_flush_060()
347 switch (cache) cache_flush_060()
375 /* sys_cacheflush -- flush (part of) the processor cache. */
377 sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len) sys_cacheflush() argument
382 cache & ~FLUSH_CACHE_BOTH) sys_cacheflush()
386 /* Only the superuser may explicitly flush the whole cache. */ sys_cacheflush()
412 if (cache & FLUSH_CACHE_INSN) sys_cacheflush()
414 if (cache & FLUSH_CACHE_DATA) sys_cacheflush()
425 /* Flush the whole cache, even if page granularity requested. */ sys_cacheflush()
428 if (cache & FLUSH_CACHE_INSN) sys_cacheflush()
430 if (cache & FLUSH_CACHE_DATA) sys_cacheflush()
447 ret = cache_flush_040 (addr, scope, cache, len); sys_cacheflush()
449 ret = cache_flush_060 (addr, scope, cache, len); sys_cacheflush()
523 /* sys_cacheflush -- flush (part of) the processor cache. */
525 sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len) sys_cacheflush() argument
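sys_cacheflush() above is exposed to m68k user space as the cacheflush() system call taking (addr, scope, cache, len). A hedged sketch of invoking it through syscall(2); the fallback FLUSH_* values are assumptions based on the usual <asm/cachectl.h> definitions, and the call is guarded so the sketch still builds where the syscall does not exist:

/* Flush both caches for a page-sized buffer via the m68k cacheflush()
 * syscall.  Fallback constants below are assumptions; on a real m68k
 * build they come from <asm/cachectl.h>. */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef FLUSH_SCOPE_PAGE
#define FLUSH_SCOPE_PAGE 2      /* flush with page granularity (assumed value) */
#endif
#ifndef FLUSH_CACHE_BOTH
#define FLUSH_CACHE_BOTH 3      /* both I-cache and D-cache (assumed value) */
#endif

static char code_buf[256];

int main(void)
{
#ifdef __NR_cacheflush
	long ret = syscall(__NR_cacheflush, (unsigned long)code_buf,
			   FLUSH_SCOPE_PAGE, FLUSH_CACHE_BOTH,
			   (unsigned long)sizeof(code_buf));
	printf("cacheflush returned %ld\n", ret);
#else
	puts("no cacheflush syscall on this architecture");
#endif
	return 0;
}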
/linux-4.1.27/drivers/infiniband/hw/mlx5/
H A Dmr.c72 struct mlx5_mr_cache *cache = &dev->cache; order2idx() local
74 if (order < cache->ent[0].order) order2idx()
77 return order - cache->ent[0].order; order2idx()
84 struct mlx5_mr_cache *cache = &dev->cache; reg_mr_callback() local
86 struct mlx5_cache_ent *ent = &cache->ent[c]; reg_mr_callback()
118 cache->last_add = jiffies; reg_mr_callback()
136 struct mlx5_mr_cache *cache = &dev->cache; add_keys() local
137 struct mlx5_cache_ent *ent = &cache->ent[c]; add_keys()
190 struct mlx5_mr_cache *cache = &dev->cache; remove_keys() local
191 struct mlx5_cache_ent *ent = &cache->ent[c]; remove_keys()
343 static int someone_adding(struct mlx5_mr_cache *cache) someone_adding() argument
348 if (cache->ent[i].cur < cache->ent[i].limit) someone_adding()
358 struct mlx5_mr_cache *cache = &dev->cache; __cache_work_func() local
362 if (cache->stopped) __cache_work_func()
365 ent = &dev->cache.ent[i]; __cache_work_func()
372 queue_delayed_work(cache->wq, &ent->dwork, __cache_work_func()
377 queue_delayed_work(cache->wq, &ent->dwork, __cache_work_func()
380 queue_work(cache->wq, &ent->work); __cache_work_func()
384 if (!someone_adding(cache) && __cache_work_func()
385 time_after(jiffies, cache->last_add + 300 * HZ)) { __cache_work_func()
388 queue_work(cache->wq, &ent->work); __cache_work_func()
390 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); __cache_work_func()
413 struct mlx5_mr_cache *cache = &dev->cache; alloc_cached_mr() local
421 mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c); alloc_cached_mr()
426 ent = &cache->ent[i]; alloc_cached_mr()
428 mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i); alloc_cached_mr()
438 queue_work(cache->wq, &ent->work); alloc_cached_mr()
443 queue_work(cache->wq, &ent->work); alloc_cached_mr()
450 cache->ent[c].miss++; alloc_cached_mr()
457 struct mlx5_mr_cache *cache = &dev->cache; free_cached_mr() local
464 mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c); free_cached_mr()
467 ent = &cache->ent[c]; free_cached_mr()
476 queue_work(cache->wq, &ent->work); free_cached_mr()
481 struct mlx5_mr_cache *cache = &dev->cache; clean_keys() local
482 struct mlx5_cache_ent *ent = &cache->ent[c]; clean_keys()
508 struct mlx5_mr_cache *cache = &dev->cache; mlx5_mr_cache_debugfs_init() local
515 cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root); mlx5_mr_cache_debugfs_init()
516 if (!cache->root) mlx5_mr_cache_debugfs_init()
520 ent = &cache->ent[i]; mlx5_mr_cache_debugfs_init()
522 ent->dir = debugfs_create_dir(ent->name, cache->root); mlx5_mr_cache_debugfs_init()
555 debugfs_remove_recursive(dev->cache.root); mlx5_mr_cache_debugfs_cleanup()
567 struct mlx5_mr_cache *cache = &dev->cache; mlx5_mr_cache_init() local
573 cache->wq = create_singlethread_workqueue("mkey_cache"); mlx5_mr_cache_init()
574 if (!cache->wq) { mlx5_mr_cache_init()
581 INIT_LIST_HEAD(&cache->ent[i].head); mlx5_mr_cache_init()
582 spin_lock_init(&cache->ent[i].lock); mlx5_mr_cache_init()
584 ent = &cache->ent[i]; mlx5_mr_cache_init()
598 queue_work(cache->wq, &ent->work); mlx5_mr_cache_init()
603 mlx5_ib_warn(dev, "cache debugfs failure\n"); mlx5_mr_cache_init()
612 dev->cache.stopped = 1; mlx5_mr_cache_cleanup()
613 flush_workqueue(dev->cache.wq); mlx5_mr_cache_cleanup()
620 destroy_workqueue(dev->cache.wq); mlx5_mr_cache_cleanup()
1068 mlx5_ib_dbg(dev, "cache empty for order %d", order); mlx5_ib_reg_user_mr()
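The mlx5 MR cache hits above revolve around order2idx(): an allocation order maps to a bucket relative to the smallest cached order, each bucket keeps a current count, a refill limit, and a miss counter, and a request that finds its bucket empty tries larger buckets before counting a miss. A generic sketch of that bucketed-by-order bookkeeping, with invented names rather than the mlx5 structures:

/* Order-indexed cache buckets: index = order - smallest cached order,
 * with per-bucket availability and miss statistics.  Hypothetical
 * names; this is the bookkeeping pattern, not the mlx5 code. */
#include <stdio.h>

#define MIN_ORDER   2
#define NUM_BUCKETS 8

struct cache_bucket {
	int cur;    /* entries currently available */
	int limit;  /* low-water mark that would trigger a refill */
	int miss;   /* requests that found every usable bucket empty */
};

static int order2idx(int order)
{
	if (order < MIN_ORDER)
		return 0;                       /* clamp small orders */
	return order - MIN_ORDER;
}

static int alloc_from_cache(struct cache_bucket *b, int order)
{
	int c = order2idx(order);

	for (int i = c; i < NUM_BUCKETS; i++) {   /* try bigger buckets too */
		if (b[i].cur > 0) {
			b[i].cur--;
			return i + MIN_ORDER;     /* order actually handed out */
		}
	}
	b[c].miss++;
	return -1;                                /* caller must allocate fresh */
}

int main(void)
{
	struct cache_bucket buckets[NUM_BUCKETS] = {
		[1] = { .cur = 1, .limit = 4 },
	};

	printf("got order %d\n", alloc_from_cache(buckets, 3));
	printf("got order %d, misses at idx 1: %d\n",
	       alloc_from_cache(buckets, 3), buckets[1].miss);
	return 0;
}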
/linux-4.1.27/arch/x86/mm/
H A Dmm_internal.h19 void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache);
/linux-4.1.27/arch/x86/include/asm/
H A Dcache.h6 /* L1 cache line size */
/linux-4.1.27/fs/nfsd/
H A Dcache.h2 * Request reply cache. This was heavily inspired by the
14 * Representation of a reply cache entry.
43 /* cache entry states */
60 * attrstat replies. Using cache entries with fixed length instead
/linux-4.1.27/arch/score/include/asm/
H A Dtlb.h6 * we need to flush cache for area to be unmapped.
/linux-4.1.27/arch/avr32/include/asm/
H A Dsyscalls.h18 /* mm/cache.c */
H A Dcacheflush.h28 asm volatile("cache %0[0], %1" invalidate_dcache_line()
40 asm volatile("cache %0[0], %1" clean_dcache_line()
52 asm volatile("cache %0[0], %1" flush_dcache_line()
64 asm volatile("cache %0[0], %1" invalidate_icache_line()
106 * These are (I think) related to D-cache aliasing. We might need to
116 * These are for I/D cache coherency. In this case, we do need to
/linux-4.1.27/arch/c6x/include/asm/
H A Dcacheflush.h17 #include <asm/cache.h>
23 * virtually-indexed cache management (our cache is physically indexed)
38 * physically-indexed cache management
/linux-4.1.27/arch/cris/arch-v32/mm/
H A Dl2cache.c24 /* Enable the cache */ l2cache_init()
/linux-4.1.27/arch/cris/include/arch-v32/arch/
H A Dcache.h6 /* A cache-line is 32 bytes. */
H A Dtlb.h5 * The TLB is a 64-entry cache. Each entry has an 8-bit page_id that is used
/linux-4.1.27/arch/ia64/sn/kernel/sn2/
H A DMakefile14 obj-y += cache.o io.o ptc_deadlock.o sn2_smp.o sn_proc_fs.o \
/linux-4.1.27/arch/arm/mach-ux500/
H A DMakefile6 obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o
/linux-4.1.27/arch/arm/include/asm/hardware/
H A Dcache-tauros2.h2 * arch/arm/include/asm/hardware/cache-tauros2.h
/linux-4.1.27/arch/arc/include/asm/
H A Dshmparam.h12 /* Handle up to 2 cache bins */
/linux-4.1.27/fs/cifs/
H A DMakefile17 cifs-$(CONFIG_CIFS_FSCACHE) += fscache.o cache.o
/linux-4.1.27/net/atm/
H A Dmpc.h24 struct in_cache_ops *in_ops; /* ingress cache operations */
25 in_cache_entry *in_cache; /* the ingress cache of this MPC */
28 struct eg_cache_ops *eg_ops; /* egress cache operations */
29 eg_cache_entry *eg_cache; /* the egress cache of this MPC */
/linux-4.1.27/net/sunrpc/
H A DMakefile15 sunrpc_syms.o cache.o rpc_pipe.o \
/linux-4.1.27/security/selinux/include/
H A Davc_ss.h2 * Access vector cache interface for the security server.
/linux-4.1.27/tools/perf/ui/
H A Dprogress.c1 #include "../cache.h"
/linux-4.1.27/drivers/xen/
H A Dfeatures.c9 #include <linux/cache.h>
/linux-4.1.27/scripts/
H A Ddecode_stacktrace.sh13 declare -A cache
29 if [[ "${cache[$name]+isset}" == "isset" ]]; then
30 local base_addr=${cache[$name]}
33 cache["$name"]="$base_addr"
49 if [[ "${cache[$address]+isset}" == "isset" ]]; then
50 local code=${cache[$address]}
53 cache[$address]=$code
/linux-4.1.27/arch/sh/kernel/cpu/sh5/
H A Dprobe.c17 #include <asm/cache.h>
40 * First, setup some sane values for the I-cache. cpu_probe()
53 * Next, setup some sane values for the D-cache. cpu_probe()
55 * On the SH5, these are pretty consistent with the I-cache settings, cpu_probe()
65 * Setup any cache-related flags here cpu_probe()
/linux-4.1.27/arch/sparc/kernel/
H A Dentry.h191 /* D-cache state */
193 /*0x30*/u64 dcache_index; /* D-cache index */
194 /*0x38*/u64 dcache_tag; /* D-cache tag/valid */
195 /*0x40*/u64 dcache_utag; /* D-cache microtag */
196 /*0x48*/u64 dcache_stag; /* D-cache snooptag */
198 /* I-cache state */
200 /*0x90*/u64 icache_index; /* I-cache index */
201 /*0x98*/u64 icache_tag; /* I-cache phys tag */
202 /*0xa0*/u64 icache_utag; /* I-cache microtag */
203 /*0xa8*/u64 icache_stag; /* I-cache snooptag */
204 /*0xb0*/u64 icache_upper; /* I-cache upper-tag */
205 /*0xb8*/u64 icache_lower; /* I-cache lower-tag */
207 /* E-cache state */
209 /*0xe0*/u64 ecache_index; /* E-cache index */
210 /*0xe8*/u64 ecache_tag; /* E-cache tag/state */
/linux-4.1.27/include/linux/sunrpc/
H A Dcache.h2 * include/linux/sunrpc/cache.h
22 * Each cache requires:
23 * - A 'struct cache_detail' which contains information specific to the cache
27 * - A 'put' function that can release a cache item. It will only
35 * Each cache must be registered so that it can be cleaned regularly.
36 * When the cache is unregistered, it is flushed completely.
41 * Existence in the cache is counted in the refcount.
44 /* Every cache item has a common header that is used
60 #define CACHE_CLEANED 3 /* Entry has been cleaned from cache */
106 * and should not be touched by cache owners
108 time_t flush_time; /* flush all cache items with last_refresh
131 * a cache fill
140 * delayed awaiting cache-fill
145 struct cache_head *item; /* cache item we wait on */
153 * timestamps kept in the cache are expressed in seconds
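The sunrpc cache.h comments above require every cache item to carry a common header, to be released through a put routine, and to keep its existence in the cache reflected in a refcount. A minimal userspace analogue of that refcounted-header idea (invented names; this is not the sunrpc API):

/* Refcounted cache-item header analogue: each item embeds a common
 * header, a lookup takes a reference, and put() frees the item when
 * the last reference drops.  Invented names, not struct cache_head. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cache_hdr {
	int refcount;               /* existence in the cache counts as one ref */
	unsigned long flags;
};

struct name_entry {
	struct cache_hdr h;
	char name[32];
};

static struct name_entry *entry_get(struct name_entry *e)
{
	e->h.refcount++;
	return e;
}

static void entry_put(struct name_entry *e)
{
	if (--e->h.refcount == 0) {
		printf("freeing %s\n", e->name);
		free(e);
	}
}

int main(void)
{
	struct name_entry *e = calloc(1, sizeof(*e));

	e->h.refcount = 1;                  /* the cache's own reference */
	strcpy(e->name, "example");

	struct name_entry *user = entry_get(e);   /* lookup takes a reference */
	entry_put(user);                          /* user is done */
	entry_put(e);                             /* cache flushes the item */
	return 0;
}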
/linux-4.1.27/arch/sh/kernel/cpu/
H A Dinit.c22 #include <asm/cache.h>
90 * the memory-mapped cache array. expmask_init()
101 /* 2nd-level cache init */ l2_cache_init()
107 * Generic first-level cache init
118 * At this point we don't know whether the cache is enabled or not - a cache_init()
120 * could be dirty in the cache at this point: cache_init()
123 * => before re-initialising the cache, we must do a purge of the whole cache_init()
124 * cache out to memory for safety. As long as nothing is spilled cache_init()
217 l2_cache_shape = -1; /* No S-cache */ detect_cache_shape()
296 * and cache configuration in cpu_probe(). dsp_init()
308 /* First setup the rest of the I-cache info */ cpu_init()
315 /* And the D-cache too */ cpu_init()
322 /* Init the cache */ cpu_init()
330 /* Boot CPU sets the cache shape */ cpu_init()
338 * Initialize the per-CPU ASID cache very early, since the cpu_init()
H A Dproc.c103 seq_printf(m, "cache type\t: "); show_cpuinfo()
106 * Check for what type of cache we have; we support both the show_cpuinfo()
107 * unified cache on the SH-2 and SH-3, as well as the Harvard show_cpuinfo()
108 * style cache on the SH-4. show_cpuinfo()
112 show_cacheinfo(m, "cache", c->icache); show_cpuinfo()
119 /* Optional secondary cache */ show_cpuinfo()
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
H A Dxattr_cache.c54 * Initializes xattr cache for an inode.
56 * This initializes the xattr list and marks cache presence.
71 * Find in @cache and return @xattr_name attribute in @xattr,
77 static int ll_xattr_cache_find(struct list_head *cache, ll_xattr_cache_find() argument
85 list_for_each_entry(entry, cache, xe_list) { list_for_each_entry()
109 static int ll_xattr_cache_add(struct list_head *cache, ll_xattr_cache_add() argument
118 if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) { ll_xattr_cache_add()
140 list_add(&xattr->xe_list, cache); ll_xattr_cache_add()
155 * This removes an extended attribute from cache.
157 * Remove @xattr_name attribute from @cache.
162 static int ll_xattr_cache_del(struct list_head *cache, ll_xattr_cache_del() argument
171 if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) { ll_xattr_cache_del()
186 * Walk over cached attributes in @cache and
193 static int ll_xattr_cache_list(struct list_head *cache, ll_xattr_cache_list() argument
202 list_for_each_entry_safe(xattr, tmp, cache, xe_list) { list_for_each_entry_safe()
223 * Check if the xattr cache is initialized (filled).
225 * \retval 0 @cache is not initialized
226 * \retval 1 @cache is initialized
234 * This finalizes the xattr cache.
342 * Refill the xattr cache.
344 * Fetch and cache the whole of xattrs for @inode, acquiring
350 * \retval -ENOMEM not enough memory for the cache
375 /* Matched but no cache? Cancelled on error by a parallel refill. */ ll_xattr_cache_refill()
386 /* xattr data is so large that we don't want to cache it */ ll_xattr_cache_refill()
475 * Get an xattr value or list xattrs using the write-through cache.
484 * \retval -ENOMEM not enough memory for the cache
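The Lustre xattr cache hits above keep attributes on a per-inode list: ll_xattr_cache_add() first calls ll_xattr_cache_find() and refuses to add a name that is already cached. A small generic sketch of that find-then-add discipline over a linked list (invented types; not the Lustre code):

/* Find-then-add over a linked list of cached extended attributes:
 * adding an existing name is rejected, mirroring the cache above.
 * Invented names and types; not the Lustre implementation. */
#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct xattr_entry {
	struct xattr_entry *next;
	char *name;
	char *value;
};

static struct xattr_entry *xattr_find(struct xattr_entry *head, const char *name)
{
	for (struct xattr_entry *e = head; e; e = e->next)
		if (strcmp(e->name, name) == 0)
			return e;
	return NULL;
}

static int xattr_add(struct xattr_entry **head, const char *name, const char *value)
{
	if (xattr_find(*head, name))
		return -1;                      /* already cached: refuse */

	struct xattr_entry *e = malloc(sizeof(*e));
	e->name = strdup(name);
	e->value = strdup(value);
	e->next = *head;                        /* push to the front of the list */
	*head = e;
	return 0;
}

int main(void)
{
	struct xattr_entry *cache = NULL;

	printf("add user.foo: %d\n", xattr_add(&cache, "user.foo", "1"));
	printf("add user.foo again: %d\n", xattr_add(&cache, "user.foo", "2"));
	printf("found value: %s\n", xattr_find(cache, "user.foo")->value);
	return 0;
}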
/linux-4.1.27/drivers/net/wireless/cw1200/
H A Dtxrx.c48 /* TX policy cache implementation */
216 static int tx_policy_find(struct tx_policy_cache *cache, tx_policy_find() argument
220 * the cache. tx_policy_find()
225 list_for_each_entry(it, &cache->used, link) { tx_policy_find()
227 return it - cache->cache; tx_policy_find()
230 list_for_each_entry(it, &cache->free, link) { tx_policy_find()
232 return it - cache->cache; tx_policy_find()
237 static inline void tx_policy_use(struct tx_policy_cache *cache, tx_policy_use() argument
241 list_move(&entry->link, &cache->used); tx_policy_use()
244 static inline int tx_policy_release(struct tx_policy_cache *cache, tx_policy_release() argument
249 list_move(&entry->link, &cache->free); tx_policy_release()
256 struct tx_policy_cache *cache = &priv->tx_policy_cache; tx_policy_clean() local
260 spin_lock_bh(&cache->lock); tx_policy_clean()
261 locked = list_empty(&cache->free); tx_policy_clean()
264 entry = &cache->cache[idx]; tx_policy_clean()
270 list_move(&entry->link, &cache->free); tx_policy_clean()
278 spin_unlock_bh(&cache->lock); tx_policy_clean()
282 /* External TX policy cache API */
286 struct tx_policy_cache *cache = &priv->tx_policy_cache; tx_policy_init() local
289 memset(cache, 0, sizeof(*cache)); tx_policy_init()
291 spin_lock_init(&cache->lock); tx_policy_init()
292 INIT_LIST_HEAD(&cache->used); tx_policy_init()
293 INIT_LIST_HEAD(&cache->free); tx_policy_init()
296 list_add(&cache->cache[i].link, &cache->free); tx_policy_init()
304 struct tx_policy_cache *cache = &priv->tx_policy_cache; tx_policy_get() local
309 spin_lock_bh(&cache->lock); tx_policy_get()
310 if (WARN_ON_ONCE(list_empty(&cache->free))) { tx_policy_get()
311 spin_unlock_bh(&cache->lock); tx_policy_get()
314 idx = tx_policy_find(cache, &wanted); tx_policy_get()
324 entry = list_entry(cache->free.prev, tx_policy_get()
327 idx = entry - cache->cache; tx_policy_get()
331 tx_policy_use(cache, &cache->cache[idx]); tx_policy_get()
332 if (list_empty(&cache->free)) { tx_policy_get()
336 spin_unlock_bh(&cache->lock); tx_policy_get()
343 struct tx_policy_cache *cache = &priv->tx_policy_cache; tx_policy_put() local
345 spin_lock_bh(&cache->lock); tx_policy_put()
346 locked = list_empty(&cache->free); tx_policy_put()
347 usage = tx_policy_release(cache, &cache->cache[idx]); tx_policy_put()
352 spin_unlock_bh(&cache->lock); tx_policy_put()
357 struct tx_policy_cache *cache = &priv->tx_policy_cache; tx_policy_upload() local
362 spin_lock_bh(&cache->lock); tx_policy_upload()
366 struct tx_policy *src = &cache->cache[i].policy; tx_policy_upload()
382 spin_unlock_bh(&cache->lock); tx_policy_upload()
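The cw1200 TX policy cache above works from a fixed pool of entries: a lookup scans for an identical policy and reuses it, and a miss claims a free slot for the new policy, which is released back to the pool when its usage count drops to zero. A generic fixed-pool sketch of the same idea (hypothetical structures, no locking, not the cw1200 driver):

/* Fixed-size policy cache: look for a matching entry in the pool; on a
 * miss, claim an unused slot.  Hypothetical structures, single-threaded
 * for simplicity; not the cw1200 driver code. */
#include <stdio.h>
#include <string.h>

#define CACHE_SIZE 8

struct policy {
	unsigned char rates[4];     /* stand-in for a retry/rate policy */
};

struct policy_entry {
	struct policy p;
	int in_use;
	int usage_count;
};

static struct policy_entry cache[CACHE_SIZE];

static int policy_get(const struct policy *wanted)
{
	/* First pass: reuse an identical policy if one is already cached. */
	for (int i = 0; i < CACHE_SIZE; i++) {
		if (memcmp(&cache[i].p, wanted, sizeof(*wanted)) == 0) {
			cache[i].in_use = 1;
			cache[i].usage_count++;
			return i;
		}
	}
	/* Miss: take the first free slot and install the new policy. */
	for (int i = 0; i < CACHE_SIZE; i++) {
		if (!cache[i].in_use) {
			cache[i].p = *wanted;
			cache[i].in_use = 1;
			cache[i].usage_count = 1;
			return i;
		}
	}
	return -1;                  /* pool exhausted */
}

static void policy_put(int idx)
{
	if (--cache[idx].usage_count == 0)
		cache[idx].in_use = 0;      /* back to the free pool */
}

int main(void)
{
	struct policy a = { { 54, 48, 24, 6 } };

	int i = policy_get(&a);
	int j = policy_get(&a);             /* same policy hits the cache */
	printf("idx %d == idx %d\n", i, j);
	policy_put(j);
	policy_put(i);
	return 0;
}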
/linux-4.1.27/drivers/power/
H A Dbq27x00_battery.c113 struct bq27x00_reg_cache cache; member in struct:bq27x00_device_info
473 struct bq27x00_reg_cache cache = {0, }; bq27x00_update() local
480 cache.flags = bq27x00_read(di, BQ27x00_REG_FLAGS, flags_1b); bq27x00_update()
481 if ((cache.flags & 0xff) == 0xff) bq27x00_update()
483 cache.flags = -1; bq27x00_update()
484 if (cache.flags >= 0) { bq27x00_update()
486 && (cache.flags & BQ27000_FLAG_CI)) { bq27x00_update()
488 cache.capacity = -ENODATA; bq27x00_update()
489 cache.energy = -ENODATA; bq27x00_update()
490 cache.time_to_empty = -ENODATA; bq27x00_update()
491 cache.time_to_empty_avg = -ENODATA; bq27x00_update()
492 cache.time_to_full = -ENODATA; bq27x00_update()
493 cache.charge_full = -ENODATA; bq27x00_update()
494 cache.health = -ENODATA; bq27x00_update()
496 cache.capacity = bq27x00_battery_read_rsoc(di); bq27x00_update()
498 cache.time_to_empty = bq27x00_update()
502 cache.energy = bq27x00_battery_read_energy(di); bq27x00_update()
503 cache.time_to_empty = bq27x00_update()
506 cache.time_to_empty_avg = bq27x00_update()
509 cache.time_to_full = bq27x00_update()
513 cache.charge_full = bq27x00_battery_read_lmd(di); bq27x00_update()
514 cache.health = bq27x00_battery_read_health(di); bq27x00_update()
516 cache.temperature = bq27x00_battery_read_temperature(di); bq27x00_update()
518 cache.cycle_count = bq27x00_battery_read_cyct(di); bq27x00_update()
520 cache.power_avg = bq27x00_update()
524 cache.power_avg = bq27x00_update()
533 if (di->cache.capacity != cache.capacity) bq27x00_update()
536 if (memcmp(&di->cache, &cache, sizeof(cache)) != 0) bq27x00_update()
537 di->cache = cache; bq27x00_update()
595 if (di->cache.flags & BQ27500_FLAG_FC) bq27x00_battery_status()
597 else if (di->cache.flags & BQ27500_FLAG_DSC) bq27x00_battery_status()
602 if (di->cache.flags & BQ27000_FLAG_FC) bq27x00_battery_status()
604 else if (di->cache.flags & BQ27000_FLAG_CHGS) bq27x00_battery_status()
623 if (di->cache.flags & BQ27500_FLAG_FC) bq27x00_battery_capacity_level()
625 else if (di->cache.flags & BQ27500_FLAG_SOC1) bq27x00_battery_capacity_level()
627 else if (di->cache.flags & BQ27500_FLAG_SOCF) bq27x00_battery_capacity_level()
632 if (di->cache.flags & BQ27000_FLAG_FC) bq27x00_battery_capacity_level()
634 else if (di->cache.flags & BQ27000_FLAG_EDV1) bq27x00_battery_capacity_level()
636 else if (di->cache.flags & BQ27000_FLAG_EDVF) bq27x00_battery_capacity_level()
692 if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0) bq27x00_battery_get_property()
703 val->intval = di->cache.flags < 0 ? 0 : 1; bq27x00_battery_get_property()
709 ret = bq27x00_simple_value(di->cache.capacity, val); bq27x00_battery_get_property()
715 ret = bq27x00_simple_value(di->cache.temperature, val); bq27x00_battery_get_property()
720 ret = bq27x00_simple_value(di->cache.time_to_empty, val); bq27x00_battery_get_property()
723 ret = bq27x00_simple_value(di->cache.time_to_empty_avg, val); bq27x00_battery_get_property()
726 ret = bq27x00_simple_value(di->cache.time_to_full, val); bq27x00_battery_get_property()
735 ret = bq27x00_simple_value(di->cache.charge_full, val); bq27x00_battery_get_property()
741 ret = bq27x00_simple_value(di->cache.cycle_count, val); bq27x00_battery_get_property()
744 ret = bq27x00_simple_value(di->cache.energy, val); bq27x00_battery_get_property()
747 ret = bq27x00_simple_value(di->cache.power_avg, val); bq27x00_battery_get_property()
750 ret = bq27x00_simple_value(di->cache.health, val); bq27x00_battery_get_property()
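bq27x00_update() above reads the gauge registers into a local snapshot, marks readings that are not available with -ENODATA, and only replaces the cached copy (and notifies) when memcmp() says the snapshot changed. A hedged sketch of that snapshot-and-compare register cache, with invented register reads rather than the driver's I2C/HDQ accessors:

/* Snapshot-and-compare register cache: build a fresh snapshot, flag
 * unavailable readings with a sentinel, and notify only when the new
 * snapshot differs from the cached one.  Invented values and helpers,
 * not the bq27x00 driver. */
#include <stdio.h>
#include <string.h>
#include <errno.h>

struct reg_cache {
	int flags;
	int capacity;
	int temperature;
};

static struct reg_cache cached;

static int read_capacity(void)    { return 87; }     /* pretend hardware read */
static int read_temperature(void) { return 2981; }   /* pretend 0.1 K units */

static void update(void)
{
	struct reg_cache snap = { 0 };

	snap.flags = 0x01;                    /* pretend flags register read */
	if (snap.flags < 0) {
		snap.capacity = -ENODATA;     /* data not ready yet */
		snap.temperature = -ENODATA;
	} else {
		snap.capacity = read_capacity();
		snap.temperature = read_temperature();
	}

	if (memcmp(&cached, &snap, sizeof(snap)) != 0) {
		cached = snap;
		printf("battery properties changed, capacity=%d\n",
		       cached.capacity);
	}
}

int main(void)
{
	update();       /* first run always differs from the zeroed cache */
	update();       /* identical snapshot: no notification */
	return 0;
}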
/linux-4.1.27/arch/tile/include/asm/
H A Dcache.h20 /* bytes per L1 data cache line */
24 /* bytes per L2 cache line */
42 /* use the cache line size for the L2, which is where it counts */
48 /* Group together read-mostly things to avoid cache false sharing */
55 * This allowed better cache utilization since cache inclusion did not
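The tile cache.h comment about grouping read-mostly data together is the standard defence against false sharing: keep frequently written fields on their own cache line so writes by one CPU do not invalidate lines that other CPUs only read. A small C11 sketch using alignas; the 64-byte line size is an assumption for illustration, not the tile values listed above:

/* Separate a hot counter from read-mostly configuration so they land
 * on different cache lines.  The 64-byte line size is an assumption;
 * real line sizes vary by architecture. */
#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

#define CACHE_LINE 64

struct stats {
	/* Read-mostly configuration, packed together on its own line. */
	alignas(CACHE_LINE) int max_entries;
	int flags;

	/* Hot counter gets a line to itself to avoid false sharing. */
	alignas(CACHE_LINE) unsigned long hits;
};

int main(void)
{
	printf("offset of hits: %zu, struct size: %zu\n",
	       offsetof(struct stats, hits), sizeof(struct stats));
	return 0;
}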
/linux-4.1.27/fs/nilfs2/
H A Dalloc.h92 * struct nilfs_palloc_cache - persistent object allocator cache
93 * @lock: cache protecting lock
94 * @prev_desc: blockgroup descriptors cache
95 * @prev_bitmap: blockgroup bitmap cache
96 * @prev_entry: translation entries cache
106 struct nilfs_palloc_cache *cache);
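The nilfs_palloc_cache above remembers the most recently used descriptor, bitmap, and entry blocks under a lock so that consecutive allocations in the same group avoid re-reading them. A generic sketch of that remember-the-last-block idea (invented names; not the nilfs allocator):

/* Remember-the-last-lookup cache: if the next request falls in the same
 * block as the previous one, reuse the cached copy instead of reading
 * it again.  Invented names, not the nilfs code. */
#include <stdio.h>

#define ENTRIES_PER_BLOCK 128

struct last_block_cache {
	long cached_blkno;          /* -1 means nothing cached yet */
	int reads;                  /* how many real block reads happened */
};

static void read_entry(struct last_block_cache *c, long entry_nr)
{
	long blkno = entry_nr / ENTRIES_PER_BLOCK;

	if (c->cached_blkno != blkno) {
		c->reads++;             /* would fetch the block here */
		c->cached_blkno = blkno;
	}
	/* the entry is now served from the cached block */
}

int main(void)
{
	struct last_block_cache c = { .cached_blkno = -1 };

	read_entry(&c, 5);
	read_entry(&c, 6);          /* same block: no new read */
	read_entry(&c, 300);        /* different block */
	printf("block reads: %d\n", c.reads);   /* prints 2 */
	return 0;
}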
/linux-4.1.27/arch/nios2/boot/compressed/
H A Dhead.S18 #include <asm/cache.h>
24 /* invalidate all instruction cache */
30 /* invalidate all data cache */
51 /* flush the data cache after moving */
87 /* flush all data cache after decompressing */
93 /* flush all instruction cache */
/linux-4.1.27/fs/nfs/
H A Dfscache.c1 /* NFS filesystem cache interface
39 /* create a cache index for looking up filehandles */ nfs_fscache_get_client_cookie()
60 * Get the cache cookie for an NFS superblock. We have to handle
61 * uniquification here because the cache doesn't do it for us.
139 /* create a cache index for looking up filehandles */ nfs_fscache_get_super_cookie()
179 * Initialise the per-inode cache cookie pointer for an NFS inode.
218 * access() thrashing the cache.
221 * to use the cache.
223 * We enable the cache for an inode if we open it read-only and it isn't
224 * currently open for writing. We disable the cache if the inode is open
242 dfprintk(FSCACHE, "NFS: nfsi 0x%p disabling cache\n", nfsi); nfs_fscache_open_file()
247 dfprintk(FSCACHE, "NFS: nfsi 0x%p enabling cache\n", nfsi); nfs_fscache_open_file()
257 * interacting with the cache.
301 * Handle completion of a page being read from the cache.
349 case -ENOBUFS: /* inode not in cache */ __nfs_readpage_from_fscache()
350 case -ENODATA: /* page not in cache */ __nfs_readpage_from_fscache()
391 case 0: /* read submitted to the cache for all pages */ __nfs_readpages_from_fscache()
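The NFS fscache read paths above treat -ENOBUFS (inode not in the cache) and -ENODATA (page not in the cache) as "fall back to the server", not as hard errors. A generic sketch of that try-cache-then-fallback read, with invented helper functions rather than the NFS/fscache API:

/* Try the local cache first and fall back to the backing store when the
 * cache reports the data simply is not there.  Invented helpers; not
 * the NFS/fscache interfaces. */
#include <stdio.h>
#include <errno.h>

static int cache_read(int page_index, char *buf)
{
	(void)buf;
	return page_index == 0 ? 0 : -ENODATA;   /* pretend only page 0 is cached */
}

static int server_read(int page_index, char *buf)
{
	buf[0] = 'S';
	printf("page %d fetched from server\n", page_index);
	return 0;
}

static int read_page(int page_index, char *buf)
{
	int ret = cache_read(page_index, buf);

	switch (ret) {
	case 0:
		printf("page %d served from cache\n", page_index);
		return 0;
	case -ENOBUFS:      /* inode not in cache */
	case -ENODATA:      /* page not in cache */
		return server_read(page_index, buf);
	default:
		return ret; /* a real error */
	}
}

int main(void)
{
	char buf[1];

	read_page(0, buf);
	read_page(1, buf);
	return 0;
}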
