/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 4.4BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64
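
/*
 * Example (illustrative arithmetic): at the 256k-entry hard cap imposed
 * by nfsd_cache_size_limit(), nfsd_hashsize() below yields
 * roundup_pow_of_two(262144 / 64) = 4096 buckets.
 */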
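/*
 * Each hash bucket has its own LRU list and lock, so operations on
 * entries in different buckets do not contend with each other.
 */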
struct nfsd_drc_bucket {
	struct list_head lru_head;
	spinlock_t cache_lock;
};

static struct nfsd_drc_bucket	*drc_hashtbl;
static struct kmem_cache	*drc_slab;

/* max number of entries allowed in the cache */
static unsigned int		max_drc_entries;

/* number of significant bits in the hash value */
static unsigned int		maskbits;
static unsigned int		drc_hashsize;

/*
 * Stats and other tracking of the duplicate reply cache. All of these and
 * the "rc" fields in nfsdstats are protected by the per-bucket cache_lock,
 * except for num_drc_entries, which is atomic.
 */

/* total number of entries */
static atomic_t			num_drc_entries;

/* cache misses due only to checksum comparison failures */
static unsigned int		payload_misses;

/* amount of memory (in bytes) currently consumed by the DRC */
static unsigned int		drc_mem_usage;

/* longest hash chain seen */
static unsigned int		longest_chain;

/* size of cache when we saw the longest hash chain */
static unsigned int		longest_chain_cachesize;

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static void	cache_cleaner_func(struct work_struct *unused);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

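/*
 * Let the VM reclaim DRC entries under memory pressure. "seeks" is the
 * shrinker's relative cost of recreating an object; 1 (half of
 * DEFAULT_SEEKS) marks cache entries as cheap to re-establish.
 */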
static struct shrinker nfsd_reply_cache_shrinker = {
	.scan_objects = nfsd_reply_cache_scan,
	.count_objects = nfsd_reply_cache_count,
	.seeks	= 1,
};

/*
 * locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG.
 * Otherwise, the bucket's cache_lock must be held when accessing the
 * entry's list pointers.
 */
static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages - totalhigh_pages;

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}
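
/*
 * Worked example (illustrative): with 1GB of low memory and 4K pages,
 * low_pages = 262144, int_sqrt(262144) = 512, and
 * (16 * 512) << (12 - 10) = 32768, matching the 1GB row above.
 */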

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
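
/*
 * For example (illustrative): a 16384-entry limit (the 256MB row in the
 * table above) gives roundup_pow_of_two(16384 / 64) = 256 buckets, i.e.
 * an average of TARGET_BUCKET_SIZE entries per bucket at capacity.
 */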
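/*
 * Map a request's RPC XID to a hash bucket index; hash_32() spreads the
 * XIDs over the 2^maskbits buckets.
 */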
static u32
nfsd_cache_hash(__be32 xid)
{
	return hash_32(be32_to_cpu(xid), maskbits);
}

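/*
 * Allocate a DRC entry from the slab cache. Returns NULL on failure;
 * the caller accounts for the new entry under the bucket lock.
 */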
static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
	struct svc_cacherep	*rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_LIST_HEAD(&rp->c_lru);
	}
	return rp;
}

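/*
 * Free a cache entry, along with any cached reply buffer it owns, and
 * update the accounting. The caller must hold the entry's bucket lock
 * (except at shutdown, when no nfsd threads can race with us).
 */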
static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
	}
	list_del(&rp->c_lru);
	atomic_dec(&num_drc_entries);
	drc_mem_usage -= sizeof(*rp);
	kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
	spin_lock(&b->cache_lock);
	nfsd_reply_cache_free_locked(rp);
	spin_unlock(&b->cache_lock);
}

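/*
 * Size the cache from available low memory, register the shrinker, and
 * set up the slab cache and hash buckets. Called when nfsd starts up.
 */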
int nfsd_reply_cache_init(void)
{
	unsigned int hashsize;
	unsigned int i;
	int status = 0;

	max_drc_entries = nfsd_cache_size_limit();
	atomic_set(&num_drc_entries, 0);
	hashsize = nfsd_hashsize(max_drc_entries);
	maskbits = ilog2(hashsize);

	status = register_shrinker(&nfsd_reply_cache_shrinker);
	if (status)
		return status;

	drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
					0, 0, NULL);
	if (!drc_slab)
		goto out_nomem;

	drc_hashtbl = kcalloc(hashsize, sizeof(*drc_hashtbl), GFP_KERNEL);
	if (!drc_hashtbl)
		goto out_nomem;
	for (i = 0; i < hashsize; i++) {
		INIT_LIST_HEAD(&drc_hashtbl[i].lru_head);
		spin_lock_init(&drc_hashtbl[i].cache_lock);
	}
	drc_hashsize = hashsize;

	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}

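/*
 * Tear down the cache: unregister the shrinker, stop the cleaner, and
 * free all entries. The entries are freed without taking the bucket
 * locks, which assumes no nfsd threads are still running.
 */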
void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep	*rp;
	unsigned int i;

	unregister_shrinker(&nfsd_reply_cache_shrinker);
	cancel_delayed_work_sync(&cache_cleaner);

	for (i = 0; i < drc_hashsize; i++) {
		struct list_head *head = &drc_hashtbl[i].lru_head;
		while (!list_empty(head)) {
			rp = list_first_entry(head, struct svc_cacherep, c_lru);
			nfsd_reply_cache_free_locked(rp);
		}
	}

	kfree(drc_hashtbl);
	drc_hashtbl = NULL;
	drc_hashsize = 0;

	if (drc_slab) {
		kmem_cache_destroy(drc_slab);
		drc_slab = NULL;
	}
}

/*
 * Move cache entry to end of LRU list, and queue the cleaner to run if it's
 * not already scheduled.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &b->lru_head);
	schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
}

static long
prune_bucket(struct nfsd_drc_bucket *b)
{
	struct svc_cacherep *rp, *tmp;
	long freed = 0;

	list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
		/*
		 * Don't free entries attached to calls that are still
		 * in-progress, but do keep scanning the list.
		 */
		if (rp->c_state == RC_INPROG)
			continue;
		if (atomic_read(&num_drc_entries) <= max_drc_entries &&
		    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
			break;
		nfsd_reply_cache_free_locked(rp);
		freed++;
	}
	return freed;
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(void)
{
	unsigned int i;
	long freed = 0;
	bool cancel = true;

	for (i = 0; i < drc_hashsize; i++) {
		struct nfsd_drc_bucket *b = &drc_hashtbl[i];

		if (list_empty(&b->lru_head))
			continue;
		spin_lock(&b->cache_lock);
		freed += prune_bucket(b);
		if (!list_empty(&b->lru_head))
			cancel = false;
		spin_unlock(&b->cache_lock);
	}

	/*
	 * Conditionally rearm the job to run in RC_EXPIRE since we just
	 * ran the pruner.
	 */
	if (!cancel)
		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
	return freed;
}

static void
cache_cleaner_func(struct work_struct *unused)
{
	prune_cache_entries();
}

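/*
 * Shrinker callbacks: ->count_objects reports how many entries are in
 * the cache, and ->scan_objects prunes them, returning the number freed.
 */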
static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return atomic_read(&num_drc_entries);
}

static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	return prune_cache_entries();
}

/*
 * Walk an xdr_buf and compute a checksum of at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		++idx;
	}
	return csum;
}

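/*
 * Compare a request against a cache entry. The XID is checked first as
 * the cheapest discriminator; a checksum mismatch after an XID match is
 * tallied separately in payload_misses.
 */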
static bool
nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
{
	/* Check RPC XID first */
	if (rqstp->rq_xid != rp->c_xid)
		return false;
	/* compare checksum of NFS data */
	if (csum != rp->c_csum) {
		++payload_misses;
		return false;
	}

	/* Other discriminators */
	if (rqstp->rq_proc != rp->c_proc ||
	    rqstp->rq_prot != rp->c_prot ||
	    rqstp->rq_vers != rp->c_vers ||
	    rqstp->rq_arg.len != rp->c_len ||
	    !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
	    rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
		return false;

	return true;
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with the bucket's cache_lock held. Returns the found
 * entry or NULL on failure.
 */
static struct svc_cacherep *
nfsd_cache_search(struct nfsd_drc_bucket *b, struct svc_rqst *rqstp,
		__wsum csum)
{
	struct svc_cacherep	*rp, *ret = NULL;
	struct list_head	*rh = &b->lru_head;
	unsigned int		entries = 0;

	list_for_each_entry(rp, rh, c_lru) {
		++entries;
		if (nfsd_cache_match(rqstp, csum, rp)) {
			ret = rp;
			break;
		}
	}

	/* tally hash chain length stats */
	if (entries > longest_chain) {
		longest_chain = entries;
		longest_chain_cachesize = atomic_read(&num_drc_entries);
	} else if (entries == longest_chain) {
		/* prefer to keep the smallest cachesize possible here */
		longest_chain_cachesize = min_t(unsigned int,
				longest_chain_cachesize,
				atomic_read(&num_drc_entries));
	}

	return ret;
}

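/*
 * The RC_* codes from cache.h tell the dispatcher what to do next:
 * RC_DOIT means process the request normally, RC_REPLY means a cached
 * reply has been composed into rq_res, and RC_DROPIT means drop the
 * request (e.g. a retransmission of a call still in progress).
 */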
/*
 * Try to find an entry matching the current call in the cache. Since the
 * common case is a cache miss followed by an insert, we preallocate an
 * entry before taking the bucket lock and searching; if a matching entry
 * turns out to be present after all, the preallocated one is freed.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct svc_cacherep	*rp, *found;
	__be32			xid = rqstp->rq_xid;
	u32			proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	__wsum			csum;
	u32 hash = nfsd_cache_hash(xid);
	struct nfsd_drc_bucket *b = &drc_hashtbl[hash];
	unsigned long		age;
	int type = rqstp->rq_cachetype;
	int rtn = RC_DOIT;

	rqstp->rq_cacherep = NULL;
	if (type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return rtn;
	}

	csum = nfsd_cache_csum(rqstp);

	/*
	 * Since the common case is a cache miss followed by an insert,
	 * preallocate an entry.
	 */
	rp = nfsd_reply_cache_alloc();
	spin_lock(&b->cache_lock);
	if (likely(rp)) {
		atomic_inc(&num_drc_entries);
		drc_mem_usage += sizeof(*rp);
	}

	/* go ahead and prune the cache */
	prune_bucket(b);

	found = nfsd_cache_search(b, rqstp, csum);
	if (found) {
		if (likely(rp))
			nfsd_reply_cache_free_locked(rp);
		rp = found;
		goto found_entry;
	}

	if (!rp) {
		dprintk("nfsd: unable to allocate DRC entry!\n");
		goto out;
	}

	nfsdstats.rcmisses++;
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
	rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_len = rqstp->rq_arg.len;
	rp->c_csum = csum;

	lru_put_end(b, rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
 out:
	spin_unlock(&b->cache_lock);
	return rtn;

found_entry:
	nfsdstats.rchits++;
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	lru_put_end(b, rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/*
	 * From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache?
	 */
	rtn = RC_DOIT;
	if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		nfsd_reply_cache_free_locked(rp);
	}

	goto out;
}

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp = rqstp->rq_cacherep;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	u32		hash;
	struct nfsd_drc_bucket *b;
	int		len;
	size_t		bufsize = 0;

	if (!rp)
		return;

	hash = nfsd_cache_hash(rp->c_xid);
	b = &drc_hashtbl[hash];

	len = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(b, rp);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk(KERN_WARNING "nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		bufsize = len << 2;
		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(b, rp);
			return;
		}
		cachv->iov_len = bufsize;
		memcpy(cachv->iov_base, statp, bufsize);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(b, rp);
		return;
	}
	spin_lock(&b->cache_lock);
	drc_mem_usage += bufsize;
	lru_put_end(b, rp);
	rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&b->cache_lock);
}
592
593/*
594 * Copy cached reply to current reply buffer. Should always fit.
595 * FIXME as reply is in a page, we should just attach the page, and
596 * keep a refcount....
597 */
598static int
599nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
600{
601	struct kvec	*vec = &rqstp->rq_res.head[0];
602
603	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
604		printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n",
605				data->iov_len);
606		return 0;
607	}
608	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
609	vec->iov_len += data->iov_len;
610	return 1;
611}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max entries:           %u\n", max_drc_entries);
	seq_printf(m, "num entries:           %u\n",
			atomic_read(&num_drc_entries));
	seq_printf(m, "hash buckets:          %u\n", 1 << maskbits);
	seq_printf(m, "mem usage:             %u\n", drc_mem_usage);
	seq_printf(m, "cache hits:            %u\n", nfsdstats.rchits);
	seq_printf(m, "cache misses:          %u\n", nfsdstats.rcmisses);
	seq_printf(m, "not cached:            %u\n", nfsdstats.rcnocache);
	seq_printf(m, "payload misses:        %u\n", payload_misses);
	seq_printf(m, "longest chain len:     %u\n", longest_chain);
	seq_printf(m, "cachesize at longest:  %u\n", longest_chain_cachesize);
	return 0;
}
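
/*
 * Example output (illustrative values only):
 *
 *   max entries:           8192
 *   num entries:           523
 *   hash buckets:          128
 *   mem usage:             547328
 *   cache hits:            9184
 *   cache misses:          1021
 *   not cached:            212
 *   payload misses:        0
 *   longest chain len:     9
 *   cachesize at longest:  523
 */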

int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, nfsd_reply_cache_stats_show, NULL);
}