/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 *
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/string_helpers.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include "netns.h"

#define	 RPCDBG_FACILITY RPCDBG_CACHE

static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

static void cache_init(struct cache_head *h)
{
	time_t now = seconds_since_boot();
	h->next = NULL;
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	h->last_refresh = now;
}

struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
				       struct cache_head *key, int hash)
{
	struct cache_head **head, **hp;
	struct cache_head *new = NULL, *freeme = NULL;

	head = &detail->hash_table[hash];

	read_lock(&detail->hash_lock);

	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp))
				/* This entry is expired, we will discard it. */
				break;
			cache_get(tmp);
			read_unlock(&detail->hash_lock);
			return tmp;
		}
	}
	read_unlock(&detail->hash_lock);
	/* Didn't find anything, insert an empty entry */

	new = detail->alloc();
	if (!new)
		return NULL;
	/* must fully initialise 'new', else
	 * we might get into trouble if we need to
	 * cache_put it soon.
	 */
	cache_init(new);
	detail->init(new, key);

	write_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp)) {
				*hp = tmp->next;
				tmp->next = NULL;
				detail->entries--;
				freeme = tmp;
				break;
			}
			cache_get(tmp);
			write_unlock(&detail->hash_lock);
			cache_put(new, detail);
			return tmp;
		}
	}
	new->next = *head;
	*head = new;
	detail->entries++;
	cache_get(new);
	write_unlock(&detail->hash_lock);

	if (freeme)
		cache_put(freeme, detail);
	return new;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);

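/*
 * A minimal sketch of how callers typically wrap sunrpc_cache_lookup()
 * in a typed helper.  'struct demo_entry', demo_hash() and demo_lookup()
 * are hypothetical names used only to illustrate the pattern; real
 * examples are ip_map_lookup() and unix_gid_lookup() in svcauth_unix.c:
 *
 *	struct demo_entry {
 *		struct cache_head h;	<- conventionally placed first
 *		int key;
 *	};
 *
 *	static struct demo_entry *demo_lookup(struct cache_detail *cd, int key)
 *	{
 *		struct demo_entry tmpl;
 *		struct cache_head *ch;
 *
 *		tmpl.key = key;
 *		ch = sunrpc_cache_lookup(cd, &tmpl.h, demo_hash(key));
 *		if (!ch)
 *			return NULL;
 *		return container_of(ch, struct demo_entry, h);
 *	}
 *
 * The returned entry carries a reference taken by cache_get(); the
 * caller must eventually drop it with cache_put().
 */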

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time_t expiry)
{
	head->expiry_time = expiry;
	head->last_refresh = seconds_since_boot();
	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
	set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail)
{
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		cache_dequeue(detail, head);
	}
}

struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it.
	 */
	struct cache_head **head;
	struct cache_head *tmp;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		write_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			if (test_bit(CACHE_NEGATIVE, &new->flags))
				set_bit(CACHE_NEGATIVE, &old->flags);
			else
				detail->update(old, new);
			cache_fresh_locked(old, new->expiry_time);
			write_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail);
			return old;
		}
		write_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp);
	detail->init(tmp, old);
	head = &detail->hash_table[hash];

	write_lock(&detail->hash_lock);
	if (test_bit(CACHE_NEGATIVE, &new->flags))
		set_bit(CACHE_NEGATIVE, &tmp->flags);
	else
		detail->update(tmp, new);
	tmp->next = *head;
	*head = tmp;
	detail->entries++;
	cache_get(tmp);
	cache_fresh_locked(tmp, new->expiry_time);
	cache_fresh_locked(old, 0);
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail);
	cache_fresh_unlocked(old, detail);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);

static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
{
	if (cd->cache_upcall)
		return cd->cache_upcall(cd, h);
	return sunrpc_cache_pipe_upcall(cd, h);
}

static inline int cache_is_valid(struct cache_head *h)
{
	if (!test_bit(CACHE_VALID, &h->flags))
		return -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			return -ENOENT;
		else {
			/*
			 * In combination with the write barrier in
			 * sunrpc_cache_update, this ensures that anyone
			 * using the cache entry after this point sees
			 * the updated contents:
			 */
			smp_rmb();
			return 0;
		}
	}
}

static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
{
	int rv;

	write_lock(&detail->hash_lock);
	rv = cache_is_valid(h);
	if (rv == -EAGAIN) {
		set_bit(CACHE_NEGATIVE, &h->flags);
		cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
		rv = -ENOENT;
	}
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(h, detail);
	return rv;
}

/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (if needed)
 * initiate an upcall to refresh it.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 * -EAGAIN if an upcall is pending and the request has been queued
 * -ETIMEDOUT if the upcall failed, or the request could not be queued, or
 *           the upcall completed but the item is still invalid (implying
 *           that the cache item has been replaced by a newer one).
 * -ENOENT if the cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		    struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	long refresh_age, age;

	/* First decide return status as best we can */
	rv = cache_is_valid(h);

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = seconds_since_boot() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN ||
		   (h->expiry_time != 0 && age > refresh_age/2)) {
		dprintk("RPC:       Want update, refage=%ld, age=%ld\n",
				refresh_age, age);
		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
			switch (cache_make_upcall(detail, h)) {
			case -EINVAL:
				rv = try_to_negate_entry(detail, h);
				break;
			case -EAGAIN:
				cache_fresh_unlocked(h, detail);
				break;
			}
		}
	}

	if (rv == -EAGAIN) {
		if (!cache_defer_req(rqstp, h)) {
			/*
			 * Request was not deferred; handle it as best
			 * we can ourselves:
			 */
			rv = cache_is_valid(h);
			if (rv == -EAGAIN)
				rv = -ETIMEDOUT;
		}
	}
	if (rv)
		cache_put(h, detail);
	return rv;
}
EXPORT_SYMBOL_GPL(cache_check);
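
/*
 * Hedged usage sketch: a server-side caller typically passes the
 * cache_req embedded in its svc_rqst (rq_chandle) so that the request
 * can be deferred while the upcall completes.  Continuing the
 * hypothetical demo_lookup() example from above:
 *
 *	struct demo_entry *de = demo_lookup(cd, key);
 *
 *	if (!de)
 *		return -ENOMEM;
 *	switch (cache_check(cd, &de->h, &rqstp->rq_chandle)) {
 *	case 0:
 *		... use de, then drop our reference with
 *		    cache_put(&de->h, cd) ...
 *		break;
 *	default:
 *		... -EAGAIN, -ETIMEDOUT or -ENOENT: cache_check has
 *		    already dropped the reference for us ...
 *		break;
 *	}
 */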

/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before the current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * Later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'.
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 *
 */

static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;

void sunrpc_init_cache_detail(struct cache_detail *cd)
{
	rwlock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->readers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	schedule_delayed_work(&cache_cleaner, 0);
}
EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);

void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	write_lock(&cd->hash_lock);
	if (cd->entries || atomic_read(&cd->inuse)) {
		write_unlock(&cd->hash_lock);
		spin_unlock(&cache_list_lock);
		goto out;
	}
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	write_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill the worker */
		cancel_delayed_work_sync(&cache_cleaner);
	}
	return;
out:
	printk(KERN_ERR "RPC: failed to unregister %s cache\n", cd->name);
}
EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);

/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	    current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > seconds_since_boot())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = seconds_since_boot()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       current_detail->hash_table[current_index] == NULL)
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or move to the next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch, **cp;
		struct cache_detail *d;

		write_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		cp = &current_detail->hash_table[current_index];
		for (ch = *cp; ch; cp = &ch->next, ch = *cp) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (!cache_is_expired(current_detail, ch))
				continue;

			*cp = ch->next;
			ch->next = NULL;
			current_detail->entries--;
			rv = 1;
			break;
		}

		write_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch) {
			set_bit(CACHE_CLEANED, &ch->flags);
			cache_fresh_unlocked(ch, d);
			cache_put(ch, d);
		}
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay = 5;
	if (cache_clean() == -1)
		delay = round_jiffies_relative(30*HZ);

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		schedule_delayed_work(&cache_cleaner, delay);
}


/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned.
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);

void cache_purge(struct cache_detail *detail)
{
	detail->flush_time = LONG_MAX;
	detail->nextcheck = seconds_since_boot();
	cache_flush();
	detail->flush_time = 1;
}
EXPORT_SYMBOL_GPL(cache_purge);


/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'.
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available.
 */
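
/*
 * Hedged sketch of such a deferred form (the real thing is
 * svc_defer()/svc_revisit() in svc_xprt.c; the 'demo' names here are
 * hypothetical):
 *
 *	struct demo_dreq {
 *		struct cache_deferred_req handle;
 *		... saved request state ...
 *	};
 *
 *	static void demo_revisit(struct cache_deferred_req *dreq, int too_many)
 *	{
 *		struct demo_dreq *dr =
 *			container_of(dreq, struct demo_dreq, handle);
 *		... if too_many, drop the request, else requeue it ...
 *	}
 *
 *	static struct cache_deferred_req *demo_defer(struct cache_req *req)
 *	{
 *		struct demo_dreq *dr = kmalloc(sizeof(*dr), GFP_KERNEL);
 *
 *		if (!dr)
 *			return NULL;
 *		dr->handle.owner = ... whoever owns req, for cache_clean_deferred() ...;
 *		dr->handle.revisit = demo_revisit;
 *		... save request state ...
 *		return &dr->handle;
 *	}
 */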

#define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define	DFR_MAX	300	/* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
	hlist_del_init(&dreq->hash);
	if (!list_empty(&dreq->recent)) {
		list_del_init(&dreq->recent);
		cache_defer_cnt--;
	}
}

static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&dreq->recent);
	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}

static void setup_deferral(struct cache_deferred_req *dreq,
			   struct cache_head *item,
			   int count_me)
{

	dreq->item = item;

	spin_lock(&cache_defer_lock);

	__hash_deferred_req(dreq, item);

	if (count_me) {
		cache_defer_cnt++;
		list_add(&dreq->recent, &cache_defer_list);
	}

	spin_unlock(&cache_defer_lock);

}

struct thread_deferred_req {
	struct cache_deferred_req handle;
	struct completion completion;
};

static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
	struct thread_deferred_req *dr =
		container_of(dreq, struct thread_deferred_req, handle);
	complete(&dr->completion);
}

static void cache_wait_req(struct cache_req *req, struct cache_head *item)
{
	struct thread_deferred_req sleeper;
	struct cache_deferred_req *dreq = &sleeper.handle;

	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
	dreq->revisit = cache_restart_thread;

	setup_deferral(dreq, item, 0);

	if (!test_bit(CACHE_PENDING, &item->flags) ||
	    wait_for_completion_interruptible_timeout(
		    &sleeper.completion, req->thread_wait) <= 0) {
		/* The completion wasn't completed, so we need
		 * to clean up
		 */
		spin_lock(&cache_defer_lock);
		if (!hlist_unhashed(&sleeper.handle.hash)) {
			__unhash_deferred_req(&sleeper.handle);
			spin_unlock(&cache_defer_lock);
		} else {
			/* cache_revisit_request already removed
			 * this from the hash table, but hasn't
			 * called ->revisit yet.  It will very soon
			 * and we need to wait for it.
			 */
			spin_unlock(&cache_defer_lock);
			wait_for_completion(&sleeper.completion);
		}
	}
}

static void cache_limit_defers(void)
{
	/* Make sure we haven't exceeded the limit of allowed deferred
	 * requests.
	 */
	struct cache_deferred_req *discard = NULL;

	if (cache_defer_cnt <= DFR_MAX)
		return;

	spin_lock(&cache_defer_lock);

	/* Consider removing either the first or the last */
	if (cache_defer_cnt > DFR_MAX) {
		if (prandom_u32() & 1)
			discard = list_entry(cache_defer_list.next,
					     struct cache_deferred_req, recent);
		else
			discard = list_entry(cache_defer_list.prev,
					     struct cache_deferred_req, recent);
		__unhash_deferred_req(discard);
	}
	spin_unlock(&cache_defer_lock);
	if (discard)
		discard->revisit(discard, 1);
}

/* Return true if and only if a deferred request is queued. */
static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;

	if (req->thread_wait) {
		cache_wait_req(req, item);
		if (!test_bit(CACHE_PENDING, &item->flags))
			return false;
	}
	dreq = req->defer(req);
	if (dreq == NULL)
		return false;
	setup_deferral(dreq, item, 1);
	if (!test_bit(CACHE_PENDING, &item->flags))
		/* The bit could have been cleared before we managed to
		 * set up the deferral, so we need to revisit just in case
		 */
		cache_revisit_request(item);

	cache_limit_defers();
	return true;
}

static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;
	struct hlist_node *tmp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
		if (dreq->item == item) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}

	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}

void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;


	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/sunrpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if there is anything to read, and always allows write.
 *
 * Implemented by a linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 *
 */
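
/*
 * Sketch of one round trip on the channel file, for a hypothetical
 * cache whose request is a single key field (the exact fields are
 * defined by each cache's ->cache_request and ->cache_parse methods;
 * see e.g. svcauth_unix.c):
 *
 *	// the daemon blocks in read() until the kernel queues an upcall
 *	read(fd, buf, sizeof(buf))   ->   "demo\040key\n"
 *	// the daemon resolves the key and writes the answer back as one
 *	// line of space-separated fields, typically including an expiry
 *	// time in seconds
 *	write(fd, "demo\040key 2147483647 demo-value\n", ...)
 */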

static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};

static int cache_request(struct cache_detail *detail,
			       struct cache_request *crq)
{
	char *bp = crq->buf;
	int len = PAGE_SIZE;

	detail->cache_request(detail, crq->item, &bp, &len);
	if (len < 0)
		return -EAGAIN;
	return PAGE_SIZE - len;
}

static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos, struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct inode *inode = file_inode(filp);
	int err;

	if (count == 0)
		return 0;

	mutex_lock(&inode->i_mutex); /* protect against multiple concurrent
			      * readers on this file */
 again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		mutex_unlock(&inode->i_mutex);
		WARN_ON_ONCE(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	WARN_ON_ONCE(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rq->len == 0) {
		err = cache_request(cd, rq);
		if (err < 0)
			goto out;
		rq->len = err;
	}

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	mutex_unlock(&inode->i_mutex);
	return err ? err : count;
}

static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (count == 0)
		return -EINVAL;
	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}

static ssize_t cache_slow_downcall(const char __user *buf,
				   size_t count, struct cache_detail *cd)
{
	static char write_buf[8192]; /* protected by queue_io_mutex */
	ssize_t ret = -EINVAL;

	if (count >= sizeof(write_buf))
		goto out;
	mutex_lock(&queue_io_mutex);
	ret = cache_do_downcall(write_buf, buf, count, cd);
	mutex_unlock(&queue_io_mutex);
out:
	return ret;
}

static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	struct page *page;
	char *kaddr;
	ssize_t ret = -ENOMEM;

	if (count >= PAGE_CACHE_SIZE)
		goto out_slow;

	page = find_or_create_page(mapping, 0, GFP_KERNEL);
	if (!page)
		goto out_slow;

	kaddr = kmap(page);
	ret = cache_do_downcall(kaddr, buf, count, cd);
	kunmap(page);
	unlock_page(page);
	page_cache_release(page);
	return ret;
out_slow:
	return cache_slow_downcall(buf, count, cd);
}

static ssize_t cache_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = file_inode(filp);
	ssize_t ret = -EINVAL;

	if (!cd->cache_parse)
		goto out;

	mutex_lock(&inode->i_mutex);
	ret = cache_downcall(mapping, buf, count, cd);
	mutex_unlock(&inode->i_mutex);
out:
	return ret;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static unsigned int cache_poll(struct file *filp, poll_table *wait,
			       struct cache_detail *cd)
{
	unsigned int mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = POLLOUT | POLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= POLLIN | POLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}

static int cache_ioctl(struct inode *ino, struct file *filp,
		       unsigned int cmd, unsigned long arg,
		       struct cache_detail *cd)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}

static int cache_open(struct inode *inode, struct file *filp,
		      struct cache_detail *cd)
{
	struct cache_reader *rp = NULL;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp) {
			module_put(cd->owner);
			return -ENOMEM;
		}
		rp->offset = 0;
		rp->q.reader = 1;
		atomic_inc(&cd->readers);
		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	filp->private_data = rp;
	return 0;
}

static int cache_release(struct inode *inode, struct file *filp,
			 struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;
			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);

		cd->last_close = seconds_since_boot();
		atomic_dec(&cd->readers);
	}
	module_put(cd->owner);
	return 0;
}



static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq, *tmp;
	struct cache_request *cr;
	struct list_head dequeued;

	INIT_LIST_HEAD(&dequeued);
	spin_lock(&queue_lock);
	list_for_each_entry_safe(cq, tmp, &detail->queue, list)
		if (!cq->reader) {
			cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (test_bit(CACHE_PENDING, &ch->flags))
				/* Lost a race and it is pending again */
				break;
			if (cr->readers != 0)
				continue;
			list_move(&cr->q.list, &dequeued);
		}
	spin_unlock(&queue_lock);
	while (!list_empty(&dequeued)) {
		cr = list_entry(dequeued.next, struct cache_request, q.list);
		list_del(&cr->q.list);
		cache_put(cr->item, detail);
		kfree(cr->buf);
		kfree(cr);
	}
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space, tab, newline and backslash
 * with backslash-octal escapes, or hexified with a leading \x.
 * A record is terminated by a newline.
 *
 */
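
/*
 * For example (a sketch; see qword_add() and qword_addhex() below):
 * qword_add() emits the string "foo bar" as "foo\040bar " - space, tab,
 * newline and backslash are replaced by \ooo octal escapes and the
 * field is terminated with a single space.  qword_addhex() emits the
 * two bytes {0x01, 0x02} as "\x0102 ".
 */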

void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	int ret;

	if (len < 0)
		return;

	ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
	if (ret >= len) {
		bp += len;
		len = -1;
	} else {
		bp += ret;
		len -= ret;
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0)
		return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			bp = hex_byte_pack(bp, *buf++);
			len -= 2;
			blen--;
		}
	}
	if (blen || len < 1)
		len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);

static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail, detail->last_close != 0);
	}
}

static bool cache_listeners_exist(struct cache_detail *detail)
{
	if (atomic_read(&detail->readers))
		return true;
	if (detail->last_close == 0)
		/* This cache was never opened */
		return false;
	if (detail->last_close < seconds_since_boot() - 30)
		/*
		 * We allow for the possibility that someone might
		 * restart a userspace daemon without restarting the
		 * server; but after 30 seconds, we give up.
		 */
		return false;
	return true;
}

/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
	char *buf;
	struct cache_request *crq;
	int ret = 0;

	if (!detail->cache_request)
		return -EINVAL;

	if (!cache_listeners_exist(detail)) {
		warn_no_listener(detail);
		return -EINVAL;
	}
	if (test_bit(CACHE_CLEANED, &h->flags))
		/* Too late to make an upcall */
		return -EAGAIN;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof(*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	crq->q.reader = 0;
	crq->item = cache_get(h);
	crq->buf = buf;
	crq->len = 0;
	crq->readers = 0;
	spin_lock(&queue_lock);
	if (test_bit(CACHE_PENDING, &h->flags))
		list_add_tail(&crq->q.list, &detail->queue);
	else
		/* Lost a race, no longer PENDING, so don't enqueue */
		ret = -EAGAIN;
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	if (ret == -EAGAIN) {
		kfree(buf);
		kfree(crq);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);

/*
 * parse a message from user-space and pass it
 * to an appropriate cache.
 * Messages are, like requests, separated into fields by
 * spaces, with \xHEXSTRING and embedded \nnn octal quoting undone.
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by the cache
 */
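
/*
 * A sketch of such a message for a hypothetical single-key cache (the
 * field order and meaning after the key are up to each cache's
 * ->cache_parse method):
 *
 *	demo\040key 2147483647 demo-value\n
 *
 * qword_get() below undoes the quoting one field at a time.
 */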

int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ')
		bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (len < bufsize - 1) {
			int h, l;

			h = hex_to_bin(bp[0]);
			if (h < 0)
				break;

			l = hex_to_bin(bp[1]);
			if (l < 0)
				break;

			*dest++ = (h << 4) | l;
			bp += 2;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp - '0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ')
		bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
EXPORT_SYMBOL_GPL(qword_get);

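/*
 * Hedged sketch of a ->cache_parse implementation built on qword_get()
 * (compare ip_map_parse() in svcauth_unix.c; the 'demo' names are
 * hypothetical):
 *
 *	static int demo_parse(struct cache_detail *cd, char *mesg, int mlen)
 *	{
 *		char key[64], val[64];
 *		time_t expiry;
 *
 *		if (qword_get(&mesg, key, sizeof(key)) <= 0)
 *			return -EINVAL;
 *		expiry = get_expiry(&mesg);
 *		if (expiry == 0)
 *			return -EINVAL;
 *		if (qword_get(&mesg, val, sizeof(val)) < 0)
 *			return -EINVAL;
 *		... look the key up, fill in an update and call
 *		    sunrpc_cache_update() with the parsed expiry ...
 *		return 0;
 *	}
 */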

/*
 * support /proc/sunrpc/cache/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */

struct handle {
	struct cache_detail *cd;
};

static void *c_start(struct seq_file *m, loff_t *pos)
	__acquires(cd->hash_lock)
{
	loff_t n = *pos;
	unsigned int hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = ((struct handle *)m->private)->cd;


	read_lock(&cd->hash_lock);
	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	for (ch = cd->hash_table[hash]; ch; ch = ch->next)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while (hash < cd->hash_size &&
		 cd->hash_table[hash] == NULL);
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return cd->hash_table[hash];
}

static void *c_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return ch->next;
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       cd->hash_table[hash] == NULL) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return cd->hash_table[hash];
}

static void c_stop(struct seq_file *m, void *p)
	__releases(cd->hash_lock)
{
	struct cache_detail *cd = ((struct handle *)m->private)->cd;
	read_unlock(&cd->hash_lock);
}

static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
			   convert_to_wallclock(cp->expiry_time),
			   atomic_read(&cp->ref.refcount), cp->flags);
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_printf(m, "# ");
	else {
		if (cache_is_expired(cd, cp))
			seq_printf(m, "# ");
		cache_put(cp, cd);
	}

	return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show,
};

static int content_open(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	struct handle *han;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	han = __seq_open_private(file, &cache_content_op, sizeof(*han));
	if (han == NULL) {
		module_put(cd->owner);
		return -ENOMEM;
	}

	han->cd = cd;
	return 0;
}

static int content_release(struct inode *inode, struct file *file,
		struct cache_detail *cd)
{
	int ret = seq_release_private(inode, file);
	module_put(cd->owner);
	return ret;
}

static int open_flush(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	return nonseekable_open(inode, file);
}

static int release_flush(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	module_put(cd->owner);
	return 0;
}

static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos,
			  struct cache_detail *cd)
{
	char tbuf[22];
	unsigned long p = *ppos;
	size_t len;

	snprintf(tbuf, sizeof(tbuf), "%lu\n", convert_to_wallclock(cd->flush_time));
	len = strlen(tbuf);
	if (p >= len)
		return 0;
	len -= p;
	if (len > count)
		len = count;
	if (copy_to_user(buf, (void *)(tbuf+p), len))
		return -EFAULT;
	*ppos += len;
	return len;
}

static ssize_t write_flush(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	char tbuf[20];
	char *bp, *ep;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;

	bp = tbuf;
	cd->flush_time = get_expiry(&bp);
	cd->nextcheck = seconds_since_boot();
	cache_flush();

	*ppos += count;
	return count;
}
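
/*
 * Usage note (a sketch): userspace flushes a cache by writing a time,
 * in seconds since the epoch, to the per-cache "flush" file; entries
 * last refreshed before that time are discarded by the next cleaning
 * pass.  For example, for the auth.unix.ip cache:
 *
 *	echo `date +%s` > /proc/net/rpc/auth.unix.ip/flush
 */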

static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_procfs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_open(inode, filp, cd);
}

static int cache_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_release(inode, filp, cd);
}

static const struct file_operations cache_file_operations_procfs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_procfs,
	.write		= cache_write_procfs,
	.poll		= cache_poll_procfs,
	.unlocked_ioctl	= cache_ioctl_procfs, /* for FIONREAD */
	.open		= cache_open_procfs,
	.release	= cache_release_procfs,
};

static int content_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return content_open(inode, filp, cd);
}

static int content_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return content_release(inode, filp, cd);
}

static const struct file_operations content_file_operations_procfs = {
	.open		= content_open_procfs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_procfs,
};

static int open_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return open_flush(inode, filp, cd);
}

static int release_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_procfs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return write_flush(filp, buf, count, ppos, cd);
}

static const struct file_operations cache_flush_operations_procfs = {
	.open		= open_flush_procfs,
	.read		= read_flush_procfs,
	.write		= write_flush_procfs,
	.release	= release_flush_procfs,
	.llseek		= no_llseek,
};

static void remove_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	struct sunrpc_net *sn;

	if (cd->u.procfs.proc_ent == NULL)
		return;
	if (cd->u.procfs.flush_ent)
		remove_proc_entry("flush", cd->u.procfs.proc_ent);
	if (cd->u.procfs.channel_ent)
		remove_proc_entry("channel", cd->u.procfs.proc_ent);
	if (cd->u.procfs.content_ent)
		remove_proc_entry("content", cd->u.procfs.proc_ent);
	cd->u.procfs.proc_ent = NULL;
	sn = net_generic(net, sunrpc_net_id);
	remove_proc_entry(cd->name, sn->proc_net_rpc);
}

#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	struct proc_dir_entry *p;
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	cd->u.procfs.proc_ent = proc_mkdir(cd->name, sn->proc_net_rpc);
	if (cd->u.procfs.proc_ent == NULL)
		goto out_nomem;
	cd->u.procfs.channel_ent = NULL;
	cd->u.procfs.content_ent = NULL;

	p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
			     cd->u.procfs.proc_ent,
			     &cache_flush_operations_procfs, cd);
	cd->u.procfs.flush_ent = p;
	if (p == NULL)
		goto out_nomem;

	if (cd->cache_request || cd->cache_parse) {
		p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
				     cd->u.procfs.proc_ent,
				     &cache_file_operations_procfs, cd);
		cd->u.procfs.channel_ent = p;
		if (p == NULL)
			goto out_nomem;
	}
	if (cd->cache_show) {
		p = proc_create_data("content", S_IFREG|S_IRUSR,
				cd->u.procfs.proc_ent,
				&content_file_operations_procfs, cd);
		cd->u.procfs.content_ent = p;
		if (p == NULL)
			goto out_nomem;
	}
	return 0;
out_nomem:
	remove_cache_proc_entries(cd, net);
	return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	return 0;
}
#endif

void __init cache_initialize(void)
{
	INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
}

int cache_register_net(struct cache_detail *cd, struct net *net)
{
	int ret;

	sunrpc_init_cache_detail(cd);
	ret = create_cache_proc_entries(cd, net);
	if (ret)
		sunrpc_destroy_cache_detail(cd);
	return ret;
}
EXPORT_SYMBOL_GPL(cache_register_net);

void cache_unregister_net(struct cache_detail *cd, struct net *net)
{
	remove_cache_proc_entries(cd, net);
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister_net);

struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net)
{
	struct cache_detail *cd;

	cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	cd->hash_table = kzalloc(cd->hash_size * sizeof(struct cache_head *),
				 GFP_KERNEL);
	if (cd->hash_table == NULL) {
		kfree(cd);
		return ERR_PTR(-ENOMEM);
	}
	cd->net = net;
	return cd;
}
EXPORT_SYMBOL_GPL(cache_create_net);

void cache_destroy_net(struct cache_detail *cd, struct net *net)
{
	kfree(cd->hash_table);
	kfree(cd);
}
EXPORT_SYMBOL_GPL(cache_destroy_net);
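
/*
 * Hedged sketch of per-net cache setup and teardown using the helpers
 * above (compare ip_map_cache_create() in svcauth_unix.c;
 * 'demo_cache_template' is a hypothetical statically initialised
 * cache_detail):
 *
 *	struct cache_detail *cd = cache_create_net(&demo_cache_template, net);
 *	int err;
 *
 *	if (IS_ERR(cd))
 *		return PTR_ERR(cd);
 *	err = cache_register_net(cd, net);
 *	if (err) {
 *		cache_destroy_net(cd, net);
 *		return err;
 *	}
 *	...
 *	cache_unregister_net(cd, net);
 *	cache_destroy_net(cd, net);
 */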

static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_pipefs(struct file *filp,
			      unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_open(inode, filp, cd);
}

static int cache_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_release(inode, filp, cd);
}

const struct file_operations cache_file_operations_pipefs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_pipefs,
	.write		= cache_write_pipefs,
	.poll		= cache_poll_pipefs,
	.unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */
	.open		= cache_open_pipefs,
	.release	= cache_release_pipefs,
};

static int content_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_open(inode, filp, cd);
}

static int content_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_release(inode, filp, cd);
}

const struct file_operations content_file_operations_pipefs = {
	.open		= content_open_pipefs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_pipefs,
};

static int open_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return open_flush(inode, filp, cd);
}

static int release_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_pipefs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return write_flush(filp, buf, count, ppos, cd);
}

const struct file_operations cache_flush_operations_pipefs = {
	.open		= open_flush_pipefs,
	.read		= read_flush_pipefs,
	.write		= write_flush_pipefs,
	.release	= release_flush_pipefs,
	.llseek		= no_llseek,
};

int sunrpc_cache_register_pipefs(struct dentry *parent,
				 const char *name, umode_t umode,
				 struct cache_detail *cd)
{
	struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
	if (IS_ERR(dir))
		return PTR_ERR(dir);
	cd->u.pipefs.dir = dir;
	return 0;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);

void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
{
	rpc_remove_cache_dir(cd->u.pipefs.dir);
	cd->u.pipefs.dir = NULL;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
