/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	128UL

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#ifdef CONFIG_PROVE_LOCKING
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	/* spinlock_t can be zero-sized on UP builds without spinlock
	 * debugging, in which case no lock array is needed at all.
	 */
	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
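
/* A worked example of the lock sizing above (illustrative only, not part
 * of the algorithm): on a machine with 8 possible CPUs and the default
 * locks_mul of BUCKET_LOCKS_PER_CPU (128),
 *
 *	size = roundup_pow_of_two(8 * 128) = 1024
 *
 * but for a table of 64 buckets the 0.5-locks-per-bucket cap reduces it:
 *
 *	size = min(1024, 64 >> 1) = 32
 *
 * so 32 spinlocks are allocated, locks_mask is 31, and bucket hash h is
 * served by lock h & 31 via rht_bucket_lock().
 */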

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}
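
/* Illustrative allocation math for bucket_table_alloc (not part of the
 * algorithm): with 8-byte bucket pointers, a 1024-bucket table needs
 * roughly 8KB plus the header, which is below the
 * PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER threshold (32KB with 4KB pages
 * and the default costly order of 3), so kzalloc() is attempted first.
 * A 65536-bucket table needs ~512KB; with GFP_KERNEL that skips the
 * kmalloc attempt entirely and goes straight to vzalloc(), while atomic
 * callers must make do with the (__GFP_NOWARN | __GFP_NORETRY) kzalloc
 * attempt because vmalloc may sleep.
 */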

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

/* Relocate the last entry in the old bucket chain to its bucket in the
 * new table.  Returns -ENOENT once the old chain is empty.
 */
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
	else
		RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht,
				    unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	/* Ensure the new table is visible to readers. */
	smp_wmb();

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}
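
/* A sketch of the full rehash sequence implemented above (summary only):
 *
 *	rhashtable_rehash_attach()	link new_tbl as old_tbl->future_tbl;
 *					insertions now go to new_tbl
 *	rhashtable_rehash_chain()	per old bucket, under the old and
 *					new bucket locks, move entries one
 *					at a time
 *	rcu_assign_pointer(ht->tbl)	publish new_tbl to readers
 *	call_rcu()			free old_tbl once readers are done
 *
 * Lookups that race with a rehash traverse the old table first and fall
 * through to future_tbl, so an entry is always found in one of the two.
 */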

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within an rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	int err;

	ASSERT_RHT_MUTEX(ht);

	old_tbl = rhashtable_last_table(ht, old_tbl);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int size;
	int err;

	ASSERT_RHT_MUTEX(ht);

	size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}
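
/* Illustrative sizing for rhashtable_shrink (not part of the algorithm):
 * with nelems = 100, roundup_pow_of_two(100 * 3 / 2) = roundup(150) = 256.
 * At 256 buckets the 75% growth watermark sits at 192 entries, so the
 * shrunken table will not want to expand again straight away.
 */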

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

	err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static bool rhashtable_check_elasticity(struct rhashtable *ht,
					struct bucket_table *tbl,
					unsigned int hash)
{
	unsigned int elasticity = ht->elasticity;
	struct rhash_head *head;

	rht_for_each(head, tbl, hash)
		if (!--elasticity)
			return true;

	return false;
}

int rhashtable_insert_rehash(struct rhashtable *ht)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, old_tbl);

	size = tbl->size;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		return -EBUSY;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL) {
		/* Schedule async resize/rehash to try allocation
		 * in a non-atomic context.
		 */
		schedule_work(&ht->run_work);
		return -ENOMEM;
	}

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);
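
/* A simplified sketch of the retry protocol around the helpers above, as
 * followed by the insert fast path in linux/rhashtable.h (not verbatim;
 * see the header for the real control flow):
 *
 *	do {
 *		err = __rhashtable_insert_fast(ht, key, obj, params);
 *	} while (err == -EAGAIN && !(err = rhashtable_insert_rehash(ht)));
 *
 * i.e. an insert that trips the elasticity or 100% utilization checks
 * fails with -EAGAIN, the caller kicks off a rehash with
 * rhashtable_insert_rehash(), and on success retries the insert.
 */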

int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			   struct rhash_head *obj,
			   struct bucket_table *tbl)
{
	struct rhash_head *head;
	unsigned int hash;
	int err;

	tbl = rhashtable_last_table(ht, tbl);
	hash = head_hashfn(ht, tbl, obj);
	spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

	err = -EEXIST;
	if (key && rhashtable_lookup_fast(ht, key, ht->p))
		goto exit;

	err = -E2BIG;
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto exit;

	err = -EAGAIN;
	if (rhashtable_check_elasticity(ht, tbl, hash) ||
	    rht_grow_above_100(ht, tbl))
		goto exit;

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

exit:
	spin_unlock(rht_bucket_lock(tbl, hash));

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	spin_lock(&ht->lock);
	iter->walker->tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
	spin_unlock(&ht->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	spin_unlock(&iter->ht->lock);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	spin_unlock(&ht->lock);

	if (!iter->walker->tbl) {
		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker->tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker->tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

out:

	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker->tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker->list, &tbl->walkers);
	else
		iter->walker->tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
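
/* A minimal sketch of the walker protocol above (illustrative only;
 * "struct test_obj" is a made-up element type):
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(ht, &iter);
 *	if (err)
 *		return err;
 *
 *	rhashtable_walk_start(&iter);	   (-EAGAIN here only means the
 *					    walk was rewound; proceed)
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;  (resize: iterator was rewound)
 *			break;
 *		}
 *		... use obj ...
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */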

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}
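
/* For illustration: a nelem_hint of 600 gives 600 * 4 / 3 = 800, which
 * rounds up to 1024 buckets.  Sizing at 4/3 of the hint keeps the
 * initial load factor at or below 75% for the hinted element count, so
 * the table does not want to grow immediately.
 */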

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	if (params->insecure_max_entries)
		ht->p.insecure_max_entries =
			rounddown_pow_of_two(params->insecure_max_entries);
	else
		ht->p.insecure_max_entries = ht->p.max_size * 2;

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	if (params->nelem_hint)
		size = rounded_hashtable_size(&ht->p);

	/* The maximum (not average) chain length grows with the
	 * size of the hash table, at a rate of (log N)/(log log N).
	 * The value of 16 is selected so that even if the hash
	 * table grew to 2^32 you would not expect the maximum
	 * chain length to exceed it unless we are under attack
	 * (or extremely unlucky).
	 *
	 * As this limit is only to detect attacks, we don't need
	 * to set it to a lower value as you'd need the chain
	 * length to vastly exceed 16 to have any real effect
	 * on the system.
	 */
	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
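
/* A minimal usage sketch tying rhashtable_init to the fast-path helpers
 * declared in linux/rhashtable.h (illustrative; "struct test_obj" and
 * the error handling are simplified):
 *
 *	static const struct rhashtable_params test_params = {
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset = offsetof(struct test_obj, key),
 *		.key_len = sizeof(int),
 *	};
 *
 *	err = rhashtable_init(&ht, &test_params);
 *	if (err)
 *		return err;
 *
 *	err = rhashtable_insert_fast(&ht, &obj->node, test_params);
 *	found = rhashtable_lookup_fast(&ht, &key, test_params);
 *	err = rhashtable_remove_fast(&ht, &obj->node, test_params);
 *
 *	rhashtable_destroy(&ht);
 */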

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops any pending async resize. If defined, invokes free_fn for each
 * element to release resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	const struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(tbl->buckets[i], ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				free_fn(rht_obj(ht, pos), arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);