Lines matching refs: ht (lib/rhashtable.c)

35 static u32 head_hashfn(struct rhashtable *ht,  in head_hashfn()  argument
39 return rht_head_hashfn(ht, tbl, he, ht->p); in head_hashfn()
45 int lockdep_rht_mutex_is_held(struct rhashtable *ht) in lockdep_rht_mutex_is_held() argument
47 return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1; in lockdep_rht_mutex_is_held()
63 static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl, in alloc_bucket_locks() argument
74 size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul); in alloc_bucket_locks()
111 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, in bucket_table_alloc() argument
130 if (alloc_bucket_locks(ht, tbl, gfp) < 0) { in bucket_table_alloc()
140 INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i); in bucket_table_alloc()
145 static struct bucket_table *rhashtable_last_table(struct rhashtable *ht, in rhashtable_last_table() argument
152 tbl = rht_dereference_rcu(tbl->future_tbl, ht); in rhashtable_last_table()
158 static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash) in rhashtable_rehash_one() argument
160 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); in rhashtable_rehash_one()
161 struct bucket_table *new_tbl = rhashtable_last_table(ht, in rhashtable_rehash_one()
162 rht_dereference_rcu(old_tbl->future_tbl, ht)); in rhashtable_rehash_one()
182 new_hash = head_hashfn(ht, new_tbl, entry); in rhashtable_rehash_one()
191 INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash); in rhashtable_rehash_one()
204 static void rhashtable_rehash_chain(struct rhashtable *ht, in rhashtable_rehash_chain() argument
207 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); in rhashtable_rehash_chain()
213 while (!rhashtable_rehash_one(ht, old_hash)) in rhashtable_rehash_chain()
219 static int rhashtable_rehash_attach(struct rhashtable *ht, in rhashtable_rehash_attach() argument
245 static int rhashtable_rehash_table(struct rhashtable *ht) in rhashtable_rehash_table() argument
247 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); in rhashtable_rehash_table()
252 new_tbl = rht_dereference(old_tbl->future_tbl, ht); in rhashtable_rehash_table()
257 rhashtable_rehash_chain(ht, old_hash); in rhashtable_rehash_table()
260 rcu_assign_pointer(ht->tbl, new_tbl); in rhashtable_rehash_table()
262 spin_lock(&ht->lock); in rhashtable_rehash_table()
265 spin_unlock(&ht->lock); in rhashtable_rehash_table()
273 return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0; in rhashtable_rehash_table()
291 static int rhashtable_expand(struct rhashtable *ht) in rhashtable_expand() argument
293 struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); in rhashtable_expand()
296 ASSERT_RHT_MUTEX(ht); in rhashtable_expand()
298 old_tbl = rhashtable_last_table(ht, old_tbl); in rhashtable_expand()
300 new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL); in rhashtable_expand()
304 err = rhashtable_rehash_attach(ht, old_tbl, new_tbl); in rhashtable_expand()
327 static int rhashtable_shrink(struct rhashtable *ht) in rhashtable_shrink() argument
329 struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); in rhashtable_shrink()
333 ASSERT_RHT_MUTEX(ht); in rhashtable_shrink()
335 size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2); in rhashtable_shrink()
336 if (size < ht->p.min_size) in rhashtable_shrink()
337 size = ht->p.min_size; in rhashtable_shrink()
342 if (rht_dereference(old_tbl->future_tbl, ht)) in rhashtable_shrink()
345 new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL); in rhashtable_shrink()
349 err = rhashtable_rehash_attach(ht, old_tbl, new_tbl); in rhashtable_shrink()
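
The shrink path above sizes the replacement table at three halves of the current element count, rounded up to a power of two and clamped to p.min_size (100 elements, for instance, give roundup_pow_of_two(150) = 256 buckets), and it bails out if another resize is already queued on future_tbl. An illustrative paraphrase of that sizing, not the kernel code itself (shrink_target_size() is a made-up name):

#include <linux/log2.h>
#include <linux/kernel.h>

static unsigned int shrink_target_size(unsigned int nelems,
                                       unsigned int min_size)
{
        /* Smallest power of two keeping the table at roughly 2/3 load... */
        unsigned int size = roundup_pow_of_two(nelems * 3 / 2);

        /* ...but never below the configured minimum table size. */
        return max(size, min_size);
}
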
358 struct rhashtable *ht; in rht_deferred_worker() local
362 ht = container_of(work, struct rhashtable, run_work); in rht_deferred_worker()
363 mutex_lock(&ht->mutex); in rht_deferred_worker()
365 tbl = rht_dereference(ht->tbl, ht); in rht_deferred_worker()
366 tbl = rhashtable_last_table(ht, tbl); in rht_deferred_worker()
368 if (rht_grow_above_75(ht, tbl)) in rht_deferred_worker()
369 rhashtable_expand(ht); in rht_deferred_worker()
370 else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl)) in rht_deferred_worker()
371 rhashtable_shrink(ht); in rht_deferred_worker()
373 err = rhashtable_rehash_table(ht); in rht_deferred_worker()
375 mutex_unlock(&ht->mutex); in rht_deferred_worker()
378 schedule_work(&ht->run_work); in rht_deferred_worker()
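
rht_deferred_worker() runs with ht->mutex held: it expands the newest table once occupancy passes 75%, shrinks it when automatic_shrinking is set and occupancy drops below 30%, and reschedules itself while rhashtable_rehash_table() still reports a pending future table (-EAGAIN). A hedged paraphrase of the growth threshold (the real helper is rht_grow_above_75() in <linux/rhashtable.h>; grow_above_75() below is only an illustrative stand-in):

static bool grow_above_75(unsigned int nelems, unsigned int size,
                          unsigned int max_size)
{
        /*
         * Expand once more than 3/4 of a bucket's worth of entries per
         * bucket are present, but never beyond a configured max_size.
         */
        return nelems > size / 4 * 3 &&
               (!max_size || size < max_size);
}
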
381 static bool rhashtable_check_elasticity(struct rhashtable *ht, in rhashtable_check_elasticity() argument
385 unsigned int elasticity = ht->elasticity; in rhashtable_check_elasticity()
395 int rhashtable_insert_rehash(struct rhashtable *ht) in rhashtable_insert_rehash() argument
403 old_tbl = rht_dereference_rcu(ht->tbl, ht); in rhashtable_insert_rehash()
404 tbl = rhashtable_last_table(ht, old_tbl); in rhashtable_insert_rehash()
408 if (rht_grow_above_75(ht, tbl)) in rhashtable_insert_rehash()
414 new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC); in rhashtable_insert_rehash()
419 schedule_work(&ht->run_work); in rhashtable_insert_rehash()
423 err = rhashtable_rehash_attach(ht, tbl, new_tbl); in rhashtable_insert_rehash()
429 schedule_work(&ht->run_work); in rhashtable_insert_rehash()
435 int rhashtable_insert_slow(struct rhashtable *ht, const void *key, in rhashtable_insert_slow() argument
443 tbl = rhashtable_last_table(ht, tbl); in rhashtable_insert_slow()
444 hash = head_hashfn(ht, tbl, obj); in rhashtable_insert_slow()
448 if (key && rhashtable_lookup_fast(ht, key, ht->p)) in rhashtable_insert_slow()
452 if (unlikely(rht_grow_above_max(ht, tbl))) in rhashtable_insert_slow()
456 if (rhashtable_check_elasticity(ht, tbl, hash) || in rhashtable_insert_slow()
457 rht_grow_above_100(ht, tbl)) in rhashtable_insert_slow()
468 atomic_inc(&ht->nelems); in rhashtable_insert_slow()
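
rhashtable_insert_slow() above is the out-of-line path taken when an insert races with a pending resize: it re-checks for a duplicate key, the maximum entry limit, chain elasticity and the 100% load threshold before linking the object and bumping nelems. Normal callers go through the inline rhashtable_insert_fast()/rhashtable_lookup_fast() wrappers instead. A minimal usage sketch, assuming an object that embeds a struct rhash_head (struct test_obj, test_params, test_insert() and test_lookup() are made-up names, not kernel code):

#include <linux/rhashtable.h>

struct test_obj {
        u32 key;
        struct rhash_head node;         /* links the object into the table */
};

static const struct rhashtable_params test_params = {
        .key_len             = sizeof(u32),
        .key_offset          = offsetof(struct test_obj, key),
        .head_offset         = offsetof(struct test_obj, node),
        .automatic_shrinking = true,
};

static int test_insert(struct rhashtable *ht, struct test_obj *obj)
{
        /* May fail with -ENOMEM or -EBUSY if a needed rehash cannot start. */
        return rhashtable_insert_fast(ht, &obj->node, test_params);
}

static struct test_obj *test_lookup(struct rhashtable *ht, u32 key)
{
        /* The returned object stays valid only as long as RCU protects it. */
        return rhashtable_lookup_fast(ht, &key, test_params);
}
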
498 int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter) in rhashtable_walk_init() argument
500 iter->ht = ht; in rhashtable_walk_init()
509 spin_lock(&ht->lock); in rhashtable_walk_init()
511 rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock)); in rhashtable_walk_init()
513 spin_unlock(&ht->lock); in rhashtable_walk_init()
527 spin_lock(&iter->ht->lock); in rhashtable_walk_exit()
530 spin_unlock(&iter->ht->lock); in rhashtable_walk_exit()
552 struct rhashtable *ht = iter->ht; in rhashtable_walk_start() local
556 spin_lock(&ht->lock); in rhashtable_walk_start()
559 spin_unlock(&ht->lock); in rhashtable_walk_start()
562 iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht); in rhashtable_walk_start()
585 struct rhashtable *ht = iter->ht; in rhashtable_walk_next() local
607 obj = rht_obj(ht, p); in rhashtable_walk_next()
619 iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht); in rhashtable_walk_next()
641 struct rhashtable *ht; in rhashtable_walk_stop() local
647 ht = iter->ht; in rhashtable_walk_stop()
649 spin_lock(&ht->lock); in rhashtable_walk_stop()
654 spin_unlock(&ht->lock); in rhashtable_walk_stop()
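
The walker functions above iterate a live table: rhashtable_walk_init() registers the iterator on the table's walker list under ht->lock, rhashtable_walk_start() and rhashtable_walk_stop() bracket an RCU read-side section, and rhashtable_walk_next() hands back ERR_PTR(-EAGAIN) when a concurrent resize forces the walk to continue on the future table. A minimal iteration sketch (test_walk() is a made-up name; struct test_obj is the illustrative type from the insert sketch above):

static void test_walk(struct rhashtable *ht)
{
        struct rhashtable_iter iter;
        struct test_obj *obj;

        if (rhashtable_walk_init(ht, &iter))    /* allocates the walker */
                return;

        rhashtable_walk_start(&iter);   /* enters the RCU read side; a
                                         * possible -EAGAIN is ignored here */

        while ((obj = rhashtable_walk_next(&iter)) != NULL) {
                if (IS_ERR(obj)) {
                        if (PTR_ERR(obj) == -EAGAIN)
                                continue;       /* table resized, keep going */
                        break;
                }
                pr_info("walked key %u\n", obj->key);
        }

        rhashtable_walk_stop(&iter);    /* leaves the RCU read side */
        rhashtable_walk_exit(&iter);    /* unregisters the walker */
}
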
717 int rhashtable_init(struct rhashtable *ht, in rhashtable_init() argument
732 memset(ht, 0, sizeof(*ht)); in rhashtable_init()
733 mutex_init(&ht->mutex); in rhashtable_init()
734 spin_lock_init(&ht->lock); in rhashtable_init()
735 memcpy(&ht->p, params, sizeof(*params)); in rhashtable_init()
738 ht->p.min_size = roundup_pow_of_two(params->min_size); in rhashtable_init()
741 ht->p.max_size = rounddown_pow_of_two(params->max_size); in rhashtable_init()
744 ht->p.insecure_max_entries = in rhashtable_init()
747 ht->p.insecure_max_entries = ht->p.max_size * 2; in rhashtable_init()
749 ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE); in rhashtable_init()
752 size = rounded_hashtable_size(&ht->p); in rhashtable_init()
767 ht->elasticity = 16; in rhashtable_init()
770 ht->p.locks_mul = roundup_pow_of_two(params->locks_mul); in rhashtable_init()
772 ht->p.locks_mul = BUCKET_LOCKS_PER_CPU; in rhashtable_init()
774 ht->key_len = ht->p.key_len; in rhashtable_init()
776 ht->p.hashfn = jhash; in rhashtable_init()
778 if (!(ht->key_len & (sizeof(u32) - 1))) { in rhashtable_init()
779 ht->key_len /= sizeof(u32); in rhashtable_init()
780 ht->p.hashfn = rhashtable_jhash2; in rhashtable_init()
784 tbl = bucket_table_alloc(ht, size, GFP_KERNEL); in rhashtable_init()
788 atomic_set(&ht->nelems, 0); in rhashtable_init()
790 RCU_INIT_POINTER(ht->tbl, tbl); in rhashtable_init()
792 INIT_WORK(&ht->run_work, rht_deferred_worker); in rhashtable_init()
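
rhashtable_init() above copies the caller's parameters, rounds min_size up and max_size down to powers of two, derives insecure_max_entries as twice max_size when it is not supplied, sets a default chain elasticity of 16, and picks jhash (or jhash2 when key_len is a multiple of sizeof(u32)) if no hashfn is given, before allocating the first table and arming the deferred worker. A minimal init sketch, reusing the made-up test_params from the insert sketch above (test_ht and test_ht_init() are also illustrative names):

static struct rhashtable test_ht;

static int __init test_ht_init(void)
{
        /*
         * Returns -EINVAL for inconsistent parameters (e.g. neither key_len
         * nor obj_hashfn given) or -ENOMEM if the first table allocation
         * fails.
         */
        return rhashtable_init(&test_ht, &test_params);
}
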
813 void rhashtable_free_and_destroy(struct rhashtable *ht, in rhashtable_free_and_destroy() argument
820 cancel_work_sync(&ht->run_work); in rhashtable_free_and_destroy()
822 mutex_lock(&ht->mutex); in rhashtable_free_and_destroy()
823 tbl = rht_dereference(ht->tbl, ht); in rhashtable_free_and_destroy()
828 for (pos = rht_dereference(tbl->buckets[i], ht), in rhashtable_free_and_destroy()
830 rht_dereference(pos->next, ht) : NULL; in rhashtable_free_and_destroy()
834 rht_dereference(pos->next, ht) : NULL) in rhashtable_free_and_destroy()
835 free_fn(rht_obj(ht, pos), arg); in rhashtable_free_and_destroy()
840 mutex_unlock(&ht->mutex); in rhashtable_free_and_destroy()
844 void rhashtable_destroy(struct rhashtable *ht) in rhashtable_destroy() argument
846 return rhashtable_free_and_destroy(ht, NULL, NULL); in rhashtable_destroy()
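
rhashtable_free_and_destroy() cancels the deferred worker, then walks every bucket under ht->mutex and passes each object that is still linked to free_fn before releasing the tables; rhashtable_destroy() is the same call with no callback. A teardown sketch to pair with the init sketch above (test_obj_free() and test_ht_exit() are made-up names):

#include <linux/slab.h>

static void test_obj_free(void *ptr, void *arg)
{
        kfree(ptr);     /* ptr is the embedding object, not the rhash_head */
}

static void __exit test_ht_exit(void)
{
        rhashtable_free_and_destroy(&test_ht, test_obj_free, NULL);
}
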