Lines Matching refs:mru (fs/xfs/xfs_mru_cache.c)

138 	struct xfs_mru_cache	*mru,  in _xfs_mru_cache_migrate()  argument
146 if (!mru->time_zero) in _xfs_mru_cache_migrate()
150 while (mru->time_zero <= now - mru->grp_count * mru->grp_time) { in _xfs_mru_cache_migrate()
156 lru_list = mru->lists + mru->lru_grp; in _xfs_mru_cache_migrate()
158 list_splice_init(lru_list, mru->reap_list.prev); in _xfs_mru_cache_migrate()
164 mru->lru_grp = (mru->lru_grp + 1) % mru->grp_count; in _xfs_mru_cache_migrate()
165 mru->time_zero += mru->grp_time; in _xfs_mru_cache_migrate()
171 if (++migrated == mru->grp_count) { in _xfs_mru_cache_migrate()
172 mru->lru_grp = 0; in _xfs_mru_cache_migrate()
173 mru->time_zero = 0; in _xfs_mru_cache_migrate()
179 for (grp = 0; grp < mru->grp_count; grp++) { in _xfs_mru_cache_migrate()
182 lru_list = mru->lists + ((mru->lru_grp + grp) % mru->grp_count); in _xfs_mru_cache_migrate()
184 return mru->time_zero + in _xfs_mru_cache_migrate()
185 (mru->grp_count + grp) * mru->grp_time; in _xfs_mru_cache_migrate()
189 mru->lru_grp = 0; in _xfs_mru_cache_migrate()
190 mru->time_zero = 0; in _xfs_mru_cache_migrate()
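
The migrate loop above (lines 150-165) is the heart of the structure: whenever the oldest group has gone untouched for a full lifetime, its list is spliced onto reap_list, lru_grp rotates forward, and time_zero advances by one group interval. A minimal standalone sketch of that rotation arithmetic, with variable names mirroring the struct fields and the list splicing elided:

	/* Userspace illustration of the group rotation in
	 * _xfs_mru_cache_migrate(); not kernel code. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int grp_count = 5, grp_time = 100; /* 5 groups x 100 ticks */
		unsigned int lru_grp = 0;
		unsigned long time_zero = 1000, now = 1600;

		/* Same condition as line 150: reap groups older than
		 * the full lifetime, grp_count * grp_time. */
		while (time_zero <= now - grp_count * grp_time) {
			printf("reap group %u\n", lru_grp); /* splice to reap_list */
			lru_grp = (lru_grp + 1) % grp_count; /* line 164 */
			time_zero += grp_time;               /* line 165 */
		}
		printf("lru_grp=%u time_zero=%lu\n", lru_grp, time_zero);
		return 0;
	}

With these values two groups expire, leaving lru_grp=2 and time_zero=1200.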
202 struct xfs_mru_cache *mru, in _xfs_mru_cache_list_insert() argument
213 if (!_xfs_mru_cache_migrate(mru, now)) { in _xfs_mru_cache_list_insert()
214 mru->time_zero = now; in _xfs_mru_cache_list_insert()
215 if (!mru->queued) { in _xfs_mru_cache_list_insert()
216 mru->queued = 1; in _xfs_mru_cache_list_insert()
217 queue_delayed_work(xfs_mru_reap_wq, &mru->work, in _xfs_mru_cache_list_insert()
218 mru->grp_count * mru->grp_time); in _xfs_mru_cache_list_insert()
221 grp = (now - mru->time_zero) / mru->grp_time; in _xfs_mru_cache_list_insert()
222 grp = (mru->lru_grp + grp) % mru->grp_count; in _xfs_mru_cache_list_insert()
226 list_add_tail(&elem->list_node, mru->lists + grp); in _xfs_mru_cache_list_insert()
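
_xfs_mru_cache_list_insert() first gives migration a chance to run; if nothing expired, it places the element by age. A sketch of the group-selection arithmetic at lines 221-222 (pick_group is a hypothetical helper, not part of the XFS API):

	/* An element touched at 'now' lands (now - time_zero) / grp_time
	 * groups ahead of the current LRU group. */
	unsigned int pick_group(unsigned long now, unsigned long time_zero,
				unsigned int lru_grp, unsigned int grp_count,
				unsigned int grp_time)
	{
		unsigned int grp = (now - time_zero) / grp_time;

		return (lru_grp + grp) % grp_count;
	}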
240 struct xfs_mru_cache *mru) in _xfs_mru_cache_clear_reap_list() argument
241 __releases(mru->lock) __acquires(mru->lock) in _xfs_mru_cache_clear_reap_list()
247 list_for_each_entry_safe(elem, next, &mru->reap_list, list_node) { in _xfs_mru_cache_clear_reap_list()
250 radix_tree_delete(&mru->store, elem->key); in _xfs_mru_cache_clear_reap_list()
258 spin_unlock(&mru->lock); in _xfs_mru_cache_clear_reap_list()
262 mru->free_func(elem); in _xfs_mru_cache_clear_reap_list()
265 spin_lock(&mru->lock); in _xfs_mru_cache_clear_reap_list()
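
The __releases/__acquires annotation at line 241 documents the one subtlety here: elements are unhooked from the store while locked (lines 247-250), the lock is dropped once (line 258) so free_func may sleep, and it is retaken before returning (line 265). A userspace illustration of that unlock-around-callback shape, with hypothetical types:

	#include <pthread.h>
	#include <stddef.h>

	struct node {
		struct node *next;
	};

	/* Caller holds 'lock' on entry and gets it back on exit,
	 * mirroring the annotation on line 241. */
	void clear_reap_list(pthread_mutex_t *lock, struct node **reap_list,
			     void (*free_func)(struct node *))
	{
		struct node *head = *reap_list; /* detach under the lock */

		*reap_list = NULL;
		pthread_mutex_unlock(lock);     /* free_func may sleep */
		while (head) {
			struct node *next = head->next;

			free_func(head);
			head = next;
		}
		pthread_mutex_lock(lock);       /* caller expects it held */
	}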
279 struct xfs_mru_cache *mru = in _xfs_mru_cache_reap() local
283 ASSERT(mru && mru->lists); in _xfs_mru_cache_reap()
284 if (!mru || !mru->lists) in _xfs_mru_cache_reap()
287 spin_lock(&mru->lock); in _xfs_mru_cache_reap()
288 next = _xfs_mru_cache_migrate(mru, jiffies); in _xfs_mru_cache_reap()
289 _xfs_mru_cache_clear_reap_list(mru); in _xfs_mru_cache_reap()
291 mru->queued = next; in _xfs_mru_cache_reap()
292 if (mru->queued > 0) { in _xfs_mru_cache_reap()
298 queue_delayed_work(xfs_mru_reap_wq, &mru->work, next); in _xfs_mru_cache_reap()
301 spin_unlock(&mru->lock); in _xfs_mru_cache_reap()
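
The lines between 292 and 298 do not reference mru and so are absent from this listing; they convert the absolute expiry time returned by _xfs_mru_cache_migrate() into the relative delay queue_delayed_work() expects. A hedged reconstruction of that clamping step, an assumption based on the era of the surrounding code rather than part of the listing:

	/* Assumed shape of the omitted delay computation: 'next' is an
	 * absolute jiffies value, queue_delayed_work() wants a delta. */
	unsigned long now = jiffies;

	if (next <= now)
		next = 0;       /* already due: run as soon as possible */
	else
		next -= now;    /* convert absolute to relative */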
333 struct xfs_mru_cache *mru = NULL; in xfs_mru_cache_create() local
346 if (!(mru = kmem_zalloc(sizeof(*mru), KM_SLEEP))) in xfs_mru_cache_create()
350 mru->grp_count = grp_count + 1; in xfs_mru_cache_create()
351 mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP); in xfs_mru_cache_create()
353 if (!mru->lists) { in xfs_mru_cache_create()
358 for (grp = 0; grp < mru->grp_count; grp++) in xfs_mru_cache_create()
359 INIT_LIST_HEAD(mru->lists + grp); in xfs_mru_cache_create()
365 INIT_RADIX_TREE(&mru->store, GFP_ATOMIC); in xfs_mru_cache_create()
366 INIT_LIST_HEAD(&mru->reap_list); in xfs_mru_cache_create()
367 spin_lock_init(&mru->lock); in xfs_mru_cache_create()
368 INIT_DELAYED_WORK(&mru->work, _xfs_mru_cache_reap); in xfs_mru_cache_create()
370 mru->grp_time = grp_time; in xfs_mru_cache_create()
371 mru->free_func = free_func; in xfs_mru_cache_create()
373 *mrup = mru; in xfs_mru_cache_create()
376 if (err && mru && mru->lists) in xfs_mru_cache_create()
377 kmem_free(mru->lists); in xfs_mru_cache_create()
378 if (err && mru) in xfs_mru_cache_create()
379 kmem_free(mru); in xfs_mru_cache_create()
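
Note line 350: create allocates grp_count + 1 lists, so an element inserted into the newest group survives at least the full requested lifetime before its group reaches the reap point. A usage sketch, assuming the signature from this era without the later void *data parameter; my_free_elem is a hypothetical xfs_mru_cache_free_func_t:

	struct xfs_mru_cache	*cache;
	int			error;

	/* 30s total lifetime split over 10 groups: elements are
	 * reaped with roughly 3s granularity. */
	error = xfs_mru_cache_create(&cache, 30000, 10, my_free_elem);
	if (error)
		return error;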
392 struct xfs_mru_cache *mru) in xfs_mru_cache_flush() argument
394 if (!mru || !mru->lists) in xfs_mru_cache_flush()
397 spin_lock(&mru->lock); in xfs_mru_cache_flush()
398 if (mru->queued) { in xfs_mru_cache_flush()
399 spin_unlock(&mru->lock); in xfs_mru_cache_flush()
400 cancel_delayed_work_sync(&mru->work); in xfs_mru_cache_flush()
401 spin_lock(&mru->lock); in xfs_mru_cache_flush()
404 _xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time); in xfs_mru_cache_flush()
405 _xfs_mru_cache_clear_reap_list(mru); in xfs_mru_cache_flush()
407 spin_unlock(&mru->lock); in xfs_mru_cache_flush()
412 struct xfs_mru_cache *mru) in xfs_mru_cache_destroy() argument
414 if (!mru || !mru->lists) in xfs_mru_cache_destroy()
417 xfs_mru_cache_flush(mru); in xfs_mru_cache_destroy()
419 kmem_free(mru->lists); in xfs_mru_cache_destroy()
420 kmem_free(mru); in xfs_mru_cache_destroy()
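
Teardown is a single call: destroy flushes first (line 417), and flush itself cancels any queued reap work before force-migrating every group onto the reap list (lines 398-405). Continuing the hypothetical example above:

	/* Cancels pending reap work, frees every element via
	 * free_func, then releases the lists and the cache itself. */
	xfs_mru_cache_destroy(cache);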
430 struct xfs_mru_cache *mru, in xfs_mru_cache_insert() argument
436 ASSERT(mru && mru->lists); in xfs_mru_cache_insert()
437 if (!mru || !mru->lists) in xfs_mru_cache_insert()
446 spin_lock(&mru->lock); in xfs_mru_cache_insert()
447 error = radix_tree_insert(&mru->store, key, elem); in xfs_mru_cache_insert()
450 _xfs_mru_cache_list_insert(mru, elem); in xfs_mru_cache_insert()
451 spin_unlock(&mru->lock); in xfs_mru_cache_insert()
464 struct xfs_mru_cache *mru, in xfs_mru_cache_remove() argument
469 ASSERT(mru && mru->lists); in xfs_mru_cache_remove()
470 if (!mru || !mru->lists) in xfs_mru_cache_remove()
473 spin_lock(&mru->lock); in xfs_mru_cache_remove()
474 elem = radix_tree_delete(&mru->store, key); in xfs_mru_cache_remove()
477 spin_unlock(&mru->lock); in xfs_mru_cache_remove()
488 struct xfs_mru_cache *mru, in xfs_mru_cache_delete() argument
493 elem = xfs_mru_cache_remove(mru, key); in xfs_mru_cache_delete()
495 mru->free_func(elem); in xfs_mru_cache_delete()
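
insert, remove, and delete show the intrusive-element design: the caller embeds a struct xfs_mru_cache_elem in its own object, remove (line 474) hands the element back without freeing it, and delete is remove plus free_func (lines 493-495). A caller sketch, assuming the era's element layout with an embedded list_node and key; my_item and my_free_elem are hypothetical:

	struct my_item {
		struct xfs_mru_cache_elem mru;	/* embedded element */
		int			payload;
	};

	static void my_free_elem(struct xfs_mru_cache_elem *elem)
	{
		kfree(container_of(elem, struct my_item, mru));
	}

	/* insert() records the key and links the element into the
	 * newest group (lines 447-450). */
	error = xfs_mru_cache_insert(cache, key, &item->mru);

	/* remove() returns the element un-freed; delete() frees it. */
	xfs_mru_cache_delete(cache, key);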
520 struct xfs_mru_cache *mru, in xfs_mru_cache_lookup() argument
525 ASSERT(mru && mru->lists); in xfs_mru_cache_lookup()
526 if (!mru || !mru->lists) in xfs_mru_cache_lookup()
529 spin_lock(&mru->lock); in xfs_mru_cache_lookup()
530 elem = radix_tree_lookup(&mru->store, key); in xfs_mru_cache_lookup()
533 _xfs_mru_cache_list_insert(mru, elem); in xfs_mru_cache_lookup()
536 spin_unlock(&mru->lock); in xfs_mru_cache_lookup()
548 struct xfs_mru_cache *mru) in xfs_mru_cache_done() argument
549 __releases(mru->lock) in xfs_mru_cache_done()
551 spin_unlock(&mru->lock); in xfs_mru_cache_done()
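
The __releases(mru->lock) annotation at line 549 is the contract for the whole lookup path: the unlock at line 536 sits on the miss path only, so a successful lookup (line 530) returns with mru->lock still held and the element safe from reaping. Every hit must therefore be paired with xfs_mru_cache_done(); my_item continues the hypothetical example above:

	/* A hit returns with mru->lock held; the caller must not
	 * sleep until xfs_mru_cache_done() drops it. */
	struct xfs_mru_cache_elem *elem = xfs_mru_cache_lookup(cache, key);

	if (elem) {
		struct my_item *item =
			container_of(elem, struct my_item, mru);
		/* ... use item atomically ... */
		xfs_mru_cache_done(cache);	/* releases mru->lock */
	}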