Lines matching refs: mru (cross-reference listing; the functions shown are from fs/xfs/xfs_mru_cache.c in the Linux kernel)

127 	struct xfs_mru_cache	*mru,  in _xfs_mru_cache_migrate()  argument
135 if (!mru->time_zero) in _xfs_mru_cache_migrate()
139 while (mru->time_zero <= now - mru->grp_count * mru->grp_time) { in _xfs_mru_cache_migrate()
145 lru_list = mru->lists + mru->lru_grp; in _xfs_mru_cache_migrate()
147 list_splice_init(lru_list, mru->reap_list.prev); in _xfs_mru_cache_migrate()
153 mru->lru_grp = (mru->lru_grp + 1) % mru->grp_count; in _xfs_mru_cache_migrate()
154 mru->time_zero += mru->grp_time; in _xfs_mru_cache_migrate()
160 if (++migrated == mru->grp_count) { in _xfs_mru_cache_migrate()
161 mru->lru_grp = 0; in _xfs_mru_cache_migrate()
162 mru->time_zero = 0; in _xfs_mru_cache_migrate()
168 for (grp = 0; grp < mru->grp_count; grp++) { in _xfs_mru_cache_migrate()
171 lru_list = mru->lists + ((mru->lru_grp + grp) % mru->grp_count); in _xfs_mru_cache_migrate()
173 return mru->time_zero + in _xfs_mru_cache_migrate()
174 (mru->grp_count + grp) * mru->grp_time; in _xfs_mru_cache_migrate()
178 mru->lru_grp = 0; in _xfs_mru_cache_migrate()
179 mru->time_zero = 0; in _xfs_mru_cache_migrate()
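
The migrate pass above treats the per-group lists as a ring of time buckets: time_zero marks the start of the oldest (LRU) bucket's window, and each loop iteration retires that bucket and slides the window forward by grp_time. A minimal user-space sketch of just that rotation, with illustrative names and values (bucket_clock is not from the kernel source; the kernel's early exit after grp_count migrations at 160-162 and the empty-list scan at 168-174 are omitted):

#include <stdio.h>

struct bucket_clock {
        unsigned long   time_zero;      /* start of the LRU bucket's window */
        unsigned int    lru_grp;        /* index of the LRU bucket */
        unsigned int    grp_count;      /* number of buckets in the ring */
        unsigned long   grp_time;       /* ticks covered by one bucket */
};

/* Slide the window forward until "now" falls back inside it, as the
 * while loop at 139 does; the kernel splices each expired bucket onto
 * the reap list (145-147), which this model only prints. */
static void migrate(struct bucket_clock *c, unsigned long now)
{
        while (c->time_zero <= now - c->grp_count * c->grp_time) {
                printf("expire bucket %u (window began at %lu)\n",
                       c->lru_grp, c->time_zero);
                c->lru_grp = (c->lru_grp + 1) % c->grp_count;
                c->time_zero += c->grp_time;
        }
}

int main(void)
{
        struct bucket_clock c = { .time_zero = 0, .lru_grp = 0,
                                  .grp_count = 5, .grp_time = 100 };

        migrate(&c, 750);       /* expires buckets 0, 1 and 2 */
        printf("lru_grp=%u time_zero=%lu\n", c.lru_grp, c.time_zero);
        return 0;
}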
191 struct xfs_mru_cache *mru, in _xfs_mru_cache_list_insert() argument
202 if (!_xfs_mru_cache_migrate(mru, now)) { in _xfs_mru_cache_list_insert()
203 mru->time_zero = now; in _xfs_mru_cache_list_insert()
204 if (!mru->queued) { in _xfs_mru_cache_list_insert()
205 mru->queued = 1; in _xfs_mru_cache_list_insert()
206 queue_delayed_work(xfs_mru_reap_wq, &mru->work, in _xfs_mru_cache_list_insert()
207 mru->grp_count * mru->grp_time); in _xfs_mru_cache_list_insert()
210 grp = (now - mru->time_zero) / mru->grp_time; in _xfs_mru_cache_list_insert()
211 grp = (mru->lru_grp + grp) % mru->grp_count; in _xfs_mru_cache_list_insert()
215 list_add_tail(&elem->list_node, mru->lists + grp); in _xfs_mru_cache_list_insert()
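
The two lines at 210-211 convert a timestamp into a physical list slot in two steps: the element's age group relative to time_zero, then a rotation by lru_grp modulo grp_count. A standalone check of that arithmetic (target_grp is an illustrative name, not from the kernel):

#include <assert.h>

static unsigned int target_grp(unsigned long now, unsigned long time_zero,
                               unsigned long grp_time, unsigned int lru_grp,
                               unsigned int grp_count)
{
        unsigned int grp = (now - time_zero) / grp_time;        /* age group */

        return (lru_grp + grp) % grp_count;                     /* ring slot */
}

int main(void)
{
        /* 5 groups x 100 ticks, window starting at 300, LRU slot 3:
         * now=750 lands in age group 4 (the MRU group), physical slot 2. */
        assert(target_grp(750, 300, 100, 3, 5) == 2);
        return 0;
}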
229 struct xfs_mru_cache *mru) in _xfs_mru_cache_clear_reap_list() argument
230 __releases(mru->lock) __acquires(mru->lock) in _xfs_mru_cache_clear_reap_list()
236 list_for_each_entry_safe(elem, next, &mru->reap_list, list_node) { in _xfs_mru_cache_clear_reap_list()
239 radix_tree_delete(&mru->store, elem->key); in _xfs_mru_cache_clear_reap_list()
247 spin_unlock(&mru->lock); in _xfs_mru_cache_clear_reap_list()
251 mru->free_func(mru->data, elem); in _xfs_mru_cache_clear_reap_list()
254 spin_lock(&mru->lock); in _xfs_mru_cache_clear_reap_list()
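
The __releases/__acquires annotations at 230 flag the unlock-around-callback pattern: entries are detached while mru->lock is held (236-239), the lock is dropped (247), the free callback runs unlocked (251), and the lock is retaken before returning (254). A user-space sketch of the same pattern, assuming pthreads; the types and names are illustrative:

#include <pthread.h>
#include <stdlib.h>

struct node {
        struct node     *next;
        void            (*free_cb)(struct node *);
};

struct reaper {
        pthread_mutex_t lock;
        struct node     *reap_list;
};

/* Called with r->lock held; drops it around the callbacks and retakes
 * it before returning, mirroring the annotations at 230. */
static void clear_reap_list(struct reaper *r)
{
        struct node *tmp = r->reap_list;
        struct node *n;

        r->reap_list = NULL;            /* detach under the lock */

        pthread_mutex_unlock(&r->lock);
        while ((n = tmp) != NULL) {     /* callbacks run unlocked */
                tmp = n->next;
                n->free_cb(n);
        }
        pthread_mutex_lock(&r->lock);
}

static void free_cb(struct node *n) { free(n); }

int main(void)
{
        struct reaper r = { PTHREAD_MUTEX_INITIALIZER, NULL };
        struct node *n = malloc(sizeof(*n));

        if (!n)
                return 1;
        n->next = NULL;
        n->free_cb = free_cb;
        r.reap_list = n;

        pthread_mutex_lock(&r.lock);
        clear_reap_list(&r);
        pthread_mutex_unlock(&r.lock);
        return 0;
}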
268 struct xfs_mru_cache *mru = in _xfs_mru_cache_reap() local
272 ASSERT(mru && mru->lists); in _xfs_mru_cache_reap()
273 if (!mru || !mru->lists) in _xfs_mru_cache_reap()
276 spin_lock(&mru->lock); in _xfs_mru_cache_reap()
277 next = _xfs_mru_cache_migrate(mru, jiffies); in _xfs_mru_cache_reap()
278 _xfs_mru_cache_clear_reap_list(mru); in _xfs_mru_cache_reap()
280 mru->queued = next; in _xfs_mru_cache_reap()
281 if (mru->queued > 0) { in _xfs_mru_cache_reap()
287 queue_delayed_work(xfs_mru_reap_wq, &mru->work, next); in _xfs_mru_cache_reap()
290 spin_unlock(&mru->lock); in _xfs_mru_cache_reap()
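
queue_delayed_work() takes a delay relative to now, while _xfs_mru_cache_migrate() returns an absolute next-reap time, so the lines elided between 281 and 287 presumably clamp the deadline and convert it to a delay before requeuing. A sketch of that conversion (delay_from_deadline is my naming, not the kernel's):

#include <assert.h>

static unsigned long delay_from_deadline(unsigned long next, unsigned long now)
{
        return next <= now ? 0 : next - now;    /* overdue work runs at once */
}

int main(void)
{
        assert(delay_from_deadline(1000, 400) == 600);
        assert(delay_from_deadline(400, 1000) == 0);
        return 0;
}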
323 struct xfs_mru_cache *mru = NULL; in xfs_mru_cache_create() local
336 if (!(mru = kmem_zalloc(sizeof(*mru), 0))) in xfs_mru_cache_create()
340 mru->grp_count = grp_count + 1; in xfs_mru_cache_create()
341 mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), 0); in xfs_mru_cache_create()
343 if (!mru->lists) { in xfs_mru_cache_create()
348 for (grp = 0; grp < mru->grp_count; grp++) in xfs_mru_cache_create()
349 INIT_LIST_HEAD(mru->lists + grp); in xfs_mru_cache_create()
355 INIT_RADIX_TREE(&mru->store, GFP_ATOMIC); in xfs_mru_cache_create()
356 INIT_LIST_HEAD(&mru->reap_list); in xfs_mru_cache_create()
357 spin_lock_init(&mru->lock); in xfs_mru_cache_create()
358 INIT_DELAYED_WORK(&mru->work, _xfs_mru_cache_reap); in xfs_mru_cache_create()
360 mru->grp_time = grp_time; in xfs_mru_cache_create()
361 mru->free_func = free_func; in xfs_mru_cache_create()
362 mru->data = data; in xfs_mru_cache_create()
363 *mrup = mru; in xfs_mru_cache_create()
366 if (err && mru && mru->lists) in xfs_mru_cache_create()
367 kmem_free(mru->lists); in xfs_mru_cache_create()
368 if (err && mru) in xfs_mru_cache_create()
369 kmem_free(mru); in xfs_mru_cache_create()
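
For context, a hedged sketch of creating and tearing down one of these caches. This assumes kernel context and a create prototype matching the fields initialized at 360-362 (a lifetime split into groups, a data cookie, a free callback); the exact signature varies across kernel versions, and my_free_func/example_setup are illustrative:

/* Receives the data cookie stored at 362 plus the expired element;
 * kfree(elem) is only valid if the elem heads its allocation. */
static void my_free_func(void *data, struct xfs_mru_cache_elem *elem)
{
        kfree(elem);
}

static int example_setup(void)
{
        struct xfs_mru_cache    *cache;
        int                     error;

        /* ~10s element lifetime tracked at ~1s granularity (10 groups). */
        error = xfs_mru_cache_create(&cache, NULL, 10000, 10, my_free_func);
        if (error)
                return error;

        /* ... xfs_mru_cache_insert()/lookup()/remove() against "cache" ... */

        xfs_mru_cache_destroy(cache);   /* flushes, then frees lists and mru */
        return 0;
}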
382 struct xfs_mru_cache *mru) in xfs_mru_cache_flush() argument
384 if (!mru || !mru->lists) in xfs_mru_cache_flush()
387 spin_lock(&mru->lock); in xfs_mru_cache_flush()
388 if (mru->queued) { in xfs_mru_cache_flush()
389 spin_unlock(&mru->lock); in xfs_mru_cache_flush()
390 cancel_delayed_work_sync(&mru->work); in xfs_mru_cache_flush()
391 spin_lock(&mru->lock); in xfs_mru_cache_flush()
394 _xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time); in xfs_mru_cache_flush()
395 _xfs_mru_cache_clear_reap_list(mru); in xfs_mru_cache_flush()
397 spin_unlock(&mru->lock); in xfs_mru_cache_flush()
402 struct xfs_mru_cache *mru) in xfs_mru_cache_destroy() argument
404 if (!mru || !mru->lists) in xfs_mru_cache_destroy()
407 xfs_mru_cache_flush(mru); in xfs_mru_cache_destroy()
409 kmem_free(mru->lists); in xfs_mru_cache_destroy()
410 kmem_free(mru); in xfs_mru_cache_destroy()
420 struct xfs_mru_cache *mru, in xfs_mru_cache_insert() argument
426 ASSERT(mru && mru->lists); in xfs_mru_cache_insert()
427 if (!mru || !mru->lists) in xfs_mru_cache_insert()
436 spin_lock(&mru->lock); in xfs_mru_cache_insert()
437 error = radix_tree_insert(&mru->store, key, elem); in xfs_mru_cache_insert()
440 _xfs_mru_cache_list_insert(mru, elem); in xfs_mru_cache_insert()
441 spin_unlock(&mru->lock); in xfs_mru_cache_insert()
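
The radix-tree insert at 437 stores the caller-supplied element under its key, so the expected pattern is to embed struct xfs_mru_cache_elem in a containing object and recover it later via container_of(). A hedged kernel-context sketch (struct my_item and add_item are illustrative):

struct my_item {
        struct xfs_mru_cache_elem       mru_elem;
        int                             payload;
};

static int add_item(struct xfs_mru_cache *cache, unsigned long key)
{
        struct my_item  *item;
        int             error;

        item = kzalloc(sizeof(*item), GFP_KERNEL);
        if (!item)
                return -ENOMEM;

        /* files the element in the current MRU group and the radix tree */
        error = xfs_mru_cache_insert(cache, key, &item->mru_elem);
        if (error)
                kfree(item);
        return error;
}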
454 struct xfs_mru_cache *mru, in xfs_mru_cache_remove() argument
459 ASSERT(mru && mru->lists); in xfs_mru_cache_remove()
460 if (!mru || !mru->lists) in xfs_mru_cache_remove()
463 spin_lock(&mru->lock); in xfs_mru_cache_remove()
464 elem = radix_tree_delete(&mru->store, key); in xfs_mru_cache_remove()
467 spin_unlock(&mru->lock); in xfs_mru_cache_remove()
478 struct xfs_mru_cache *mru, in xfs_mru_cache_delete() argument
483 elem = xfs_mru_cache_remove(mru, key); in xfs_mru_cache_delete()
485 mru->free_func(mru->data, elem); in xfs_mru_cache_delete()
510 struct xfs_mru_cache *mru, in xfs_mru_cache_lookup() argument
515 ASSERT(mru && mru->lists); in xfs_mru_cache_lookup()
516 if (!mru || !mru->lists) in xfs_mru_cache_lookup()
519 spin_lock(&mru->lock); in xfs_mru_cache_lookup()
520 elem = radix_tree_lookup(&mru->store, key); in xfs_mru_cache_lookup()
523 _xfs_mru_cache_list_insert(mru, elem); in xfs_mru_cache_lookup()
526 spin_unlock(&mru->lock); in xfs_mru_cache_lookup()
538 struct xfs_mru_cache *mru) in xfs_mru_cache_done() argument
539 __releases(mru->lock) in xfs_mru_cache_done()
541 spin_unlock(&mru->lock); in xfs_mru_cache_done()
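
The __releases(mru->lock) annotation at 539 implies the lookup protocol: a successful xfs_mru_cache_lookup() returns with mru->lock still held (the unlock shown at 526 appears to cover the miss path), and the caller must release it with xfs_mru_cache_done(). A hedged kernel-context sketch of a caller (peek_item is illustrative):

static bool peek_item(struct xfs_mru_cache *cache, unsigned long key)
{
        struct xfs_mru_cache_elem *elem;

        elem = xfs_mru_cache_lookup(cache, key);
        if (!elem)
                return false;           /* miss: lock already dropped */

        /* ... inspect the element; holding mru->lock keeps the reaper out ... */

        xfs_mru_cache_done(cache);      /* drops the lock taken on the hit path */
        return true;
}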