Lines Matching refs:mru

127 	struct xfs_mru_cache	*mru,
135 if (!mru->time_zero)
139 while (mru->time_zero <= now - mru->grp_count * mru->grp_time) {
145 lru_list = mru->lists + mru->lru_grp;
147 list_splice_init(lru_list, mru->reap_list.prev);
153 mru->lru_grp = (mru->lru_grp + 1) % mru->grp_count;
154 mru->time_zero += mru->grp_time;
160 if (++migrated == mru->grp_count) {
161 mru->lru_grp = 0;
162 mru->time_zero = 0;
168 for (grp = 0; grp < mru->grp_count; grp++) {
171 lru_list = mru->lists + ((mru->lru_grp + grp) % mru->grp_count);
173 return mru->time_zero +
174 (mru->grp_count + grp) * mru->grp_time;
178 mru->lru_grp = 0;
179 mru->time_zero = 0;
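
The matches above (lines 127-179) are _xfs_mru_cache_migrate(): each of the grp_count lists covers grp_time jiffies, time_zero marks the start of the interval held by the current LRU list, and any list whose interval has fallen at least grp_count * grp_time behind "now" is spliced onto the reap list. As an illustrative worked example with invented numbers: if time_zero = 1000, grp_time = 100, grp_count = 5 and the first non-empty list is found at grp = 1, the return value at lines 173-174 is 1000 + (5 + 1) * 100 = 1600, i.e. the next reap becomes due at 1600 jiffies.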
191 struct xfs_mru_cache *mru,
202 if (!_xfs_mru_cache_migrate(mru, now)) {
203 mru->time_zero = now;
204 if (!mru->queued) {
205 mru->queued = 1;
206 queue_delayed_work(xfs_mru_reap_wq, &mru->work,
207 mru->grp_count * mru->grp_time);
210 grp = (now - mru->time_zero) / mru->grp_time;
211 grp = (mru->lru_grp + grp) % mru->grp_count;
215 list_add_tail(&elem->list_node, mru->lists + grp);
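
Lines 210-215 pick the list a newly inserted element joins. A minimal user-space sketch of that arithmetic with invented values (this is not the kernel code itself, just the same two expressions evaluated standalone):

	#include <stdio.h>

	int main(void)
	{
		unsigned long	time_zero = 1000;	/* start of the LRU interval */
		unsigned long	grp_time  = 100;	/* jiffies covered by one list */
		unsigned int	grp_count = 5;		/* number of lists */
		unsigned int	lru_grp   = 2;		/* index of the LRU list */
		unsigned long	now       = 1350;	/* insertion time */
		unsigned long	grp;

		/* Whole group intervals elapsed since time zero ... */
		grp = (now - time_zero) / grp_time;	/* 3 */
		/* ... rotated by the LRU index to find the matching list. */
		grp = (lru_grp + grp) % grp_count;	/* (2 + 3) % 5 = 0 */

		printf("element inserted at %lu joins list %lu\n", now, grp);
		return 0;
	}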
224 * We get called holding the mru->lock, which we drop and then reacquire.
229 struct xfs_mru_cache *mru)
230 __releases(mru->lock) __acquires(mru->lock)
236 list_for_each_entry_safe(elem, next, &mru->reap_list, list_node) {
239 radix_tree_delete(&mru->store, elem->key);
247 spin_unlock(&mru->lock);
251 mru->free_func(mru->data, elem);
254 spin_lock(&mru->lock);
268 struct xfs_mru_cache *mru =
272 ASSERT(mru && mru->lists);
273 if (!mru || !mru->lists)
276 spin_lock(&mru->lock);
277 next = _xfs_mru_cache_migrate(mru, jiffies);
278 _xfs_mru_cache_clear_reap_list(mru);
280 mru->queued = next;
281 if ((mru->queued > 0)) {
287 queue_delayed_work(xfs_mru_reap_wq, &mru->work, next);
290 spin_unlock(&mru->lock);
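
The reaper stores whatever the migrate pass returned in mru->queued (line 280) and, if that is a future expiry, requeues itself (line 287). A hedged sketch of the rescheduling step as it might look inside _xfs_mru_cache_reap(), assuming the names visible in the listing (mru, next, xfs_mru_reap_wq); the absolute expiry has to become the relative delay that queue_delayed_work() takes:

	/*
	 * Sketch only: convert the absolute expiry "next" (in jiffies) into
	 * a relative delay before requeueing the delayed work item.
	 */
	unsigned long now = jiffies;

	if (next <= now)
		next = 0;		/* already overdue: run again at once */
	else
		next -= now;		/* delay until the next group expires */
	queue_delayed_work(xfs_mru_reap_wq, &mru->work, next);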
323 struct xfs_mru_cache *mru = NULL;
336 mru = kzalloc(sizeof(*mru), GFP_KERNEL | __GFP_NOFAIL);
337 if (!mru)
341 mru->grp_count = grp_count + 1;
342 mru->lists = kzalloc(mru->grp_count * sizeof(*mru->lists),
344 if (!mru->lists) {
349 for (grp = 0; grp < mru->grp_count; grp++)
350 INIT_LIST_HEAD(mru->lists + grp);
356 INIT_RADIX_TREE(&mru->store, GFP_ATOMIC);
357 INIT_LIST_HEAD(&mru->reap_list);
358 spin_lock_init(&mru->lock);
359 INIT_DELAYED_WORK(&mru->work, _xfs_mru_cache_reap);
361 mru->grp_time = grp_time;
362 mru->free_func = free_func;
363 mru->data = data;
364 *mrup = mru;
367 if (err && mru && mru->lists)
368 kfree(mru->lists);
369 if (err && mru)
370 kfree(mru);
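
xfs_mru_cache_create() (lines 323-370) allocates the control structure, one list more than the requested group count, an empty radix tree store and reap list, and the delayed work item that drives reaping. Assuming the prototypes in xfs_mru_cache.h for this version of the code (where create takes a private data pointer that is later handed to free_func, as lines 363 and 251 suggest), a hypothetical client might set a cache up as below; the element type, callback and timing values are invented for illustration, and xfs_mru_cache_destroy() would be the matching teardown call:

	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include "xfs_mru_cache.h"	/* prototypes assumed from this header */

	/* Hypothetical cached object embedding the MRU cache element. */
	struct demo_item {
		struct xfs_mru_cache_elem	mru;
		int				value;
	};

	/* Called with the create-time data pointer once an element expires. */
	static void
	demo_free_func(
		void				*data,
		struct xfs_mru_cache_elem	*elem)
	{
		struct demo_item		*item =
			container_of(elem, struct demo_item, mru);

		kfree(item);
	}

	static int
	demo_cache_init(
		struct xfs_mru_cache		**cachep,
		void				*priv)
	{
		/* 10000 ms total lifetime spread over 10 groups of ~1 s each. */
		return xfs_mru_cache_create(cachep, priv, 10000, 10,
				demo_free_func);
	}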
383 struct xfs_mru_cache *mru)
385 if (!mru || !mru->lists)
388 spin_lock(&mru->lock);
389 if (mru->queued) {
390 spin_unlock(&mru->lock);
391 cancel_delayed_work_sync(&mru->work);
392 spin_lock(&mru->lock);
395 _xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time);
396 _xfs_mru_cache_clear_reap_list(mru);
398 spin_unlock(&mru->lock);
403 struct xfs_mru_cache *mru)
405 if (!mru || !mru->lists)
408 xfs_mru_cache_flush(mru);
410 kfree(mru->lists);
411 kfree(mru);
421 struct xfs_mru_cache *mru,
427 ASSERT(mru && mru->lists);
428 if (!mru || !mru->lists)
437 spin_lock(&mru->lock);
438 error = radix_tree_insert(&mru->store, key, elem);
441 _xfs_mru_cache_list_insert(mru, elem);
442 spin_unlock(&mru->lock);
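
xfs_mru_cache_insert() (lines 421-442) files the element in the radix tree store under its key and, on success, hooks it onto the current MRU list. Continuing the hypothetical demo_item client sketched above (names invented, error handling minimal):

	static int
	demo_cache_add(
		struct xfs_mru_cache	*cache,
		unsigned long		key,
		int			value)
	{
		struct demo_item	*item;
		int			error;

		item = kzalloc(sizeof(*item), GFP_KERNEL);
		if (!item)
			return -ENOMEM;
		item->value = value;

		/* On success the cache owns the element until it is reaped. */
		error = xfs_mru_cache_insert(cache, key, &item->mru);
		if (error)
			kfree(item);
		return error;
	}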
455 struct xfs_mru_cache *mru,
460 ASSERT(mru && mru->lists);
461 if (!mru || !mru->lists)
464 spin_lock(&mru->lock);
465 elem = radix_tree_delete(&mru->store, key);
468 spin_unlock(&mru->lock);
479 struct xfs_mru_cache *mru,
484 elem = xfs_mru_cache_remove(mru, key);
486 mru->free_func(mru->data, elem);
511 struct xfs_mru_cache *mru,
516 ASSERT(mru && mru->lists);
517 if (!mru || !mru->lists)
520 spin_lock(&mru->lock);
521 elem = radix_tree_lookup(&mru->store, key);
524 _xfs_mru_cache_list_insert(mru, elem);
527 spin_unlock(&mru->lock);
539 struct xfs_mru_cache *mru)
540 __releases(mru->lock)
542 spin_unlock(&mru->lock);
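
xfs_mru_cache_lookup() (lines 511-527) and xfs_mru_cache_done() (lines 539-542) form the read side: a hit re-inserts the element at the MRU end and, judging by the single unlock on the miss path at line 527 and the __releases annotation at line 540, returns with mru->lock still held, so the caller must finish quickly and then call xfs_mru_cache_done(). A hedged usage sketch, again continuing the invented demo_item client:

	static int
	demo_cache_peek(
		struct xfs_mru_cache	*cache,
		unsigned long		key,
		int			*value)
	{
		struct xfs_mru_cache_elem *elem;
		struct demo_item	*item;

		elem = xfs_mru_cache_lookup(cache, key);
		if (!elem)
			return -ENOENT;

		/* mru->lock is still held here; keep this section short. */
		item = container_of(elem, struct demo_item, mru);
		*value = item->value;

		xfs_mru_cache_done(cache);	/* drops mru->lock */
		return 0;
	}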