Lines Matching refs:cache (mm/swap_slots.c)

3  * Manage cache of swap slots to be used for and returned from swap.
25 * The swap slots cache is protected by a mutex instead of a spin lock, as the slot allocation path can sleep.
43 /* Serialize swap slots cache enable/disable operations */
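
For context: the swp_slots per-CPU variable dereferenced throughout this listing is a struct swap_slots_cache. A sketch of its layout, based on include/linux/swap_slots.h (field order and exact types may differ across kernel versions):

	struct swap_slots_cache {
		bool		lock_initialized;
		struct mutex	alloc_lock;	/* protects slots, nr, cur */
		swp_entry_t	*slots;		/* slots prefetched for allocation */
		int		nr;		/* unused prefetched slots remaining */
		int		cur;		/* index of next slot to hand out */
		spinlock_t	free_lock;	/* protects slots_ret, n_ret */
		swp_entry_t	*slots_ret;	/* freed slots batched for return */
		int		n_ret;		/* freed slots queued so far */
	};

Each CPU owns one cache; alloc_lock serializes the allocation side and free_lock the return side, so the two directions never contend with each other.
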
106 /* if global pool of slot caches too low, deactivate cache */
115 struct swap_slots_cache *cache;
136 cache = &per_cpu(swp_slots, cpu);
137 if (cache->slots || cache->slots_ret) {
138 /* cache already allocated */
147 if (!cache->lock_initialized) {
148 mutex_init(&cache->alloc_lock);
149 spin_lock_init(&cache->free_lock);
150 cache->lock_initialized = true;
152 cache->nr = 0;
153 cache->cur = 0;
154 cache->n_ret = 0;
157 * We use !cache->slots or !cache->slots_ret to know if it is safe to acquire
158 * the corresponding lock and use the cache; the memory barrier below ensures it.
162 cache->slots = slots;
163 cache->slots_ret = slots_ret;
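
Assembled from the fragments above (file lines 136-163), per-CPU cache allocation looks roughly like this. This is a sketch: kvcalloc() stands in for whatever allocator the given kernel version uses, and the error handling is condensed.

	static int alloc_swap_slot_cache(unsigned int cpu)
	{
		struct swap_slots_cache *cache;
		swp_entry_t *slots, *slots_ret;

		/* allocate outside the mutex: kvcalloc() may sleep and reclaim */
		slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(*slots), GFP_KERNEL);
		slots_ret = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(*slots_ret), GFP_KERNEL);
		if (!slots || !slots_ret) {
			kvfree(slots);		/* kvfree(NULL) is a no-op */
			kvfree(slots_ret);
			return -ENOMEM;
		}

		mutex_lock(&swap_slots_cache_mutex);
		cache = &per_cpu(swp_slots, cpu);
		if (cache->slots || cache->slots_ret) {
			/* cache already allocated: drop our arrays */
			mutex_unlock(&swap_slots_cache_mutex);
			kvfree(slots);
			kvfree(slots_ret);
			return 0;
		}

		if (!cache->lock_initialized) {
			mutex_init(&cache->alloc_lock);
			spin_lock_init(&cache->free_lock);
			cache->lock_initialized = true;
		}
		cache->nr = 0;
		cache->cur = 0;
		cache->n_ret = 0;
		/*
		 * Publish the arrays only after the locks and counters are
		 * set up; readers test the pointers before taking the
		 * matching lock.
		 */
		mb();
		cache->slots = slots;
		cache->slots_ret = slots_ret;
		mutex_unlock(&swap_slots_cache_mutex);
		return 0;
	}
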
171 struct swap_slots_cache *cache;
174 cache = &per_cpu(swp_slots, cpu);
175 if ((type & SLOTS_CACHE) && cache->slots) {
176 mutex_lock(&cache->alloc_lock);
177 swapcache_free_entries(cache->slots + cache->cur, cache->nr);
178 cache->cur = 0;
179 cache->nr = 0;
180 if (free_slots && cache->slots) {
181 kvfree(cache->slots);
182 cache->slots = NULL;
184 mutex_unlock(&cache->alloc_lock);
186 if ((type & SLOTS_CACHE_RET) && cache->slots_ret) {
187 spin_lock_irq(&cache->free_lock);
188 swapcache_free_entries(cache->slots_ret, cache->n_ret);
189 cache->n_ret = 0;
190 if (free_slots && cache->slots_ret) {
191 slots = cache->slots_ret;
192 cache->slots_ret = NULL;
194 spin_unlock_irq(&cache->free_lock);
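
With the lines the match list skipped restored (the closing braces and the deferred kvfree of the detached slots_ret array), the per-CPU drain reads approximately:

	static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
					  bool free_slots)
	{
		struct swap_slots_cache *cache = &per_cpu(swp_slots, cpu);
		swp_entry_t *slots = NULL;

		if ((type & SLOTS_CACHE) && cache->slots) {
			mutex_lock(&cache->alloc_lock);
			/* release the prefetched entries not yet handed out */
			swapcache_free_entries(cache->slots + cache->cur, cache->nr);
			cache->cur = 0;
			cache->nr = 0;
			if (free_slots && cache->slots) {
				kvfree(cache->slots);
				cache->slots = NULL;
			}
			mutex_unlock(&cache->alloc_lock);
		}
		if ((type & SLOTS_CACHE_RET) && cache->slots_ret) {
			spin_lock_irq(&cache->free_lock);
			/* flush entries queued for return to the global pool */
			swapcache_free_entries(cache->slots_ret, cache->n_ret);
			cache->n_ret = 0;
			if (free_slots && cache->slots_ret) {
				slots = cache->slots_ret;
				cache->slots_ret = NULL;
			}
			spin_unlock_irq(&cache->free_lock);
			/* kvfree() can sleep if the array was vmalloc'ed,
			 * so free only after dropping the irq lock */
			kvfree(slots);
		}
	}
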
206 * left over slots are in cache when we remove a swap device;
208 * 2) disabling of swap slot cache, when we run low on swap slots
223 * fill any swap slots in slots cache of such cpu.
247 "without swap slots cache.\n", __func__))
258 /* called with swap slot cache's alloc lock held */
259 static int refill_swap_slots_cache(struct swap_slots_cache *cache)
264 cache->cur = 0;
266 cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE,
267 cache->slots, 1);
269 return cache->nr;
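
The refill body is nearly complete in the matches; restoring the guards that do not mention the cache symbol (their exact form is inferred, not shown above), it plausibly reads:

	static int refill_swap_slots_cache(struct swap_slots_cache *cache)
	{
		if (!use_swap_slot_cache)
			return 0;

		cache->cur = 0;
		if (swap_slot_cache_active)
			cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE,
						   cache->slots, 1);
		return cache->nr;
	}

Returning cache->nr lets the caller distinguish a successful refill from an empty global pool.
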
274 struct swap_slots_cache *cache;
279 cache = raw_cpu_ptr(&swp_slots);
280 if (likely(use_swap_slot_cache && cache->slots_ret)) {
281 spin_lock_irq(&cache->free_lock);
282 /* Swap slots cache may be deactivated before acquiring lock */
283 if (!use_swap_slot_cache || !cache->slots_ret) {
284 spin_unlock_irq(&cache->free_lock);
287 if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
294 swapcache_free_entries(cache->slots_ret, cache->n_ret);
295 cache->n_ret = 0;
297 cache->slots_ret[cache->n_ret++] = entry;
298 spin_unlock_irq(&cache->free_lock);
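
Assembled, the free side batches freed entries per-CPU and spills a full batch to the global pool in one call; the direct_free fallback (inferred from the early-unlock fragment at file lines 283-284) is taken when the cache is absent or deactivated:

	void free_swap_slot(swp_entry_t entry)
	{
		struct swap_slots_cache *cache;

		cache = raw_cpu_ptr(&swp_slots);
		if (likely(use_swap_slot_cache && cache->slots_ret)) {
			spin_lock_irq(&cache->free_lock);
			/* the cache may have been deactivated meanwhile */
			if (!use_swap_slot_cache || !cache->slots_ret) {
				spin_unlock_irq(&cache->free_lock);
				goto direct_free;
			}
			if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
				/* batch full: return it all to the global pool */
				swapcache_free_entries(cache->slots_ret, cache->n_ret);
				cache->n_ret = 0;
			}
			cache->slots_ret[cache->n_ret++] = entry;
			spin_unlock_irq(&cache->free_lock);
		} else {
	direct_free:
			swapcache_free_entries(&entry, 1);
		}
	}
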
308 struct swap_slots_cache *cache;
322 * accesses to the per-CPU data structure are protected by the mutex cache->alloc_lock.
324 * The alloc path here does not touch cache->slots_ret
325 * so cache->free_lock is not taken.
327 cache = raw_cpu_ptr(&swp_slots);
329 if (likely(check_cache_active() && cache->slots)) {
330 mutex_lock(&cache->alloc_lock);
331 if (cache->slots) {
333 if (cache->nr) {
334 entry = cache->slots[cache->cur];
335 cache->slots[cache->cur++].val = 0;
336 cache->nr--;
337 } else if (refill_swap_slots_cache(cache)) {
341 mutex_unlock(&cache->alloc_lock);
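
Put together, the allocation fast path that closes the listing (file lines 308-341) behaves roughly as follows. This is a sketch folded into a simplified entry point: the real function's name and signature vary by kernel version (get_swap_page()/folio_alloc_swap()), and large-folio handling is omitted; the repeat label and the final fallback to the global pool are inferred from the refill return value.

	swp_entry_t get_swap_page(void)
	{
		swp_entry_t entry;
		struct swap_slots_cache *cache;

		entry.val = 0;
		cache = raw_cpu_ptr(&swp_slots);	/* preemption tolerated; see comment above */
		if (likely(check_cache_active() && cache->slots)) {
			mutex_lock(&cache->alloc_lock);
			if (cache->slots) {
	repeat:
				if (cache->nr) {
					/* pop the next prefetched slot */
					entry = cache->slots[cache->cur];
					cache->slots[cache->cur++].val = 0;
					cache->nr--;
				} else if (refill_swap_slots_cache(cache)) {
					goto repeat;	/* refill succeeded: retry */
				}
			}
			mutex_unlock(&cache->alloc_lock);
			if (entry.val)
				return entry;
		}
		/* cache inactive or exhausted: allocate from the global pool */
		get_swap_pages(1, &entry, 1);
		return entry;
	}

Note that only alloc_lock is taken here: the allocation path never touches slots_ret, which is why it can run concurrently with free_swap_slot() on the same CPU.
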