Lines Matching refs:cache

8  * cache.c
15 * This file implements a generic cache implementation used for both caches,
16 * plus functions layered on top of the generic cache implementation to
19 * To avoid out of memory and fragmentation issues with vmalloc the cache
22 * It should be noted that the cache is not used for file datablocks, these
23 * are decompressed and cached in the page-cache in the normal way. The
24 * cache is only used to temporarily cache fragment and metadata blocks
49 * Look-up block in cache, and increment usage count. If not in cache, read
53 struct squashfs_cache *cache, u64 block, int length)
58 spin_lock(&cache->lock);
61 for (i = cache->curr_blk, n = 0; n < cache->entries; n++) {
62 if (cache->entry[i].block == block) {
63 cache->curr_blk = i;
66 i = (i + 1) % cache->entries;
69 if (n == cache->entries) {
71 * Block not in cache, if all cache entries are used
74 if (cache->unused == 0) {
75 cache->num_waiters++;
76 spin_unlock(&cache->lock);
77 wait_event(cache->wait_queue, cache->unused);
78 spin_lock(&cache->lock);
79 cache->num_waiters--;
84 * At least one unused cache entry. A simple
86 * be evicted from the cache.
88 i = cache->next_blk;
89 for (n = 0; n < cache->entries; n++) {
90 if (cache->entry[i].refcount == 0)
92 i = (i + 1) % cache->entries;
95 cache->next_blk = (i + 1) % cache->entries;
96 entry = &cache->entry[i];
99 * Initialise chosen cache entry, and fill it in from
102 cache->unused--;
108 spin_unlock(&cache->lock);
113 spin_lock(&cache->lock);
122 * have looked it up in the cache, and have slept
126 spin_unlock(&cache->lock);
129 spin_unlock(&cache->lock);
135 * Block already in cache. Increment refcount so it doesn't
137 * previously unused there's one less cache entry available
140 entry = &cache->entry[i];
142 cache->unused--;
151 spin_unlock(&cache->lock);
154 spin_unlock(&cache->lock);
161 cache->name, i, entry->block, entry->refcount, entry->error);
164 ERROR("Unable to read %s cache entry [%llx]\n", cache->name,
171 * Release cache entry, once usage count is zero it can be reused.
175 struct squashfs_cache *cache = entry->cache;
177 spin_lock(&cache->lock);
180 cache->unused++;
185 if (cache->num_waiters) {
186 spin_unlock(&cache->lock);
187 wake_up(&cache->wait_queue);
191 spin_unlock(&cache->lock);
195 * Delete cache reclaiming all kmalloced buffers.
197 void squashfs_cache_delete(struct squashfs_cache *cache)
201 if (cache == NULL)
204 for (i = 0; i < cache->entries; i++) {
205 if (cache->entry[i].data) {
206 for (j = 0; j < cache->pages; j++)
207 kfree(cache->entry[i].data[j]);
208 kfree(cache->entry[i].data);
210 kfree(cache->entry[i].actor);
213 kfree(cache->entry);
214 kfree(cache);
219 * Initialise cache allocating the specified number of entries, each of
227 struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);
229 if (cache == NULL) {
230 ERROR("Failed to allocate %s cache\n", name);
234 cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL);
235 if (cache->entry == NULL) {
236 ERROR("Failed to allocate %s cache\n", name);
240 cache->curr_blk = 0;
241 cache->next_blk = 0;
242 cache->unused = entries;
243 cache->entries = entries;
244 cache->block_size = block_size;
245 cache->pages = block_size >> PAGE_SHIFT;
246 cache->pages = cache->pages ? cache->pages : 1;
247 cache->name = name;
248 cache->num_waiters = 0;
249 spin_lock_init(&cache->lock);
250 init_waitqueue_head(&cache->wait_queue);
253 struct squashfs_cache_entry *entry = &cache->entry[i];
255 init_waitqueue_head(&cache->entry[i].wait_queue);
256 entry->cache = cache;
258 entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL);
260 ERROR("Failed to allocate %s cache entry\n", name);
264 for (j = 0; j < cache->pages; j++) {
273 cache->pages, 0);
275 ERROR("Failed to allocate %s cache entry\n", name);
280 return cache;
283 squashfs_cache_delete(cache);
289 * Copy up to length bytes from cache entry to buffer starting at offset bytes
290 * into the cache entry. If there's not length bytes then copy the number of
376 * Look-up in the fragment cache the fragment located at <start_block> in the
391 * filesystem. The cache is used here to avoid duplicating locking and