Searched refs:cache (Results 26 - 50 of 961) sorted by relevance

/linux-master/fs/nfs/
nfs42xattr.c
6 * User extended attribute client side cache functions.
21 * a cache structure attached to NFS inodes. This structure is allocated
22 * when needed, and freed when the cache is zapped.
24 * The cache structure contains a hash table of entries, and a pointer
25 * to a special-cased entry for the listxattr cache.
28 * counting. The cache entries use a similar refcounting scheme.
30 * This makes freeing a cache, both from the shrinker and from the
31 * zap cache path, easy. It also means that, in current use cases,
40 * Two shrinkers deal with the cache entries themselves: one for
45 * The other shrinker frees the cache structure
64 struct nfs4_xattr_cache *cache; member in struct:nfs4_xattr_bucket
106 nfs4_xattr_hash_init(struct nfs4_xattr_cache *cache) argument
270 struct nfs4_xattr_cache *cache; local
290 struct nfs4_xattr_cache *cache; local
309 nfs4_xattr_set_listcache(struct nfs4_xattr_cache *cache, struct nfs4_xattr_entry *new) argument
384 nfs4_xattr_discard_cache(struct nfs4_xattr_cache *cache) argument
429 struct nfs4_xattr_cache *cache, *oldcache, *newcache; local
504 nfs4_xattr_hash_bucket(struct nfs4_xattr_cache *cache, const char *name) argument
526 nfs4_xattr_hash_add(struct nfs4_xattr_cache *cache, struct nfs4_xattr_entry *entry) argument
564 nfs4_xattr_hash_remove(struct nfs4_xattr_cache *cache, const char *name) argument
587 nfs4_xattr_hash_find(struct nfs4_xattr_cache *cache, const char *name) argument
611 struct nfs4_xattr_cache *cache; local
650 struct nfs4_xattr_cache *cache; local
691 struct nfs4_xattr_cache *cache; local
722 struct nfs4_xattr_cache *cache; local
742 struct nfs4_xattr_cache *cache; local
809 struct nfs4_xattr_cache *cache = container_of(item, local
843 struct nfs4_xattr_cache *cache; local
874 struct nfs4_xattr_cache *cache; local
965 struct nfs4_xattr_cache *cache = p; local
[all...]
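
The comment fragments in the nfs42xattr.c hit above describe the overall shape of the NFS v4.2 xattr cache: a refcounted cache structure hung off each inode, containing a hash table of refcounted entries (plus a special-cased listxattr entry), with one shrinker reaping entries and another reaping whole caches. As a rough illustration of that bucket-plus-refcount shape only (hypothetical names, no locking, not the kernel's code):

    #include <string.h>

    #define NBUCKETS 64                     /* assumed bucket count */

    struct xattr_entry {
            struct xattr_entry *next;       /* bucket chain */
            char *name;
            unsigned int refcount;          /* entry freed when this hits zero */
    };

    struct xattr_cache {
            struct xattr_entry *buckets[NBUCKETS];
            struct xattr_entry *listxattr;  /* special-cased listxattr entry */
            unsigned int refcount;          /* the cache itself is refcounted too */
    };

    static unsigned int bucket_of(const char *name)
    {
            unsigned int h = 0;

            while (*name)
                    h = h * 31 + (unsigned char)*name++;
            return h % NBUCKETS;
    }

    /* Look up an entry by name and take a reference on it for the caller. */
    static struct xattr_entry *cache_find(struct xattr_cache *cache,
                                          const char *name)
    {
            struct xattr_entry *e;

            for (e = cache->buckets[bucket_of(name)]; e; e = e->next) {
                    if (strcmp(e->name, name) == 0) {
                            e->refcount++;
                            return e;
                    }
            }
            return NULL;
    }

Because every path, including cache zapping and the shrinkers, only ever drops references, actual freeing is centralized wherever a refcount reaches zero, which is what the comments above mean by making those paths easy.
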
/linux-master/net/core/
dst_cache.c
3 * net/core/dst_cache.c - dst entry cache
47 /* the cache already holds a dst reference; it can't go away */
65 if (!dst_cache->cache)
68 return dst_cache_per_cpu_get(dst_cache, this_cpu_ptr(dst_cache->cache));
77 if (!dst_cache->cache)
80 idst = this_cpu_ptr(dst_cache->cache);
95 if (!dst_cache->cache)
98 idst = this_cpu_ptr(dst_cache->cache);
110 if (!dst_cache->cache)
113 idst = this_cpu_ptr(dst_cache->cache);
[all...]
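
The dst_cache.c fragments show the same guard repeated in every getter: bail out if the per-CPU storage was never allocated, otherwise dereference this CPU's slot. A userspace-style sketch of that per-CPU lookup (hypothetical names; the kernel uses this_cpu_ptr() on genuine percpu memory):

    struct dst_entry;                       /* opaque here */

    struct dst_cache_pcpu {
            struct dst_entry *dst;
    };

    struct dst_cache {
            struct dst_cache_pcpu *cache;   /* one slot per CPU, or NULL */
    };

    static struct dst_entry *dst_cache_get_sketch(struct dst_cache *dst_cache,
                                                  int this_cpu)
    {
            struct dst_cache_pcpu *idst;

            if (!dst_cache->cache)          /* per-CPU storage not allocated yet */
                    return NULL;

            idst = &dst_cache->cache[this_cpu];
            return idst->dst;               /* cache already holds a dst reference */
    }
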
/linux-master/fs/cachefiles/
volume.c
20 struct cachefiles_cache *cache = vcookie->cache->cache_priv; local
34 volume->cache = cache;
37 cachefiles_begin_secure(cache, &saved_cred);
48 vdentry = cachefiles_get_directory(cache, cache->store, name, &is_new);
61 inode_lock_nested(d_inode(cache->store), I_MUTEX_PARENT);
62 cachefiles_bury_object(cache, NULL, cache
[all...]
ondemand.c
11 struct cachefiles_cache *cache = object->volume->cache; local
15 XA_STATE(xas, &cache->reqs, 0);
17 xa_lock(&cache->reqs);
29 xa_unlock(&cache->reqs);
31 xa_erase(&cache->ondemand_ids, object_id);
34 cachefiles_put_unbind_pincount(cache);
42 struct cachefiles_cache *cache = object->volume->cache; local
52 cachefiles_begin_secure(cache,
82 struct cachefiles_cache *cache = object->volume->cache; local
115 cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args) argument
185 cachefiles_ondemand_restore(struct cachefiles_cache *cache, char *args) argument
211 struct cachefiles_cache *cache; local
297 cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, char __user *_buffer, size_t buflen) argument
379 struct cachefiles_cache *cache = object->volume->cache; local
[all...]
interface.c
19 * Allocate a cache object record.
47 fscache_count_object(vcookie->cache);
84 struct fscache_cache *cache; local
97 cache = object->volume->cache->cache;
101 fscache_uncount_object(cache);
108 * Adjust the size of a cache file if necessary to match the DIO size. We keep
175 * Attempt to look up the nominated node in this cache
180 struct cachefiles_cache *cache local
235 struct cachefiles_cache *cache = object->volume->cache; local
282 struct cachefiles_cache *cache = object->volume->cache; local
308 cachefiles_commit_object(struct cachefiles_object *object, struct cachefiles_cache *cache) argument
327 cachefiles_clean_up_object(struct cachefiles_object *object, struct cachefiles_cache *cache) argument
357 struct cachefiles_cache *cache = object->volume->cache; local
[all...]
/linux-master/arch/sh/mm/
Makefile
6 obj-y := alignment.o cache.o init.o consistent.o mmap.o
8 cacheops-$(CONFIG_CPU_J2) := cache-j2.o
9 cacheops-$(CONFIG_CPU_SUBTYPE_SH7619) := cache-sh2.o
10 cacheops-$(CONFIG_CPU_SH2A) := cache-sh2a.o
11 cacheops-$(CONFIG_CPU_SH3) := cache-sh3.o
12 cacheops-$(CONFIG_CPU_SH4) := cache-sh4.o flush-sh4.o
13 cacheops-$(CONFIG_SH7705_CACHE_32KB) += cache-sh7705.o
14 cacheops-$(CONFIG_CPU_SHX3) += cache-shx3.o
26 debugfs-$(CONFIG_CPU_SH4) += cache-debugfs.o
/linux-master/scripts/gdb/linux/
slab.py
46 def for_each_object(cache, addr, slab_objects):
48 if cache['flags'] & SLAB_RED_ZONE:
49 p += int(cache['red_left_pad'])
50 while p < addr + (slab_objects * cache['size']):
52 p = p + int(cache['size'])
54 def get_info_end(cache):
55 if (cache['offset'] >= cache['inuse']):
56 return cache['inuse'] + gdb.lookup_type("void").pointer().sizeof
58 return cache['inus
[all...]
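
The slab.py fragments walk the objects of a slab from GDB: start at the slab's base address, skip the left red zone when SLAB_RED_ZONE is set, then step in strides of the object size. The same arithmetic expressed in C, for illustration only (the flag value and field names are assumptions, not taken from this hit):

    #include <stdio.h>

    #define SLAB_RED_ZONE 0x00000400UL      /* assumed flag bit, for illustration */

    struct kmem_cache_view {
            unsigned long flags;
            unsigned long red_left_pad;     /* padding before each object */
            unsigned long size;             /* full per-object stride */
    };

    static void for_each_object(const struct kmem_cache_view *cache,
                                unsigned long addr, unsigned long nr_objects)
    {
            unsigned long p = addr;

            if (cache->flags & SLAB_RED_ZONE)
                    p += cache->red_left_pad;       /* skip the left red zone */

            while (p < addr + nr_objects * cache->size) {
                    printf("object at %#lx\n", p);
                    p += cache->size;               /* next object */
            }
    }
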
/linux-master/drivers/media/platform/nvidia/tegra-vde/
Makefile
2 tegra-vde-y := vde.o iommu.o dmabuf-cache.o h264.o v4l2.o
/linux-master/tools/testing/memblock/linux/
memory_hotplug.h
7 #include <linux/cache.h>
/linux-master/arch/powerpc/kernel/
secvar-ops.c
9 #include <linux/cache.h>
/linux-master/arch/hexagon/mm/
Makefile
6 obj-y := init.o uaccess.o vm_fault.o cache.o
/linux-master/arch/xtensa/include/asm/
tlb.h
14 #include <asm/cache.h>
/linux-master/fs/btrfs/tests/
btrfs-tests.c
12 #include "../free-space-cache.h"
218 struct btrfs_block_group *cache; local
220 cache = kzalloc(sizeof(*cache), GFP_KERNEL);
221 if (!cache)
223 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
225 if (!cache->free_space_ctl) {
226 kfree(cache);
230 cache
244 btrfs_free_dummy_block_group(struct btrfs_block_group *cache) argument
[all...]
/linux-master/fs/exfat/
cache.c
3 * linux/fs/fat/cache.c
7 * Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
40 struct exfat_cache *cache = (struct exfat_cache *)c; local
42 INIT_LIST_HEAD(&cache->cache_list);
68 static inline void exfat_cache_free(struct exfat_cache *cache) argument
70 WARN_ON(!list_empty(&cache->cache_list));
71 kmem_cache_free(exfat_cachep, cache);
75 struct exfat_cache *cache)
79 if (ei->cache_lru.next != &cache->cache_list)
80 list_move(&cache
74 exfat_cache_update_lru(struct inode *inode, struct exfat_cache *cache) argument
141 struct exfat_cache *cache, *tmp; local
196 struct exfat_cache *cache; local
[all...]
/linux-master/include/linux/
fscache-cache.h
2 /* General filesystem caching backing cache interface
11 * for a description of the cache backend interface declared here.
24 FSCACHE_CACHE_IS_NOT_PRESENT, /* No cache is present for this name */
25 FSCACHE_CACHE_IS_PREPARING, /* A cache is preparing to come live */
26 FSCACHE_CACHE_IS_ACTIVE, /* Attached cache is active and can be used */
27 FSCACHE_CACHE_GOT_IOERROR, /* Attached cache stopped on I/O error */
28 FSCACHE_CACHE_IS_WITHDRAWN, /* Attached cache is being withdrawn */
37 struct list_head cache_link; /* Link in cache list */
38 void *cache_priv; /* Private cache data (or NULL) */
41 atomic_t n_accesses; /* Number of in-progress accesses on the cache */
155 fscache_count_object(struct fscache_cache *cache) argument
167 fscache_uncount_object(struct fscache_cache *cache) argument
180 fscache_wait_for_objects(struct fscache_cache *cache) argument
[all...]
/linux-master/fs/btrfs/
block-group.c
10 #include "free-space-cache.h"
141 void btrfs_get_block_group(struct btrfs_block_group *cache) argument
143 refcount_inc(&cache->refs);
146 void btrfs_put_block_group(struct btrfs_block_group *cache) argument
148 if (refcount_dec_and_test(&cache->refs)) {
149 WARN_ON(cache->pinned > 0);
157 if (!(cache->flags & BTRFS_BLOCK_GROUP_METADATA) ||
158 !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info))
159 WARN_ON(cache->reserved > 0);
166 if (WARN_ON(!list_empty(&cache
184 struct btrfs_block_group *cache; local
222 struct btrfs_block_group *cache, *ret = NULL; local
274 btrfs_next_block_group( struct btrfs_block_group *cache) argument
404 btrfs_get_caching_control( struct btrfs_block_group *cache) argument
440 btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache, u64 num_bytes) argument
465 btrfs_caching_ctl_wait_done(struct btrfs_block_group *cache, struct btrfs_caching_control *caching_ctl) argument
472 btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache) argument
918 btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait) argument
1337 inc_block_group_ro(struct btrfs_block_group *cache, int force) argument
2138 exclude_super_stripes(struct btrfs_block_group *cache) argument
2195 struct btrfs_block_group *cache; local
2290 struct btrfs_block_group *cache; local
2458 struct btrfs_block_group *cache; local
2803 struct btrfs_block_group *cache; local
2895 btrfs_inc_block_group_ro(struct btrfs_block_group *cache, bool do_chunk_alloc) argument
3008 btrfs_dec_block_group_ro(struct btrfs_block_group *cache) argument
3036 update_block_group_item(struct btrfs_trans_handle *trans, struct btrfs_path *path, struct btrfs_block_group *cache) argument
3273 struct btrfs_block_group *cache, *tmp; local
3311 struct btrfs_block_group *cache; local
3477 struct btrfs_block_group *cache; local
3602 struct btrfs_block_group *cache; local
3713 btrfs_add_reserved_bytes(struct btrfs_block_group *cache, u64 ram_bytes, u64 num_bytes, int delalloc, bool force_wrong_size_class) argument
3767 btrfs_free_reserved_bytes(struct btrfs_block_group *cache, u64 num_bytes, int delalloc) argument
4459 btrfs_freeze_block_group(struct btrfs_block_group *cache) argument
[all...]
/linux-master/mm/kasan/
generic.c
204 void kasan_cache_shrink(struct kmem_cache *cache) argument
206 kasan_quarantine_remove_cache(cache);
209 void kasan_cache_shutdown(struct kmem_cache *cache) argument
211 if (!__kmem_cache_empty(cache))
212 kasan_quarantine_remove_cache(cache);
353 void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, argument
376 cache->kasan_info.alloc_meta_offset = *size;
381 cache->kasan_info.alloc_meta_offset = 0;
387 orig_alloc_meta_offset = cache->kasan_info.alloc_meta_offset;
397 if ((cache
453 kasan_get_alloc_meta(struct kmem_cache *cache, const void *object) argument
461 kasan_get_free_meta(struct kmem_cache *cache, const void *object) argument
470 kasan_init_object_meta(struct kmem_cache *cache, const void *object) argument
506 kasan_metadata_size(struct kmem_cache *cache, bool in_object) argument
527 struct kmem_cache *cache; local
554 kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags) argument
568 kasan_save_free_info(struct kmem_cache *cache, void *object) argument
[all...]
report_hw_tags.c
30 size_t kasan_get_alloc_size(void *object, struct kmem_cache *cache) argument
45 while (size < cache->object_size) {
54 return cache->object_size;
quarantine.c
134 static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache) argument
140 return ((void *)free_info) - cache->kasan_info.free_meta_offset;
143 static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache) argument
145 void *object = qlink_to_object(qlink, cache);
146 struct kasan_free_meta *free_meta = kasan_get_free_meta(cache, object);
159 if (slab_want_init_on_free(cache) &&
160 cache->kasan_info.free_meta_offset == 0)
163 ___cache_free(cache, object, _THIS_IP_);
166 static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache) argument
176 cache
185 kasan_quarantine_put(struct kmem_cache *cache, void *object) argument
290 qlist_move_cache(struct qlist_head *from, struct qlist_head *to, struct kmem_cache *cache) argument
316 struct kmem_cache *cache = arg; local
342 kasan_quarantine_remove_cache(struct kmem_cache *cache) argument
[all...]
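
The quarantine.c fragments recover an object's address from its quarantine link: the qlist_node is embedded in struct kasan_free_meta, which in turn lives free_meta_offset bytes into the object, so two subtractions get back to the object's start. A simplified sketch of that pointer arithmetic (types trimmed down and assumed, not the kernel definitions):

    #include <stddef.h>

    struct qlist_node {
            struct qlist_node *next;
    };

    struct kasan_free_meta_sketch {
            struct qlist_node quarantine_link;
    };

    struct cache_info_sketch {
            size_t free_meta_offset;        /* where free meta sits inside the object */
    };

    static void *qlink_to_object(struct qlist_node *qlink,
                                 const struct cache_info_sketch *info)
    {
            /* container_of(): from the link back to its enclosing free metadata */
            struct kasan_free_meta_sketch *free_info =
                    (struct kasan_free_meta_sketch *)((char *)qlink -
                            offsetof(struct kasan_free_meta_sketch, quarantine_link));

            /* then from the metadata back to the start of the object */
            return (char *)free_info - info->free_meta_offset;
    }
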
common.c
156 void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object) argument
158 kasan_unpoison(object, cache->object_size, false);
161 void __kasan_poison_new_object(struct kmem_cache *cache, void *object) argument
163 kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
169 * 1. A cache might have a constructor, which might save a pointer to a slab
173 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
177 static inline u8 assign_tag(struct kmem_cache *cache, argument
184 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
187 if (!cache->ctor && !(cache
198 __kasan_init_slab_obj(struct kmem_cache *cache, const void *object) argument
211 poison_slab_object(struct kmem_cache *cache, void *object, unsigned long ip, bool init) argument
245 __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip, bool init) argument
301 unpoison_slab_object(struct kmem_cache *cache, void *object, gfp_t flags, bool init) argument
315 __kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags, bool init) argument
343 poison_kmalloc_redzone(struct kmem_cache *cache, const void *object, size_t size, gfp_t flags) argument
374 __kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size, gfp_t flags) argument
[all...]
/linux-master/arch/arm/mach-imx/
resume-imx6.S
9 #include <asm/hardware/cache-l2x0.h>
/linux-master/drivers/net/wireless/silabs/wfx/
data_tx.c
44 /* TX policy cache implementation */
73 static int wfx_tx_policy_find(struct wfx_tx_policy_cache *cache, struct wfx_tx_policy *wanted) argument
77 list_for_each_entry(it, &cache->used, link)
79 return it - cache->cache;
80 list_for_each_entry(it, &cache->free, link)
82 return it - cache->cache;
86 static void wfx_tx_policy_use(struct wfx_tx_policy_cache *cache, struct wfx_tx_policy *entry) argument
89 list_move(&entry->link, &cache
92 wfx_tx_policy_release(struct wfx_tx_policy_cache *cache, struct wfx_tx_policy *entry) argument
104 struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache; local
138 struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache; local
185 struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache; local
[all...]
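
The data_tx.c fragments describe a small TX policy cache kept on two lists, used and free: wfx_tx_policy_find scans both and returns the entry's index in the backing array (it - cache->cache), while wfx_tx_policy_use moves a hit onto the used list. A much-simplified sketch of the find-by-content-and-return-index idea, with assumed sizes and fields (the lists are omitted here):

    #include <string.h>

    #define POLICY_CACHE_SIZE 8             /* assumed cache size */

    struct tx_policy {
            unsigned char rates[12];        /* whatever identifies a policy */
    };

    struct tx_policy_cache {
            struct tx_policy cache[POLICY_CACHE_SIZE];
    };

    /* Return the array index of a matching cached policy, or -1 if none. */
    static int tx_policy_find(const struct tx_policy_cache *cache,
                              const struct tx_policy *wanted)
    {
            int i;

            for (i = 0; i < POLICY_CACHE_SIZE; i++)
                    if (!memcmp(cache->cache[i].rates, wanted->rates,
                                sizeof(wanted->rates)))
                            return i;       /* same idea as "it - cache->cache" */
            return -1;
    }
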
/linux-master/fs/fat/
cache.c
3 * linux/fs/fat/cache.c
7 * Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
41 struct fat_cache *cache = (struct fat_cache *)foo; local
43 INIT_LIST_HEAD(&cache->cache_list);
67 static inline void fat_cache_free(struct fat_cache *cache) argument
69 BUG_ON(!list_empty(&cache->cache_list));
70 kmem_cache_free(fat_cache_cachep, cache);
74 struct fat_cache *cache)
76 if (MSDOS_I(inode)->cache_lru.next != &cache->cache_list)
77 list_move(&cache
73 fat_cache_update_lru(struct inode *inode, struct fat_cache *cache) argument
136 struct fat_cache *cache, *tmp; local
189 struct fat_cache *cache; local
[all...]
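
Both the fat and exfat cache.c hits show the same LRU bookkeeping on a lookup hit: move the entry to the front of the per-inode LRU list unless it is already there. A self-contained sketch of that move-to-front step, with assumed list helpers standing in for the kernel's list_move():

    struct list_head {
            struct list_head *next, *prev;
    };

    static void list_del_entry(struct list_head *e)
    {
            e->prev->next = e->next;
            e->next->prev = e->prev;
    }

    static void list_add_front(struct list_head *e, struct list_head *head)
    {
            e->next = head->next;
            e->prev = head;
            head->next->prev = e;
            head->next = e;
    }

    /* Promote a hit to the front of the LRU list, unless it is already there. */
    static void cache_update_lru(struct list_head *lru_head,
                                 struct list_head *entry)
    {
            if (lru_head->next != entry) {
                    list_del_entry(entry);
                    list_add_front(entry, lru_head);
            }
    }
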
/linux-master/drivers/gpu/drm/vmwgfx/
vmw_surface_cache.h
356 * @cache: Surface layout data.
362 static inline u32 vmw_surface_subres(const struct vmw_surface_cache *cache, argument
365 return cache->num_mip_levels * layer + mip_level;
369 * vmw_surface_setup_cache - Build a surface cache entry
374 * @cache: Pointer to a struct vmw_surface_cach object to be filled in.
383 struct vmw_surface_cache *cache)
388 memset(cache, 0, sizeof(*cache));
389 cache->desc = desc = vmw_surface_get_desc(format);
390 cache
378 vmw_surface_setup_cache(const struct drm_vmw_size *size, SVGA3dSurfaceFormat format, u32 num_mip_levels, u32 num_layers, u32 num_samples, struct vmw_surface_cache *cache) argument
433 vmw_surface_get_loc(const struct vmw_surface_cache *cache, struct vmw_surface_loc *loc, size_t offset) argument
477 vmw_surface_inc_loc(const struct vmw_surface_cache *cache, struct vmw_surface_loc *loc) argument
503 vmw_surface_min_loc(const struct vmw_surface_cache *cache, u32 sub_resource, struct vmw_surface_loc *loc) argument
522 vmw_surface_max_loc(const struct vmw_surface_cache *cache, u32 sub_resource, struct vmw_surface_loc *loc) argument
[all...]
/linux-master/drivers/infiniband/hw/hfi1/
platform.c
196 u8 *cache = ppd->qsfp_info.cache; local
205 cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
225 u8 *cache = ppd->qsfp_info.cache; local
228 cache[QSFP_NOM_BIT_RATE_250_OFFS] < 0x64)
233 cache[QSFP_NOM_BIT_RATE_100_OFFS] < 0x7D)
251 u8 *cache = ppd->qsfp_info.cache; local
254 cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFF
288 u8 *cache = ppd->qsfp_info.cache; local
339 u8 *cache = ppd->qsfp_info.cache; local
388 u8 *cache = ppd->qsfp_info.cache; local
401 u8 *cache = ppd->qsfp_info.cache; local
414 u8 *cache = ppd->qsfp_info.cache; local
459 u8 rx_eq, *cache = ppd->qsfp_info.cache; local
503 u8 *cache = ppd->qsfp_info.cache; local
525 u8 rx_amp = 0, i = 0, preferred = 0, *cache = ppd->qsfp_info.cache; local
620 u8 *cache = ppd->qsfp_info.cache; local
647 u8 *cache = ppd->qsfp_info.cache; local
730 u8 *cache = ppd->qsfp_info.cache; local
821 u8 *cache = ppd->qsfp_info.cache; local
[all...]

