Searched refs:cached (Results 51 - 75 of 109) sorted by relevance


/linux-master/drivers/staging/media/atomisp/pci/hmm/
hmm.c:557 void *hmm_vmap(ia_css_ptr virt, bool cached) argument
570 ptr = hmm_bo_vmap(bo, cached);
577 /* Flush the memory which is mapped as cached memory through hmm_vmap */
/linux-master/arch/arm/mm/
ioremap.c:396 __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached) argument
400 if (cached)
/linux-master/fs/bcachefs/
buckets.c:41 fs_usage->cached += sectors;
194 prt_printf(out, "cached:\t\t\t\t%llu\n",
195 fs_usage->u.b.cached);
600 if (b_gen != ptr->gen && !ptr->cached) {
739 dst->cached += src->cached;
798 u32 *dst_sectors = !ptr->cached
846 if (!p.ptr.cached) {
990 if (p.ptr.cached) {
995 bch2_fs_fatal_err_on(ret && gc, c, "%s: no replicas entry while updating cached sector
[all...]
extents.h:581 if (!ptr->cached)
593 if (ptr->cached)
btree_locking.c:665 if (!path->cached && !trans->in_traverse_all) {
671 linked->cached == path->cached &&
btree_gc.c:619 if (!p.ptr.cached) {
636 if (!p.ptr.cached) {
659 if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0,
754 if ((p.ptr.cached &&
756 (!p.ptr.cached &&
1265 b.cached, "cached");
/linux-master/drivers/power/supply/
cpcap-battery.c:643 int cached; local
646 cached = cpcap_battery_update_status(ddata);
647 if (cached < 0)
648 return cached;
/linux-master/drivers/soc/qcom/
smem.c:44 * two regions are cached and non-cached memory respectively. Each region
48 * Items in the non-cached region are allocated from the start of the partition
49 * while items in the cached region are allocated from the end. The free area
50 * is hence the region between the cached and non-cached offsets. The header of
51 * cached items comes after the data.
146 * @cacheline: alignment for "cached" entries
185 * @offset_free_cached: offset to the first free byte of cached memory in this
203 * @cacheline: alignment for "cached" entries
381 void *cached; local
[all...]
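
The smem.c lines above describe a two-ended layout: non-cached items are carved from the start of the partition, cached items from the end, and the free area is the gap between the two offsets. A minimal userspace sketch of that scheme, with invented names (toy_part, toy_alloc_uncached, toy_alloc_cached) rather than the real smem API:

#include <stddef.h>
#include <stdint.h>

struct toy_part {
        uint8_t *base;          /* start of the partition */
        size_t size;            /* total size in bytes */
        size_t free_uncached;   /* first free byte, grows upward */
        size_t free_cached;     /* end of the free area, grows downward */
};

/* Non-cached items are allocated from the start of the partition. */
static void *toy_alloc_uncached(struct toy_part *p, size_t len)
{
        if (len > p->free_cached - p->free_uncached)
                return NULL;                    /* free area too small */
        void *item = p->base + p->free_uncached;
        p->free_uncached += len;
        return item;
}

/*
 * Cached items are allocated from the end; in the real layout the header of
 * a cached item comes after its data, which is not modelled here.
 */
static void *toy_alloc_cached(struct toy_part *p, size_t len)
{
        if (len > p->free_cached - p->free_uncached)
                return NULL;
        p->free_cached -= len;
        return p->base + p->free_cached;
}

On an empty partition the two offsets start at opposite ends and move toward each other as items are allocated, so the free area is always the region between them.
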
/linux-master/net/core/
page_pool.c:96 stats->recycle_stats.cached += pcpu->cached;
136 *data++ = pool_stats->recycle_stats.cached;
656 recycle_stat_inc(pool, cached);
876 /* Verify the refcnt invariant of cached pages */
page_pool_user.c:148 stats.recycle_stats.cached) ||
/linux-master/tools/perf/util/
srcline.c:452 static bool cached; local
455 if (!cached) {
468 cached = true;
473 cached = true;
/linux-master/fs/btrfs/
block-group.c:437 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
469 return cache->cached == BTRFS_CACHE_ERROR ? -EIO : 0;
479 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
871 block_group->cached = BTRFS_CACHE_STARTED;
891 block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
941 if (cache->cached != BTRFS_CACHE_NO) {
952 cache->cached = BTRFS_CACHE_STARTED;
1170 if (block_group->cached == BTRFS_CACHE_STARTED)
2367 cache->cached = BTRFS_CACHE_FINISHED;
2370 cache->cached
[all...]
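
The block-group.c hits above show cache->cached stepping through a small state machine: BTRFS_CACHE_NO before caching starts, BTRFS_CACHE_STARTED while it runs, and BTRFS_CACHE_FINISHED or BTRFS_CACHE_ERROR afterwards, with callers turning the error state into -EIO. A hedged sketch of that pattern using invented names (toy_cache_state, toy_cache), not the btrfs structures:

#include <errno.h>

enum toy_cache_state {
        TOY_CACHE_NO,           /* caching has not started yet */
        TOY_CACHE_STARTED,      /* background caching in progress */
        TOY_CACHE_FINISHED,     /* cached data is complete and usable */
        TOY_CACHE_ERROR,        /* caching failed; data must not be trusted */
};

struct toy_cache {
        enum toy_cache_state cached;
};

/* Start caching only once; any state other than NO means it already ran. */
static void toy_cache_start(struct toy_cache *c)
{
        if (c->cached != TOY_CACHE_NO)
                return;
        c->cached = TOY_CACHE_STARTED;
}

/* Record how the caching work ended. */
static void toy_cache_done(struct toy_cache *c, int ret)
{
        c->cached = ret ? TOY_CACHE_ERROR : TOY_CACHE_FINISHED;
}

/* Callers check for the error state before using the cached data. */
static int toy_cache_check(const struct toy_cache *c)
{
        return c->cached == TOY_CACHE_ERROR ? -EIO : 0;
}
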
backref.c:46 bool cached; local
65 cached = ctx->cache_lookup(eb->start, ctx->user_ctx, &root_ids,
67 if (!cached)
1281 * We cached a false result, but the last snapshot generation of the
1289 * If we cached a true result and the last generation used for dropping
1545 * cached result for the leaf is valid and only if there's only
1552 bool cached; local
1555 cached = lookup_backref_shared_cache(sc->ctx, sc->root,
1558 if (cached) {
1984 bool cached; local
2469 bool cached; local
[all...]
extent_io.c:529 struct extent_state *cached = NULL; local
558 unlock_extent(tree, processed->start, processed->end, &cached);
2504 /* Fields for the cached extent (unsubmitted, not ready, extent). */
2509 bool cached; member in struct:fiemap_cache
2538 * @len and @flags with cached one.
2539 * And only when we fails to merge, cached one will be submitted as
2554 if (!cache->cached)
2577 * to user space, we trim the length of the previously cached extent and
2593 * We cached a dealloc range (found in the io tree) for
2620 * is behind the cached offset
[all...]
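
The extent_io.c fragments describe a one-entry fiemap cache: a new extent is merged into the cached one when it is contiguous and carries the same flags, and the cached entry is only submitted to user space when merging fails. A rough illustration of that merge-or-flush pattern; the struct and emit_extent() are invented for the sketch, and the trimming of overlapping ranges mentioned above is left out:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_fiemap_cache {
        uint64_t offset;
        uint64_t len;
        uint32_t flags;
        bool cached;
};

/* Stand-in for copying one extent record out to user space. */
static void emit_extent(uint64_t offset, uint64_t len, uint32_t flags)
{
        printf("extent: offset=%llu len=%llu flags=0x%x\n",
               (unsigned long long)offset, (unsigned long long)len,
               (unsigned int)flags);
}

static void toy_cache_extent(struct toy_fiemap_cache *c,
                             uint64_t offset, uint64_t len, uint32_t flags)
{
        /* Contiguous with the cached extent and same flags: just extend it. */
        if (c->cached && c->offset + c->len == offset && c->flags == flags) {
                c->len += len;
                return;
        }
        /* Cannot merge: flush the previously cached extent first. */
        if (c->cached)
                emit_extent(c->offset, c->len, c->flags);
        c->offset = offset;
        c->len = len;
        c->flags = flags;
        c->cached = true;
}

Once the walk ends, any still-cached entry has to be flushed separately, which is why the cache tracks an explicit cached flag.
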
defrag.c:784 struct extent_state *cached = NULL; local
789 lock_extent(io_tree, start, end, &cached);
792 unlock_extent(io_tree, start, end, &cached);
/linux-master/drivers/mfd/
88pm860x-core.c:495 static unsigned char cached[3] = {0x0, 0x0, 0x0}; local
500 /* Load cached value. In initial, all IRQs are masked */
502 mask[i] = cached[i];
525 if (mask[i] != cached[i]) {
526 cached[i] = mask[i];
/linux-master/drivers/gpu/drm/i915/
i915_active.c:145 /* Keep the MRU cached node for reuse */
151 /* Rebuild the tree with only the cached node */
156 /* Make the cached node available for reuse with any timeline */
247 u64 cached = READ_ONCE(it->timeline); local
250 if (cached == idx)
263 if (!cached && !cmpxchg64(&it->timeline, 0, idx))
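
The i915_active.c hits show the cached node being reused when its timeline index already matches, and claimed atomically with cmpxchg64() when the slot is free (zero). A stripped-down sketch of that claim logic in kernel style; toy_try_claim() is an illustration, not an i915 function:

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/types.h>

/*
 * Reuse the cached slot if it already holds @idx, otherwise try to claim a
 * free (zero) slot. Returns true when the slot now belongs to @idx.
 */
static bool toy_try_claim(u64 *slot, u64 idx)
{
        u64 cached = READ_ONCE(*slot);

        if (cached == idx)
                return true;    /* most-recently-used hit */

        /* cmpxchg64() returns the old value; 0 means the claim succeeded. */
        return !cached && !cmpxchg64(slot, 0, idx);
}
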
/linux-master/drivers/hwmon/
ltc4282.c:298 u32 channel, long *cached, long *val)
304 *val = *cached;
312 *cached = *val;
317 u32 channel, u32 *cached, long *val)
322 val, cached);
297 ltc4282_vdd_source_read_hist(struct ltc4282_state *st, u32 reg, u32 channel, long *cached, long *val) argument
316 ltc4282_vdd_source_read_lim(struct ltc4282_state *st, u32 reg, u32 channel, u32 *cached, long *val) argument
/linux-master/drivers/media/usb/uvc/
uvc_ctrl.c:1012 ctrl->cached = 1;
1270 if (!ctrl->cached) {
1426 if (!ctrl->cached) {
1784 * uvc_ctrl_get from using the cached value, and for write-only
1907 if (!ctrl->cached) {
1932 if (!ctrl->cached) {
1962 if (!ctrl->cached) {
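
The repeated "if (!ctrl->cached)" checks in uvc_ctrl.c reflect a read-through cache: the control value is queried from the device only when no valid cached copy exists, after which the flag is set so later reads are served from memory, and the comment above notes cases that must be prevented from using the cached value at all. A generic hedged sketch of that pattern; toy_ctrl and toy_query_hw() are illustrative, not the UVC API:

#include <stdbool.h>
#include <stdint.h>

struct toy_ctrl {
        int32_t value;  /* last value read from the device */
        bool cached;    /* true when 'value' is known to be current */
};

/* Stand-in for a slow query over the wire; pretend it always succeeds. */
static int toy_query_hw(int32_t *value)
{
        *value = 42;
        return 0;
}

static int toy_ctrl_get(struct toy_ctrl *ctrl, int32_t *value)
{
        if (!ctrl->cached) {            /* no valid copy: ask the hardware */
                int ret = toy_query_hw(&ctrl->value);

                if (ret)
                        return ret;
                ctrl->cached = true;
        }
        *value = ctrl->value;           /* later reads come from the cache */
        return 0;
}
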
/linux-master/drivers/acpi/apei/
ghes.c:772 int i, cached = 0; local
791 cached = 1;
795 return cached;
886 * At this point, victim may point to a cached item different
/linux-master/include/media/
v4l2-ctrls.h:115 * If not set, then the currently cached value will be returned.
373 * @cached: The last found control reference. It is common that the same
401 struct v4l2_ctrl_ref *cached; member in struct:v4l2_ctrl_handler
/linux-master/drivers/net/wireless/st/cw1200/
txrx.c:197 const struct tx_policy *cached)
200 if (wanted->defined > cached->defined)
203 if (memcmp(wanted->raw, cached->raw, count))
207 if ((wanted->raw[count] & 0x0F) != (cached->raw[count] & 0x0F))
196 tx_policy_is_equal(const struct tx_policy *wanted, const struct tx_policy *cached) argument
/linux-master/arch/arm/include/asm/
io.h:140 extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
/linux-master/net/sched/
act_ct.c:961 bool cached, commit, clear; local
1013 cached = tcf_ct_skb_nfct_cached(net, skb, p);
1014 if (!cached) {
1057 if (nf_ct_is_confirmed(ct) ? ((!cached && !skip_add) || add_helper) : commit) {
/linux-master/drivers/ata/
sata_mv.c:528 struct mv_cached_regs cached; member in struct:mv_port_priv
906 * mv_save_cached_regs - (re-)initialize cached port registers
920 pp->cached.fiscfg = readl(port_mmio + FISCFG);
921 pp->cached.ltmode = readl(port_mmio + LTMODE);
922 pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
923 pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
927 * mv_write_cached_reg - write to a cached port register
929 * @old: pointer to cached value of the register
932 * Write a new value to a cached register,
1448 u32 fiscfg, *old_fiscfg = &pp->cached
[all...]
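
The sata_mv.c comments describe caching a handful of port registers in mv_port_priv and writing a new value to a cached register; a common reason to do this is to skip the MMIO write when the value has not changed. A minimal kernel-style sketch of that write-if-changed idiom, offered as an illustration rather than the driver's mv_write_cached_reg():

#include <linux/io.h>
#include <linux/types.h>

/*
 * Write @val to the register at @addr only if it differs from the cached
 * value at @old, keeping the cached copy in sync with the hardware.
 */
static void toy_write_cached_reg(void __iomem *addr, u32 *old, u32 val)
{
        if (val != *old) {
                writel(val, addr);
                *old = val;
        }
}
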

Completed in 1097 milliseconds
