Searched refs:stale (Results 1 - 25 of 46) sorted by relevance

/linux-master/fs/iomap/
iter.c
19 * (processed = 0) meaning we are done and (processed = 0 && stale) meaning we
24 bool stale = iter->iomap.flags & IOMAP_F_STALE; local
30 if (!iter->processed && !stale)
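The iter.c comment encodes the iterator's termination rule: zero bytes processed normally ends the walk, but a zero-progress step with IOMAP_F_STALE set means the mapping changed underneath the operation and the same range has to be mapped again. A minimal standalone sketch of that rule, using hypothetical demo_* names rather than the real struct iomap_iter:

#include <stdbool.h>

struct demo_iter {
        long long processed;    /* bytes handled by the last step */
        bool stale;             /* mapping invalidated mid-operation */
};

/* Mirror of the "(processed == 0 && !stale) => done" convention above. */
static int demo_iter_advance(struct demo_iter *it)
{
        if (it->processed < 0)
                return (int)it->processed;      /* error: stop and report */
        if (it->processed == 0 && !it->stale)
                return 0;                       /* done */
        it->stale = false;
        return 1;                               /* map (or re-map) the next range */
}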
/linux-master/drivers/gpu/drm/i915/gt/
intel_gt_buffer_pool.c
39 struct intel_gt_buffer_pool_node *node, *stale = NULL; local
67 node->free = stale;
68 stale = node;
79 while ((node = stale)) {
80 stale = stale->free;
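The intel_gt_buffer_pool.c hits show a common retirement idiom: nodes are pulled off the pool and threaded onto a local stale chain through their ->free pointer, then the chain is drained in a second pass. A rough userspace sketch of the same idiom (demo_* types are illustrative, not the i915 structures, and the nodes are assumed to be malloc-allocated):

#include <stdlib.h>

struct demo_node {
        struct demo_node *free;         /* next node on the stale chain */
        /* ... payload ... */
};

static void demo_retire(struct demo_node **candidates, int n)
{
        struct demo_node *node, *stale = NULL;
        int i;

        /* Pass 1: collect candidates onto a private singly linked list. */
        for (i = 0; i < n; i++) {
                node = candidates[i];
                node->free = stale;
                stale = node;
        }

        /* Pass 2: walk the chain and release each node. */
        while ((node = stale)) {
                stale = stale->free;
                free(node);
        }
}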
/linux-master/fs/xfs/libxfs/
xfs_dir2_leaf.c
48 to->stale = be16_to_cpu(from3->hdr.stale);
58 to->stale = be16_to_cpu(from->hdr.stale);
82 to3->hdr.stale = cpu_to_be16(from->stale);
91 to->hdr.stale = cpu_to_be16(from->stale);
148 int stale; local
171 /* Check hash value order, count stale entrie
[all...]
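The xfs_dir2_leaf.c lines convert the leaf header's stale-entry count between its big-endian on-disk form and the CPU-order in-core header. A standalone sketch of that to/from-disk conversion, using glibc's <endian.h> helpers in place of the kernel's be16_to_cpu()/cpu_to_be16() (struct names are made up):

#include <stdint.h>
#include <endian.h>

struct demo_disk_hdr {
        uint16_t stale;         /* big-endian on disk */
};

struct demo_icore_hdr {
        uint16_t stale;         /* CPU order in core */
};

static void demo_hdr_from_disk(struct demo_icore_hdr *to,
                               const struct demo_disk_hdr *from)
{
        to->stale = be16toh(from->stale);
}

static void demo_hdr_to_disk(struct demo_disk_hdr *to,
                             const struct demo_icore_hdr *from)
{
        to->stale = htobe16(from->stale);
}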
xfs_dir2_block.c
214 * If there are stale entries we'll use one for the leaf.
216 if (btp->stale) {
241 if (be16_to_cpu(dup->length) + (be32_to_cpu(btp->stale) - 1) *
244 } else if ((be32_to_cpu(btp->stale) - 1) * (uint)sizeof(*blp) < len)
252 * no stale entries, so just use free space.
302 * Leave the highest-numbered stale entry stale.
319 int highstale; /* high stale index */
337 *lfloglow = toidx + 1 - (be32_to_cpu(btp->stale) - 1);
338 *lfloghigh -= be32_to_cpu(btp->stale)
[all...]
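The block.c comments describe the insertion trade-off: when the block tail records stale leaf entries, one of them is recycled for the new entry instead of growing the leaf table, and only if none exist does the code carve the entry out of free space. A simplified sketch of that decision (generic array-based leaf, stale marked here by a zero address, not the on-disk XFS layout):

#include <stdbool.h>

struct demo_leaf_entry {
        unsigned int hashval;
        unsigned int address;   /* 0 stands in for a stale entry */
};

struct demo_block_tail {
        unsigned int count;     /* total leaf entries */
        unsigned int stale;     /* how many of them are stale */
};

/* Pick a slot for a new leaf entry: recycle a stale slot when one exists. */
static int demo_pick_slot(struct demo_leaf_entry *blp,
                          struct demo_block_tail *btp,
                          bool have_free_space)
{
        unsigned int i;

        if (btp->stale) {
                for (i = 0; i < btp->count; i++) {
                        if (blp[i].address == 0) {
                                btp->stale--;
                                return (int)i;          /* reuse the stale slot */
                        }
                }
        }
        if (!have_free_space)
                return -1;              /* caller must compact or split */
        return (int)btp->count++;       /* grow the table into free space */
}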
xfs_da_format.h
426 __be16 stale; /* count of stale entries */ member in struct:xfs_dir2_leaf_hdr
432 __be16 stale; /* count of stale entries */ member in struct:xfs_dir3_leaf_hdr
542 __be32 stale; /* count of stale lf entries */ member in struct:xfs_dir2_block_tail
xfs_dir2_node.c
510 int compact; /* compacting stale leaves */
511 int highstale = 0; /* next stale entry */
514 int lowstale = 0; /* previous stale entry */
533 * the block, if there are no stale entries it won't fit.
534 * Caller will do a split. If there are stale entries we'll do
539 if (!leafhdr.stale)
541 compact = leafhdr.stale > 1;
552 * Compact out all but one stale leaf entry. Leaves behind
558 else if (leafhdr.stale) {
688 * Skip stale lea
971 int stale; /* count stale leaves copied */ local
[all...]
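The node.c comments around lines 533-558 describe the compaction step: if the new entry will not fit and more than one stale entry exists, the leaf is compacted down to a single remaining stale entry to reclaim room; with no stale entries at all, the caller must split the block instead. A compact-in-place sketch under the same "keep exactly one stale slot" rule (generic types, stale again marked by a zero address; the real code keeps the surviving stale slot near the insertion point):

struct demo_leaf_entry {
        unsigned int hashval;
        unsigned int address;   /* 0 stands in for a stale entry */
};

/*
 * Squeeze out all but one stale entry, returning the new entry count.
 * Keeping one stale slot leaves a hole the caller can insert into.
 */
static unsigned int demo_compact(struct demo_leaf_entry *ents,
                                 unsigned int count)
{
        unsigned int from, to = 0, kept_stale = 0;

        for (from = 0; from < count; from++) {
                if (ents[from].address == 0) {
                        if (kept_stale)
                                continue;       /* drop additional stale slots */
                        kept_stale = 1;
                }
                ents[to++] = ents[from];
        }
        return to;
}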
xfs_dir2_data.c
115 int stale; /* count of stale leaves */ local
257 for (i = stale = 0; i < be32_to_cpu(btp->count); i++) {
260 stale++;
265 if (count != be32_to_cpu(btp->count) - be32_to_cpu(btp->stale))
267 if (stale != be32_to_cpu(btp->stale))
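The data.c hits are from the block verifier: it walks the leaf entries in the block tail, counts how many are stale, and cross-checks both the live count (count - stale) and the stale count against the tail header. A sketch of that consistency check with host-order fields instead of the on-disk be32 values:

#include <stdbool.h>

struct demo_leaf_entry {
        unsigned int hashval;
        unsigned int address;   /* 0 stands in for a stale entry */
};

struct demo_block_tail {
        unsigned int count;     /* leaf entries recorded in the tail */
        unsigned int stale;     /* stale entries recorded in the tail */
};

static bool demo_tail_is_consistent(const struct demo_leaf_entry *blp,
                                    const struct demo_block_tail *btp,
                                    unsigned int live_entries_found)
{
        unsigned int i, stale = 0;

        for (i = 0; i < btp->count; i++)
                if (blp[i].address == 0)
                        stale++;

        /* Both the live and the stale tallies must match the tail header. */
        return live_entries_found == btp->count - btp->stale &&
               stale == btp->stale;
}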
xfs_dir2_priv.h
20 uint16_t stale; member in struct:xfs_dir3_icleaf_hdr
/linux-master/fs/xfs/scrub/
dirtree.c
295 dl->stale = true;
381 if (dl->stale) {
473 if (dl->stale) {
572 if (!error && dl->stale)
660 * path cannot be stale.
700 if (dl->stale || dl->aborted)
710 dl->stale = true;
740 dl->stale = false;
830 ASSERT(dl->stale);
838 } while (dl->stale);
[all...]
dirtree.h
27 XCHK_DIRPATH_STALE, /* path is stale */
147 bool stale:1; member in struct:xchk_dirtree
dirtree_repair.c
213 * if the scan results have become stale.
413 if (dl->stale) {
536 * failed or the scan data are now stale. This keeps things simple for
543 if (!error && dl->stale)
684 if (dl->stale) {
740 * failed or the scan data are now stale. This keeps things simple for
747 if (!error && dl->stale)
805 if (!dl->stale) {
dir.c
628 unsigned int stale = 0; local
676 /* Check hash value order, count stale entries. */
684 stale++;
686 if (leafhdr.stale != stale)
734 unsigned int stale = 0; local
756 stale++;
769 if (freehdr.nused + stale != freehdr.nvalid)
/linux-master/drivers/gpu/drm/i915/gem/selftests/
mock_context.c
31 spin_lock_init(&ctx->stale.lock);
32 INIT_LIST_HEAD(&ctx->stale.engines);
/linux-master/drivers/gpu/drm/i915/gem/
i915_gem_context_types.h
38 /** @link: Link in i915_gem_context::stale::engines */
414 /** @stale: tracks stale engines to be destroyed */
416 /** @stale.lock: guards engines */
418 /** @stale.engines: list of stale engines */
420 } stale; member in struct:i915_gem_context
i915_gem_context.c
1084 spin_lock_irqsave(&ctx->stale.lock, flags);
1086 spin_unlock_irqrestore(&ctx->stale.lock, flags);
1435 spin_lock_irq(&ctx->stale.lock);
1437 list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
1443 spin_unlock_irq(&ctx->stale.lock);
1448 spin_lock_irq(&ctx->stale.lock);
1455 spin_unlock_irq(&ctx->stale.lock);
1485 spin_lock_irq(&ctx->stale.lock);
1487 list_add_tail(&engines->link, &ctx->stale.engines);
1488 spin_unlock_irq(&ctx->stale
[all...]
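Taken together, the i915_gem_context_types.h documentation and the i915_gem_context.c hits show the pattern: a context keeps a spinlock-guarded list of stale engine sets, producers add to it with list_add_tail() under the lock, and a teardown path walks it with list_for_each_entry_safe() to unlink and destroy each entry. A condensed kernel-style sketch of that pattern (demo_* names, not the i915 API):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct demo_engines {
        struct list_head link;
        /* ... resources to tear down ... */
};

struct demo_ctx {
        struct {
                spinlock_t lock;                /* guards .engines */
                struct list_head engines;       /* stale engine sets */
        } stale;
};

static void demo_ctx_init(struct demo_ctx *ctx)
{
        spin_lock_init(&ctx->stale.lock);
        INIT_LIST_HEAD(&ctx->stale.engines);
}

static void demo_mark_stale(struct demo_ctx *ctx, struct demo_engines *e)
{
        spin_lock_irq(&ctx->stale.lock);
        list_add_tail(&e->link, &ctx->stale.engines);
        spin_unlock_irq(&ctx->stale.lock);
}

static void demo_kill_stale(struct demo_ctx *ctx)
{
        struct demo_engines *pos, *next;

        spin_lock_irq(&ctx->stale.lock);
        list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
                list_del_init(&pos->link);
                /* real code would release pos's resources here */
                kfree(pos);
        }
        spin_unlock_irq(&ctx->stale.lock);
}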
/linux-master/drivers/md/bcache/
extents.c
15 * counter. Garbage collection is used to remove stale pointers.
85 return "stale";
540 unsigned int i, stale; local
552 stale = ptr_stale(b->c, k, i);
554 if (stale && KEY_DIRTY(k)) {
556 pr_info("stale dirty pointer, stale %u, key: %s\n",
557 stale, buf);
560 btree_bug_on(stale > BUCKET_GC_GEN_MAX, b,
561 "key too stale
[all...]
btree.c
15 * counter. Garbage collection is used to remove stale pointers.
1231 uint8_t stale = 0; local
1241 return stale;
1253 stale = max(stale, ptr_stale(c, k, i));
1277 return stale;
1309 uint8_t stale = 0; local
1318 stale = max(stale, btree_mark_key(b, k));
1340 if (stale > 1
[all...]
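Both bcache files open with the same explanation: each bucket carries a generation counter, pointers embed the generation they were created against, and a pointer whose generation lags the bucket's is stale and will be dropped by garbage collection; btree.c additionally accumulates the worst (maximum) staleness seen across a key's pointers. A toy sketch of generation-based staleness, assuming 8-bit generations as in the ptr_stale() idiom (all names here are illustrative):

#include <stdint.h>

struct demo_bucket {
        uint8_t gen;            /* bumped whenever the bucket is reused */
};

struct demo_ptr {
        unsigned int bucket;    /* index of the bucket pointed at */
        uint8_t gen;            /* bucket generation when the pointer was made */
};

/* How far behind the bucket this pointer is; 0 means still valid. */
static uint8_t demo_ptr_stale(const struct demo_bucket *buckets,
                              const struct demo_ptr *p)
{
        return (uint8_t)(buckets[p->bucket].gen - p->gen);
}

/* Worst-case staleness across all pointers of one key. */
static uint8_t demo_key_stale(const struct demo_bucket *buckets,
                              const struct demo_ptr *ptrs, unsigned int nptrs)
{
        uint8_t stale = 0;
        unsigned int i;

        for (i = 0; i < nptrs; i++) {
                uint8_t s = demo_ptr_stale(buckets, &ptrs[i]);

                if (s > stale)
                        stale = s;
        }
        return stale;
}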
/linux-master/fs/xfs/
xfs_buf_item.c
182 * regions, we do not relog them in stale buffers. This has the effect of
202 * The buffer is stale, so all we need to log is the buf log
307 * The buffer is stale, so all we need to log
513 int stale = bip->bli_flags & XFS_BLI_STALE; local
534 if (stale) {
546 * stale so we own both lock and reference exclusively here. We
609 * been marked stale, we could end up stalling until someone else
610 * issues a log force to unpin the stale buffer. Check for the
706 bool stale = bip->bli_flags & XFS_BLI_STALE; local
722 ASSERT(!stale || (bi
[all...]
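The xfs_buf_item.c comments state the rule for stale buffers: once a buffer is marked XFS_BLI_STALE its dirty data regions are not relogged, and only the buf log format structure needs to go into the transaction. A skeletal sketch of that early-out in a size/format-style routine (demo types, not the real xfs_buf_log_item):

#include <stdbool.h>

struct demo_buf_log_item {
        bool stale;             /* buffer has been invalidated */
        unsigned int nregions;  /* dirty regions tracked for relogging */
};

/* Returns the number of log vectors this item will need. */
static unsigned int demo_buf_item_size(const struct demo_buf_log_item *bip)
{
        if (bip->stale) {
                /*
                 * Stale buffer: log only the format structure so recovery
                 * can tell the buffer was invalidated.
                 */
                return 1;
        }
        return 1 + bip->nregions;       /* format structure + dirty regions */
}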
/linux-master/tools/testing/selftests/net/
ndisc_unsolicited_na_test.sh
145 to ${HOST_ADDR} dev ${ROUTER_INTF} nud stale)
arp_ndisc_untracked_subnets.sh
171 to ${HOST_ADDR_V6} dev ${ROUTER_INTF} nud stale)
/linux-master/drivers/gpu/drm/msm/disp/mdp4/
mdp4_crtc.c
30 bool stale; member in struct:mdp4_crtc::__anon731
364 if (mdp4_crtc->cursor.stale) {
393 mdp4_crtc->cursor.stale = false;
443 mdp4_crtc->cursor.stale = true;
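The mdp4_crtc.c hits show a classic deferred-update flag: a cursor change only records the new state and sets cursor.stale; the next flush notices the flag, programs the hardware, and clears it. A minimal sketch of that mark-then-flush pattern (demo types; the real driver does this under a spinlock with drm/gem objects):

#include <stdbool.h>
#include <stdint.h>

struct demo_cursor {
        bool stale;             /* pending update not yet written to HW */
        uint32_t next_x, next_y;
};

/* Called from the "set cursor" path: just record and mark stale. */
static void demo_cursor_move(struct demo_cursor *c, uint32_t x, uint32_t y)
{
        c->next_x = x;
        c->next_y = y;
        c->stale = true;
}

/* Called from the flush/commit path: apply at most one pending update. */
static void demo_cursor_flush(struct demo_cursor *c,
                              void (*write_hw)(uint32_t x, uint32_t y))
{
        if (!c->stale)
                return;
        write_hw(c->next_x, c->next_y);
        c->stale = false;
}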
/linux-master/drivers/net/ethernet/intel/ice/
ice_ptp.c
615 if (test_and_clear_bit(idx, tx->stale))
646 * 1) check that the timestamp request is not stale
650 * 5) check if the timestamp is stale, and discard if so
674 * this case, software will set the stale bit for any outstanding Tx
769 if (test_and_clear_bit(idx, tx->stale))
870 unsigned long *in_use, *stale; local
875 stale = bitmap_zalloc(tx->len, GFP_KERNEL);
877 if (!tstamps || !in_use || !stale) {
880 bitmap_free(stale);
887 tx->stale
[all...]
ice_ptp.h
102 * been captured. This avoids reporting stale timestamps to the stack. This is
126 * @stale: bitmap of len to indicate slots which have stale timestamps
142 unsigned long *stale; member in struct:ice_ptp_tx
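The ice_ptp.c flow description and the ice_ptp.h field comment describe a pair of bitmaps: one tracks timestamp slots with an outstanding request, the other marks slots whose requests went stale so a late hardware timestamp can be discarded with test_and_clear_bit(). A kernel-context sketch of that bookkeeping (demo_* names, not the ice driver's structures):

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct demo_tx {
        unsigned long *in_use;  /* slots with an outstanding request */
        unsigned long *stale;   /* slots whose request was abandoned */
        unsigned int len;
};

static int demo_tx_init(struct demo_tx *tx, unsigned int len)
{
        tx->in_use = bitmap_zalloc(len, GFP_KERNEL);
        tx->stale = bitmap_zalloc(len, GFP_KERNEL);
        if (!tx->in_use || !tx->stale) {
                bitmap_free(tx->in_use);
                bitmap_free(tx->stale);
                return -ENOMEM;
        }
        tx->len = len;
        return 0;
}

/* Completion path: should the timestamp in slot @idx reach the stack? */
static bool demo_tx_want_tstamp(struct demo_tx *tx, unsigned int idx)
{
        /* A slot marked stale is discarded exactly once, then forgotten. */
        if (test_and_clear_bit(idx, tx->stale))
                return false;
        return test_bit(idx, tx->in_use);
}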
/linux-master/drivers/md/
dm-ps-historical-service-time.c
9 * Marks paths stale if they have not finished within hst *
10 * num_paths. If a path is stale and unused, we will send a single
14 * multipath device is unused. If a path is stale and in use, limit the
346 u64 *out, u64 *stale)
353 *stale = pi->stale_after;
392 * If an unloaded path is stale, choose it. If both paths are unloaded,
393 * choose path that is the most stale.
419 /* In the case that the 'winner' is stale, limit to equal usage. */
345 hst_fill_compare(struct path_info *pi, u64 *hst, u64 *out, u64 *stale) argument
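The path-selector comments spell out the policy: a path that has not completed I/O within hst * num_paths is considered stale; among otherwise idle paths a stale one is preferred, the stalest wins if several qualify, and a stale path that would otherwise win is limited to roughly equal usage. A sketch of just the "prefer the stalest idle path" comparison (hypothetical fields, not the dm-ps-historical-service-time structures):

#include <stddef.h>
#include <stdbool.h>
#include <stdint.h>

struct demo_path {
        uint64_t stale_after;   /* time after which the path counts as stale */
};

static bool demo_is_stale(const struct demo_path *p, uint64_t now)
{
        return now > p->stale_after;
}

/*
 * Between two idle paths, prefer a stale one (and the stalest of the two);
 * return NULL when staleness gives no preference so the caller can fall
 * back to comparing predicted service times.
 */
static const struct demo_path *
demo_prefer_stale(const struct demo_path *a, const struct demo_path *b,
                  uint64_t now)
{
        bool sa = demo_is_stale(a, now);
        bool sb = demo_is_stale(b, now);

        if (sa && sb)
                return a->stale_after <= b->stale_after ? a : b;
        if (sa != sb)
                return sa ? a : b;
        return NULL;
}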
/linux-master/mm/
z3fold.c
129 * @stale_lock: protects pool stale page list
133 * @stale: list of pages marked for freeing
148 struct list_head stale; member in struct:z3fold_pool
453 list_add(&zhdr->buddy, &pool->stale);
497 while (!list_empty(&pool->stale)) {
498 struct z3fold_header *zhdr = list_first_entry(&pool->stale,
934 INIT_LIST_HEAD(&pool->stale);

Completed in 411 milliseconds
