Searched refs:dirty (Results 276 - 300 of 326) sorted by relevance


/linux-master/drivers/md/
dm-cache-target.c
255 * dirty. If you lose the cache device you will lose data.
262 * dirty. Potential performance benefit for reads only.
376 * cache_size entries, dirty if set
621 * and dirty bitset to be in sync.
1145 * not set/clear discard or dirty flags.
1199 * We clear dirty here to update the nr_dirty counter.
2489 *error = "could not allocate dirty bitset";
2745 DMERR("%s: could not write dirty bitset", cache_device_name(cache));
2760 * dirty bit to be set on reload.
2791 bool dirty, uint32_
2790 load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock, bool dirty, uint32_t hint, bool hint_valid) argument
[all...]
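
The dm-cache-target.c snippets above refer to a per-cache-block dirty bitset ("cache_size entries, dirty if set") paired with an nr_dirty counter that is only adjusted when a bit actually changes state. Below is a minimal userspace sketch of that bookkeeping pattern; the names (cache_sim, set_dirty, clear_dirty, test_and_change) are invented for illustration and are not dm-cache's API, which uses the kernel's atomic bitops.

#include <limits.h>
#include <stdbool.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

struct cache_sim {
        unsigned long *dirty_bitset;    /* cache_size bits, dirty if set */
        unsigned long cache_size;       /* number of cache blocks */
        unsigned long nr_dirty;         /* how many bits are currently set */
};

/* Set or clear bit nr and report whether it was set before. */
static bool test_and_change(unsigned long *bits, unsigned long nr, bool set)
{
        unsigned long *word = &bits[nr / BITS_PER_LONG];
        unsigned long mask = 1UL << (nr % BITS_PER_LONG);
        bool was_set = (*word & mask) != 0;

        if (set)
                *word |= mask;
        else
                *word &= ~mask;
        return was_set;
}

static void set_dirty(struct cache_sim *c, unsigned long cblock)
{
        if (!test_and_change(c->dirty_bitset, cblock, true))
                c->nr_dirty++;          /* count only 0 -> 1 transitions */
}

static void clear_dirty(struct cache_sim *c, unsigned long cblock)
{
        /* clearing dirty updates the nr_dirty counter, as the snippet notes */
        if (test_and_change(c->dirty_bitset, cblock, false))
                c->nr_dirty--;
}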
/linux-master/fs/
buffer.c
84 * Returns if the folio has dirty or writeback buffers. If all the buffers
89 bool *dirty, bool *writeback)
92 *dirty = false;
110 *dirty = true;
530 * as you dirty the buffers, and then use osync_inode_buffers to wait for
531 * completion. Any other dirty buffers which are not yet queued for
655 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
656 * dirty, schedule it for IO. So that indirects merge nicely with their data.
691 * block_dirty_folio - Mark a folio as dirty.
693 * @folio: The folio to mark dirty
88 buffer_check_dirty_writeback(struct folio *folio, bool *dirty, bool *writeback) argument
[all...]
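
The buffer.c hits describe buffer_check_dirty_writeback(), which reports whether any of a folio's attached buffers is dirty or under writeback. A hedged, self-contained sketch of that kind of check follows; the buf_sim struct and the linear list are stand-ins for the kernel's buffer_head ring, not the real data structures.

#include <stdbool.h>

struct buf_sim {
        bool dirty;
        bool writeback;
        struct buf_sim *next;   /* the kernel uses a circular list; linear here */
};

/* Report whether any buffer attached to the folio is dirty or in writeback. */
static void check_dirty_writeback(const struct buf_sim *head,
                                  bool *dirty, bool *writeback)
{
        *dirty = false;
        *writeback = false;

        for (const struct buf_sim *b = head; b; b = b->next) {
                if (b->dirty)
                        *dirty = true;
                if (b->writeback)
                        *writeback = true;
        }
}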
/linux-master/drivers/gpu/drm/i915/gem/selftests/
huge_pages.c
161 obj->mm.dirty = false;
310 obj->mm.dirty = false;
/linux-master/include/linux/
kvm_host.h
1226 void kvm_release_pfn(kvm_pfn_t pfn, bool dirty);
1302 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
2383 * kick the vcpu to the userspace to avoid dirty ring full. This
2388 /* Max number of entries allowed for each kvm dirty ring */
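
The kvm_host.h lines mention a per-vCPU "dirty ring" with a bounded number of entries and a rule that the vCPU is kicked out to userspace before the ring fills up. The sketch below shows the general shape of such a ring; every name here (dirty_ring_sim, ring_push, the soft-full slack of 16) is an illustrative assumption, not KVM's actual ABI.

#include <stdbool.h>
#include <stdint.h>

struct dirty_entry {
        uint32_t slot;          /* memslot the dirtied page lives in */
        uint64_t offset;        /* page offset within that slot */
};

struct dirty_ring_sim {
        struct dirty_entry *ents;
        uint32_t size;          /* max number of entries, a power of two */
        uint32_t head, tail;    /* consumer / producer indices */
};

static uint32_t ring_used(const struct dirty_ring_sim *r)
{
        return r->tail - r->head;
}

/* Record a dirtied page; returns true when the vCPU should exit to
 * userspace so the ring can be harvested before it overflows. */
static bool ring_push(struct dirty_ring_sim *r, uint32_t slot, uint64_t offset)
{
        r->ents[r->tail % r->size] = (struct dirty_entry){ slot, offset };
        r->tail++;
        return ring_used(r) >= r->size - 16;    /* arbitrary soft-full slack */
}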
/linux-master/arch/m68k/fpsp040/
fpsp.h
128 .set FPR_DIRTY_BITS,LV-91 | fpr dirty bits
/linux-master/arch/powerpc/kvm/
e500_mmu.c
857 struct kvm_dirty_tlb *dirty)
856 kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu, struct kvm_dirty_tlb *dirty) argument
/linux-master/fs/ubifs/
recovery.c
333 * dirty.
748 * find a dirty LEB which could be GC'd into LEB Y! Even LEB X
1175 dbg_rcvry("could not find a dirty LEB");
1180 ubifs_assert(c, lp.free + lp.dirty >= wbuf->offs);
/linux-master/fs/ocfs2/dlm/
dlmmaster.c
485 !list_empty(&res->dirty) ||
496 !list_empty(&res->dirty) ? 'D' : ' ',
509 BUG_ON(!list_empty(&res->dirty));
545 INIT_LIST_HEAD(&res->dirty);
2602 * if we fail after this we need to re-dirty the lockres
2731 /* re-dirty the lockres if we failed */
2852 * try to dirty the lockres before MIGRATING is set */
2861 mlog(0, "about to wait on migration_wq, dirty=%s\n",
3469 /* re-dirty it on the new master */
dlmcommon.h
295 struct list_head dirty; member in struct:dlm_lock_resource
/linux-master/drivers/net/ethernet/via/
via-velocity.h
1411 int dirty; member in struct:velocity_info::rx_info
/linux-master/drivers/infiniband/hw/mlx4/
mlx4_ib.h
293 bool dirty; member in struct:mlx4_wqn_range
/linux-master/include/sound/
soc-dapm.h
675 struct list_head dirty; member in struct:snd_soc_dapm_widget
/linux-master/arch/powerpc/include/asm/
kvm_book3s.h
250 unsigned long gpa, bool dirty);
/linux-master/drivers/net/ethernet/amd/xgbe/
xgbe.h
475 * dirty - Tx: index of descriptor to check for transfer complete
479 unsigned int dirty; member in struct:xgbe_ring
/linux-master/drivers/gpu/drm/i915/
i915_gem.c
888 reg->dirty = true;
/linux-master/arch/powerpc/kernel/
asm-offsets.c
430 OFFSET(VCPU_VPA_DIRTY, kvm_vcpu, arch.vpa.dirty);
/linux-master/sound/pci/emu10k1/
emuproc.c
443 voice->dirty,
/linux-master/arch/x86/events/
perf_event.h
242 unsigned long dirty[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; member in struct:cpu_hw_events
/linux-master/sound/pci/hda/
hda_codec.c
681 unsigned char dirty; /* setups should be cleared */ member in struct:hda_cvt_setup
1158 p->dirty = 0;
1160 /* make other inactive cvts with the same stream-tag dirty */
1166 p->dirty = 1;
1231 if (p->dirty)
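
The hda_codec.c hits show a dirty flag on each converter setup and a step that marks other inactive converters sharing the same stream tag as dirty so their stale setups get cleared later. A small illustrative sketch of that marking pass follows; the cvt_setup_sim type and mark_stale_cvts() are hypothetical stand-ins, not the driver's real structures.

#include <stdbool.h>

struct cvt_setup_sim {
        int nid;                /* converter widget id */
        int stream_tag;
        bool active;
        bool dirty;             /* setup should be cleared later */
};

static void mark_stale_cvts(struct cvt_setup_sim *cvts, int n,
                            int programmed_nid, int stream_tag)
{
        for (int i = 0; i < n; i++) {
                struct cvt_setup_sim *p = &cvts[i];

                if (p->nid == programmed_nid) {
                        p->dirty = false;       /* just (re)programmed */
                        continue;
                }
                /* other inactive converters with the same stream tag go dirty */
                if (!p->active && p->stream_tag == stream_tag)
                        p->dirty = true;
        }
}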
/linux-master/fs/bcachefs/
bcachefs.h
15 * Multiple cache devices is intended to give us the ability to mirror dirty
29 * "cached" data is always dirty. The end result is that we get thin
139 * We can't just invalidate any bucket - it might contain dirty data or
140 * metadata. If it once contained dirty data, other writes might overwrite it
674 bool dirty; member in struct:journal_seq_blacklist_table::journal_seq_blacklist_table_entry
/linux-master/virt/kvm/
kvm_main.c
376 * are related to dirty logging, and many do the TLB flush out of
1408 * Allocation size is twice as large as the actual dirty bitmap size.
1657 * If dirty logging is disabled, nullify the bitmap; the old bitmap
1722 * Free the dirty bitmap as needed; the below check encompasses
2125 * kvm_get_dirty_log - get a snapshot of dirty pages
2128 * @is_dirty: set to '1' if any dirty pages were found
2139 /* Dirty ring tracking may be exclusive to dirty log tracking */
2174 * kvm_get_dirty_log_protect - get a snapshot of dirty pages
2175 * and reenable dirty page tracking for the corresponding pages.
2180 * concurrently. So, to avoid losing track of dirty page
3098 kvm_release_pfn(kvm_pfn_t pfn, bool dirty) argument
3140 kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty) argument
[all...]
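
The kvm_main.c comments describe the dirty-log pattern: the bitmap is allocated at twice its nominal size so one half can hold a stable snapshot of dirty pages while tracking is re-armed on the live half. A simplified sketch of that double-buffered snapshot is below; dirty_log_sim and dirty_log_snapshot() are made-up names, and a real implementation would swap words atomically rather than memcpy/memset.

#include <string.h>

struct dirty_log_sim {
        unsigned long *bitmap;  /* 2 * nlongs words: [ live | snapshot ] */
        unsigned long nlongs;   /* words needed for one full bitmap */
};

/* Copy live dirty bits into the snapshot half and re-arm the live half. */
static unsigned long *dirty_log_snapshot(struct dirty_log_sim *d)
{
        unsigned long *live = d->bitmap;
        unsigned long *snap = d->bitmap + d->nlongs;

        /* A real implementation swaps each word atomically (e.g. xchg) so
         * that bits set concurrently by writers are never lost. */
        memcpy(snap, live, d->nlongs * sizeof(*live));
        memset(live, 0, d->nlongs * sizeof(*live));
        return snap;
}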
/linux-master/drivers/misc/vmw_vmci/
vmci_queue_pair.c
632 u64 num_pages, bool dirty)
637 if (dirty)
631 qp_release_pages(struct page **pages, u64 num_pages, bool dirty) argument
/linux-master/fs/jfs/
jfs_txnmgr.c
548 /* write dirty metadata & forward log syncpt */
701 /* mark the page dirty and nohomeok */
1233 * Mark inode as not dirty. It will still be on the dirty
1235 * it gets marked dirty again
2593 void txAbort(tid_t tid, int dirty) argument
2634 * mark filesystem dirty
2636 if (dirty)
/linux-master/kernel/
kprobes.c
257 kprobe_opcode_t *slot, int dirty)
279 if (dirty) {
256 __free_insn_slot(struct kprobe_insn_cache *c, kprobe_opcode_t *slot, int dirty) argument
/linux-master/drivers/perf/
xgene_pmu.c
310 XGENE_PMU_EVENT_ATTR(csw-inbound-dirty, 0x16),
385 XGENE_PMU_EVENT_ATTR(dirty-eviction, 0x07),

Completed in 979 milliseconds
