Lines Matching defs:xas (fs/dax.c)

143 static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
147 unsigned long index = xas->xa_index;
156 key->xa = xas->xa;
159 hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
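The lines above belong to dax_entry_waitqueue(), which maps an (xarray, index) pair onto one slot of a small file-local table of wait queues. A minimal sketch of that hashing step, assuming the table size and DAX_WAIT_TABLE_BITS value (the real function also aligns the index for PMD-sized entries and fills in a wait key):

#include <linux/hash.h>
#include <linux/wait.h>
#include <linux/xarray.h>

#define DAX_WAIT_TABLE_BITS 12				/* assumed value */
static wait_queue_head_t wait_table[1 << DAX_WAIT_TABLE_BITS];

static wait_queue_head_t *pick_entry_waitqueue(struct xa_state *xas,
					       unsigned long index)
{
	unsigned long hash;

	/* Hash the xarray pointer and the entry index into the table. */
	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}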
181 static void dax_wake_entry(struct xa_state *xas, void *entry,
187 wq = dax_entry_waitqueue(xas, entry, &key);
209 static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
219 entry = xas_find_conflict(xas);
227 wq = dax_entry_waitqueue(xas, entry, &ewait.key);
230 xas_unlock_irq(xas);
231 xas_reset(xas);
234 xas_lock_irq(xas);
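Lines 209-234 are the sleep-until-unlocked loop: if the entry returned by xas_find_conflict() is currently locked, the walker queues itself on the wait queue, drops the xa_lock, sleeps, and retries from a reset xa_state. A hedged sketch of that loop shape; entry_is_locked(), SKETCH_LOCKED, and the single sketch_wq queue are stand-ins for the file's dax_is_locked(), DAX_LOCKED, and hashed wait table. The caller is assumed to hold xas_lock_irq().

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/xarray.h>

#define SKETCH_LOCKED	(1UL << 0)	/* assumed lock bit inside the value entry */

static DECLARE_WAIT_QUEUE_HEAD(sketch_wq);	/* one queue instead of the hashed table */

static bool entry_is_locked(void *entry)
{
	return xa_is_value(entry) && (xa_to_value(entry) & SKETCH_LOCKED);
}

static void *wait_for_unlocked(struct xa_state *xas)
{
	DEFINE_WAIT(wait);
	void *entry;

	for (;;) {
		entry = xas_find_conflict(xas);
		if (!entry || !entry_is_locked(entry))
			return entry;

		/* Queue ourselves before dropping the lock, so a wake-up
		 * issued between xas_unlock_irq() and schedule() is not lost. */
		prepare_to_wait_exclusive(&sketch_wq, &wait, TASK_UNINTERRUPTIBLE);
		xas_unlock_irq(xas);
		xas_reset(xas);
		schedule();
		finish_wait(&sketch_wq, &wait);
		xas_lock_irq(xas);
	}
}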
241 * After we call xas_unlock_irq(), we cannot touch xas->xa.
243 static void wait_entry_unlocked(struct xa_state *xas, void *entry)
251 wq = dax_entry_waitqueue(xas, entry, &ewait.key);
259 xas_unlock_irq(xas);
264 static void put_unlocked_entry(struct xa_state *xas, void *entry,
268 dax_wake_entry(xas, entry, mode);
276 static void dax_unlock_entry(struct xa_state *xas, void *entry)
281 xas_reset(xas);
282 xas_lock_irq(xas);
283 old = xas_store(xas, entry);
284 xas_unlock_irq(xas);
286 dax_wake_entry(xas, entry, WAKE_NEXT);
292 static void *dax_lock_entry(struct xa_state *xas, void *entry)
295 return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
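Lines 276-295 show the per-entry lock implemented as a bit inside the stored value: locking re-stores the entry with DAX_LOCKED set, unlocking stores the clean entry back and wakes the next waiter. A sketch of that pair under the same assumed SKETCH_LOCKED bit and single stand-in wait queue:

#include <linux/wait.h>
#include <linux/xarray.h>

#define SKETCH_LOCKED	(1UL << 0)		/* assumed lock bit */

static DECLARE_WAIT_QUEUE_HEAD(sketch_wq);	/* stands in for the hashed wait table */

/* Caller holds xa_lock_irq; re-store the value entry with the lock bit set. */
static void *lock_value_entry(struct xa_state *xas, void *entry)
{
	unsigned long v = xa_to_value(entry);

	return xas_store(xas, xa_mk_value(v | SKETCH_LOCKED));
}

/* Store the clean entry back under the lock, then wake one exclusive waiter
 * (the real code wakes the hashed queue with a key naming this entry). */
static void unlock_value_entry(struct xa_state *xas, void *entry)
{
	xas_reset(xas);
	xas_lock_irq(xas);
	xas_store(xas, entry);
	xas_unlock_irq(xas);
	wake_up(&sketch_wq);
}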
425 XA_STATE(xas, NULL, 0);
448 xas.xa = &mapping->i_pages;
449 xas_lock_irq(&xas);
451 xas_unlock_irq(&xas);
454 xas_set(&xas, folio->index);
455 entry = xas_load(&xas);
458 wait_entry_unlocked(&xas, entry);
462 dax_lock_entry(&xas, entry);
463 xas_unlock_irq(&xas);
473 XA_STATE(xas, &mapping->i_pages, folio->index);
478 dax_unlock_entry(&xas, (void *)cookie);
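The matches at lines 425-478 appear to come from dax_lock_folio() and dax_unlock_folio(): the lock side returns the locked entry as an opaque cookie, and the unlock side re-creates an xa_state at folio->index to store it back. A hedged caller-side sketch, in the style memory-failure handling uses this pair:

#include <linux/dax.h>
#include <linux/mm.h>

static void inspect_dax_folio(struct folio *folio)
{
	dax_entry_t cookie = dax_lock_folio(folio);

	if (!cookie)
		return;		/* not a DAX mapping, or the folio was truncated */

	/* ... examine the folio while its DAX entry stays locked ... */

	dax_unlock_folio(folio, cookie);
}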
493 XA_STATE(xas, NULL, 0);
502 xas.xa = &mapping->i_pages;
503 xas_lock_irq(&xas);
504 xas_set(&xas, index);
505 entry = xas_load(&xas);
508 wait_entry_unlocked(&xas, entry);
524 dax_lock_entry(&xas, entry);
526 xas_unlock_irq(&xas);
536 XA_STATE(xas, &mapping->i_pages, index);
541 dax_unlock_entry(&xas, (void *)cookie);
573 static void *grab_mapping_entry(struct xa_state *xas,
576 unsigned long index = xas->xa_index;
582 xas_lock_irq(xas);
583 entry = get_unlocked_entry(xas, order);
589 xas_set_err(xas, -EIO);
607 dax_lock_entry(xas, entry);
615 xas_unlock_irq(xas);
617 xas->xa_index & ~PG_PMD_COLOUR,
619 xas_reset(xas);
620 xas_lock_irq(xas);
624 xas_store(xas, NULL); /* undo the PMD join */
625 dax_wake_entry(xas, entry, WAKE_ALL);
628 xas_set(xas, index);
632 dax_lock_entry(xas, entry);
639 dax_lock_entry(xas, entry);
640 if (xas_error(xas))
646 xas_unlock_irq(xas);
647 if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
649 if (xas->xa_node == XA_ERROR(-ENOMEM))
651 if (xas_error(xas))
655 xas_unlock_irq(xas);
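grab_mapping_entry() (lines 573-655) ends with the standard xarray allocate-and-retry dance: if the store needed a node it could not allocate under the lock, xas_nomem() allocates one after the lock is dropped and the whole operation is retried. A generic sketch of that idiom (not the DAX code itself), storing one value entry:

#include <linux/gfp.h>
#include <linux/xarray.h>

static int store_with_retry(struct xarray *xa, unsigned long index,
			    unsigned long value)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock_irq(&xas);
		xas_store(&xas, xa_mk_value(value));
		xas_unlock_irq(&xas);
		/* xas_nomem() returns true after allocating the node the
		 * failed store needed, in which case we simply retry. */
	} while (xas_nomem(&xas, GFP_KERNEL));

	return xas_error(&xas);
}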
685 XA_STATE(xas, &mapping->i_pages, start_idx);
715 xas_lock_irq(&xas);
716 xas_for_each(&xas, entry, end_idx) {
720 entry = get_unlocked_entry(&xas, 0);
723 put_unlocked_entry(&xas, entry, WAKE_NEXT);
729 xas_pause(&xas);
730 xas_unlock_irq(&xas);
732 xas_lock_irq(&xas);
734 xas_unlock_irq(&xas);
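The loop at lines 715-734 is the long-walk idiom: iterate under xa_lock_irq and, every so many entries, pause the iteration, drop the lock, reschedule, and resume where it left off. A generic sketch of that shape, counting entries over a range (XA_CHECK_SCHED is the batching constant from <linux/xarray.h>):

#include <linux/sched.h>
#include <linux/xarray.h>

static unsigned long count_entries(struct xarray *xa, unsigned long start,
				   unsigned long end)
{
	XA_STATE(xas, xa, start);
	unsigned long count = 0, scanned = 0;
	void *entry;

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, end) {
		count++;
		if (++scanned % XA_CHECK_SCHED)
			continue;
		/* Remember our position, drop the lock, and give the
		 * scheduler a chance before resuming the walk. */
		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return count;
}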
748 XA_STATE(xas, &mapping->i_pages, index);
752 xas_lock_irq(&xas);
753 entry = get_unlocked_entry(&xas, 0);
757 (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
758 xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
761 xas_store(&xas, NULL);
765 put_unlocked_entry(&xas, entry, WAKE_ALL);
766 xas_unlock_irq(&xas);
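Lines 748-766 remove an entry only if it is not tagged dirty or towrite. A simplified sketch of that conditional removal; it omits the locked-entry wait that the real code performs through get_unlocked_entry()/put_unlocked_entry():

#include <linux/pagemap.h>
#include <linux/xarray.h>

static bool remove_if_clean(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	bool removed = false;

	xas_lock_irq(&xas);
	if (xas_load(&xas) &&
	    !xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) &&
	    !xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)) {
		xas_store(&xas, NULL);
		removed = true;
	}
	xas_unlock_irq(&xas);
	return removed;
}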
773 XA_STATE(xas, &mapping->i_pages, start);
777 xas_lock_irq(&xas);
778 xas_for_each(&xas, entry, end) {
779 entry = get_unlocked_entry(&xas, 0);
780 xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
781 xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
782 put_unlocked_entry(&xas, entry, WAKE_NEXT);
787 xas_pause(&xas);
788 xas_unlock_irq(&xas);
790 xas_lock_irq(&xas);
792 xas_unlock_irq(&xas);
869 static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
883 unsigned long index = xas->xa_index;
892 xas_reset(xas);
893 xas_lock_irq(xas);
908 old = dax_lock_entry(xas, new_entry);
913 xas_load(xas); /* Walk the xa_state */
917 xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
920 xas_set_mark(xas, PAGECACHE_TAG_TOWRITE);
922 xas_unlock_irq(xas);
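dax_insert_entry() (lines 869-922) stores the new entry and then transfers the writeback state onto the xarray marks at the same index. A minimal sketch of just the mark handling; "dirty" and "towrite" are hypothetical flags standing in for the conditions the real function derives from the fault and the iomap:

#include <linux/pagemap.h>
#include <linux/xarray.h>

/* Caller holds xa_lock_irq and has the DAX entry locked. */
static void mark_entry_for_writeback(struct xa_state *xas, bool dirty,
				     bool towrite)
{
	xas_load(xas);			/* position the xa_state on the entry */
	if (dirty)
		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
	if (towrite)
		xas_set_mark(xas, PAGECACHE_TAG_TOWRITE);
}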
926 static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
943 entry = get_unlocked_entry(xas, 0);
962 if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
967 dax_lock_entry(xas, entry);
976 xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
977 xas_unlock_irq(xas);
988 index = xas->xa_index & ~(count - 1);
1006 xas_reset(xas);
1007 xas_lock_irq(xas);
1008 xas_store(xas, entry);
1009 xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
1010 dax_wake_entry(xas, entry, WAKE_NEXT);
1016 put_unlocked_entry(xas, entry, WAKE_NEXT);
1028 XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
1041 trace_dax_writeback_range(inode, xas.xa_index, end_index);
1043 tag_pages_for_writeback(mapping, xas.xa_index, end_index);
1045 xas_lock_irq(&xas);
1046 xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
1047 ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
1055 xas_pause(&xas);
1056 xas_unlock_irq(&xas);
1058 xas_lock_irq(&xas);
1060 xas_unlock_irq(&xas);
1061 trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
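dax_writeback_mapping_range() (lines 1028-1061) visits only entries carrying PAGECACHE_TAG_TOWRITE, using the marked variant of the iterator. A hedged sketch of that walk; writeback_one() is a trivial stand-in for dax_writeback_one(), and the periodic xas_pause()/cond_resched() batching shown at lines 1055-1058 is omitted:

#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/xarray.h>

/* Trivial stand-in: the real dax_writeback_one() flushes the pfn range
 * before clearing the marks. */
static int writeback_one(struct xa_state *xas, void *entry)
{
	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
	return 0;
}

static int writeback_range(struct address_space *mapping, pgoff_t start,
			   pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	void *entry;
	int ret = 0;

	/* Tag everything to be written first, then walk only tagged entries,
	 * mirroring the tag_pages_for_writeback() call at line 1043. */
	tag_pages_for_writeback(mapping, start, end);

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, end, PAGECACHE_TAG_TOWRITE) {
		ret = writeback_one(&xas, entry);
		if (ret < 0)
			break;
	}
	xas_unlock_irq(&xas);
	return ret;
}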
1186 static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1194 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);
1202 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1221 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn,
1254 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1638 * @xas: the dax mapping tree of a file
1644 struct xa_state *xas, void **entry, bool pmd)
1649 loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
1663 return dax_load_hole(xas, vmf, iter, entry);
1664 return dax_pmd_load_hole(xas, vmf, iter, entry);
1676 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags);
1701 XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
1726 entry = grab_mapping_entry(&xas, mapping, 0);
1749 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false);
1767 dax_unlock_entry(&xas, entry);
1774 static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
1801 if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff)
1811 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
1833 if (xas.xa_index >= max_pgoff) {
1838 if (dax_fault_check_fallback(vmf, &xas, max_pgoff))
1847 entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
1865 iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT;
1870 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true);
1876 dax_unlock_entry(&xas, entry);
1932 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
1936 xas_lock_irq(&xas);
1937 entry = get_unlocked_entry(&xas, order);
1941 put_unlocked_entry(&xas, entry, WAKE_NEXT);
1942 xas_unlock_irq(&xas);
1947 xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
1948 dax_lock_entry(&xas, entry);
1949 xas_unlock_irq(&xas);
1958 dax_unlock_entry(&xas, entry);