Lines matching defs:xas (from lib/xarray.c in the Linux kernel)

21  * @xas is the 'xarray operation state'.  It may be either a pointer to
38 static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
41 xas_lock_irq(xas);
43 xas_lock_bh(xas);
45 xas_lock(xas);
48 static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
51 xas_unlock_irq(xas);
53 xas_unlock_bh(xas);
55 xas_unlock(xas);
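
Which branch the two dispatch helpers above take is decided by how the array was initialised. A minimal sketch (irq_xa is a placeholder name, not from this file):

	/* An xarray whose internal xa_lock is always taken irq-safe. */
	DEFINE_XARRAY_FLAGS(irq_xa, XA_FLAGS_LOCK_IRQ);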
121 * @xas: Array operation state.
126 static void xas_squash_marks(const struct xa_state *xas)
129 unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;
131 if (!xas->xa_sibs)
135 unsigned long *marks = xas->xa_node->marks[mark];
136 if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
138 __set_bit(xas->xa_offset, marks);
139 bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
149 static void xas_set_offset(struct xa_state *xas)
151 xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
155 static void xas_move_index(struct xa_state *xas, unsigned long offset)
157 unsigned int shift = xas->xa_node->shift;
158 xas->xa_index &= ~XA_CHUNK_MASK << shift;
159 xas->xa_index += offset << shift;
162 static void xas_next_offset(struct xa_state *xas)
164 xas->xa_offset++;
165 xas_move_index(xas, xas->xa_offset);
168 static void *set_bounds(struct xa_state *xas)
170 xas->xa_node = XAS_BOUNDS;
175 * Starts a walk. If the @xas is already valid, we assume that it's on
178 * of the xarray, return NULL without changing @xas->xa_node. Otherwise
179 * set @xas->xa_node to NULL and return the current head of the array.
181 static void *xas_start(struct xa_state *xas)
185 if (xas_valid(xas))
186 return xas_reload(xas);
187 if (xas_error(xas))
190 entry = xa_head(xas->xa);
192 if (xas->xa_index)
193 return set_bounds(xas);
195 if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
196 return set_bounds(xas);
199 xas->xa_node = NULL;
203 static void *xas_descend(struct xa_state *xas, struct xa_node *node)
205 unsigned int offset = get_offset(xas->xa_index, node);
206 void *entry = xa_entry(xas->xa, node, offset);
208 xas->xa_node = node;
211 entry = xa_entry(xas->xa, node, offset);
216 xas->xa_offset = offset;
222 * @xas: XArray operation state.
224 * Usually walks the @xas to the appropriate state to load the entry
226 * @xas is in an error state. xas_load() will never expand the tree.
230 * present within the range specified by @xas.
235 void *xas_load(struct xa_state *xas)
237 void *entry = xas_start(xas);
242 if (xas->xa_shift > node->shift)
244 entry = xas_descend(xas, node);
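
xas_load() walks from the head toward xa_index and never allocates. A minimal caller sketch, assuming a placeholder array my_xa:

	XA_STATE(xas, &my_xa, 7);
	void *entry;

	rcu_read_lock();
	entry = xas_load(&xas);		/* NULL if nothing is stored at index 7 */
	rcu_read_unlock();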
263 * @xas: XArray operation state.
268 void xas_destroy(struct xa_state *xas)
270 struct xa_node *next, *node = xas->xa_alloc;
276 xas->xa_alloc = node = next;
282 * @xas: XArray operation state.
287 * If it fails, @xas is flagged as needing memory to continue. The caller
298 bool xas_nomem(struct xa_state *xas, gfp_t gfp)
300 if (xas->xa_node != XA_ERROR(-ENOMEM)) {
301 xas_destroy(xas);
304 if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
306 xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
307 if (!xas->xa_alloc)
309 xas->xa_alloc->parent = NULL;
310 XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
311 xas->xa_node = XAS_RESTART;
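
xas_nomem() enables the standard retry idiom: try the operation under the lock, and if it failed with -ENOMEM, allocate outside the lock and go around again. A sketch with placeholder names my_xa, index and item:

	XA_STATE(xas, &my_xa, index);

	do {
		xas_lock(&xas);
		xas_store(&xas, item);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));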
318 * @xas: XArray operation state.
325 static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
326 __must_hold(xas->xa->xa_lock)
328 unsigned int lock_type = xa_lock_type(xas->xa);
330 if (xas->xa_node != XA_ERROR(-ENOMEM)) {
331 xas_destroy(xas);
334 if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
337 xas_unlock_type(xas, lock_type);
338 xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
339 xas_lock_type(xas, lock_type);
341 xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
343 if (!xas->xa_alloc)
345 xas->xa_alloc->parent = NULL;
346 XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
347 xas->xa_node = XAS_RESTART;
351 static void xas_update(struct xa_state *xas, struct xa_node *node)
353 if (xas->xa_update)
354 xas->xa_update(node);
359 static void *xas_alloc(struct xa_state *xas, unsigned int shift)
361 struct xa_node *parent = xas->xa_node;
362 struct xa_node *node = xas->xa_alloc;
364 if (xas_invalid(xas))
368 xas->xa_alloc = NULL;
372 if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
375 node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
377 xas_set_err(xas, -ENOMEM);
383 node->offset = xas->xa_offset;
386 xas_update(xas, parent);
393 RCU_INIT_POINTER(node->parent, xas->xa_node);
394 node->array = xas->xa;
401 static unsigned long xas_size(const struct xa_state *xas)
403 return (xas->xa_sibs + 1UL) << xas->xa_shift;
409 * in order to add the entry described by @xas. Because we cannot store a
413 static unsigned long xas_max(struct xa_state *xas)
415 unsigned long max = xas->xa_index;
418 if (xas->xa_shift || xas->xa_sibs) {
419 unsigned long mask = xas_size(xas) - 1;
437 static void xas_shrink(struct xa_state *xas)
439 struct xarray *xa = xas->xa;
440 struct xa_node *node = xas->xa_node;
455 xas->xa_node = XAS_BOUNDS;
465 xas_update(xas, node);
476 * @xas: Array operation state.
478 * Attempts to delete the @xas->xa_node. This will fail if @xas->xa_node has
481 static void xas_delete_node(struct xa_state *xas)
483 struct xa_node *node = xas->xa_node;
492 parent = xa_parent_locked(xas->xa, node);
493 xas->xa_node = parent;
494 xas->xa_offset = node->offset;
498 xas->xa->xa_head = NULL;
499 xas->xa_node = XAS_BOUNDS;
503 parent->slots[xas->xa_offset] = NULL;
507 xas_update(xas, node);
511 xas_shrink(xas);
516 * @xas: Array operation state.
523 static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
529 void *entry = xa_entry_locked(xas->xa, node, offset);
542 parent = xa_parent_locked(xas->xa, node);
546 xas_update(xas, node);
557 * sufficient height to be able to contain @xas->xa_index
559 static int xas_expand(struct xa_state *xas, void *head)
561 struct xarray *xa = xas->xa;
564 unsigned long max = xas_max(xas);
576 xas->xa_node = NULL;
582 node = xas_alloc(xas, shift);
617 xas_update(xas, node);
622 xas->xa_node = node;
628 * @xas: XArray operation state.
637 * slot, returns %NULL and indicates the error in @xas.
639 static void *xas_create(struct xa_state *xas, bool allow_root)
641 struct xarray *xa = xas->xa;
644 struct xa_node *node = xas->xa_node;
646 unsigned int order = xas->xa_shift;
650 xas->xa_node = NULL;
653 shift = xas_expand(xas, entry);
660 } else if (xas_error(xas)) {
663 unsigned int offset = xas->xa_offset;
677 node = xas_alloc(xas, shift);
688 entry = xas_descend(xas, node);
689 slot = &node->slots[xas->xa_offset];
697 * @xas: XArray operation state.
699 * Creates all of the slots in the range covered by @xas. Sets @xas to
704 void xas_create_range(struct xa_state *xas)
706 unsigned long index = xas->xa_index;
707 unsigned char shift = xas->xa_shift;
708 unsigned char sibs = xas->xa_sibs;
710 xas->xa_index |= ((sibs + 1UL) << shift) - 1;
711 if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
712 xas->xa_offset |= sibs;
713 xas->xa_shift = 0;
714 xas->xa_sibs = 0;
717 xas_create(xas, true);
718 if (xas_error(xas))
720 if (xas->xa_index <= (index | XA_CHUNK_MASK))
722 xas->xa_index -= XA_CHUNK_SIZE;
725 struct xa_node *node = xas->xa_node;
728 xas->xa_node = xa_parent_locked(xas->xa, node);
729 xas->xa_offset = node->offset - 1;
736 xas->xa_shift = shift;
737 xas->xa_sibs = sibs;
738 xas->xa_index = index;
741 xas->xa_index = index;
742 if (xas->xa_node)
743 xas_set_offset(xas);
747 static void update_node(struct xa_state *xas, struct xa_node *node,
757 xas_update(xas, node);
759 xas_delete_node(xas);
764 * @xas: XArray operation state.
767 * If @xas is operating on a multi-index entry, the entry returned by this
775 void *xas_store(struct xa_state *xas, void *entry)
778 void __rcu **slot = &xas->xa->xa_head;
787 first = xas_create(xas, allow_root);
789 first = xas_load(xas);
792 if (xas_invalid(xas))
794 node = xas->xa_node;
795 if (node && (xas->xa_shift < node->shift))
796 xas->xa_sibs = 0;
797 if ((first == entry) && !xas->xa_sibs)
801 offset = xas->xa_offset;
802 max = xas->xa_offset + xas->xa_sibs;
805 if (xas->xa_sibs)
806 xas_squash_marks(xas);
809 xas_init_marks(xas);
821 xas_free_nodes(xas, xa_to_node(next));
830 entry = xa_mk_sibling(xas->xa_offset);
835 next = xa_entry_locked(xas->xa, node, ++offset);
844 update_node(xas, node, count, values);
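
Because xas_store() honours xa_shift and xa_sibs, a multi-index store only needs the state initialised with an order (this requires CONFIG_XARRAY_MULTI, and the index must be aligned to the order). A hedged sketch:

	/* One entry covering the 16 indices starting at a 16-aligned index. */
	XA_STATE_ORDER(xas, &my_xa, index, 4);

	do {
		xas_lock(&xas);
		xas_store(&xas, item);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));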
851 * @xas: XArray operation state.
854 * Return: true if the mark is set, false if the mark is clear or @xas
857 bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
859 if (xas_invalid(xas))
861 if (!xas->xa_node)
862 return xa_marked(xas->xa, mark);
863 return node_get_mark(xas->xa_node, xas->xa_offset, mark);
869 * @xas: XArray operation state.
873 * on all the ancestor entries. Does nothing if @xas has not been walked to
876 void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
878 struct xa_node *node = xas->xa_node;
879 unsigned int offset = xas->xa_offset;
881 if (xas_invalid(xas))
888 node = xa_parent_locked(xas->xa, node);
891 if (!xa_marked(xas->xa, mark))
892 xa_mark_set(xas->xa, mark);
898 * @xas: XArray operation state.
903 * @xas has not been walked to an entry, or is in an error state.
905 void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
907 struct xa_node *node = xas->xa_node;
908 unsigned int offset = xas->xa_offset;
910 if (xas_invalid(xas))
920 node = xa_parent_locked(xas->xa, node);
923 if (xa_marked(xas->xa, mark))
924 xa_mark_clear(xas->xa, mark);
930 * @xas: Array operations state.
932 * Initialise all marks for the entry specified by @xas. If we're tracking
939 void xas_init_marks(const struct xa_state *xas)
944 if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
945 xas_set_mark(xas, mark);
947 xas_clear_mark(xas, mark);
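
The three mark helpers combine as below; xas_set_mark() and xas_clear_mark() require the state to have been walked to an entry and the lock to be held (they use xa_parent_locked()). Sketch:

	XA_STATE(xas, &my_xa, index);

	xas_lock(&xas);
	if (xas_load(&xas))			/* must be on an entry */
		xas_set_mark(&xas, XA_MARK_0);	/* propagates to all ancestors */
	xas_unlock(&xas);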
991 * @xas: XArray operation state.
999 * entries of the order stored in the @xas.
1003 void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
1007 unsigned int mask = xas->xa_sibs;
1010 if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
1012 if (xas->xa_shift + XA_CHUNK_SHIFT > order)
1020 node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
1023 node->array = xas->xa;
1032 RCU_INIT_POINTER(node->parent, xas->xa_alloc);
1033 xas->xa_alloc = node;
1038 xas_destroy(xas);
1039 xas_set_err(xas, -ENOMEM);
1045 * @xas: XArray operation state.
1049 * The size of the new entries is set in @xas. The value in @entry is
1054 void xas_split(struct xa_state *xas, void *entry, unsigned int order)
1059 void *curr = xas_load(xas);
1062 node = xas->xa_node;
1066 marks = node_get_marks(node, xas->xa_offset);
1068 offset = xas->xa_offset + sibs;
1070 if (xas->xa_shift < node->shift) {
1071 struct xa_node *child = xas->xa_alloc;
1073 xas->xa_alloc = rcu_dereference_raw(child->parent);
1085 xas_update(xas, child);
1087 unsigned int canon = offset - xas->xa_sibs;
1095 (xas->xa_sibs + 1);
1097 } while (offset-- > xas->xa_offset);
1100 xas_update(xas, node);
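
xas_split_alloc() and xas_split() are used as a pair, page-cache style: pre-allocate the child nodes outside the lock, then split under it. A hedged sketch, assuming an existing entry old of order old_order being split into entries of new_order:

	XA_STATE_ORDER(xas, &my_xa, index, new_order);

	xas_split_alloc(&xas, old, old_order, GFP_KERNEL);
	if (xas_error(&xas))
		return xas_error(&xas);
	xas_lock(&xas);
	xas_split(&xas, old, old_order);
	xas_unlock(&xas);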
1107 * @xas: XArray operation state.
1112 * the lock. It resets the @xas to be suitable for the next iteration
1120 void xas_pause(struct xa_state *xas)
1122 struct xa_node *node = xas->xa_node;
1124 if (xas_invalid(xas))
1127 xas->xa_node = XAS_RESTART;
1129 unsigned long offset = xas->xa_offset;
1131 if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
1134 xas->xa_index += (offset - xas->xa_offset) << node->shift;
1135 if (xas->xa_index == 0)
1136 xas->xa_node = XAS_BOUNDS;
1138 xas->xa_index++;
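
xas_pause() exists for the long-iteration pattern: park the state at a safe index, drop the lock so others can make progress, then resume. A sketch under RCU:

	XA_STATE(xas, &my_xa, 0);
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (need_resched()) {
			xas_pause(&xas);	/* safe to drop the lock now */
			rcu_read_unlock();
			cond_resched();
			rcu_read_lock();
		}
		/* ... process entry ... */
	}
	rcu_read_unlock();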
1145 * @xas: XArray operation state.
1150 void *__xas_prev(struct xa_state *xas)
1154 if (!xas_frozen(xas->xa_node))
1155 xas->xa_index--;
1156 if (!xas->xa_node)
1157 return set_bounds(xas);
1158 if (xas_not_node(xas->xa_node))
1159 return xas_load(xas);
1161 if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
1162 xas->xa_offset--;
1164 while (xas->xa_offset == 255) {
1165 xas->xa_offset = xas->xa_node->offset - 1;
1166 xas->xa_node = xa_parent(xas->xa, xas->xa_node);
1167 if (!xas->xa_node)
1168 return set_bounds(xas);
1172 entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1176 xas->xa_node = xa_to_node(entry);
1177 xas_set_offset(xas);
1184 * @xas: XArray operation state.
1189 void *__xas_next(struct xa_state *xas)
1193 if (!xas_frozen(xas->xa_node))
1194 xas->xa_index++;
1195 if (!xas->xa_node)
1196 return set_bounds(xas);
1197 if (xas_not_node(xas->xa_node))
1198 return xas_load(xas);
1200 if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
1201 xas->xa_offset++;
1203 while (xas->xa_offset == XA_CHUNK_SIZE) {
1204 xas->xa_offset = xas->xa_node->offset + 1;
1205 xas->xa_node = xa_parent(xas->xa, xas->xa_node);
1206 if (!xas->xa_node)
1207 return set_bounds(xas);
1211 entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1215 xas->xa_node = xa_to_node(entry);
1216 xas_set_offset(xas);
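
__xas_prev() and __xas_next() are the out-of-line slow paths behind xas_prev() and xas_next(), which move exactly one index in either direction (the common case stays inside the current node):

	XA_STATE(xas, &my_xa, start);
	void *entry;

	rcu_read_lock();
	entry = xas_load(&xas);	/* position the state at 'start' */
	entry = xas_next(&xas);	/* entry at start + 1 */
	entry = xas_prev(&xas);	/* back at 'start' */
	rcu_read_unlock();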
1223 * @xas: XArray operation state.
1226 * If the @xas has not yet been walked to an entry, return the entry
1227 * which has an index >= xas.xa_index. If it has been walked, the entry
1232 * is set to the smallest index not yet in the array. This allows @xas
1237 void *xas_find(struct xa_state *xas, unsigned long max)
1241 if (xas_error(xas) || xas->xa_node == XAS_BOUNDS)
1243 if (xas->xa_index > max)
1244 return set_bounds(xas);
1246 if (!xas->xa_node) {
1247 xas->xa_index = 1;
1248 return set_bounds(xas);
1249 } else if (xas->xa_node == XAS_RESTART) {
1250 entry = xas_load(xas);
1251 if (entry || xas_not_node(xas->xa_node))
1253 } else if (!xas->xa_node->shift &&
1254 xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
1255 xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
1258 xas_next_offset(xas);
1260 while (xas->xa_node && (xas->xa_index <= max)) {
1261 if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
1262 xas->xa_offset = xas->xa_node->offset + 1;
1263 xas->xa_node = xa_parent(xas->xa, xas->xa_node);
1267 entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1269 xas->xa_node = xa_to_node(entry);
1270 xas->xa_offset = 0;
1276 xas_next_offset(xas);
1279 if (!xas->xa_node)
1280 xas->xa_node = XAS_BOUNDS;
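
xas_find() is the engine behind xas_for_each(); called directly, the first call returns the entry at or after xa_index and later calls advance to the next present entry:

	XA_STATE(xas, &my_xa, 0);
	void *entry;

	rcu_read_lock();
	for (entry = xas_find(&xas, ULONG_MAX); entry;
	     entry = xas_find(&xas, ULONG_MAX)) {
		/* visits every present entry, in index order */
	}
	rcu_read_unlock();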
1287 * @xas: XArray operation state.
1291 * If the @xas has not yet been walked to an entry, return the marked entry
1292 * which has an index >= xas.xa_index. If it has been walked, the entry
1294 * first marked entry with an index > xas.xa_index.
1296 * If no marked entry is found and the array is smaller than @max, @xas is
1297 * set to the bounds state and xas->xa_index is set to the smallest index
1298 * not yet in the array. This allows @xas to be immediately passed to
1301 * If no entry is found before @max is reached, @xas is set to the restart
1306 void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
1312 if (xas_error(xas))
1314 if (xas->xa_index > max)
1317 if (!xas->xa_node) {
1318 xas->xa_index = 1;
1320 } else if (xas_top(xas->xa_node)) {
1322 entry = xa_head(xas->xa);
1323 xas->xa_node = NULL;
1324 if (xas->xa_index > max_index(entry))
1327 if (xa_marked(xas->xa, mark))
1329 xas->xa_index = 1;
1332 xas->xa_node = xa_to_node(entry);
1333 xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
1336 while (xas->xa_index <= max) {
1337 if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
1338 xas->xa_offset = xas->xa_node->offset + 1;
1339 xas->xa_node = xa_parent(xas->xa, xas->xa_node);
1340 if (!xas->xa_node)
1347 entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1349 xas->xa_offset = xa_to_sibling(entry);
1350 xas_move_index(xas, xas->xa_offset);
1354 offset = xas_find_chunk(xas, advance, mark);
1355 if (offset > xas->xa_offset) {
1357 xas_move_index(xas, offset);
1359 if ((xas->xa_index - 1) >= max)
1361 xas->xa_offset = offset;
1366 entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1367 if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
1371 xas->xa_node = xa_to_node(entry);
1372 xas_set_offset(xas);
1376 if (xas->xa_index > max)
1378 return set_bounds(xas);
1380 xas->xa_node = XAS_RESTART;
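
xas_find_marked() likewise backs xas_for_each_marked(), which visits only entries carrying the given mark:

	XA_STATE(xas, &my_xa, 0);
	void *entry;

	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
		/* only entries with XA_MARK_0 set */
	}
	rcu_read_unlock();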
1387 * @xas: XArray operation state.
1389 * The @xas describes both a range and a position within that range.
1392 * Return: The next entry in the range covered by @xas or %NULL.
1394 void *xas_find_conflict(struct xa_state *xas)
1398 if (xas_error(xas))
1401 if (!xas->xa_node)
1404 if (xas_top(xas->xa_node)) {
1405 curr = xas_start(xas);
1410 curr = xas_descend(xas, node);
1416 if (xas->xa_node->shift > xas->xa_shift)
1420 if (xas->xa_node->shift == xas->xa_shift) {
1421 if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
1423 } else if (xas->xa_offset == XA_CHUNK_MASK) {
1424 xas->xa_offset = xas->xa_node->offset;
1425 xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
1426 if (!xas->xa_node)
1430 curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
1434 xas->xa_node = xa_to_node(curr);
1435 xas->xa_offset = 0;
1436 curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
1441 xas->xa_offset -= xas->xa_sibs;
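
xas_find_conflict() walks only the range the state describes, which is why it pairs with XA_STATE_ORDER: callers (the page cache, for one) use it to check that a multi-index range is empty before storing into it. Hedged sketch:

	XA_STATE_ORDER(xas, &my_xa, index, order);
	void *curr;

	xas_lock(&xas);
	curr = xas_find_conflict(&xas);	/* NULL iff the whole range is free */
	if (!curr)
		xas_store(&xas, item);
	xas_unlock(&xas);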
1456 XA_STATE(xas, xa, index);
1461 entry = xas_load(&xas);
1464 } while (xas_retry(&xas, entry));
1471 static void *xas_result(struct xa_state *xas, void *curr)
1475 if (xas_error(xas))
1476 curr = xas->xa_node;
1494 XA_STATE(xas, xa, index);
1495 return xas_result(&xas, xas_store(&xas, NULL));
1540 XA_STATE(xas, xa, index);
1549 curr = xas_store(&xas, entry);
1551 xas_clear_mark(&xas, XA_FREE_MARK);
1552 } while (__xas_nomem(&xas, gfp));
1554 return xas_result(&xas, curr);
1606 XA_STATE(xas, xa, index);
1613 curr = xas_load(&xas);
1615 xas_store(&xas, entry);
1617 xas_clear_mark(&xas, XA_FREE_MARK);
1619 } while (__xas_nomem(&xas, gfp));
1621 return xas_result(&xas, curr);
1643 XA_STATE(xas, xa, index);
1652 curr = xas_load(&xas);
1654 xas_store(&xas, entry);
1656 xas_clear_mark(&xas, XA_FREE_MARK);
1658 xas_set_err(&xas, -EBUSY);
1660 } while (__xas_nomem(&xas, gfp));
1662 return xas_error(&xas);
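
The -EBUSY path above is what distinguishes insertion from a plain store; at the convenience-API level it surfaces through xa_insert(), which stores only into an empty index:

	int err = xa_insert(&my_xa, index, item, GFP_KERNEL);

	/* err == 0 on success, -EBUSY if the index was already occupied. */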
1667 static void xas_set_range(struct xa_state *xas, unsigned long first,
1674 xas_set(xas, first);
1694 xas->xa_shift = shift;
1695 xas->xa_sibs = sibs;
1719 XA_STATE(xas, xa, 0);
1727 xas_lock(&xas);
1732 xas_set_order(&xas, last, order);
1733 xas_create(&xas, true);
1734 if (xas_error(&xas))
1738 xas_set_range(&xas, first, last);
1739 xas_store(&xas, entry);
1740 if (xas_error(&xas))
1742 first += xas_size(&xas);
1745 xas_unlock(&xas);
1746 } while (xas_nomem(&xas, gfp));
1748 return xas_result(&xas, NULL);
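
xa_store_range() (CONFIG_XARRAY_MULTI) makes every index in [first, last] return the same entry by building multi-index entries of the largest orders that fit:

	/* After this, xa_load() of any index in 64..191 returns 'item'. */
	xa_store_range(&my_xa, 64, 191, item, GFP_KERNEL);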
1761 XA_STATE(xas, xa, index);
1766 entry = xas_load(&xas);
1771 if (!xas.xa_node)
1775 unsigned int slot = xas.xa_offset + (1 << order);
1779 if (!xa_is_sibling(xas.xa_node->slots[slot]))
1784 order += xas.xa_node->shift;
1816 XA_STATE(xas, xa, 0);
1827 xas.xa_index = limit.min;
1828 xas_find_marked(&xas, limit.max, XA_FREE_MARK);
1829 if (xas.xa_node == XAS_RESTART)
1830 xas_set_err(&xas, -EBUSY);
1832 *id = xas.xa_index;
1833 xas_store(&xas, entry);
1834 xas_clear_mark(&xas, XA_FREE_MARK);
1835 } while (__xas_nomem(&xas, gfp));
1837 return xas_error(&xas);
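
__xa_alloc() is the locked core of xa_alloc(); the array must have been created with XA_FLAGS_ALLOC (e.g. via DEFINE_XARRAY_ALLOC) so that XA_FREE_MARK is tracked. Sketch:

	DEFINE_XARRAY_ALLOC(alloc_xa);	/* placeholder allocating array */
	u32 id;
	int err;

	err = xa_alloc(&alloc_xa, &id, item, xa_limit_32b, GFP_KERNEL);
	/* On success err == 0 and 'id' holds the index chosen for 'item'. */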
1906 XA_STATE(xas, xa, index);
1907 void *entry = xas_load(&xas);
1910 xas_set_mark(&xas, mark);
1924 XA_STATE(xas, xa, index);
1925 void *entry = xas_load(&xas);
1928 xas_clear_mark(&xas, mark);
1946 XA_STATE(xas, xa, index);
1950 entry = xas_start(&xas);
1951 while (xas_get_mark(&xas, mark)) {
1954 entry = xas_descend(&xas, xa_to_node(entry));
2020 XA_STATE(xas, xa, *indexp);
2026 entry = xas_find_marked(&xas, max, filter);
2028 entry = xas_find(&xas, max);
2029 } while (xas_retry(&xas, entry));
2033 *indexp = xas.xa_index;
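
xa_find() and xa_find_after() are in turn wrapped by xa_for_each(), which needs no explicit xa_state:

	unsigned long index;
	void *entry;

	xa_for_each(&my_xa, index, entry) {
		/* 'entry' is each present entry; 'index' its position */
	}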
2038 static bool xas_sibling(struct xa_state *xas)
2040 struct xa_node *node = xas->xa_node;
2046 return (xas->xa_index & mask) >
2047 ((unsigned long)xas->xa_offset << node->shift);
2070 XA_STATE(xas, xa, *indexp + 1);
2073 if (xas.xa_index == 0)
2079 entry = xas_find_marked(&xas, max, filter);
2081 entry = xas_find(&xas, max);
2083 if (xas_invalid(&xas))
2085 if (xas_sibling(&xas))
2087 if (!xas_retry(&xas, entry))
2093 *indexp = xas.xa_index;
2098 static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
2105 xas_for_each(xas, entry, max) {
2106 if (xas_retry(xas, entry))
2117 static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
2124 xas_for_each_marked(xas, entry, max, mark) {
2125 if (xas_retry(xas, entry))
2167 XA_STATE(xas, xa, start);
2173 return xas_extract_marked(&xas, dst, max, n, filter);
2174 return xas_extract_present(&xas, dst, max, n);
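
xa_extract() batches either variant into a caller-supplied array:

	void *dst[16];
	unsigned int copied;

	/* Up to 16 present entries with indices in [0, 1023]. */
	copied = xa_extract(&my_xa, dst, 0, 1023, 16, XA_PRESENT);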
2187 struct xa_state xas = {
2197 xas_store(&xas, NULL);
2213 XA_STATE(xas, xa, 0);
2217 xas.xa_node = NULL;
2218 xas_lock_irqsave(&xas, flags);
2221 xas_init_marks(&xas);
2226 xas_free_nodes(&xas, xa_to_node(entry));
2227 xas_unlock_irqrestore(&xas, flags);