Searched refs: xas (Results 1 - 25 of 46), sorted by path

/linux-master/tools/testing/radix-tree/
iteration_check.c 23 XA_STATE(xas, xa, index);
28 xas_lock(&xas);
30 xas_set_order(&xas, index, order);
32 if (xas_find_conflict(&xas))
34 xas_store(&xas, item);
35 xas_set_mark(&xas, TAG);
38 xas_unlock(&xas);
39 if (xas_nomem(&xas, GFP_KERNEL))
69 XA_STATE(xas, &array, 0);
75 xas_set(&xas,
[all...]
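
The iteration_check.c hits above show the canonical advanced-API store loop: do the work under xas_lock(), and when the tree needs a node that cannot be allocated while locked, xas_nomem() allocates outside the lock and requests a retry. A minimal sketch of that pattern, with placeholder names (store_order, XA_MARK_0) rather than anything from the file:

    #include <linux/xarray.h>

    /*
     * Hypothetical sketch: store @item as a multi-order entry at @index,
     * refusing occupied ranges and retrying after memory allocation.
     */
    static int store_order(struct xarray *xa, unsigned long index,
                           unsigned int order, void *item)
    {
            XA_STATE(xas, xa, index);

            do {
                    xas_lock(&xas);
                    xas_set_order(&xas, index, order);
                    if (!xas_find_conflict(&xas)) { /* range must be empty */
                            xas_store(&xas, item);
                            xas_set_mark(&xas, XA_MARK_0);  /* placeholder mark */
                    }
                    xas_unlock(&xas);
            } while (xas_nomem(&xas, GFP_KERNEL));  /* true: allocated, retry */

            return xas_error(&xas);
    }
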
test.c 176 XA_STATE(xas, xa, start);
183 xas_lock_irq(&xas);
184 xas_for_each_marked(&xas, item, end, iftag) {
185 xas_set_mark(&xas, thentag);
189 xas_pause(&xas);
190 xas_unlock_irq(&xas);
192 xas_lock_irq(&xas);
194 xas_unlock_irq(&xas);
257 XA_STATE(xas, xa, 0);
260 xas_for_each(&xas, entr
[all...]
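
test.c's loop copies one mark onto another in batches, using xas_pause() so the irq-disabling lock can be dropped and retaken mid-iteration. A sketch of that batching idiom, hypothetical names throughout:

    #include <linux/sched.h>
    #include <linux/xarray.h>

    /* Hypothetical sketch: for every entry marked @iftag, set @thentag too. */
    static void retag_marked(struct xarray *xa, xa_mark_t iftag, xa_mark_t thentag)
    {
            XA_STATE(xas, xa, 0);
            void *entry;
            unsigned int batch = 0;

            xas_lock_irq(&xas);
            xas_for_each_marked(&xas, entry, ULONG_MAX, iftag) {
                    xas_set_mark(&xas, thentag);
                    if (++batch % 128)
                            continue;
                    xas_pause(&xas);        /* park the walk at a safe point */
                    xas_unlock_irq(&xas);
                    cond_resched();         /* bounded irq-off latency */
                    xas_lock_irq(&xas);
            }
            xas_unlock_irq(&xas);
    }
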
/linux-master/arch/x86/kernel/cpu/sgx/
encl.c 530 XA_STATE(xas, &encl->page_array, PFN_DOWN(start));
545 xas_lock(&xas);
546 xas_for_each(&xas, page, PFN_DOWN(end - 1)) {
554 xas_pause(&xas);
555 xas_unlock(&xas);
561 xas_lock(&xas);
564 xas_unlock(&xas);
708 XA_STATE(xas, &encl->page_array, PFN_DOWN(encl->base));
710 xas_lock(&xas);
711 xas_for_each(&xas, entr
[all...]
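
encl.c shows the other common use of xas_pause(): parking the walk each iteration so the lock can be dropped around work that may sleep. Roughly, with assumed names:

    #include <linux/xarray.h>

    /* Hypothetical sketch: visit [first, last], sleeping between entries. */
    static void visit_range(struct xarray *xa, unsigned long first,
                            unsigned long last)
    {
            XA_STATE(xas, xa, first);
            void *entry;

            xas_lock(&xas);
            xas_for_each(&xas, entry, last) {
                    xas_pause(&xas);        /* walk can resume after unlocking */
                    xas_unlock(&xas);

                    /* ... work that may sleep; entry needs its own reference ... */

                    xas_lock(&xas);
            }
            xas_unlock(&xas);
    }
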
/linux-master/drivers/infiniband/core/
device.c 158 XA_STATE(xas, xa, *indexp);
163 entry = xas_find_marked(&xas, ULONG_MAX, filter);
166 } while (xas_retry(&xas, entry));
170 *indexp = xas.xa_index;
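
device.c advances a cursor to the next entry carrying a filter mark, restarting via xas_retry() if the walk races with a node split or shrink, then reports the position back through xas.xa_index. A sketch under the same assumptions (RCU used here for illustration):

    #include <linux/xarray.h>

    /* Hypothetical sketch: find the next entry at or after *indexp with @mark. */
    static void *find_next_marked(struct xarray *xa, unsigned long *indexp,
                                  xa_mark_t mark)
    {
            XA_STATE(xas, xa, *indexp);
            void *entry;

            rcu_read_lock();
            do {
                    entry = xas_find_marked(&xas, ULONG_MAX, mark);
            } while (xas_retry(&xas, entry));       /* skip transient retry entries */
            rcu_read_unlock();

            if (entry)
                    *indexp = xas.xa_index;         /* where the walk stopped */
            return entry;
    }
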
ib_core_uverbs.c 268 XA_STATE(xas, &ucontext->mmap_xa, min_pgoff);
294 xas_find_marked(&xas, max_pgoff, XA_FREE_MARK);
295 if (xas.xa_node == XAS_RESTART)
298 xa_first = xas.xa_index;
308 xas_next_entry(&xas, xa_last - 1);
309 if (xas.xa_node == XAS_BOUNDS || xas.xa_index >= xa_last)
restrack.c 116 XA_STATE(xas, &rt->xa, 0);
120 xas_for_each(&xas, e, U32_MAX)
/linux-master/drivers/infiniband/sw/rxe/
rxe_mr.c 96 XA_STATE(xas, &mr->page_list, 0);
106 xas_lock(&xas);
112 xas_set_err(&xas, -EINVAL);
116 xas_store(&xas, page);
117 if (xas_error(&xas))
119 xas_next(&xas);
123 xas_unlock(&xas);
124 } while (xas_nomem(&xas, GFP_KERNEL));
126 return xas_error(&xas);
161 XA_STATE(xas,
[all...]
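
rxe_mr.c fills consecutive indices with xas_store()/xas_next(), parks errors in the state (xas_set_err()/xas_error()), and resumes where it stopped once xas_nomem() refills memory. A simplified sketch using a plain page array instead of the driver's scatterlist:

    #include <linux/mm.h>
    #include <linux/xarray.h>

    /* Hypothetical sketch: store @num pages at consecutive indices from 0. */
    static int store_pages(struct xarray *xa, struct page **pages, int num)
    {
            XA_STATE(xas, xa, 0);
            int i = 0;

            do {
                    xas_lock(&xas);
                    while (i < num) {
                            xas_store(&xas, pages[i]);
                            if (xas_error(&xas))
                                    break;          /* typically -ENOMEM */
                            xas_next(&xas);         /* advance to the next index */
                            i++;
                    }
                    xas_unlock(&xas);
            } while (xas_nomem(&xas, GFP_KERNEL));  /* resumes at the failed index */

            return xas_error(&xas);
    }
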
/linux-master/drivers/iommu/iommufd/
main.c 150 XA_STATE(xas, &ictx->objects, id);
174 obj = xas_load(&xas);
196 xas_store(&xas, NULL);
pages.c 523 XA_STATE(xas, xa, start_index);
528 entry = xas_next(&xas);
529 if (xas_retry(&xas, entry))
544 XA_STATE(xas, xa, start_index);
547 xas_lock(&xas);
549 entry = xas_next(&xas);
550 if (xas_retry(&xas, entry))
555 xas_store(&xas, NULL);
560 xas_unlock(&xas);
566 XA_STATE(xas, x
[all...]
/linux-master/drivers/target/
target_core_user.c 510 XA_STATE(xas, &udev->data_pages, 0);
521 xas_set(&xas, dpi);
523 for (cnt = 0; xas_next(&xas) && cnt < page_cnt;)
3104 XA_STATE(xas, &udev->commands, cmd_id);
3106 xas_lock(&xas);
3107 cmd = xas_load(&xas);
3111 xas_unlock(&xas);
3118 xas_unlock(&xas);
3121 xas_store(&xas, NULL);
3122 xas_unlock(&xas);
[all...]
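
target_core_user.c looks a command up and, where appropriate, erases it without re-walking the tree, all under a single xas_lock(). Approximately, with hypothetical naming:

    #include <linux/xarray.h>

    /* Hypothetical sketch: remove and return the entry at @id, if present. */
    static void *take_entry(struct xarray *xa, unsigned long id)
    {
            XA_STATE(xas, xa, id);
            void *entry;

            xas_lock(&xas);
            entry = xas_load(&xas);
            if (entry)
                    xas_store(&xas, NULL);  /* erase without re-walking the tree */
            xas_unlock(&xas);

            return entry;
    }
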
/linux-master/fs/afs/
dir.c 117 XA_STATE(xas, &mapping->i_pages, 0);
123 xas_for_each(&xas, folio, last) {
124 if (xas_retry(&xas, folio))
192 XA_STATE(xas, &mapping->i_pages, 0);
201 xas_for_each(&xas, folio, last) {
202 if (xas_retry(&xas, folio))
226 XA_STATE(xas, &mapping->i_pages, 0);
232 xas_for_each(&xas, folio, last) {
233 if (xas_retry(&xas, folio))
/linux-master/fs/cachefiles/
daemon.c 359 XA_STATE(xas, &cache->reqs, 0);
369 xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
ondemand.c 15 XA_STATE(xas, &cache->reqs, 0);
22 xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
26 xas_store(&xas, NULL);
189 XA_STATE(xas, &cache->reqs, 0);
199 xas_lock(&xas);
200 xas_for_each(&xas, req, ULONG_MAX)
201 xas_set_mark(&xas, CACHEFILES_REQ_NEW);
202 xas_unlock(&xas);
273 static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xas,
280 xas_for_each_marked(xas, re
[all...]
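
Both cachefiles files iterate entries marked CACHEFILES_REQ_NEW, and ondemand.c erases each one in place with xas_store(&xas, NULL), which is legal inside the iteration. A minimal sketch with a placeholder mark:

    #include <linux/xarray.h>

    /* Hypothetical sketch: pop every request still carrying @new_mark. */
    static void flush_marked(struct xarray *xa, xa_mark_t new_mark)
    {
            XA_STATE(xas, xa, 0);
            void *req;

            xas_lock(&xas);
            xas_for_each_marked(&xas, req, ULONG_MAX, new_mark) {
                    xas_store(&xas, NULL);  /* erase in place; iteration continues */
                    /* ... complete or free @req here ... */
            }
            xas_unlock(&xas);
    }
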
/linux-master/fs/
dax.c 143 static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
147 unsigned long index = xas->xa_index;
156 key->xa = xas->xa;
159 hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
181 static void dax_wake_entry(struct xa_state *xas, void *entry,
187 wq = dax_entry_waitqueue(xas, entry, &key);
209 static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
219 entry = xas_find_conflict(xas);
227 wq = dax_entry_waitqueue(xas, entry, &ewait.key);
230 xas_unlock_irq(xas);
243 wait_entry_unlocked(struct xa_state *xas, void *entry)
264 put_unlocked_entry(struct xa_state *xas, void *entry, enum dax_wake_mode mode)
276 dax_unlock_entry(struct xa_state *xas, void *entry)
292 dax_lock_entry(struct xa_state *xas, void *entry)
573 grab_mapping_entry(struct xa_state *xas, struct address_space *mapping, unsigned int order)
869 dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf, const struct iomap_iter *iter, void *entry, pfn_t pfn, unsigned long flags)
926 dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev, struct address_space *mapping, void *entry)
1186 dax_load_hole(struct xa_state *xas, struct vm_fault *vmf, const struct iomap_iter *iter, void **entry)
1202 dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, const struct iomap_iter *iter, void **entry)
1254 dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, const struct iomap_iter *iter, void **entry)
1642 dax_fault_iter(struct vm_fault *vmf, const struct iomap_iter *iter, pfn_t *pfnp, struct xa_state *xas, void **entry, bool pmd)
1774 dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas, pgoff_t max_pgoff)
[all...]
fs-writeback.c 401 XA_STATE(xas, &mapping->i_pages, 0);
422 xas_for_each_marked(&xas, folio, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
430 xas_set(&xas, 0);
431 xas_for_each_marked(&xas, folio, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
/linux-master/fs/erofs/
fscache.c 62 XA_STATE(xas, &req->mapping->i_pages, start_page);
65 xas_for_each(&xas, folio, last_page) {
66 if (xas_retry(&xas, folio))
/linux-master/fs/netfs/
buffered_read.c 29 XA_STATE(xas, &rreq->mapping->i_pages, start_page);
51 xas_for_each(&xas, folio, last_page) {
57 if (xas_retry(&xas, folio))
fscache_io.c 182 XA_STATE(xas, &mapping->i_pages, first);
185 xas_for_each(&xas, page, last) {
iterator.c 160 XA_STATE(xas, iter->xarray, index);
169 xas_for_each(&xas, folio, ULONG_MAX) {
171 if (xas_retry(&xas, folio))
misc.c 19 XA_STATE_ORDER(xas, xa, index, folio_order(folio));
22 xas_lock(&xas);
24 xas_store(&xas, folio);
25 if (!xas_error(&xas))
27 xas_unlock(&xas);
28 if (!xas_nomem(&xas, gfp_mask))
29 return xas_error(&xas);
34 xas_set_mark(&xas, NETFS_BUF_PUT_MARK);
36 xas_set_mark(&xas, NETFS_BUF_PAGECACHE_MARK);
37 xas_unlock(&xas);
[all...]
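
misc.c stores a whole folio as one multi-order entry via XA_STATE_ORDER and then marks it. A condensed sketch, with the NETFS_BUF_* marks replaced by XA_MARK_0 for illustration:

    #include <linux/mm.h>
    #include <linux/xarray.h>

    /* Hypothetical sketch: store a large folio as a single multi-order entry. */
    static int buffer_add_folio(struct xarray *xa, unsigned long index,
                                struct folio *folio, gfp_t gfp)
    {
            XA_STATE_ORDER(xas, xa, index, folio_order(folio));

            do {
                    xas_lock(&xas);
                    xas_store(&xas, folio);
                    if (!xas_error(&xas))
                            xas_set_mark(&xas, XA_MARK_0);  /* placeholder mark */
                    xas_unlock(&xas);
            } while (xas_nomem(&xas, gfp));

            return xas_error(&xas);
    }
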
write_collect.c 83 XA_STATE(xas, &wreq->mapping->i_pages, pos / PAGE_SIZE);
89 xas_reset(&xas);
90 folio = xas_load(&xas);
91 if (xas_retry(&xas, folio))
96 wreq->debug_id, xas.xa_index, pos / PAGE_SIZE);
99 if (folio == xas_reload(&xas))
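
write_collect.c reloads the slot after looking the folio up, confirming nothing moved while a reference was being taken; xas_reset() restarts the walk first because the saved state may be stale. A sketch of that revalidation idiom, names assumed:

    #include <linux/xarray.h>

    /* Hypothetical sketch: RCU lookup that revalidates the slot afterwards. */
    static void *lookup_stable(struct xarray *xa, unsigned long index)
    {
            XA_STATE(xas, xa, index);
            void *entry;

            rcu_read_lock();
    retry:
            xas_reset(&xas);                /* the saved walk state may be stale */
            entry = xas_load(&xas);
            if (xas_retry(&xas, entry))
                    goto retry;
            /* ... take a reference on @entry here ... */
            if (entry && entry != xas_reload(&xas))
                    goto retry;             /* slot changed: drop the ref, retry */
            rcu_read_unlock();

            return entry;
    }
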
/linux-master/fs/smb/client/
smb2ops.c 4313 XA_STATE(xas, buffer, 0);
4316 xas_for_each_marked(&xas, folio, ULONG_MAX, XA_MARK_0) {
smbdirect.c 2539 XA_STATE(xas, xa, index);
2543 xas_for_each(&xas, folio, ULONG_MAX) {
2544 if (xas_retry(&xas, folio))
/linux-master/include/linux/
iov_iter.h 154 XA_STATE(xas, iter->xarray, index);
157 xas_for_each(&xas, folio, ULONG_MAX) {
160 if (xas_retry(&xas, folio))
pagemap.h 1385 XA_STATE(xas, &rac->mapping->i_pages, 0);
1393 xas_set(&xas, rac->_index);
1395 xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
1396 if (xas_retry(&xas, page))
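
pagemap.h's readahead helper walks a bounded window of the page cache, skipping transient retry entries. The same shape in isolation, as a hypothetical RCU-side counter:

    #include <linux/xarray.h>

    /* Hypothetical sketch: count present entries in [start, end] under RCU. */
    static unsigned int count_range(struct xarray *xa, unsigned long start,
                                    unsigned long end)
    {
            XA_STATE(xas, xa, start);
            void *entry;
            unsigned int count = 0;

            rcu_read_lock();
            xas_for_each(&xas, entry, end) {
                    if (xas_retry(&xas, entry))
                            continue;       /* raced with a split; keep walking */
                    count++;
            }
            rcu_read_unlock();

            return count;
    }
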
