Searched refs:refs (i.e. symbol references to "refs"): results 1 - 25 of 169, sorted by last modified time

/linux-master/net/core/
skbuff.c:6866 refcount_inc(&flow->key->refs);
/linux-master/kernel/bpf/
verifier.c:1261 dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs,
1263 if (!dst->refs)
1285 state->refs = realloc_array(state->refs, state->acquired_refs, n,
1287 if (!state->refs)
1321 /* Acquire a pointer id from the env and update the state->refs to include
1336 state->refs[new_ofs].id = id;
1337 state->refs[new_of
[all...]
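
The verifier hits above show per-state bookkeeping of acquired references: the refs array is copied or grown (copy_array()/realloc_array()) and a newly acquired pointer id is appended at the end. Below is a minimal sketch of that append-to-a-growable-array pattern, assuming kernel context; copy_array()/realloc_array() are verifier-internal helpers, so the sketch uses krealloc_array() instead, and struct ref_state, struct func_refs and acquire_ref() are hypothetical names, not the verifier's own.

/*
 * Sketch only: grow a refs array by one slot and record the newly
 * acquired reference id.  Not the verifier's actual helpers.
 */
#include <linux/errno.h>
#include <linux/slab.h>

struct ref_state {
	int id;		/* unique id handed out for the acquired pointer */
	int insn_idx;	/* instruction that acquired it */
};

struct func_refs {
	struct ref_state *refs;
	int acquired_refs;
};

static int acquire_ref(struct func_refs *st, int id, int insn_idx)
{
	size_t n = st->acquired_refs + 1;
	struct ref_state *tmp;

	tmp = krealloc_array(st->refs, n, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	st->refs = tmp;
	st->refs[n - 1].id = id;
	st->refs[n - 1].insn_idx = insn_idx;
	st->acquired_refs = n;
	return id;
}
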
/linux-master/fs/btrfs/
volumes.c:3071 refcount_inc(&map->refs);
3107 refcount_inc(&prev_map->refs);
4962 * the new size, and then following the back refs to the chunks.
5612 refcount_set(&map->refs, 1);
5627 refcount_set(&clone->refs, 1);
6121 refcount_set(&bioc->refs, 1);
6133 WARN_ON(!refcount_read(&bioc->refs));
6134 refcount_inc(&bioc->refs);
6141 if (refcount_dec_and_test(&bioc->refs))
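
Most of the btrfs hits on this page (volumes.c, ordered-data.c, scrub.c, extent_map.c) and the blk-core.c hits at the bottom follow the same plain refcount_t lifecycle: refcount_set(..., 1) when the object is created, refcount_inc() for each additional holder, and free the object when refcount_dec_and_test() returns true. The following is a minimal sketch of that lifecycle, assuming kernel context; struct chunk_like and its helpers are hypothetical names, not btrfs code.

#include <linux/bug.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct chunk_like {
	refcount_t refs;
	/* ... payload ... */
};

static struct chunk_like *chunk_like_alloc(void)
{
	struct chunk_like *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (c)
		refcount_set(&c->refs, 1);	/* the creator owns the first ref */
	return c;
}

static void chunk_like_get(struct chunk_like *c)
{
	WARN_ON(!refcount_read(&c->refs));	/* caller must already hold a ref */
	refcount_inc(&c->refs);
}

static void chunk_like_put(struct chunk_like *c)
{
	if (refcount_dec_and_test(&c->refs))
		kfree(c);			/* last reference dropped */
}
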
qgroup.c:1312 * - run delayed refs
1524 * Quick path for updating qgroup with only excl refs.
1988 "error accounting new delayed refs extent (err code: %d), quota inconsistent",
2230 atomic_inc(&src_eb->refs);
2463 atomic_inc(&dst_eb->refs);
2556 atomic_inc(&root_eb->refs); /* For path */
3909 * delayed refs will be accounted by btrfs_qgroup_account_ref.
3914 * ensure we run all delayed refs and only after that, we are
ordered-data.c:187 refcount_set(&entry->refs, 1);
221 refcount_inc(&entry->refs);
355 refcount_inc(&ordered->refs);
538 refcount_inc(&entry->refs);
556 if (refcount_dec_and_test(&entry->refs)) {
708 refcount_inc(&ordered->refs);
889 refcount_inc(&entry->refs);
930 refcount_inc(&entry->refs);
959 refcount_inc(&ordered->refs);
981 refcount_inc(&entry->refs);
[all...]
scrub.c:219 refcount_t refs; member in struct:scrub_ctx
338 if (refcount_dec_and_test(&sctx->refs))
354 refcount_set(&sctx->refs, 1);
inode.c:2592 refcount_inc(&ordered->refs);
4088 * also drops the back refs in the inode to the directory
5234 * delayed refs activity by truncating.
6343 * be packed into one item. Extended refs will kick in if we
7348 /* em got 2 refs now, callers needs to do free_extent_map once. */
extent_map.c:51 refcount_set(&em->refs, 1);
64 if (refcount_dec_and_test(&em->refs)) {
239 if (refcount_read(&em->refs) > 2)
363 refcount_inc(&em->refs);
426 refcount_inc(&em->refs);
732 refcount_inc(&next_em->refs);
backref.c:218 * Return 0 when both refs are for the same block (and can be merged).
271 * Add @newref to the @root rbtree, merging identical refs.
300 /* Identical refs, merge them and free @newref */
357 * delayed refs
370 * on disk refs (inline or keyed)
418 /* direct refs use root == 0, key == NULL */
428 /* indirect refs use parent == 0 */
606 * adding new delayed refs. To deal with this we need to look in cache
717 * We maintain three separate rbtrees: one for direct refs, one for
718 * indirect refs whic
2420 struct ulist *refs; local
[all...]
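
The backref.c comments above describe keeping candidate references in rbtrees and folding identical entries together on insert ("Identical refs, merge them and free @newref"). Here is a sketch of that insert-or-merge idea with the kernel rbtree API, assuming kernel context; struct pref, pref_cmp() and pref_add_or_merge() are hypothetical stand-ins, not btrfs's prelim_ref code.

#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/types.h>

struct pref {
	struct rb_node node;
	u64 root_id;
	u64 count;
};

/* Hypothetical comparison: order entries by root_id only. */
static int pref_cmp(const struct pref *a, const struct pref *b)
{
	if (a->root_id < b->root_id)
		return -1;
	return a->root_id > b->root_id;
}

/* Insert @newref, or fold its count into an identical existing entry. */
static void pref_add_or_merge(struct rb_root *root, struct pref *newref)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct pref *cur = rb_entry(*p, struct pref, node);
		int cmp = pref_cmp(newref, cur);

		parent = *p;
		if (cmp < 0) {
			p = &(*p)->rb_left;
		} else if (cmp > 0) {
			p = &(*p)->rb_right;
		} else {
			/* Identical refs: merge and free @newref. */
			cur->count += newref->count;
			kfree(newref);
			return;
		}
	}
	rb_link_node(&newref->node, parent, p);
	rb_insert_color(&newref->node, root);
}
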
extent_io.c:79 "BTRFS: buffer leak start %llu len %u refs %d bflags %lu owner %llu\n",
80 eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
969 refcount_inc(&em->refs);
980 refcount_inc(&em->refs);
1627 if (eb && atomic_inc_not_zero(&eb->refs)) {
1803 * The eb has already reached 0 refs thus find_extent_buffer()
1874 ret = atomic_inc_not_zero(&eb->refs);
2760 atomic_inc(&clone->refs);
3333 * Even there is no eb refs here, we may still have
3445 atomic_set(&eb->refs,
3538 int refs; local
4058 int refs; local
[all...]
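
The extent_io.c hits at 1627 and 1874 show the lookup-side variant of refcounting: after finding an extent buffer, take a reference only if its count has not already dropped to zero (atomic_inc_not_zero()), otherwise treat it as gone. A short sketch of the same idea using the refcount_t equivalent, refcount_inc_not_zero(); struct obj and obj_tryget() are hypothetical names.

#include <linux/refcount.h>

struct obj {
	refcount_t refs;
};

/*
 * Lookup-side get: succeeds only while at least one reference is still
 * held, i.e. the object has not started its teardown path.
 */
static struct obj *obj_tryget(struct obj *o)
{
	if (o && refcount_inc_not_zero(&o->refs))
		return o;
	return NULL;	/* caller treats this as "not found" */
}
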
extent-tree.c:98 * the delayed refs are not processed.
102 u64 offset, int metadata, u64 *refs, u64 *flags,
199 refcount_inc(&head->refs);
226 if (refs)
227 *refs = num_refs;
238 * Back reference rules. Back refs have three main goals:
251 * There are two kinds of back refs. The implicit back refs is optimized
253 * back refs of this kind provide information about the block's owner tree
255 * b-tree searching. The full back refs i
100 btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, u64 bytenr, u64 offset, int metadata, u64 *refs, u64 *flags, u64 *owning_root) argument
1003 u64 refs; local
1098 u64 refs; local
1501 u64 refs; local
3121 u64 refs; local
5241 u64 refs[BTRFS_MAX_LEVEL]; member in struct:walk_control
5267 u64 refs; local
[all...]
/linux-master/mm/
hugetlb.c:350 struct hugetlb_vma_lock, refs);
367 kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
428 kref_init(&vma_lock->refs);
1094 kref_init(&resv_map->refs);
1117 struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
1242 kref_put(&reservations->refs, resv_map_release);
5188 kref_get(&resv->refs);
5239 kref_put(&resv->refs, resv_map_release);
7249 kref_put(&resv_map->refs, resv_map_release);
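
The hugetlb.c hits use the kref wrapper instead of a bare refcount_t: kref_init() at allocation, kref_get()/kref_put() per holder, and a release callback that recovers the containing object with container_of(), as in resv_map_release() and hugetlb_vma_lock_release() above. A minimal kref sketch, assuming kernel context; struct thing and its helpers are hypothetical.

#include <linux/kref.h>
#include <linux/slab.h>

struct thing {
	struct kref refs;
	/* ... payload ... */
};

/* Called by kref_put() when the last reference is dropped. */
static void thing_release(struct kref *ref)
{
	struct thing *t = container_of(ref, struct thing, refs);

	kfree(t);
}

static struct thing *thing_alloc(void)
{
	struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (t)
		kref_init(&t->refs);	/* count starts at 1 */
	return t;
}

/* Callers pair thing_get() with thing_put(). */
static void thing_get(struct thing *t)
{
	kref_get(&t->refs);
}

static void thing_put(struct thing *t)
{
	kref_put(&t->refs, thing_release);	/* frees on the last put */
}
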
page_owner.c:217 if (atomic_try_cmpxchg_relaxed(&stack_record->count.refs, &old, 1))
internal.h:1099 struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);
gup.c:71 static inline struct folio *try_get_folio(struct page *page, int refs) argument
79 if (unlikely(!folio_ref_try_add_rcu(folio, refs)))
92 if (!put_devmap_managed_page_refs(&folio->page, refs))
93 folio_put_refs(folio, refs);
103 * @refs: the value to (effectively) add to the folio's refcount
113 * FOLL_GET: folio's refcount will be incremented by @refs.
116 * @refs, and its pincount will be incremented by @refs.
119 * @refs * GUP_PIN_COUNTING_BIAS.
126 struct folio *try_grab_folio(struct page *page, int refs, unsigne argument
189 gup_put_folio(struct folio *folio, int refs, unsigned int flags) argument
2809 int refs; local
2882 int refs; local
2926 int refs; local
2969 int refs; local
[all...]
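
The gup.c comment block above (lines 103-119) spells out the try_grab_folio() contract: with FOLL_GET the folio's refcount goes up by @refs, while with FOLL_PIN the pin is either tracked in a separate pincount (plus @refs on the refcount) or encoded into the refcount as @refs * GUP_PIN_COUNTING_BIAS. The following is an illustration of that arithmetic only, not mm/gup.c itself; struct fake_folio, fake_grab() and GUP_PIN_BIAS are hypothetical stand-ins, and the assumption that only folios with a separate pin counter use it is spelled out in the comments.

/*
 * Illustration of the refcount arithmetic described above, not the
 * real mm/gup.c code.  Assumption: folios with a separate pin counter
 * track pins there; others encode pins in the refcount via a bias.
 */
#include <linux/types.h>

#define GUP_PIN_BIAS	1024	/* stand-in for GUP_PIN_COUNTING_BIAS */

struct fake_folio {
	int refcount;
	int pincount;
	bool has_pincount;
};

static void fake_grab(struct fake_folio *f, int refs, bool pin)
{
	if (!pin) {
		f->refcount += refs;			/* FOLL_GET */
	} else if (f->has_pincount) {
		f->refcount += refs;			/* FOLL_PIN, separate counter */
		f->pincount += refs;
	} else {
		f->refcount += refs * GUP_PIN_BIAS;	/* FOLL_PIN, bias-encoded */
	}
}
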
/linux-master/include/linux/
mm.h:1439 bool __put_devmap_managed_page_refs(struct page *page, int refs);
1440 static inline bool put_devmap_managed_page_refs(struct page *page, int refs) argument
1446 return __put_devmap_managed_page_refs(page, refs);
1449 static inline bool put_devmap_managed_page_refs(struct page *page, int refs) argument
1514 * @refs: The amount to subtract from the folio's reference count.
1525 static inline void folio_put_refs(struct folio *folio, int refs) argument
1527 if (folio_ref_sub_and_test(folio, refs))
1531 void folios_put_refs(struct folio_batch *folios, unsigned int *refs);
blkdev.h:387 refcount_t refs; member in struct:request_queue
io_uring_types.h:217 atomic_t refs; member in struct:io_ev_fd
248 struct percpu_ref refs; member in struct:io_ring_ctx::__anon47
655 atomic_t refs; member in struct:io_kiocb
/linux-master/fs/btrfs/tests/
extent-map-tests.c:26 if (refcount_read(&em->refs) != 1) {
28 "em leak: em (start %llu len %llu block_start %llu block_len %llu) refs %d",
30 em->block_len, refcount_read(&em->refs));
32 refcount_set(&em->refs, 1);
/linux-master/fs/smb/client/
misc.c:1332 struct dfs_info3_param *refs = NULL; local
1343 !ses->server->ops->get_dfs_refer(xid, ses, ref_path, &refs,
1346 *islink = refs[0].server_type == DFS_TYPE_LINK;
1347 free_dfs_info_array(refs, num_refs);
/linux-master/drivers/android/
binder.c:897 !hlist_empty(&node->refs))
907 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1058 * into the given proc rb_trees and node refs list.
1122 hlist_add_head(&new_ref->node_entry, &node->refs);
2849 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2850 * @node: struct binder_node for which to get refs
2865 * Return: The target_node with refs taken or NULL if no @node->proc is NULL.
4523 weak = !hlist_empty(&node->refs) ||
5764 static int binder_node_release(struct binder_node *node, int refs) argument
5779 if (hlist_empty(&node->refs)
[all...]
/linux-master/net/netfilter/
nf_tables_api.c:5252 refcount_set(&set->refs, 1);
5322 if (refcount_dec_and_test(&set->refs)) {
9835 refcount_inc(&set->refs);
/linux-master/include/net/netfilter/
nf_tables.h:544 * @refs: internal refcounting for async set destruction
578 refcount_t refs; member in struct:nft_set
624 return refcount_read(&s->refs) != 1;
/linux-master/io_uring/
io_uring.c:85 #include "refs.h"
245 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
258 percpu_ref_get(&ctx->refs);
266 percpu_ref_put(&ctx->refs);
305 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
427 /* linked timeouts should have two refs once prep'ed */
554 if (atomic_dec_and_test(&ev_fd->refs)) {
586 atomic_inc(&ev_fd->refs);
590 atomic_dec(&ev_fd->refs);
771 unsigned int refs local
[all...]
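
The io_uring.c hits show the percpu_ref flavour of the same idea, used where get/put are hot enough that a shared atomic would bounce between CPUs: percpu_ref_init() with a release callback, percpu_ref_get()/percpu_ref_put() on the fast path, and the release callback recovering the context with container_of() once the count drops to zero after the ref is killed. A minimal sketch, assuming kernel context; struct my_ctx and its helpers are hypothetical, and the completion-based wait io_uring does during teardown is omitted.

#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct my_ctx {
	struct percpu_ref refs;
	/* ... payload ... */
};

/* Runs once all references are gone after percpu_ref_kill(). */
static void my_ctx_ref_free(struct percpu_ref *ref)
{
	struct my_ctx *ctx = container_of(ref, struct my_ctx, refs);

	percpu_ref_exit(&ctx->refs);	/* free the per-CPU counter */
	kfree(ctx);
}

static struct my_ctx *my_ctx_alloc(void)
{
	struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;
	if (percpu_ref_init(&ctx->refs, my_ctx_ref_free, 0, GFP_KERNEL)) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}

/* Fast path: cheap per-CPU get/put around each operation. */
static void my_ctx_use(struct my_ctx *ctx)
{
	percpu_ref_get(&ctx->refs);
	/* ... do work ... */
	percpu_ref_put(&ctx->refs);
}

/* Shutdown: drop the initial reference; frees once users drain. */
static void my_ctx_shutdown(struct my_ctx *ctx)
{
	percpu_ref_kill(&ctx->refs);
}
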
/linux-master/block/
blk-core.c:288 if (refcount_dec_and_test(&q->refs))
434 refcount_set(&q->refs, 1);
482 refcount_inc(&q->refs);
