Searched refs:head (Results 1 - 25 of 2525) sorted by last modified time


/linux-master/fs/btrfs/
backref.c
885 * add all currently queued delayed refs from this head whose seq nr is
889 struct btrfs_delayed_ref_head *head, u64 seq,
898 spin_lock(&head->lock);
899 for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
925 if (head->extent_op && head->extent_op->update_key) {
926 btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
1004 spin_unlock(&head->lock);
1392 struct btrfs_delayed_ref_head *head; local
1427 head
888 add_delayed_refs(const struct btrfs_fs_info *fs_info, struct btrfs_delayed_ref_head *head, u64 seq, struct preftrees *preftrees, struct share_check *sc) argument
[all...]
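
The backref.c hits walk a cached rbtree of queued delayed refs while holding the head's spinlock (lines 898-899 and 1004 above). A minimal sketch of that traversal idiom, using the real rbtree helpers but invented container and field names:

#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical stand-ins for btrfs_delayed_ref_head and its nodes. */
struct ref_head {
	spinlock_t lock;
	struct rb_root_cached ref_tree;
};

struct ref_node {
	struct rb_node rb;	/* linked into ref_head.ref_tree */
	u64 seq;
};

/* Visit every queued ref whose seq is at or below the caller's cutoff. */
static void walk_refs(struct ref_head *head, u64 seq)
{
	struct rb_node *n;

	spin_lock(&head->lock);
	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
		struct ref_node *ref = rb_entry(n, struct ref_node, rb);

		if (ref->seq > seq)
			continue;	/* newer than the snapshot; skip */
		/* ... process the ref ... */
	}
	spin_unlock(&head->lock);
}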
/linux-master/net/sunrpc/xprtrdma/
svc_rdma_sendto.c
628 ret = svc_rdma_iov_dma_map(data, &xdr->head[0]);
674 if (xdr->head[0].iov_len)
741 if (xdr->head[0].iov_len) {
742 memcpy(args->pd_dest, xdr->head[0].iov_base, xdr->head[0].iov_len);
743 args->pd_dest += xdr->head[0].iov_len;
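
These sendto.c hits all treat xdr->head[0] as the first kvec of the RPC message: test iov_len, then DMA-map or copy iov_base. A rough sketch of the copy step in isolation; the destination-cursor helper is invented, not the driver's API:

#include <linux/sunrpc/xdr.h>
#include <linux/string.h>

/* Copy an xdr_buf's head kvec into a flat buffer and advance the
 * destination cursor, mirroring svc_rdma_sendto.c lines 741-743. */
static unsigned char *copy_xdr_head(const struct xdr_buf *xdr,
				    unsigned char *dest)
{
	if (xdr->head[0].iov_len) {
		memcpy(dest, xdr->head[0].iov_base, xdr->head[0].iov_len);
		dest += xdr->head[0].iov_len;
	}
	return dest;
}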
svc_rdma_rw.c
561 info->wi_next_off = offset - xdr->head[0].iov_len;
582 if (xdr->head[0].iov_len) {
583 ret = svc_rdma_iov_write(info, &xdr->head[0]);
589 ret = svc_rdma_pages_write(info, xdr, xdr->head[0].iov_len,
722 * @head: context for ongoing I/O
732 struct svc_rdma_recv_ctxt *head,
736 struct svc_rdma_chunk_ctxt *cc = &head->rc_cc;
743 sge_no = PAGE_ALIGN(head->rc_pageoff + len) >> PAGE_SHIFT;
752 PAGE_SIZE - head->rc_pageoff);
754 if (!head
731 svc_rdma_build_read_segment(struct svc_rqst *rqstp, struct svc_rdma_recv_ctxt *head, const struct svc_rdma_segment *segment) argument
799 svc_rdma_build_read_chunk(struct svc_rqst *rqstp, struct svc_rdma_recv_ctxt *head, const struct svc_rdma_chunk *chunk) argument
832 svc_rdma_copy_inline_range(struct svc_rqst *rqstp, struct svc_rdma_recv_ctxt *head, unsigned int offset, unsigned int remaining) argument
882 svc_rdma_read_multiple_chunks(struct svc_rqst *rqstp, struct svc_rdma_recv_ctxt *head) argument
936 svc_rdma_read_data_item(struct svc_rqst *rqstp, struct svc_rdma_recv_ctxt *head) argument
958 svc_rdma_read_chunk_range(struct svc_rqst *rqstp, struct svc_rdma_recv_ctxt *head, const struct svc_rdma_chunk *chunk, unsigned int offset, unsigned int length) argument
1002 svc_rdma_read_call_chunk(struct svc_rqst *rqstp, struct svc_rdma_recv_ctxt *head) argument
1065 svc_rdma_read_special(struct svc_rqst *rqstp, struct svc_rdma_recv_ctxt *head) argument
1081 svc_rdma_clear_rqst_pages(struct svc_rqst *rqstp, struct svc_rdma_recv_ctxt *head) argument
1115 svc_rdma_process_read_list(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp, struct svc_rdma_recv_ctxt *head) argument
[all...]
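
One detail worth calling out from svc_rdma_rw.c line 743: PAGE_ALIGN(head->rc_pageoff + len) >> PAGE_SHIFT counts how many pages (and hence SGEs) a read segment spans once the offset into the first page is included. The arithmetic on its own, as a sketch:

#include <linux/mm.h>	/* PAGE_ALIGN, PAGE_SHIFT */

/*
 * Pages touched by an I/O of 'len' bytes starting 'pageoff' bytes
 * into its first page: round the end offset up to a page boundary,
 * then convert bytes to pages.
 */
static unsigned int pages_spanned(unsigned int pageoff, unsigned int len)
{
	return PAGE_ALIGN(pageoff + len) >> PAGE_SHIFT;
}

/* e.g. pageoff = 100, len = 4096 spans 2 pages with 4 KiB pages. */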
/linux-master/kernel/sched/
sched.h
1805 struct balance_callback *head,
1815 if (unlikely(head->next || rq->balance_callback == &balance_push_callback))
1818 head->func = func;
1819 head->next = rq->balance_callback;
1820 rq->balance_callback = head;
1804 queue_balance_callback(struct rq *rq, struct balance_callback *head, void (*func)(struct rq *rq)) argument
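
queue_balance_callback pushes onto an intrusive singly linked stack hanging off the runqueue: set func, aim next at the current head, then make the new node the head. A standalone sketch of the same push; note the kernel's guard on line 1815 also compares against a balance_push sentinel, which this simplified "already queued" test omits:

#include <stddef.h>

struct callback {
	struct callback *next;
	void (*func)(void *arg);
};

struct owner {
	struct callback *callbacks;	/* stack head; NULL when empty */
};

/* Push cb unless it is already pending: a queued node either has a
 * non-NULL next or is the current head of the stack. */
static void queue_callback(struct owner *o, struct callback *cb,
			   void (*func)(void *arg))
{
	if (cb->next || o->callbacks == cb)
		return;
	cb->func = func;
	cb->next = o->callbacks;
	o->callbacks = cb;
}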
/linux-master/include/linux/sunrpc/
svc_rdma.h
285 struct svc_rdma_recv_ctxt *head);
/linux-master/fs/bcachefs/
fs.c
1507 static void bch2_i_callback(struct rcu_head *head) argument
1509 struct inode *vinode = container_of(head, struct inode, i_rcu);
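
bch2_i_callback is the standard RCU deferred-free shape, which also accounts for the tdp_mmu.c and sch_generic.c hits below: call_rcu() is handed the rcu_head embedded in a larger object, and after a grace period the callback recovers the container with container_of(). A generic sketch with an invented object type:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	int data;
	struct rcu_head rcu;	/* embedded; handed to call_rcu() */
};

/* Runs after a grace period: recover the object from its rcu_head. */
static void my_obj_free_rcu(struct rcu_head *head)
{
	struct my_obj *obj = container_of(head, struct my_obj, rcu);

	kfree(obj);
}

static void my_obj_release(struct my_obj *obj)
{
	/* Readers under rcu_read_lock() may still see obj; defer the free. */
	call_rcu(&obj->rcu, my_obj_free_rcu);
}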
/linux-master/drivers/usb/gadget/udc/
fsl_udc_core.c
174 next_td = req->head;
739 cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK);
764 fsl_prime_ep(ep, req->head);
848 req->head = dtd;
970 fsl_prime_ep(ep, next_req->head);
1607 curr_td = curr_req->head;
1845 * head and TR Queue */
/linux-master/drivers/tty/serial/
stm32-usart.c
736 if (xmit->tail < xmit->head) {
serial_core.c
580 circ->buf[circ->head] = c;
581 circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
616 c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
621 memcpy(circ->buf + circ->head, buf, c);
622 circ->head = (circ->head + c) & (UART_XMIT_SIZE - 1);
mxs-auart.c
585 CIRC_CNT_TO_END(xmit->head,
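
All three serial drivers share the circ_buf contract: head is the producer index, tail the consumer, and a power-of-two size turns wraparound into a mask instead of a modulo; CIRC_SPACE_TO_END/CIRC_CNT_TO_END bound a memcpy at the buffer's end. A self-contained userspace rendering of the single-byte put from serial_core.c lines 580-581 (buffer size is illustrative):

#define XMIT_SIZE 4096		/* must be a power of two */

struct circ {
	char buf[XMIT_SIZE];
	unsigned int head;	/* producer writes at head */
	unsigned int tail;	/* consumer reads at tail */
};

/* Free slots before the indices would collide (one slot stays empty);
 * the same computation as the kernel's CIRC_SPACE(). */
static unsigned int circ_space(const struct circ *c)
{
	return (c->tail - c->head - 1) & (XMIT_SIZE - 1);
}

/* Single-byte put: store, then advance head with a mask so the
 * index wraps without a divide. */
static int circ_put(struct circ *c, char ch)
{
	if (!circ_space(c))
		return 0;
	c->buf[c->head] = ch;
	c->head = (c->head + 1) & (XMIT_SIZE - 1);
	return 1;
}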
/linux-master/drivers/clk/
clk.c
4866 if (!cn->notifier_head.head) {
/linux-master/arch/x86/kvm/svm/
sev.c
2017 struct list_head *head = &sev->regions_list; local
2020 list_for_each_entry(i, head, list) {
2145 struct list_head *head = &sev->regions_list; local
2175 if (!list_empty(head)) {
2176 list_for_each_safe(pos, q, head) {
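
sev.c shows both list walks: list_for_each_entry for a read-only scan of regions_list, and list_for_each_safe where entries are unlinked mid-walk, since the _safe variants cache the successor before the current node can be freed. A sketch of the teardown half with an invented entry type:

#include <linux/list.h>
#include <linux/slab.h>

struct region {
	struct list_head list;	/* linked into a regions list */
	unsigned long start, len;
};

/* Unlink and free every region; the cached 'tmp' successor keeps the
 * walk valid after list_del() and kfree() of the current entry. */
static void free_regions(struct list_head *head)
{
	struct region *r, *tmp;

	if (list_empty(head))
		return;
	list_for_each_entry_safe(r, tmp, head, list) {
		list_del(&r->list);
		kfree(r);
	}
}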
/linux-master/arch/x86/kvm/mmu/
tdp_mmu.c
68 static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head) argument
70 struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
/linux-master/arch/x86/include/asm/
kvm_host.h
615 struct list_head head; member in struct:kvm_mtrr
/linux-master/
Makefile
661 CC_VERSION_TEXT = $(subst $(pound),,$(shell LC_ALL=C $(CC) --version 2>/dev/null | head -n 1))
1133 $(AR) mPiT $$($(AR) t $@ | sed -n 1p) $@ $$($(AR) t $@ | grep -F -f $(srctree)/scripts/head-object-list.txt)
1136 vmlinux.a: $(KBUILD_VMLINUX_OBJS) scripts/head-object-list.txt FORCE
/linux-master/tools/testing/selftests/
kselftest_harness.h
773 #define __LIST_APPEND(head, item) \
776 if (head == NULL) { \
777 head = item; \
784 item->prev = head->prev; \
786 head->prev = item; \
788 item->next = head; \
791 head = item; \
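
__LIST_APPEND keeps a doubly linked list that is circular only in the prev direction: head->prev shortcuts to the tail, so appending is O(1) without a separate tail pointer. The forward-order branch of the macro, restated as a plain function with an invented node type (the harness also has a prepend branch, lines 788-791 above):

#include <stddef.h>

struct item {
	struct item *next;	/* NULL at the tail */
	struct item *prev;	/* head->prev points at the tail */
};

static void list_append(struct item **head, struct item *it)
{
	if (*head == NULL) {		/* first node is its own tail */
		it->next = NULL;
		it->prev = it;
		*head = it;
		return;
	}
	it->next = NULL;
	it->prev = (*head)->prev;	/* old tail */
	it->prev->next = it;
	(*head)->prev = it;		/* head->prev tracks the new tail */
}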
/linux-master/tools/perf/util/
annotate.c
1468 static void annotation_line__add(struct annotation_line *al, struct list_head *head) argument
1470 list_add_tail(&al->node, head);
1474 annotation_line__next(struct annotation_line *pos, struct list_head *head) argument
1476 list_for_each_entry_continue(pos, head, node)
2883 size_t disasm__fprintf(struct list_head *head, FILE *fp) argument
2888 list_for_each_entry(pos, head, al.node)
3743 static struct annotated_item_stat *annotate_data_stat(struct list_head *head, argument
3748 list_for_each_entry(istat, head, list) {
3763 list_add_tail(&istat->list, head);
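
annotate_data_stat (lines 3748-3763) is a find-or-create scan: walk the list for a match, otherwise allocate a node and list_add_tail it under the same head. A compact sketch of that shape, written kernel-style for consistency with the other sketches even though perf itself is userspace; the key field is invented:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>

struct item_stat {
	struct list_head list;
	char name[32];		/* lookup key */
	unsigned long count;
};

/* Return the stat node for 'name', creating it on first use. */
static struct item_stat *stat_find_or_create(struct list_head *head,
					     const char *name)
{
	struct item_stat *istat;

	list_for_each_entry(istat, head, list) {
		if (!strcmp(istat->name, name))
			return istat;
	}
	istat = kzalloc(sizeof(*istat), GFP_KERNEL);
	if (!istat)
		return NULL;
	strscpy(istat->name, name, sizeof(istat->name));
	list_add_tail(&istat->list, head);
	return istat;
}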
/linux-master/net/sched/
sch_generic.c
1046 static void qdisc_free_cb(struct rcu_head *head) argument
1048 struct Qdisc *q = container_of(head, struct Qdisc, rcu);
1338 * @head: list of devices to deactivate
1343 void dev_deactivate_many(struct list_head *head) argument
1347 list_for_each_entry(dev, head, close_list) {
1364 list_for_each_entry(dev, head, close_list) {
1372 list_for_each_entry(dev, head, close_list) {
/linux-master/net/netfilter/
nft_set_bitmap.c
17 struct list_head head; member in struct:nft_bitmap_elem
97 list_for_each_entry_rcu(be, &priv->list, head) {
116 list_for_each_entry_rcu(be, &priv->list, head) {
144 list_add_tail_rcu(&new->head, &priv->list);
160 list_del_rcu(&be->head);
222 list_for_each_entry_rcu(be, &priv->list, head) {
277 list_for_each_entry_safe(be, n, &priv->list, head)
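
nft_set_bitmap pairs the RCU list primitives: lookups traverse with list_for_each_entry_rcu while insert and remove use list_add_tail_rcu/list_del_rcu, so a concurrent reader never observes a half-updated link. A reduced sketch of that reader/writer split; the element layout and the external writer-side locking are assumptions:

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct elem {
	struct list_head head;	/* linked into the set's list */
	struct rcu_head rcu;
	u32 key;
};

/* Reader: lockless lookup under RCU. */
static bool set_contains(struct list_head *list, u32 key)
{
	struct elem *be;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(be, list, head) {
		if (be->key == key) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

/* Writers (serialized by the caller): publish, or unlink and then
 * defer the free until current readers are done. */
static void set_add(struct list_head *list, struct elem *new)
{
	list_add_tail_rcu(&new->head, list);
}

static void set_del(struct elem *be)
{
	list_del_rcu(&be->head);
	kfree_rcu(be, rcu);
}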
nf_tables_api.c
9530 LIST_HEAD(head);
9533 list_splice_init(&nf_tables_destroy_list, &head);
9536 if (list_empty(&head))
9541 list_for_each_entry_safe(trans, next, &head, list) {
/linux-master/net/ipv4/
fib_frontend.c
115 struct hlist_head *head; local
122 head = &net->ipv4.fib_table_hash[h];
123 hlist_for_each_entry_rcu(tb, head, tb_hlist,
190 struct hlist_head *head = &net->ipv4.fib_table_hash[h]; local
194 hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
1000 struct hlist_head *head; local
1038 head = &net->ipv4.fib_table_hash[h];
1039 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
1586 struct hlist_head *head = &net->ipv4.fib_table_hash[i]; local
1590 hlist_for_each_entry_safe(tb, tmp, head, tb_hlis
[all...]
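
The fib_frontend.c hits are the classic hlist hash table: the table id is hashed to select a bucket (a struct hlist_head, a single pointer, half the size of a list_head), and only that bucket's chain is walked. A sketch of the lookup path; table size and struct layout are invented, and the caller is assumed to hold rcu_read_lock():

#include <linux/rculist.h>
#include <linux/types.h>

#define TBL_HASH_BITS	8
#define TBL_HASH_SZ	(1 << TBL_HASH_BITS)

static struct hlist_head tbl_hash[TBL_HASH_SZ];	/* one head per bucket */

struct table {
	struct hlist_node hlist;	/* chains entries within one bucket */
	u32 id;
};

static struct table *table_lookup(u32 id)
{
	struct hlist_head *head = &tbl_hash[id & (TBL_HASH_SZ - 1)];
	struct table *tb;

	hlist_for_each_entry_rcu(tb, head, hlist) {
		if (tb->id == id)
			return tb;
	}
	return NULL;
}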
/linux-master/net/core/
dev.c
280 struct hlist_head *head = dev_name_hash(net, name); local
283 hlist_for_each_entry(name_node, head, hlist)
292 struct hlist_head *head = dev_name_hash(net, name); local
295 hlist_for_each_entry_rcu(name_node, head, hlist)
319 /* The node that holds dev->name acts as a head of per-device list. */
325 static void netdev_name_node_alt_free(struct rcu_head *head) argument
328 container_of(head, struct netdev_name_node, rcu);
564 struct list_head *head = ptype_head(pt); local
567 list_add_rcu(&pt->list, head);
587 struct list_head *head local
819 struct hlist_head *head = dev_index_hash(net, ifindex); local
843 struct hlist_head *head = dev_index_hash(net, ifindex); local
1477 __dev_close_many(struct list_head *head) argument
1530 dev_close_many(struct list_head *head, bool unlink) argument
3653 struct sk_buff *next, *head = NULL, *tail; local
5167 struct Qdisc *head; local
5576 __netif_receive_skb_list_ptype(struct list_head *head, struct packet_type *pt_prev, struct net_device *orig_dev) argument
5596 __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) argument
5663 __netif_receive_skb_list(struct list_head *head) argument
5749 netif_receive_skb_list_internal(struct list_head *head) argument
5821 netif_receive_skb_list(struct list_head *head) argument
11032 unregister_netdevice_queue(struct net_device *dev, struct list_head *head) argument
11047 unregister_netdevice_many_notify(struct list_head *head, u32 portid, const struct nlmsghdr *nlh) argument
11161 unregister_netdevice_many(struct list_head *head) argument
[all...]
/linux-master/mm/
shmem.c
2956 if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
2965 pipe->head++;
2983 used = pipe_occupancy(pipe->head, pipe->tail);
3048 if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
3124 WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
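
shmem's splice path drives the pipe ring directly: head and tail are free-running counters, occupancy is their difference, and producing a buffer is "fill the slot, then head++". The index arithmetic in isolation, mirroring pipe_occupancy()/pipe_full() from pipe_fs_i.h:

/* Free-running unsigned counters: the live element count is their
 * difference, and unsigned wraparound is harmless because only
 * differences are ever compared. */
static unsigned int ring_occupancy(unsigned int head, unsigned int tail)
{
	return head - tail;
}

static int ring_full(unsigned int head, unsigned int tail,
		     unsigned int limit)
{
	return ring_occupancy(head, tail) >= limit;
}

/* Producer order matters: fill the slot at head modulo ring size
 * first, then increment head to publish it to the consumer. */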
memory-failure.c
1253 #define head (1UL << PG_head) macro
1271 { head, head, MF_MSG_HUGE, me_huge_page },
1296 #undef head macro
1743 * simply mark the compound head page is by far sufficient.
1901 struct llist_node *head; local
1905 head = llist_del_all(raw_hwp_list_head(folio));
1906 llist_for_each_entry_safe(p, next, head, node) {
1919 struct llist_head *head; local
1931 head
[all...]
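
The raw_hwp hits (lines 1901-1906) use llist, the lock-free singly linked list: llist_del_all() atomically detaches the entire chain from its head, after which the now-private nodes can be walked and freed without locks while concurrent adders start a fresh chain. A sketch with an invented entry type:

#include <linux/llist.h>
#include <linux/slab.h>

struct raw_entry {
	struct llist_node node;	/* linked into a shared llist_head */
	unsigned long pfn;
};

/* Detach every queued entry in one atomic exchange, then dispose of
 * the detached chain at leisure. */
static void drain_entries(struct llist_head *list)
{
	struct llist_node *head = llist_del_all(list);
	struct raw_entry *p, *next;

	llist_for_each_entry_safe(p, next, head, node)
		kfree(p);
}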
internal.h
530 static inline void prep_compound_tail(struct page *head, int tail_idx) argument
532 struct page *p = head + tail_idx;
535 set_compound_head(p, head);
820 * If page is a compound head, the entire compound page is considered.
1151 * page table entry, which might not necessarily be the head page for a
1196 * During GUP-fast we might not get called on the head page for a
1199 * head page. For hugetlb, PageAnonExclusive only applies on the head
1200 * page (as it cannot be partially COW-shared), so lookup the head page.

Completed in 527 milliseconds
