Searched refs:wq (Results 26 - 50 of 700) sorted by relevance


/linux-master/drivers/dma/idxd/
idxd.h 169 struct idxd_wq *wq; member in struct:idxd_cdev
192 struct idxd_wq *wq; member in struct:idxd_dma_chan
205 struct workqueue_struct *wq; member in struct:idxd_wq
309 struct idxd_wq *wq; member in struct:idxd_evl_fault
366 struct workqueue_struct *wq; member in struct:idxd_device
418 struct idxd_wq *wq; member in struct:idxd_desc
430 #define wq_confdev(wq) &wq->idxd_dev.conf_dev
440 static inline struct idxd_device_driver *wq_to_idxd_drv(struct idxd_wq *wq) argument
442 struct device *dev = wq_confdev(wq);
548 is_idxd_wq_dmaengine(struct idxd_wq *wq) argument
555 is_idxd_wq_user(struct idxd_wq *wq) argument
560 is_idxd_wq_kernel(struct idxd_wq *wq) argument
565 wq_dedicated(struct idxd_wq *wq) argument
570 wq_shared(struct idxd_wq *wq) argument
585 wq_pasid_enabled(struct idxd_wq *wq) argument
591 wq_shared_supported(struct idxd_wq *wq) argument
627 idxd_wq_portal_addr(struct idxd_wq *wq) argument
635 idxd_wq_get(struct idxd_wq *wq) argument
640 idxd_wq_put(struct idxd_wq *wq) argument
645 idxd_wq_refcount(struct idxd_wq *wq) argument
650 idxd_wq_set_private(struct idxd_wq *wq, void *private) argument
655 idxd_wq_get_private(struct idxd_wq *wq) argument
674 idxd_wq_set_max_batch_size(int idxd_type, struct idxd_wq *wq, u32 max_batch_size) argument
692 idxd_wq_driver_name_match(struct idxd_wq *wq, struct device *dev) argument
[all...]
submit.c 11 static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu) argument
14 struct idxd_device *idxd = wq->idxd;
16 desc = wq->descs[idx];
27 struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype) argument
30 struct idxd_device *idxd = wq->idxd;
38 sbq = &wq->sbq;
44 return __get_desc(wq, idx, cpu);
62 return __get_desc(wq, idx, cpu);
66 void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc) argument
71 sbitmap_queue_clear(&wq
75 list_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie, struct idxd_desc *desc) argument
97 llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie, struct idxd_desc *desc) argument
155 idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc) argument
170 idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc) argument
[all...]
irq.c 49 struct idxd_wq *wq = idxd->wqs[i]; local
51 rc = idxd_wq_enable(wq);
54 dev_warn(dev, "Unable to re-enable wq %s\n",
55 dev_name(wq_confdev(wq)));
73 struct idxd_wq *wq = ie_to_wq(ie); local
74 struct idxd_device *idxd = wq->idxd;
88 portal = idxd_wq_portal_addr(wq);
95 if (wq_dedicated(wq)) {
98 rc = idxd_enqcmds(wq, portal, &desc);
101 dev_warn(dev, "Failed to submit drain desc on wq
156 struct idxd_wq *wq = ie_to_wq(ie); local
225 struct idxd_wq *wq = fault->wq; local
336 struct idxd_wq *wq = idxd->wqs[entry_head->wq_idx]; local
417 struct idxd_wq *wq = idxd->wqs[id]; local
425 struct idxd_wq *wq = idxd->wqs[i]; local
518 struct idxd_wq *wq = desc->wq; local
545 struct idxd_wq *wq = desc->wq; local
[all...]
sysfs.c 342 struct idxd_wq *wq = idxd->wqs[i]; local
344 if (!wq->group)
347 if (wq->group->id == group->id)
348 rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);
590 struct idxd_wq *wq = confdev_to_wq(dev); local
592 return sysfs_emit(buf, "%d\n", wq->client_count);
601 struct idxd_wq *wq = confdev_to_wq(dev); local
603 switch (wq->state) {
619 struct idxd_wq *wq local
631 struct idxd_wq *wq = confdev_to_wq(dev); local
674 struct idxd_wq *wq = confdev_to_wq(dev); local
683 struct idxd_wq *wq = confdev_to_wq(dev); local
710 struct idxd_wq *wq = confdev_to_wq(dev); local
721 struct idxd_wq *wq = idxd->wqs[i]; local
733 struct idxd_wq *wq = confdev_to_wq(dev); local
761 struct idxd_wq *wq = confdev_to_wq(dev); local
770 struct idxd_wq *wq = confdev_to_wq(dev); local
798 struct idxd_wq *wq = confdev_to_wq(dev); local
807 struct idxd_wq *wq = confdev_to_wq(dev); local
844 struct idxd_wq *wq = confdev_to_wq(dev); local
853 struct idxd_wq *wq = confdev_to_wq(dev); local
885 struct idxd_wq *wq = confdev_to_wq(dev); local
904 struct idxd_wq *wq = confdev_to_wq(dev); local
933 struct idxd_wq *wq = confdev_to_wq(dev); local
942 struct idxd_wq *wq = confdev_to_wq(dev); local
968 struct idxd_wq *wq = confdev_to_wq(dev); local
1002 struct idxd_wq *wq = confdev_to_wq(dev); local
1010 struct idxd_wq *wq = confdev_to_wq(dev); local
1039 struct idxd_wq *wq = confdev_to_wq(dev); local
1047 struct idxd_wq *wq = confdev_to_wq(dev); local
1075 struct idxd_wq *wq = confdev_to_wq(dev); local
1083 struct idxd_wq *wq = confdev_to_wq(dev); local
1111 struct idxd_wq *wq = confdev_to_wq(dev); local
1119 struct idxd_wq *wq = confdev_to_wq(dev); local
1149 struct idxd_wq *wq = confdev_to_wq(dev); local
1168 struct idxd_wq *wq = confdev_to_wq(dev); local
1179 struct idxd_wq *wq = confdev_to_wq(dev); local
1203 struct idxd_wq *wq = confdev_to_wq(dev); local
1229 struct idxd_wq *wq = confdev_to_wq(dev); local
1264 struct idxd_wq *wq = confdev_to_wq(dev); local
1272 struct idxd_wq *wq = confdev_to_wq(dev); local
1334 struct idxd_wq *wq = confdev_to_wq(dev); local
1364 struct idxd_wq *wq = confdev_to_wq(dev); local
1488 struct idxd_wq *wq = idxd->wqs[i]; local
1871 struct idxd_wq *wq; local
1942 struct idxd_wq *wq = idxd->wqs[i]; local
[all...]
/linux-master/drivers/infiniband/hw/mana/
Makefile 4 mana_ib-y := device.o main.o wq.o qp.o cq.o mr.o
/linux-master/kernel/
workqueue.c 163 * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
165 * PWR: wq_pool_mutex and wq->mutex protected for writes. Either or
168 * WQ: wq->mutex protected.
170 * WR: wq->mutex protected for writes. RCU protected for reads.
172 * WO: wq->mutex protected for writes. Updated with WRITE_ONCE() and can be read
258 struct workqueue_struct *wq; /* I: the owning workqueue */ member in struct:pool_workqueue
285 struct list_head pwqs_node; /* WR: node on wq->pwqs */
286 struct list_head mayday_node; /* MD: node on wq->maydays */
294 * grabbing wq->mutex.
336 struct list_head pwqs; /* WR: all pwqs of this wq */
721 unbound_pwq_slot(struct workqueue_struct *wq, int cpu) argument
730 unbound_pwq(struct workqueue_struct *wq, int cpu) argument
745 unbound_effective_cpumask(struct workqueue_struct *wq) argument
1561 wq_node_nr_active(struct workqueue_struct *wq, int node) argument
1582 wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu) argument
1748 struct workqueue_struct *wq = pwq->wq; local
1860 unplug_oldest_pwq(struct workqueue_struct *wq) argument
2273 is_chained_work(struct workqueue_struct *wq) argument
2313 __queue_work(int cpu, struct workqueue_struct *wq, struct work_struct *work) argument
2439 queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work) argument
2506 queue_work_node(int node, struct workqueue_struct *wq, struct work_struct *work) argument
2546 __queue_delayed_work(int cpu, struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay) argument
2597 queue_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay) argument
2635 mod_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay) argument
2676 queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork) argument
3017 struct workqueue_struct *wq = pwq->wq; local
3472 struct workqueue_struct *wq = rescuer->rescue_wq; local
3868 flush_workqueue_prep_pwqs(struct workqueue_struct *wq, int flush_color, int work_color) argument
3908 touch_wq_lockdep_map(struct workqueue_struct *wq) argument
3922 touch_work_lockdep_map(struct work_struct *work, struct workqueue_struct *wq) argument
3944 __flush_workqueue(struct workqueue_struct *wq) argument
4105 drain_workqueue(struct workqueue_struct *wq) argument
4155 struct workqueue_struct *wq; local
4653 wq_init_lockdep(struct workqueue_struct *wq) argument
4666 wq_unregister_lockdep(struct workqueue_struct *wq) argument
4671 wq_free_lockdep(struct workqueue_struct *wq) argument
4677 wq_init_lockdep(struct workqueue_struct *wq) argument
4681 wq_unregister_lockdep(struct workqueue_struct *wq) argument
4685 wq_free_lockdep(struct workqueue_struct *wq) argument
4744 struct workqueue_struct *wq = local
4927 struct workqueue_struct *wq = pwq->wq; local
4977 init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, struct worker_pool *pool) argument
4998 struct workqueue_struct *wq = pwq->wq; local
5014 alloc_unbound_pwq(struct workqueue_struct *wq, const struct workqueue_attrs *attrs) argument
5078 install_unbound_pwq(struct workqueue_struct *wq, int cpu, struct pool_workqueue *pwq) argument
5097 struct workqueue_struct *wq; /* target workqueue */ member in struct:apply_wqattrs_ctx
5122 apply_wqattrs_prepare(struct workqueue_struct *wq, const struct workqueue_attrs *attrs, const cpumask_var_t unbound_cpumask) argument
5217 apply_workqueue_attrs_locked(struct workqueue_struct *wq, const struct workqueue_attrs *attrs) argument
5254 apply_workqueue_attrs(struct workqueue_struct *wq, const struct workqueue_attrs *attrs) argument
5290 wq_update_pod(struct workqueue_struct *wq, int cpu, int hotplug_cpu, bool online) argument
5342 alloc_and_link_pwqs(struct workqueue_struct *wq) argument
5430 init_rescuer(struct workqueue_struct *wq) argument
5473 wq_adjust_max_active(struct workqueue_struct *wq) argument
5534 struct workqueue_struct *wq; local
5669 destroy_workqueue(struct workqueue_struct *wq) argument
5761 workqueue_set_max_active(struct workqueue_struct *wq, int max_active) argument
5798 workqueue_set_min_active(struct workqueue_struct *wq, int min_active) argument
5861 workqueue_congested(int cpu, struct workqueue_struct *wq) argument
5958 struct workqueue_struct *wq = NULL; local
6136 show_one_workqueue(struct workqueue_struct *wq) argument
6232 struct workqueue_struct *wq; local
6257 struct workqueue_struct *wq; local
6476 struct workqueue_struct *wq; local
6517 struct workqueue_struct *wq; local
6626 struct workqueue_struct *wq; local
6658 struct workqueue_struct *wq; local
6699 struct workqueue_struct *wq; local
6724 struct workqueue_struct *wq; local
6807 struct workqueue_struct *wq; local
6862 struct workqueue_struct *wq; member in struct:wq_device
6876 struct workqueue_struct *wq = dev_to_wq(dev); local
6885 struct workqueue_struct *wq = dev_to_wq(dev); local
6894 struct workqueue_struct *wq = dev_to_wq(dev); local
6928 struct workqueue_struct *wq = dev_to_wq(dev); local
6939 wq_sysfs_prep_attrs(struct workqueue_struct *wq) argument
6956 struct workqueue_struct *wq = dev_to_wq(dev); local
6981 struct workqueue_struct *wq = dev_to_wq(dev); local
6995 struct workqueue_struct *wq = dev_to_wq(dev); local
7018 struct workqueue_struct *wq = dev_to_wq(dev); local
7038 struct workqueue_struct *wq = dev_to_wq(dev); local
7060 struct workqueue_struct *wq = dev_to_wq(dev); local
7070 struct workqueue_struct *wq = dev_to_wq(dev); local
7240 workqueue_sysfs_register(struct workqueue_struct *wq) argument
7298 workqueue_sysfs_unregister(struct workqueue_struct *wq) argument
7309 workqueue_sysfs_unregister(struct workqueue_struct *wq) argument
7723 struct workqueue_struct *wq; local
7845 struct workqueue_struct *wq; local
[all...]
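
The access-mode annotations excerpted from workqueue.c above (PW, PWR, WQ, WR, WO) document which lock covers each struct field and how lockless readers are allowed in. As a hedged illustration of the "WO" rule, where writes happen under wq->mutex via WRITE_ONCE() and readers may run without the lock via READ_ONCE(), here is a minimal sketch; the struct and function names are invented for the example and are not the kernel's actual code:

#include <linux/mutex.h>
#include <linux/compiler.h>

struct wq_example {
	struct mutex mutex;	/* serializes writers of max_active */
	int max_active;		/* WO: written with WRITE_ONCE() */
};

static void wq_example_set_max_active(struct wq_example *wq, int v)
{
	mutex_lock(&wq->mutex);
	WRITE_ONCE(wq->max_active, v);	/* single store visible to lockless readers */
	mutex_unlock(&wq->mutex);
}

static int wq_example_read_max_active(struct wq_example *wq)
{
	/* No lock taken: READ_ONCE() pairs with the WRITE_ONCE() above. */
	return READ_ONCE(wq->max_active);
}
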
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/
txrx.h 104 mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n) argument
106 return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
109 static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi, size_t wqe_size) argument
113 wqe = mlx5_wq_cyc_get_wqe(wq, pi);
120 ((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_tx_wqe)))
123 mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc) argument
125 u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
126 struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
140 mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc) argument
142 u16 pi = mlx5_wq_cyc_ctr2ix(wq, *p
170 struct mlx5_wq_cyc *wq = &sq->wq; local
232 struct mlx5_wq_cyc *wq = &sq->wq; local
259 mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map, struct mlx5_wqe_ctrl_seg *ctrl) argument
358 struct mlx5_cqwq *wq = &cq->wq; local
[all...]
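
mlx5e_wqc_has_room_for() above relies on free-running producer/consumer counters that are only masked into ring indices on access: (cc - pc) masked by the size yields the free-slot count, except that an empty ring (cc == pc) also masks to zero and needs the explicit check. A self-contained userspace sketch of that arithmetic, with invented names and assuming a power-of-two ring size:

#include <stdint.h>
#include <stdbool.h>

struct cyc_wq {
	uint16_t sz;	/* ring size, must be a power of two */
	uint16_t pc;	/* producer counter, free-running, never masked on update */
	uint16_t cc;	/* consumer counter, free-running */
};

static uint16_t cyc_ctr2ix(const struct cyc_wq *wq, uint16_t ctr)
{
	return ctr & (wq->sz - 1);	/* counter -> slot index */
}

static bool cyc_has_room_for(const struct cyc_wq *wq, uint16_t n)
{
	/* Masked (cc - pc) equals sz - in_use while entries are outstanding;
	 * it masks to 0 both when empty and when full, so empty (cc == pc)
	 * is special-cased as "all slots free".
	 */
	return cyc_ctr2ix(wq, wq->cc - wq->pc) >= n || wq->cc == wq->pc;
}
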
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/lib/
aso.c 8 #include "wq.h"
12 struct mlx5_cqwq wq; member in struct:mlx5_aso_cq
31 struct mlx5_wq_cyc wq; member in struct:mlx5_aso
56 err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
64 for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
65 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
138 mlx5_core_err(mdev, "Failed to alloc aso wq cq, err=%d\n", err);
144 mlx5_core_err(mdev, "Failed to create aso wq cq, err=%d\n", err);
161 void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
162 struct mlx5_wq_cyc *wq local
181 void *in, *sqc, *wq; local
272 void *sqc_data, *wq; local
[all...]
/linux-master/drivers/infiniband/hw/cxgb4/
t4.h 480 static inline int t4_rqes_posted(struct t4_wq *wq) argument
482 return wq->rq.in_use;
485 static inline int t4_rq_empty(struct t4_wq *wq) argument
487 return wq->rq.in_use == 0;
490 static inline u32 t4_rq_avail(struct t4_wq *wq) argument
492 return wq->rq.size - 1 - wq->rq.in_use;
495 static inline void t4_rq_produce(struct t4_wq *wq, u8 len16) argument
497 wq->rq.in_use++;
498 if (++wq
505 t4_rq_consume(struct t4_wq *wq) argument
512 t4_rq_host_wq_pidx(struct t4_wq *wq) argument
517 t4_rq_wq_size(struct t4_wq *wq) argument
527 t4_sq_empty(struct t4_wq *wq) argument
532 t4_sq_avail(struct t4_wq *wq) argument
537 t4_sq_produce(struct t4_wq *wq, u8 len16) argument
547 t4_sq_consume(struct t4_wq *wq) argument
556 t4_sq_host_wq_pidx(struct t4_wq *wq) argument
561 t4_sq_wq_size(struct t4_wq *wq) argument
601 t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe) argument
625 t4_ring_rq_db(struct t4_wq *wq, u16 inc, union t4_recv_wr *wqe) argument
650 t4_wq_in_error(struct t4_wq *wq) argument
655 t4_set_wq_in_error(struct t4_wq *wq, u32 srqidx) argument
662 t4_disable_wq_db(struct t4_wq *wq) argument
667 t4_enable_wq_db(struct t4_wq *wq) argument
[all...]
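
The t4.h helpers above take the other common approach: an explicit in_use counter plus manually wrapped pidx/cidx indices, with t4_rq_avail() holding one slot in reserve (size - 1 - in_use) so a full ring is never confused with an empty one. A hedged sketch of the same bookkeeping, with illustrative names:

#include <stdint.h>

struct t4_ring {
	uint32_t size;		/* number of slots; need not be a power of two */
	uint32_t in_use;	/* entries posted but not yet consumed */
	uint32_t pidx;		/* producer index */
	uint32_t cidx;		/* consumer index */
};

static uint32_t ring_avail(const struct t4_ring *q)
{
	return q->size - 1 - q->in_use;	/* keep one slot back */
}

static void ring_produce(struct t4_ring *q)
{
	q->in_use++;
	if (++q->pidx == q->size)	/* manual wrap instead of masking */
		q->pidx = 0;
}

static void ring_consume(struct t4_ring *q)
{
	q->in_use--;
	if (++q->cidx == q->size)
		q->cidx = 0;
}
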
cq.c 184 static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq, u32 srqidx) argument
188 pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
189 wq, cq, cq->sw_cidx, cq->sw_pidx);
195 CQE_QPID_V(wq->sq.qid));
203 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count) argument
206 int in_use = wq->rq.in_use - count;
208 pr_debug("wq %p cq %p rq.in_use %u skip count %u\n",
209 wq, cq, wq->rq.in_use, count);
211 insert_recv_cqe(wq, c
217 insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq, struct t4_swsqe *swcqe) argument
241 struct t4_wq *wq = &qhp->wq; local
267 flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq) argument
300 create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe, struct t4_cqe *read_cqe) argument
312 advance_oldest_read(struct t4_wq *wq) argument
422 cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) argument
443 c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count) argument
544 poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, u8 *cqe_flushed, u64 *cookie, u32 *credit, struct t4_srq *srq) argument
758 struct t4_wq *wq = qhp ? &qhp->wq : NULL; local
[all...]
restrack.c 39 static int fill_sq(struct sk_buff *msg, struct t4_wq *wq) argument
42 if (rdma_nl_put_driver_u32(msg, "sqid", wq->sq.qid))
44 if (rdma_nl_put_driver_u32(msg, "flushed", wq->flushed))
46 if (rdma_nl_put_driver_u32(msg, "memsize", wq->sq.memsize))
48 if (rdma_nl_put_driver_u32(msg, "cidx", wq->sq.cidx))
50 if (rdma_nl_put_driver_u32(msg, "pidx", wq->sq.pidx))
52 if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->sq.wq_pidx))
54 if (rdma_nl_put_driver_u32(msg, "flush_cidx", wq->sq.flush_cidx))
56 if (rdma_nl_put_driver_u32(msg, "in_use", wq->sq.in_use))
58 if (rdma_nl_put_driver_u32(msg, "size", wq
67 fill_rq(struct sk_buff *msg, struct t4_wq *wq) argument
144 struct t4_wq wq; local
[all...]
qp.c 150 static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, argument
157 dealloc_sq(rdev, &wq->sq);
158 kfree(wq->sq.sw_sq);
159 c4iw_put_qpid(rdev, wq->sq.qid, uctx);
163 wq->rq.memsize, wq->rq.queue,
164 dma_unmap_addr(&wq->rq, mapping));
165 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
166 kfree(wq
199 create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, struct t4_cq *rcq, struct t4_cq *scq, struct c4iw_dev_ucontext *uctx, struct c4iw_wr_wait *wr_waitp, int need_rq) argument
2478 struct t4_srq *wq = &srq->wq; local
2515 struct t4_srq *wq = &srq->wq; local
[all...]
/linux-master/drivers/net/ethernet/cisco/enic/
enic_res.h 30 static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq, argument
36 struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
52 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt,
56 static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq, argument
60 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
65 static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf, argument
69 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
75 static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq, argument
80 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
87 static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq, argument
98 enic_queue_wq_desc_tso(struct vnic_wq *wq, void *os_buf, dma_addr_t dma_addr, unsigned int len, unsigned int mss, unsigned int hdr_len, int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback) argument
[all...]
/linux-master/fs/xfs/
xfs_pwork.c 74 pctl->wq = alloc_workqueue("%s-%d",
77 if (!pctl->wq)
97 queue_work(pctl->wq, &pwork->work);
105 destroy_workqueue(pctl->wq);
106 pctl->wq = NULL;
/linux-master/io_uring/
io-wq.h 47 void io_wq_exit_start(struct io_wq *wq);
48 void io_wq_put_and_exit(struct io_wq *wq);
50 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
54 int io_wq_max_workers(struct io_wq *wq, int *new_count);
64 enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
/linux-master/drivers/crypto/intel/iaa/
iaa_crypto_main.c 51 pr_debug("%s: returning wq at idx %d (iaa wq %d.%d) from cpu %d\n", __func__,
58 static void wq_table_add(int cpu, struct idxd_wq *wq) argument
65 entry->wqs[entry->n_wqs++] = wq;
67 pr_debug("%s: added iaa wq %d.%d to idx %d of cpu %d\n", __func__,
423 int idx, struct idxd_wq *wq)
472 struct idxd_wq *wq)
482 ret = init_device_compression_mode(iaa_device, mode, i, wq);
520 static bool iaa_has_wq(struct iaa_device *iaa_device, struct idxd_wq *wq) argument
525 if (iaa_wq->wq
421 init_device_compression_mode(struct iaa_device *iaa_device, struct iaa_compression_mode *mode, int idx, struct idxd_wq *wq) argument
471 init_device_compression_modes(struct iaa_device *iaa_device, struct idxd_wq *wq) argument
567 add_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq, struct iaa_wq **new_wq) argument
596 del_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq) argument
652 struct idxd_wq *wq; local
662 iaa_wq_get(struct idxd_wq *wq) argument
681 iaa_wq_put(struct idxd_wq *wq) argument
746 save_iaa_wq(struct idxd_wq *wq) argument
815 remove_iaa_wq(struct idxd_wq *wq) argument
1127 iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, struct idxd_wq *wq, dma_addr_t src_addr, unsigned int slen, dma_addr_t dst_addr, unsigned int *dlen, u32 *compression_crc, bool disable_async) argument
1280 iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req, struct idxd_wq *wq, dma_addr_t src_addr, unsigned int slen, dma_addr_t dst_addr, unsigned int *dlen, u32 compression_crc) argument
1367 iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req, struct idxd_wq *wq, dma_addr_t src_addr, unsigned int slen, dma_addr_t dst_addr, unsigned int *dlen, bool disable_async) argument
1499 struct idxd_wq *wq; local
1624 struct idxd_wq *wq; local
1717 struct idxd_wq *wq; local
1862 struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); local
1951 struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); local
[all...]
iaa_crypto_stats.c 95 struct iaa_wq *wq = idxd_wq_get_private(idxd_wq); local
97 wq->comp_calls++;
98 wq->iaa_device->comp_calls++;
103 struct iaa_wq *wq = idxd_wq_get_private(idxd_wq); local
105 wq->comp_bytes += n;
106 wq->iaa_device->comp_bytes += n;
111 struct iaa_wq *wq = idxd_wq_get_private(idxd_wq); local
113 wq->decomp_calls++;
114 wq->iaa_device->decomp_calls++;
119 struct iaa_wq *wq local
139 reset_wq_stats(struct iaa_wq *wq) argument
[all...]
/linux-master/drivers/scsi/fnic/
fnic_res.h 18 static inline void fnic_queue_wq_desc(struct vnic_wq *wq, argument
25 struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
39 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
42 static inline void fnic_queue_wq_eth_desc(struct vnic_wq *wq, argument
49 struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
64 vnic_wq_post(wq, os_buf, dma_addr, len, 1, 1);
67 static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq, argument
79 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
109 vnic_wq_copy_post(wq);
112 static inline void fnic_queue_wq_copy_desc_itmf(struct vnic_wq_copy *wq, argument
138 fnic_queue_wq_copy_desc_flogi_reg(struct vnic_wq_copy *wq, u32 req_id, u8 format, u32 s_id, u8 *gw_mac) argument
157 fnic_queue_wq_copy_desc_fip_reg(struct vnic_wq_copy *wq, u32 req_id, u32 s_id, u8 *fcf_mac, u8 *ha_mac, u32 r_a_tov, u32 e_d_tov) argument
181 fnic_queue_wq_copy_desc_fw_reset(struct vnic_wq_copy *wq, u32 req_id) argument
194 fnic_queue_wq_copy_desc_lunmap(struct vnic_wq_copy *wq, u32 req_id, u64 lunmap_addr, u32 lunmap_len) argument
[all...]
/linux-master/drivers/infiniband/core/
uverbs_std_types_wq.c 14 struct ib_wq *wq = uobject->object; local
19 ret = ib_destroy_wq_user(wq, &attrs->driver_udata);
38 struct ib_wq *wq; local
74 wq = pd->device->ops.create_wq(pd, &wq_init_attr, &attrs->driver_udata);
75 if (IS_ERR(wq)) {
76 ret = PTR_ERR(wq);
80 obj->uevent.uobject.object = wq;
81 wq->wq_type = wq_init_attr.wq_type;
82 wq->cq = cq;
83 wq
[all...]
/linux-master/drivers/net/ethernet/huawei/hinic/
hinic_hw_qp.c 61 #define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask)
62 #define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask)
98 struct hinic_wq *wq; local
100 wq = sq->wq;
101 ci_start = atomic_read(&wq->cons_idx);
102 pi_start = atomic_read(&wq->prod_idx);
105 wq_page_addr = be64_to_cpu(*wq->block_vaddr);
112 if (wq->num_q_pages == 1)
115 wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq
160 struct hinic_wq *wq; local
221 struct hinic_wq *wq = sq->wq; local
249 struct hinic_wq *wq = rq->wq; local
281 hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif, struct hinic_wq *wq, struct msix_entry *entry, void *ci_addr, dma_addr_t ci_dma_addr, void __iomem *db_base) argument
321 struct hinic_wq *wq = rq->wq; local
364 struct hinic_wq *wq = rq->wq; local
384 hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif, struct hinic_wq *wq, struct msix_entry *entry) argument
457 struct hinic_wq *wq = sq->wq; local
470 struct hinic_wq *wq = rq->wq; local
638 struct hinic_wq *wq = sq->wq; local
877 struct hinic_wq *wq = rq->wq; local
[all...]
/linux-master/drivers/gpu/drm/i915/
i915_sw_fence.c 269 static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, void *key) argument
271 i915_sw_fence_set_error_once(wq->private, flags);
273 list_del(&wq->entry);
274 __i915_sw_fence_complete(wq->private, key);
276 if (wq->flags & I915_SW_FENCE_FLAG_ALLOC)
277 kfree(wq);
285 wait_queue_entry_t *wq; local
293 list_for_each_entry(wq, &fence->wait.head, entry) {
294 if (wq->func != i915_sw_fence_wake)
297 if (__i915_sw_fence_check_if_after(wq
306 wait_queue_entry_t *wq; local
340 __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, struct i915_sw_fence *signaler, wait_queue_entry_t *wq, gfp_t gfp) argument
396 i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, struct i915_sw_fence *signaler, wait_queue_entry_t *wq) argument
[all...]
/linux-master/tools/workqueue/
wq_dump.py 81 def wq_type_str(wq):
82 if wq.flags & WQ_BH:
84 elif wq.flags & WQ_UNBOUND:
85 if wq.flags & WQ_ORDERED:
88 if wq.unbound_attrs.affn_strict:
181 for wq in list_for_each_entry('struct workqueue_struct', workqueues.address_of_(), 'list'):
182 print(f'{wq.name.string_().decode():{WQ_NAME_LEN}} {wq_type_str(wq):10}', end='')
185 pool_id = per_cpu_ptr(wq.cpu_pwq, cpu)[0].pool.id.value_()
189 if wq
[all...]
/linux-master/include/linux/
workqueue.h 113 struct workqueue_struct *wq; member in struct:delayed_work
122 struct workqueue_struct *wq; member in struct:rcu_work
402 /* BH wq only allows the following flags */
529 extern void destroy_workqueue(struct workqueue_struct *wq);
533 int apply_workqueue_attrs(struct workqueue_struct *wq,
537 extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
539 extern bool queue_work_node(int node, struct workqueue_struct *wq,
541 extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
543 extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
545 extern bool queue_rcu_work(struct workqueue_struct *wq, struc
602 queue_work(struct workqueue_struct *wq, struct work_struct *work) argument
616 queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay) argument
631 mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay) argument
784 workqueue_sysfs_register(struct workqueue_struct *wq) argument
[all...]
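
The workqueue.h declarations above form the core submission API. A minimal, hedged kernel-module sketch of the usual lifecycle (alloc_workqueue, queue_work, destroy_workqueue); my_wq, my_work, and my_work_fn are made-up names for the example:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static void my_work_fn(struct work_struct *work)
{
	pr_info("work item ran\n");
}

static DECLARE_WORK(my_work, my_work_fn);

static int __init my_init(void)
{
	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
	if (!my_wq)
		return -ENOMEM;
	queue_work(my_wq, &my_work);	/* returns false if already queued */
	return 0;
}

static void __exit my_exit(void)
{
	destroy_workqueue(my_wq);	/* drains pending work before freeing */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
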
/linux-master/fs/btrfs/
misc.h 21 static inline void cond_wake_up(struct wait_queue_head *wq) argument
27 if (wq_has_sleeper(wq))
28 wake_up(wq);
31 static inline void cond_wake_up_nomb(struct wait_queue_head *wq) argument
39 if (waitqueue_active(wq))
40 wake_up(wq);
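
btrfs's cond_wake_up() above avoids taking the waitqueue lock when nobody is sleeping; wq_has_sleeper() supplies the memory barrier that orders the waker's condition write against the waiter's check. A hedged usage sketch with invented names:

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_done;

static void demo_wait(void)
{
	wait_event(demo_wq, demo_done);	/* sleeps until demo_done != 0 */
}

static void demo_complete(void)
{
	demo_done = 1;
	/* Equivalent to btrfs's cond_wake_up(&demo_wq): skip the wakeup,
	 * and its lock, when no task is on the waitqueue.
	 */
	if (wq_has_sleeper(&demo_wq))	/* implies the needed barrier */
		wake_up(&demo_wq);
}
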
/linux-master/drivers/infiniband/hw/mlx5/
srq_cmd.c 27 static void set_wq(void *wq, struct mlx5_srq_attr *in) argument
29 MLX5_SET(wq, wq, wq_signature, !!(in->flags
31 MLX5_SET(wq, wq, log_wq_pg_sz, in->log_page_size);
32 MLX5_SET(wq, wq, log_wq_stride, in->wqe_shift + 4);
33 MLX5_SET(wq, wq, log_wq_sz, in->log_size);
34 MLX5_SET(wq, w
55 get_wq(void *wq, struct mlx5_srq_attr *in) argument
334 void *wq; local
402 void *wq; local
481 void *wq; local
[all...]
