Searched refs:wq (Results 26 - 50 of 711) sorted by relevance

/linux-master/drivers/dma/idxd/
H A D idxd.h 169 struct idxd_wq *wq; member in struct:idxd_cdev
192 struct idxd_wq *wq; member in struct:idxd_dma_chan
205 struct workqueue_struct *wq; member in struct:idxd_wq
310 struct idxd_wq *wq; member in struct:idxd_evl_fault
367 struct workqueue_struct *wq; member in struct:idxd_device
421 struct idxd_wq *wq; member in struct:idxd_desc
433 #define wq_confdev(wq) &wq->idxd_dev.conf_dev
443 static inline struct idxd_device_driver *wq_to_idxd_drv(struct idxd_wq *wq) argument
445 struct device *dev = wq_confdev(wq);
551 is_idxd_wq_dmaengine(struct idxd_wq *wq) argument
558 is_idxd_wq_user(struct idxd_wq *wq) argument
563 is_idxd_wq_kernel(struct idxd_wq *wq) argument
568 wq_dedicated(struct idxd_wq *wq) argument
573 wq_shared(struct idxd_wq *wq) argument
588 wq_pasid_enabled(struct idxd_wq *wq) argument
594 wq_shared_supported(struct idxd_wq *wq) argument
630 idxd_wq_portal_addr(struct idxd_wq *wq) argument
638 idxd_wq_get(struct idxd_wq *wq) argument
643 idxd_wq_put(struct idxd_wq *wq) argument
648 idxd_wq_refcount(struct idxd_wq *wq) argument
653 idxd_wq_set_private(struct idxd_wq *wq, void *private) argument
658 idxd_wq_get_private(struct idxd_wq *wq) argument
677 idxd_wq_set_max_batch_size(int idxd_type, struct idxd_wq *wq, u32 max_batch_size) argument
695 idxd_wq_driver_name_match(struct idxd_wq *wq, struct device *dev) argument
[all...]
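
The wq_confdev()/wq_to_idxd_drv() pairing above is the usual embedded-struct-device accessor pattern. A minimal sketch of that pattern (demo_* names are illustrative, not the driver's):

#include <linux/device.h>

struct demo_wq {
	int id;
	struct device conf_dev;		/* embedded, like idxd_dev.conf_dev */
};

/* mirror of the wq_confdev() macro shape */
#define demo_wq_confdev(wq) (&(wq)->conf_dev)

/* recover the container from the device pointer the driver core hands back */
static inline struct demo_wq *confdev_to_demo_wq(struct device *dev)
{
	return container_of(dev, struct demo_wq, conf_dev);
}
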
H A D submit.c 11 static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu) argument
14 struct idxd_device *idxd = wq->idxd;
16 desc = wq->descs[idx];
27 struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype) argument
30 struct idxd_device *idxd = wq->idxd;
38 sbq = &wq->sbq;
44 return __get_desc(wq, idx, cpu);
62 return __get_desc(wq, idx, cpu);
66 void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc) argument
71 sbitmap_queue_clear(&wq
75 list_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie, struct idxd_desc *desc) argument
97 llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie, struct idxd_desc *desc) argument
155 idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc) argument
170 idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc) argument
[all...]
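
idxd_alloc_desc()/idxd_free_desc() above draw preallocated descriptors out of an sbitmap_queue. A minimal sketch of that pool pattern, with hypothetical demo_* types (the real driver adds blocking-wait handling for IDXD_OP_BLOCK):

#include <linux/sbitmap.h>

struct demo_desc {
	int id;				/* index into the pool, doubles as bit nr */
	unsigned int cpu;		/* cpu the bit was taken on */
};

struct demo_pool {
	struct sbitmap_queue sbq;
	struct demo_desc **descs;	/* preallocated, one per sbq bit */
};

static struct demo_desc *demo_alloc_desc(struct demo_pool *pool)
{
	unsigned int cpu;
	int idx = sbitmap_queue_get(&pool->sbq, &cpu);
	struct demo_desc *desc;

	if (idx < 0)
		return NULL;		/* pool exhausted; caller may retry */

	desc = pool->descs[idx];
	desc->cpu = cpu;
	return desc;
}

static void demo_free_desc(struct demo_pool *pool, struct demo_desc *desc)
{
	/* returning the bit makes the descriptor allocatable again */
	sbitmap_queue_clear(&pool->sbq, desc->id, desc->cpu);
}
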
H A D irq.c 49 struct idxd_wq *wq = idxd->wqs[i]; local
51 rc = idxd_wq_enable(wq);
54 dev_warn(dev, "Unable to re-enable wq %s\n",
55 dev_name(wq_confdev(wq)));
73 struct idxd_wq *wq = ie_to_wq(ie); local
74 struct idxd_device *idxd = wq->idxd;
88 portal = idxd_wq_portal_addr(wq);
95 if (wq_dedicated(wq)) {
98 rc = idxd_enqcmds(wq, portal, &desc);
101 dev_warn(dev, "Failed to submit drain desc on wq
156 struct idxd_wq *wq = ie_to_wq(ie); local
225 struct idxd_wq *wq = fault->wq; local
336 struct idxd_wq *wq = idxd->wqs[entry_head->wq_idx]; local
417 struct idxd_wq *wq = idxd->wqs[id]; local
425 struct idxd_wq *wq = idxd->wqs[i]; local
518 struct idxd_wq *wq = desc->wq; local
545 struct idxd_wq *wq = desc->wq; local
[all...]
H A D sysfs.c 342 struct idxd_wq *wq = idxd->wqs[i]; local
344 if (!wq->group)
347 if (wq->group->id == group->id)
348 rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);
590 struct idxd_wq *wq = confdev_to_wq(dev); local
592 return sysfs_emit(buf, "%d\n", wq->client_count);
601 struct idxd_wq *wq = confdev_to_wq(dev); local
603 switch (wq->state) {
619 struct idxd_wq *wq local
631 struct idxd_wq *wq = confdev_to_wq(dev); local
674 struct idxd_wq *wq = confdev_to_wq(dev); local
683 struct idxd_wq *wq = confdev_to_wq(dev); local
710 struct idxd_wq *wq = confdev_to_wq(dev); local
721 struct idxd_wq *wq = idxd->wqs[i]; local
733 struct idxd_wq *wq = confdev_to_wq(dev); local
761 struct idxd_wq *wq = confdev_to_wq(dev); local
770 struct idxd_wq *wq = confdev_to_wq(dev); local
798 struct idxd_wq *wq = confdev_to_wq(dev); local
807 struct idxd_wq *wq = confdev_to_wq(dev); local
844 struct idxd_wq *wq = confdev_to_wq(dev); local
853 struct idxd_wq *wq = confdev_to_wq(dev); local
885 struct idxd_wq *wq = confdev_to_wq(dev); local
904 struct idxd_wq *wq = confdev_to_wq(dev); local
933 struct idxd_wq *wq = confdev_to_wq(dev); local
942 struct idxd_wq *wq = confdev_to_wq(dev); local
968 struct idxd_wq *wq = confdev_to_wq(dev); local
1002 struct idxd_wq *wq = confdev_to_wq(dev); local
1010 struct idxd_wq *wq = confdev_to_wq(dev); local
1039 struct idxd_wq *wq = confdev_to_wq(dev); local
1047 struct idxd_wq *wq = confdev_to_wq(dev); local
1075 struct idxd_wq *wq = confdev_to_wq(dev); local
1083 struct idxd_wq *wq = confdev_to_wq(dev); local
1111 struct idxd_wq *wq = confdev_to_wq(dev); local
1119 struct idxd_wq *wq = confdev_to_wq(dev); local
1149 struct idxd_wq *wq = confdev_to_wq(dev); local
1168 struct idxd_wq *wq = confdev_to_wq(dev); local
1179 struct idxd_wq *wq = confdev_to_wq(dev); local
1226 struct idxd_wq *wq = confdev_to_wq(dev); local
1252 struct idxd_wq *wq = confdev_to_wq(dev); local
1287 struct idxd_wq *wq = confdev_to_wq(dev); local
1295 struct idxd_wq *wq = confdev_to_wq(dev); local
1357 struct idxd_wq *wq = confdev_to_wq(dev); local
1387 struct idxd_wq *wq = confdev_to_wq(dev); local
1511 struct idxd_wq *wq = idxd->wqs[i]; local
1894 struct idxd_wq *wq; local
1965 struct idxd_wq *wq = idxd->wqs[i]; local
[all...]
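
Nearly every hit in this file has the same shape: recover the wq from the config device, then emit or parse one attribute. A minimal, hypothetical show/store pair in that shape (demo_* names and the drvdata lookup are illustrative, not the driver's):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

struct demo_wq {
	int size;
};

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct demo_wq *wq = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", wq->size);	/* bounds-checked emit */
}

static ssize_t size_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct demo_wq *wq = dev_get_drvdata(dev);
	int val, rc;

	rc = kstrtoint(buf, 10, &val);
	if (rc < 0)
		return rc;
	wq->size = val;
	return count;
}
static DEVICE_ATTR_RW(size);
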
/linux-master/tools/testing/selftests/bpf/progs/
H A D wq.c 56 int (callback_fn)(void *map, int *key, struct bpf_wq *wq))
59 struct bpf_wq *wq; local
73 wq = &val->w;
74 if (bpf_wq_init(wq, map, 0) != 0)
77 if (bpf_wq_set_callback(wq, callback_fn, 0))
80 if (bpf_wq_start(wq, 0))
87 int (callback_fn)(void *map, int *key, struct bpf_wq *wq))
90 struct bpf_wq *wq; local
103 wq = &val->work;
104 if (bpf_wq_init(wq, ma
55 test_elem_callback(void *map, int *key, int (callback_fn)(void *map, int *key, struct bpf_wq *wq)) argument
86 test_hmap_elem_callback(void *map, int *key, int (callback_fn)(void *map, int *key, struct bpf_wq *wq)) argument
[all...]
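
The selftest above exercises the bpf_wq kfuncs in a fixed order: embed a struct bpf_wq in a map value, then bpf_wq_init() -> bpf_wq_set_callback() -> bpf_wq_start(). A minimal sketch of that flow; the map layout, section name, and kfunc declarations (which the selftests pull in via bpf_experimental.h) are assumptions here:

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

struct elem {
	struct bpf_wq w;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} array SEC(".maps");

/* runs later from workqueue context; signature matches the listing above */
static int wq_cb(void *map, int *key, struct bpf_wq *wq)
{
	return 0;
}

SEC("tc")
int schedule_wq(void *ctx)
{
	int key = 0;
	struct elem *val = bpf_map_lookup_elem(&array, &key);

	if (!val)
		return 0;
	if (bpf_wq_init(&val->w, &array, 0))
		return 0;
	if (bpf_wq_set_callback(&val->w, wq_cb, 0))
		return 0;
	bpf_wq_start(&val->w, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";
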
/linux-master/drivers/infiniband/hw/mana/
H A D Makefile 4 mana_ib-y := device.o main.o wq.o qp.o cq.o mr.o
/linux-master/kernel/
H A D workqueue.c 164 * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
166 * PWR: wq_pool_mutex and wq->mutex protected for writes. Either or
169 * WQ: wq->mutex protected.
171 * WR: wq->mutex protected for writes. RCU protected for reads.
173 * WO: wq->mutex protected for writes. Updated with WRITE_ONCE() and can be read
259 struct workqueue_struct *wq; /* I: the owning workqueue */ member in struct:pool_workqueue
286 struct list_head pwqs_node; /* WR: node on wq->pwqs */
287 struct list_head mayday_node; /* MD: node on wq->maydays */
295 * grabbing wq->mutex.
337 struct list_head pwqs; /* WR: all pwqs of this wq */
722 unbound_pwq_slot(struct workqueue_struct *wq, int cpu) argument
731 unbound_pwq(struct workqueue_struct *wq, int cpu) argument
746 unbound_effective_cpumask(struct workqueue_struct *wq) argument
1552 wq_node_nr_active(struct workqueue_struct *wq, int node) argument
1573 wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu) argument
1739 struct workqueue_struct *wq = pwq->wq; local
1851 unplug_oldest_pwq(struct workqueue_struct *wq) argument
2219 is_chained_work(struct workqueue_struct *wq) argument
2259 __queue_work(int cpu, struct workqueue_struct *wq, struct work_struct *work) argument
2400 queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work) argument
2468 queue_work_node(int node, struct workqueue_struct *wq, struct work_struct *work) argument
2509 __queue_delayed_work(int cpu, struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay) argument
2560 queue_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay) argument
2599 mod_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay) argument
2635 queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork) argument
2981 struct workqueue_struct *wq = pwq->wq; local
3436 struct workqueue_struct *wq = rescuer->rescue_wq; local
3833 flush_workqueue_prep_pwqs(struct workqueue_struct *wq, int flush_color, int work_color) argument
3873 touch_wq_lockdep_map(struct workqueue_struct *wq) argument
3887 touch_work_lockdep_map(struct work_struct *work, struct workqueue_struct *wq) argument
3909 __flush_workqueue(struct workqueue_struct *wq) argument
4070 drain_workqueue(struct workqueue_struct *wq) argument
4120 struct workqueue_struct *wq; local
4765 wq_init_lockdep(struct workqueue_struct *wq) argument
4778 wq_unregister_lockdep(struct workqueue_struct *wq) argument
4783 wq_free_lockdep(struct workqueue_struct *wq) argument
4789 wq_init_lockdep(struct workqueue_struct *wq) argument
4793 wq_unregister_lockdep(struct workqueue_struct *wq) argument
4797 wq_free_lockdep(struct workqueue_struct *wq) argument
4856 struct workqueue_struct *wq = local
5039 struct workqueue_struct *wq = pwq->wq; local
5089 init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, struct worker_pool *pool) argument
5110 struct workqueue_struct *wq = pwq->wq; local
5126 alloc_unbound_pwq(struct workqueue_struct *wq, const struct workqueue_attrs *attrs) argument
5190 install_unbound_pwq(struct workqueue_struct *wq, int cpu, struct pool_workqueue *pwq) argument
5209 struct workqueue_struct *wq; /* target workqueue */ member in struct:apply_wqattrs_ctx
5234 apply_wqattrs_prepare(struct workqueue_struct *wq, const struct workqueue_attrs *attrs, const cpumask_var_t unbound_cpumask) argument
5329 apply_workqueue_attrs_locked(struct workqueue_struct *wq, const struct workqueue_attrs *attrs) argument
5366 apply_workqueue_attrs(struct workqueue_struct *wq, const struct workqueue_attrs *attrs) argument
5402 wq_update_pod(struct workqueue_struct *wq, int cpu, int hotplug_cpu, bool online) argument
5454 alloc_and_link_pwqs(struct workqueue_struct *wq) argument
5542 init_rescuer(struct workqueue_struct *wq) argument
5585 wq_adjust_max_active(struct workqueue_struct *wq) argument
5646 struct workqueue_struct *wq; local
5781 destroy_workqueue(struct workqueue_struct *wq) argument
5873 workqueue_set_max_active(struct workqueue_struct *wq, int max_active) argument
5910 workqueue_set_min_active(struct workqueue_struct *wq, int min_active) argument
5973 workqueue_congested(int cpu, struct workqueue_struct *wq) argument
6070 struct workqueue_struct *wq = NULL; local
6248 show_one_workqueue(struct workqueue_struct *wq) argument
6344 struct workqueue_struct *wq; local
6369 struct workqueue_struct *wq; local
6588 struct workqueue_struct *wq; local
6629 struct workqueue_struct *wq; local
6738 struct workqueue_struct *wq; local
6770 struct workqueue_struct *wq; local
6811 struct workqueue_struct *wq; local
6836 struct workqueue_struct *wq; local
6919 struct workqueue_struct *wq; local
6974 struct workqueue_struct *wq; member in struct:wq_device
6988 struct workqueue_struct *wq = dev_to_wq(dev); local
6997 struct workqueue_struct *wq = dev_to_wq(dev); local
7006 struct workqueue_struct *wq = dev_to_wq(dev); local
7040 struct workqueue_struct *wq = dev_to_wq(dev); local
7051 wq_sysfs_prep_attrs(struct workqueue_struct *wq) argument
7068 struct workqueue_struct *wq = dev_to_wq(dev); local
7093 struct workqueue_struct *wq = dev_to_wq(dev); local
7107 struct workqueue_struct *wq = dev_to_wq(dev); local
7130 struct workqueue_struct *wq = dev_to_wq(dev); local
7150 struct workqueue_struct *wq = dev_to_wq(dev); local
7172 struct workqueue_struct *wq = dev_to_wq(dev); local
7182 struct workqueue_struct *wq = dev_to_wq(dev); local
7337 workqueue_sysfs_register(struct workqueue_struct *wq) argument
7395 workqueue_sysfs_unregister(struct workqueue_struct *wq) argument
7406 workqueue_sysfs_unregister(struct workqueue_struct *wq) argument
7820 struct workqueue_struct *wq; local
7942 struct workqueue_struct *wq; local
[all...]
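
This file implements the core workqueue machinery; a minimal usage sketch against the exported entry points it backs (queue_work() funnels into the __queue_work() hit above):

#include <linux/printk.h>
#include <linux/workqueue.h>

static void demo_fn(struct work_struct *work)
{
	pr_info("demo work ran\n");
}

static DECLARE_WORK(demo_work, demo_fn);

static int demo(void)
{
	struct workqueue_struct *wq;

	wq = alloc_workqueue("demo-wq", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;

	queue_work(wq, &demo_work);	/* ends up in __queue_work() */
	flush_workqueue(wq);		/* wait for everything queued so far */
	destroy_workqueue(wq);		/* drains, then frees the wq */
	return 0;
}
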
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/
H A D txrx.h 104 mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n) argument
106 return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
109 static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi, size_t wqe_size) argument
113 wqe = mlx5_wq_cyc_get_wqe(wq, pi);
120 ((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_tx_wqe)))
123 mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc) argument
125 u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
126 struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
140 mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc) argument
142 u16 pi = mlx5_wq_cyc_ctr2ix(wq, *p
170 struct mlx5_wq_cyc *wq = &sq->wq; local
232 struct mlx5_wq_cyc *wq = &sq->wq; local
259 mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map, struct mlx5_wqe_ctrl_seg *ctrl) argument
358 struct mlx5_cqwq *wq = &cq->wq; local
[all...]
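
mlx5e_wqc_has_room_for() above relies on free-running producer/consumer counters plus a power-of-two mask. A minimal sketch of that ring math, with hypothetical demo_* helpers:

#include <linux/types.h>

static inline u16 demo_ctr2ix(u16 ctr, u16 sz_mask)	/* sz_mask = size - 1 */
{
	return ctr & sz_mask;
}

/*
 * pc - cc is the in-use count, so (cc - pc) & sz_mask wraps to the
 * free-slot count -- except when the ring is completely empty, where
 * it yields 0; cc == pc covers that case, exactly as in the original.
 */
static inline bool demo_has_room_for(u16 cc, u16 pc, u16 n, u16 sz_mask)
{
	return (demo_ctr2ix(cc - pc, sz_mask) >= n) || (cc == pc);
}
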
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/lib/
H A D aso.c 8 #include "wq.h"
12 struct mlx5_cqwq wq; member in struct:mlx5_aso_cq
31 struct mlx5_wq_cyc wq; member in struct:mlx5_aso
56 err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
64 for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
65 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
138 mlx5_core_err(mdev, "Failed to alloc aso wq cq, err=%d\n", err);
144 mlx5_core_err(mdev, "Failed to create aso wq cq, err=%d\n", err);
161 void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
162 struct mlx5_wq_cyc *wq local
181 void *in, *sqc, *wq; local
272 void *sqc_data, *wq; local
[all...]
/linux-master/drivers/infiniband/hw/cxgb4/
H A D t4.h 480 static inline int t4_rqes_posted(struct t4_wq *wq) argument
482 return wq->rq.in_use;
485 static inline int t4_rq_empty(struct t4_wq *wq) argument
487 return wq->rq.in_use == 0;
490 static inline u32 t4_rq_avail(struct t4_wq *wq) argument
492 return wq->rq.size - 1 - wq->rq.in_use;
495 static inline void t4_rq_produce(struct t4_wq *wq, u8 len16) argument
497 wq->rq.in_use++;
498 if (++wq
505 t4_rq_consume(struct t4_wq *wq) argument
512 t4_rq_host_wq_pidx(struct t4_wq *wq) argument
517 t4_rq_wq_size(struct t4_wq *wq) argument
527 t4_sq_empty(struct t4_wq *wq) argument
532 t4_sq_avail(struct t4_wq *wq) argument
537 t4_sq_produce(struct t4_wq *wq, u8 len16) argument
547 t4_sq_consume(struct t4_wq *wq) argument
556 t4_sq_host_wq_pidx(struct t4_wq *wq) argument
561 t4_sq_wq_size(struct t4_wq *wq) argument
601 t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe) argument
625 t4_ring_rq_db(struct t4_wq *wq, u16 inc, union t4_recv_wr *wqe) argument
650 t4_wq_in_error(struct t4_wq *wq) argument
655 t4_set_wq_in_error(struct t4_wq *wq, u32 srqidx) argument
662 t4_disable_wq_db(struct t4_wq *wq) argument
667 t4_enable_wq_db(struct t4_wq *wq) argument
[all...]
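
Unlike the mlx5 free-running counters, the t4_* helpers above track occupancy with an explicit in_use count and reserve one slot so full and empty stay distinguishable. A minimal sketch, with illustrative field names:

#include <linux/types.h>

struct demo_ring {
	u32 size;	/* total slots */
	u32 in_use;	/* currently posted */
	u16 pidx;	/* producer index */
	u16 cidx;	/* consumer index */
};

static inline u32 demo_avail(struct demo_ring *r)
{
	return r->size - 1 - r->in_use;		/* one slot kept unused */
}

static inline void demo_produce(struct demo_ring *r)
{
	r->in_use++;
	if (++r->pidx == r->size)
		r->pidx = 0;			/* wrap, as t4_rq_produce does */
}

static inline void demo_consume(struct demo_ring *r)
{
	r->in_use--;
	if (++r->cidx == r->size)
		r->cidx = 0;
}
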
H A D cq.c 184 static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq, u32 srqidx) argument
188 pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
189 wq, cq, cq->sw_cidx, cq->sw_pidx);
195 CQE_QPID_V(wq->sq.qid));
203 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count) argument
206 int in_use = wq->rq.in_use - count;
208 pr_debug("wq %p cq %p rq.in_use %u skip count %u\n",
209 wq, cq, wq->rq.in_use, count);
211 insert_recv_cqe(wq, c
217 insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq, struct t4_swsqe *swcqe) argument
241 struct t4_wq *wq = &qhp->wq; local
267 flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq) argument
300 create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe, struct t4_cqe *read_cqe) argument
312 advance_oldest_read(struct t4_wq *wq) argument
422 cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) argument
443 c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count) argument
544 poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, u8 *cqe_flushed, u64 *cookie, u32 *credit, struct t4_srq *srq) argument
758 struct t4_wq *wq = qhp ? &qhp->wq : NULL; local
[all...]
H A D restrack.c 39 static int fill_sq(struct sk_buff *msg, struct t4_wq *wq) argument
42 if (rdma_nl_put_driver_u32(msg, "sqid", wq->sq.qid))
44 if (rdma_nl_put_driver_u32(msg, "flushed", wq->flushed))
46 if (rdma_nl_put_driver_u32(msg, "memsize", wq->sq.memsize))
48 if (rdma_nl_put_driver_u32(msg, "cidx", wq->sq.cidx))
50 if (rdma_nl_put_driver_u32(msg, "pidx", wq->sq.pidx))
52 if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->sq.wq_pidx))
54 if (rdma_nl_put_driver_u32(msg, "flush_cidx", wq->sq.flush_cidx))
56 if (rdma_nl_put_driver_u32(msg, "in_use", wq->sq.in_use))
58 if (rdma_nl_put_driver_u32(msg, "size", wq
67 fill_rq(struct sk_buff *msg, struct t4_wq *wq) argument
144 struct t4_wq wq; local
[all...]
H A D qp.c 150 static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, argument
157 dealloc_sq(rdev, &wq->sq);
158 kfree(wq->sq.sw_sq);
159 c4iw_put_qpid(rdev, wq->sq.qid, uctx);
163 wq->rq.memsize, wq->rq.queue,
164 dma_unmap_addr(&wq->rq, mapping));
165 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
166 kfree(wq
199 create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, struct t4_cq *rcq, struct t4_cq *scq, struct c4iw_dev_ucontext *uctx, struct c4iw_wr_wait *wr_waitp, int need_rq) argument
2478 struct t4_srq *wq = &srq->wq; local
2515 struct t4_srq *wq = &srq->wq; local
[all...]
/linux-master/drivers/net/ethernet/cisco/enic/
H A D enic_res.h 30 static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq, argument
36 struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
52 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt,
56 static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq, argument
60 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
65 static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf, argument
69 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
75 static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq, argument
80 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
87 static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq, argument
98 enic_queue_wq_desc_tso(struct vnic_wq *wq, void *os_buf, dma_addr_t dma_addr, unsigned int len, unsigned int mss, unsigned int hdr_len, int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback) argument
[all...]
/linux-master/fs/xfs/
H A D xfs_pwork.c 74 pctl->wq = alloc_workqueue("%s-%d",
77 if (!pctl->wq)
97 queue_work(pctl->wq, &pwork->work);
105 destroy_workqueue(pctl->wq);
106 pctl->wq = NULL;
/linux-master/io_uring/
H A D io-wq.h 47 void io_wq_exit_start(struct io_wq *wq);
48 void io_wq_put_and_exit(struct io_wq *wq);
50 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
54 int io_wq_max_workers(struct io_wq *wq, int *new_count);
64 enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
/linux-master/drivers/crypto/intel/iaa/
H A D iaa_crypto_main.c 51 pr_debug("%s: returning wq at idx %d (iaa wq %d.%d) from cpu %d\n", __func__,
58 static void wq_table_add(int cpu, struct idxd_wq *wq) argument
65 entry->wqs[entry->n_wqs++] = wq;
67 pr_debug("%s: added iaa wq %d.%d to idx %d of cpu %d\n", __func__,
421 int idx, struct idxd_wq *wq)
470 struct idxd_wq *wq)
480 ret = init_device_compression_mode(iaa_device, mode, i, wq);
518 static bool iaa_has_wq(struct iaa_device *iaa_device, struct idxd_wq *wq) argument
523 if (iaa_wq->wq
419 init_device_compression_mode(struct iaa_device *iaa_device, struct iaa_compression_mode *mode, int idx, struct idxd_wq *wq) argument
469 init_device_compression_modes(struct iaa_device *iaa_device, struct idxd_wq *wq) argument
565 add_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq, struct iaa_wq **new_wq) argument
594 del_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq) argument
650 struct idxd_wq *wq; local
660 iaa_wq_get(struct idxd_wq *wq) argument
679 iaa_wq_put(struct idxd_wq *wq) argument
744 save_iaa_wq(struct idxd_wq *wq) argument
813 remove_iaa_wq(struct idxd_wq *wq) argument
1125 iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, struct idxd_wq *wq, dma_addr_t src_addr, unsigned int slen, dma_addr_t dst_addr, unsigned int *dlen, u32 *compression_crc, bool disable_async) argument
1278 iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req, struct idxd_wq *wq, dma_addr_t src_addr, unsigned int slen, dma_addr_t dst_addr, unsigned int *dlen, u32 compression_crc) argument
1365 iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req, struct idxd_wq *wq, dma_addr_t src_addr, unsigned int slen, dma_addr_t dst_addr, unsigned int *dlen, bool disable_async) argument
1497 struct idxd_wq *wq; local
1619 struct idxd_wq *wq; local
1708 struct idxd_wq *wq; local
1851 struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); local
1940 struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); local
[all...]
H A D iaa_crypto_stats.c 73 struct iaa_wq *wq = idxd_wq_get_private(idxd_wq); local
75 atomic64_inc(&wq->comp_calls);
76 atomic64_inc(&wq->iaa_device->comp_calls);
81 struct iaa_wq *wq = idxd_wq_get_private(idxd_wq); local
83 atomic64_add(n, &wq->comp_bytes);
84 atomic64_add(n, &wq->iaa_device->comp_bytes);
89 struct iaa_wq *wq = idxd_wq_get_private(idxd_wq); local
91 atomic64_inc(&wq->decomp_calls);
92 atomic64_inc(&wq->iaa_device->decomp_calls);
97 struct iaa_wq *wq local
115 reset_wq_stats(struct iaa_wq *wq) argument
[all...]
/linux-master/drivers/scsi/fnic/
H A D fnic_res.h 18 static inline void fnic_queue_wq_desc(struct vnic_wq *wq, argument
25 struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
39 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
42 static inline void fnic_queue_wq_eth_desc(struct vnic_wq *wq, argument
49 struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
64 vnic_wq_post(wq, os_buf, dma_addr, len, 1, 1);
67 static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq, argument
79 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
109 vnic_wq_copy_post(wq);
112 static inline void fnic_queue_wq_copy_desc_itmf(struct vnic_wq_copy *wq, argument
138 fnic_queue_wq_copy_desc_flogi_reg(struct vnic_wq_copy *wq, u32 req_id, u8 format, u32 s_id, u8 *gw_mac) argument
157 fnic_queue_wq_copy_desc_fip_reg(struct vnic_wq_copy *wq, u32 req_id, u32 s_id, u8 *fcf_mac, u8 *ha_mac, u32 r_a_tov, u32 e_d_tov) argument
181 fnic_queue_wq_copy_desc_fw_reset(struct vnic_wq_copy *wq, u32 req_id) argument
194 fnic_queue_wq_copy_desc_lunmap(struct vnic_wq_copy *wq, u32 req_id, u64 lunmap_addr, u32 lunmap_len) argument
[all...]
/linux-master/tools/testing/selftests/bpf/prog_tests/
H A D wq.c 4 #include "wq.skel.h"
9 struct wq *wq_skel = NULL;
14 RUN_TESTS(wq);
/linux-master/drivers/infiniband/core/
H A D uverbs_std_types_wq.c 14 struct ib_wq *wq = uobject->object; local
19 ret = ib_destroy_wq_user(wq, &attrs->driver_udata);
38 struct ib_wq *wq; local
74 wq = pd->device->ops.create_wq(pd, &wq_init_attr, &attrs->driver_udata);
75 if (IS_ERR(wq)) {
76 ret = PTR_ERR(wq);
80 obj->uevent.uobject.object = wq;
81 wq->wq_type = wq_init_attr.wq_type;
82 wq->cq = cq;
83 wq
[all...]
/linux-master/drivers/net/ethernet/huawei/hinic/
H A D hinic_hw_qp.c 61 #define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask)
62 #define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask)
98 struct hinic_wq *wq; local
100 wq = sq->wq;
101 ci_start = atomic_read(&wq->cons_idx);
102 pi_start = atomic_read(&wq->prod_idx);
105 wq_page_addr = be64_to_cpu(*wq->block_vaddr);
112 if (wq->num_q_pages == 1)
115 wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq
160 struct hinic_wq *wq; local
221 struct hinic_wq *wq = sq->wq; local
249 struct hinic_wq *wq = rq->wq; local
281 hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif, struct hinic_wq *wq, struct msix_entry *entry, void *ci_addr, dma_addr_t ci_dma_addr, void __iomem *db_base) argument
321 struct hinic_wq *wq = rq->wq; local
364 struct hinic_wq *wq = rq->wq; local
384 hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif, struct hinic_wq *wq, struct msix_entry *entry) argument
457 struct hinic_wq *wq = sq->wq; local
470 struct hinic_wq *wq = rq->wq; local
638 struct hinic_wq *wq = sq->wq; local
877 struct hinic_wq *wq = rq->wq; local
[all...]
/linux-master/drivers/gpu/drm/xe/
H A D xe_sriov.c 79 destroy_workqueue(xe->sriov.wq);
80 xe->sriov.wq = NULL;
104 xe_assert(xe, !xe->sriov.wq);
105 xe->sriov.wq = alloc_workqueue("xe-sriov-wq", 0, 0);
106 if (!xe->sriov.wq)
/linux-master/drivers/gpu/drm/i915/
H A D i915_sw_fence.c 269 static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, void *key) argument
271 i915_sw_fence_set_error_once(wq->private, flags);
273 list_del(&wq->entry);
274 __i915_sw_fence_complete(wq->private, key);
276 if (wq->flags & I915_SW_FENCE_FLAG_ALLOC)
277 kfree(wq);
285 wait_queue_entry_t *wq; local
293 list_for_each_entry(wq, &fence->wait.head, entry) {
294 if (wq->func != i915_sw_fence_wake)
297 if (__i915_sw_fence_check_if_after(wq
306 wait_queue_entry_t *wq; local
340 __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, struct i915_sw_fence *signaler, wait_queue_entry_t *wq, gfp_t gfp) argument
396 i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, struct i915_sw_fence *signaler, wait_queue_entry_t *wq) argument
[all...]
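
i915_sw_fence_wake() above is a custom wait_queue_entry callback: waking the queue head runs a function instead of waking a task. A minimal sketch of arming such an entry (demo_* names are illustrative):

#include <linux/printk.h>
#include <linux/wait.h>

static int demo_wake(wait_queue_entry_t *wq, unsigned mode, int flags,
		     void *key)
{
	pr_info("woken, private %p key %p\n", wq->private, key);
	list_del_init(&wq->entry);	/* one-shot: drop off the head */
	return 0;
}

static void demo_arm(wait_queue_head_t *head, wait_queue_entry_t *wq,
		     void *private)
{
	init_waitqueue_func_entry(wq, demo_wake);
	wq->private = private;		/* context for the callback */
	add_wait_queue(head, wq);	/* wake_up(head) now runs demo_wake() */
}
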
/linux-master/include/linux/
H A D workqueue.h 118 struct workqueue_struct *wq; member in struct:delayed_work
127 struct workqueue_struct *wq; member in struct:rcu_work
410 /* BH wq only allows the following flags */
537 extern void destroy_workqueue(struct workqueue_struct *wq);
541 int apply_workqueue_attrs(struct workqueue_struct *wq,
545 extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
547 extern bool queue_work_node(int node, struct workqueue_struct *wq,
549 extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
551 extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
553 extern bool queue_rcu_work(struct workqueue_struct *wq, struc
618 queue_work(struct workqueue_struct *wq, struct work_struct *work) argument
632 queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay) argument
647 mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay) argument
701 enable_and_queue_work(struct workqueue_struct *wq, struct work_struct *work) argument
826 workqueue_sysfs_register(struct workqueue_struct *wq) argument
[all...]
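
A minimal usage sketch of the delayed-work helpers declared above (the 100ms timing is arbitrary):

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void demo_timeout(struct work_struct *work)
{
	/* runs on the workqueue roughly 100ms after queueing */
}

static DECLARE_DELAYED_WORK(demo_dwork, demo_timeout);

static void demo_use(struct workqueue_struct *wq)
{
	queue_delayed_work(wq, &demo_dwork, msecs_to_jiffies(100));

	/* restart the countdown whether or not it was already pending */
	mod_delayed_work(wq, &demo_dwork, msecs_to_jiffies(100));

	cancel_delayed_work_sync(&demo_dwork);	/* waits if already running */
}
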
