Searched refs:wq (Results 1 - 25 of 700) sorted by relevance


/linux-master/drivers/net/ethernet/mellanox/mlx5/core/
wq.h
80 void *wqc, struct mlx5_wq_cyc *wq,
82 void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides);
83 void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq);
86 void *qpc, struct mlx5_wq_qp *wq,
90 void *cqc, struct mlx5_cqwq *wq,
94 void *wqc, struct mlx5_wq_ll *wq,
96 void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq);
100 static inline u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq) argument
102 return (u32)wq->fbc.sz_m1 + 1;
105 static inline int mlx5_wq_cyc_is_full(struct mlx5_wq_cyc *wq) argument
110 mlx5_wq_cyc_missing(struct mlx5_wq_cyc *wq) argument
115 mlx5_wq_cyc_is_empty(struct mlx5_wq_cyc *wq) argument
120 mlx5_wq_cyc_push(struct mlx5_wq_cyc *wq) argument
126 mlx5_wq_cyc_push_n(struct mlx5_wq_cyc *wq, u16 n) argument
132 mlx5_wq_cyc_pop(struct mlx5_wq_cyc *wq) argument
137 mlx5_wq_cyc_update_db_record(struct mlx5_wq_cyc *wq) argument
142 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr) argument
147 mlx5_wq_cyc_get_head(struct mlx5_wq_cyc *wq) argument
152 mlx5_wq_cyc_get_tail(struct mlx5_wq_cyc *wq) argument
157 mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix) argument
162 mlx5_wq_cyc_get_contig_wqebbs(struct mlx5_wq_cyc *wq, u16 ix) argument
175 mlx5_wq_cyc_get_counter(struct mlx5_wq_cyc *wq) argument
180 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) argument
185 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq) argument
190 mlx5_cqwq_ctr2ix(struct mlx5_cqwq *wq, u32 ctr) argument
195 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq) argument
200 mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix) argument
210 mlx5_cqwq_get_ctr_wrap_cnt(struct mlx5_cqwq *wq, u32 ctr) argument
215 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq) argument
220 mlx5_cqwq_pop(struct mlx5_cqwq *wq) argument
225 mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq) argument
230 mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq) argument
247 mlx5_cqwq_get_cqe_enahnced_comp(struct mlx5_cqwq *wq) argument
263 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq) argument
268 mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq) argument
273 mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq) argument
278 mlx5_wq_ll_missing(struct mlx5_wq_ll *wq) argument
283 mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix) argument
288 mlx5_wq_ll_get_wqe_next_ix(struct mlx5_wq_ll *wq, u16 ix) argument
295 mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next) argument
302 mlx5_wq_ll_pop(struct mlx5_wq_ll *wq, __be16 ix, __be16 *next_tail_next) argument
310 mlx5_wq_ll_update_db_record(struct mlx5_wq_ll *wq) argument
315 mlx5_wq_ll_get_head(struct mlx5_wq_ll *wq) argument
320 mlx5_wq_ll_get_counter(struct mlx5_wq_ll *wq) argument
[all...]
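
Note: the helpers above treat the cyclic queue as a power-of-two ring addressed by free-running counters: mlx5_wq_cyc_get_size() returns fbc.sz_m1 + 1, and mlx5_wq_cyc_ctr2ix() reduces a counter to a slot index. A minimal standalone sketch of that masking scheme, assuming power-of-two sizing (the struct and names here are hypothetical, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the mlx5_wq_cyc/fbc pair: a power-of-two ring addressed
 * by free-running 16-bit producer/consumer counters. */
struct ring {
	uint16_t sz_m1;		/* ring size - 1; valid only for powers of two */
	uint16_t pc, cc;	/* producer/consumer counters, never masked in place */
};

/* The ctr2ix idea: counter -> slot index by masking, wraparound for free. */
static uint16_t ctr2ix(const struct ring *r, uint16_t ctr)
{
	return ctr & r->sz_m1;
}

int main(void)
{
	struct ring r = { .sz_m1 = 7 };	/* 8 slots */

	for (int ctr = 0; ctr < 10; ctr++)
		printf("ctr %2d -> ix %u\n", ctr, (unsigned)ctr2ix(&r, ctr));
	/* Counters 8 and 9 wrap to slots 0 and 1; occupancy is pc - cc,
	 * which stays correct across 16-bit counter wraparound. */
	return 0;
}
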
wq.c
34 #include "wq.h"
38 void *wqc, struct mlx5_wq_cyc *wq,
41 u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
42 u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz);
43 struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
52 wq->db = wq_ctrl->db.db;
62 wq->sz = mlx5_wq_cyc_get_size(wq);
74 void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides) argument
84 len = nstrides << wq
37 mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_cyc *wq, struct mlx5_wq_ctrl *wq_ctrl) argument
92 mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq) argument
99 mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *qpc, struct mlx5_wq_qp *wq, struct mlx5_wq_ctrl *wq_ctrl) argument
159 mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *cqc, struct mlx5_cqwq *wq, struct mlx5_wq_ctrl *wq_ctrl) argument
197 mlx5_wq_ll_init_list(struct mlx5_wq_ll *wq) argument
210 mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_ll *wq, struct mlx5_wq_ctrl *wq_ctrl) argument
247 mlx5_wq_ll_reset(struct mlx5_wq_ll *wq) argument
[all...]
/linux-master/drivers/scsi/fnic/
vnic_wq_copy.h
24 static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq) argument
26 return wq->ring.desc_avail;
29 static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq) argument
31 return wq->ring.desc_count - 1 - wq->ring.desc_avail;
34 static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq) argument
36 struct fcpio_host_req *desc = wq->ring.descs;
37 return &desc[wq->to_use_index];
40 static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq) argument
43 ((wq
57 vnic_wq_copy_desc_process(struct vnic_wq_copy *wq, u16 index) argument
71 vnic_wq_copy_service(struct vnic_wq_copy *wq, u16 completed_index, void (*q_service)(struct vnic_wq_copy *wq, struct fcpio_host_req *wq_desc)) argument
[all...]
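
Note: vnic_wq_copy_desc_in_use() above computes desc_count - 1 - desc_avail: one descriptor is deliberately kept unusable so a full ring is distinguishable from an empty one. A tiny standalone sketch of that invariant (hypothetical names):

#include <assert.h>

/* One slot is sacrificed so "full" != "empty" without extra state. */
struct q { unsigned desc_count, desc_avail; };

static unsigned in_use(const struct q *q)
{
	return q->desc_count - 1 - q->desc_avail;
}

int main(void)
{
	struct q q = { .desc_count = 64, .desc_avail = 63 };	/* fresh queue */

	assert(in_use(&q) == 0);	/* empty: all 63 usable slots free */
	q.desc_avail = 0;		/* posted until no slot remains */
	assert(in_use(&q) == 63);	/* in-use never reaches desc_count */
	return 0;
}
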
vnic_wq_copy.c
13 void vnic_wq_copy_enable(struct vnic_wq_copy *wq) argument
15 iowrite32(1, &wq->ctrl->enable);
18 int vnic_wq_copy_disable(struct vnic_wq_copy *wq) argument
22 iowrite32(0, &wq->ctrl->enable);
26 if (!(ioread32(&wq->ctrl->running)))
33 wq->index, ioread32(&wq->ctrl->fetch_index),
34 ioread32(&wq->ctrl->posted_index));
39 void vnic_wq_copy_clean(struct vnic_wq_copy *wq, argument
40 void (*q_clean)(struct vnic_wq_copy *wq,
57 vnic_wq_copy_free(struct vnic_wq_copy *wq) argument
66 vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq, unsigned int index, unsigned int desc_count, unsigned int desc_size) argument
84 vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) argument
[all...]
vnic_wq.c
16 static int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq, argument
19 wq->ctrl = vnic_dev_get_res(vdev, res_type, index);
21 if (!wq->ctrl)
28 static int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq, argument
31 return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
35 static int vnic_wq_alloc_bufs(struct vnic_wq *wq) argument
38 unsigned int i, j, count = wq->ring.desc_count;
42 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
43 if (!wq->bufs[i]) {
50 buf = wq
72 vnic_wq_free(struct vnic_wq *wq) argument
90 vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, unsigned int desc_count, unsigned int desc_size) argument
120 vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int desc_count, unsigned int desc_size) argument
141 vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, unsigned int fetch_index, unsigned int posted_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) argument
165 vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) argument
182 vnic_wq_error_status(struct vnic_wq *wq) argument
187 vnic_wq_enable(struct vnic_wq *wq) argument
192 vnic_wq_disable(struct vnic_wq *wq) argument
210 vnic_wq_clean(struct vnic_wq *wq, void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)) argument
[all...]
vnic_wq.h
86 static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) argument
89 return wq->ring.desc_avail;
92 static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) argument
95 return wq->ring.desc_count - wq->ring.desc_avail - 1;
98 static inline void *vnic_wq_next_desc(struct vnic_wq *wq) argument
100 return wq->to_use->desc;
103 static inline void vnic_wq_post(struct vnic_wq *wq, argument
107 struct vnic_wq_buf *buf = wq->to_use;
122 iowrite32(buf->index, &wq
129 vnic_wq_service(struct vnic_wq *wq, struct cq_desc *cq_desc, u16 completed_index, void (*buf_service)(struct vnic_wq *wq, struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque), void *opaque) argument
[all...]
/linux-master/drivers/dma/idxd/
defaults.c
10 struct idxd_wq *wq; local
15 wq = idxd->wqs[0];
17 if (wq->state != IDXD_WQ_DISABLED)
21 set_bit(WQ_FLAG_DEDICATED, &wq->flags);
22 wq->threshold = 0;
24 /* only setting up 1 wq, so give it all the wq space */
25 wq->size = idxd->max_wq_size;
28 wq->priority = 10;
31 wq
[all...]
cdev.c
43 struct idxd_wq *wq; member in struct:idxd_user_context
55 static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid);
101 struct idxd_wq *wq = ctx->wq; local
103 if (!wq_pasid_enabled(wq))
122 struct idxd_wq *wq = ctx->wq; local
123 struct idxd_device *idxd = wq->idxd;
131 if (wq_shared(wq)) {
135 /* The wq disabl
165 struct idxd_wq *wq = idxd_cdev->wq; local
193 struct idxd_wq *wq = ctx->wq; local
204 idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index) argument
225 struct idxd_wq *wq; local
333 idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid) argument
365 struct idxd_wq *wq = ctx->wq; local
377 check_vma(struct idxd_wq *wq, struct vm_area_struct *vma, const char *func) argument
396 struct idxd_wq *wq = ctx->wq; local
422 struct idxd_wq *wq = ctx->wq; local
448 idxd_wq_add_cdev(struct idxd_wq *wq) argument
499 idxd_wq_del_cdev(struct idxd_wq *wq) argument
513 struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); local
579 struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); local
647 idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, unsigned long addr, void *cr, int len) argument
[all...]
dma.c
20 return idxd_chan->wq;
27 struct idxd_device *idxd = desc->wq->idxd;
56 idxd_free_desc(desc->wq, desc);
66 static inline void idxd_prep_desc_common(struct idxd_wq *wq, argument
88 struct idxd_wq *wq = to_idxd_wq(c); local
92 if (wq->state != IDXD_WQ_ENABLED)
96 desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
100 idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP,
110 struct idxd_wq *wq = to_idxd_wq(c); local
112 struct idxd_device *idxd = wq
137 struct idxd_wq *wq = to_idxd_wq(chan); local
148 struct idxd_wq *wq = to_idxd_wq(chan); local
174 struct idxd_wq *wq = to_idxd_wq(c); local
248 idxd_register_dma_channel(struct idxd_wq *wq) argument
285 idxd_unregister_dma_channel(struct idxd_wq *wq) argument
301 struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); local
345 struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); local
[all...]
device.c
18 static void idxd_wq_disable_cleanup(struct idxd_wq *wq);
41 static void free_hw_descs(struct idxd_wq *wq) argument
45 for (i = 0; i < wq->num_descs; i++)
46 kfree(wq->hw_descs[i]);
48 kfree(wq->hw_descs);
51 static int alloc_hw_descs(struct idxd_wq *wq, int num) argument
53 struct device *dev = &wq->idxd->pdev->dev;
57 wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
59 if (!wq->hw_descs)
63 wq
74 free_descs(struct idxd_wq *wq) argument
84 alloc_descs(struct idxd_wq *wq, int num) argument
108 idxd_wq_alloc_resources(struct idxd_wq *wq) argument
166 idxd_wq_free_resources(struct idxd_wq *wq) argument
180 idxd_wq_enable(struct idxd_wq *wq) argument
205 idxd_wq_disable(struct idxd_wq *wq, bool reset_config) argument
234 idxd_wq_drain(struct idxd_wq *wq) argument
250 idxd_wq_reset(struct idxd_wq *wq) argument
266 idxd_wq_map_portal(struct idxd_wq *wq) argument
283 idxd_wq_unmap_portal(struct idxd_wq *wq) argument
297 struct idxd_wq *wq = idxd->wqs[i]; local
304 __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid) argument
320 idxd_wq_set_pasid(struct idxd_wq *wq, int pasid) argument
337 idxd_wq_disable_pasid(struct idxd_wq *wq) argument
363 idxd_wq_disable_cleanup(struct idxd_wq *wq) argument
382 idxd_wq_device_reset_cleanup(struct idxd_wq *wq) argument
392 struct idxd_wq *wq = container_of(ref, struct idxd_wq, wq_active); local
397 idxd_wq_init_percpu_ref(struct idxd_wq *wq) argument
412 __idxd_wq_quiesce(struct idxd_wq *wq) argument
422 idxd_wq_quiesce(struct idxd_wq *wq) argument
714 struct idxd_wq *wq = idxd->wqs[i]; local
911 idxd_wq_config_write(struct idxd_wq *wq) argument
1006 struct idxd_wq *wq = idxd->wqs[i]; local
1070 struct idxd_wq *wq; local
1132 idxd_wq_load_config(struct idxd_wq *wq) argument
1175 struct idxd_wq *wq; local
1239 struct idxd_wq *wq = idxd->wqs[i]; local
1305 idxd_wq_free_irq(struct idxd_wq *wq) argument
1323 idxd_wq_request_irq(struct idxd_wq *wq) argument
1365 idxd_drv_enable_wq(struct idxd_wq *wq) argument
1499 idxd_drv_disable_wq(struct idxd_wq *wq) argument
1586 struct idxd_wq *wq = idxd->wqs[i]; local
[all...]
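
Note: the alloc_hw_descs()/free_hw_descs() pair above follows a common kernel shape: kcalloc_node() an array of pointers, allocate each descriptor individually, and unwind on failure. A hedged userspace sketch of the same shape (struct hw_desc is a hypothetical stand-in for struct dsa_hw_desc):

#include <stdlib.h>

struct hw_desc { unsigned char bytes[64]; };	/* hypothetical descriptor */

/* Array-of-pointers plus one allocation per descriptor, all-or-nothing. */
static struct hw_desc **alloc_descs(int num)
{
	struct hw_desc **descs = calloc(num, sizeof(*descs));

	if (!descs)
		return NULL;
	for (int i = 0; i < num; i++) {
		descs[i] = calloc(1, sizeof(*descs[i]));
		if (!descs[i]) {
			while (i--)		/* unwind the partial allocation */
				free(descs[i]);
			free(descs);
			return NULL;
		}
	}
	return descs;
}

static void free_descs(struct hw_desc **descs, int num)
{
	for (int i = 0; i < num; i++)
		free(descs[i]);
	free(descs);
}

int main(void)
{
	struct hw_desc **d = alloc_descs(128);

	if (d)
		free_descs(d, 128);
	return 0;
}
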
/linux-master/drivers/scsi/snic/
vnic_wq.c
12 static inline int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq, argument
15 wq->ctrl = svnic_dev_get_res(vdev, res_type, index);
16 if (!wq->ctrl)
22 static inline int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq, argument
25 return svnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count,
29 static int vnic_wq_alloc_bufs(struct vnic_wq *wq) argument
32 unsigned int i, j, count = wq->ring.desc_count;
36 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
37 if (!wq->bufs[i]) {
45 buf = wq
67 svnic_wq_free(struct vnic_wq *wq) argument
85 vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int desc_count, unsigned int desc_size) argument
109 svnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, unsigned int desc_count, unsigned int desc_size) argument
140 vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, unsigned int fetch_index, unsigned int posted_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) argument
163 svnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) argument
171 svnic_wq_error_status(struct vnic_wq *wq) argument
176 svnic_wq_enable(struct vnic_wq *wq) argument
181 svnic_wq_disable(struct vnic_wq *wq) argument
199 svnic_wq_clean(struct vnic_wq *wq, void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)) argument
[all...]
vnic_wq.h
71 static inline unsigned int svnic_wq_desc_avail(struct vnic_wq *wq) argument
74 return wq->ring.desc_avail;
77 static inline unsigned int svnic_wq_desc_used(struct vnic_wq *wq) argument
80 return wq->ring.desc_count - wq->ring.desc_avail - 1;
83 static inline void *svnic_wq_next_desc(struct vnic_wq *wq) argument
85 return wq->to_use->desc;
88 static inline void svnic_wq_post(struct vnic_wq *wq, argument
92 struct vnic_wq_buf *buf = wq->to_use;
107 iowrite32(buf->index, &wq
114 svnic_wq_service(struct vnic_wq *wq, struct cq_desc *cq_desc, u16 completed_index, void (*buf_service)(struct vnic_wq *wq, struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque), void *opaque) argument
[all...]
/linux-master/drivers/net/ethernet/cisco/enic/
vnic_wq.c
18 static int vnic_wq_alloc_bufs(struct vnic_wq *wq) argument
21 unsigned int i, j, count = wq->ring.desc_count;
25 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_KERNEL);
26 if (!wq->bufs[i])
31 buf = wq->bufs[i];
34 buf->desc = (u8 *)wq->ring.descs +
35 wq->ring.desc_size * buf->index;
37 buf->next = wq->bufs[0];
41 buf->next = wq->bufs[i + 1];
51 wq
56 vnic_wq_free(struct vnic_wq *wq) argument
75 vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, unsigned int desc_count, unsigned int desc_size) argument
104 enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int desc_count, unsigned int desc_size) argument
121 enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, unsigned int fetch_index, unsigned int posted_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) argument
144 vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) argument
153 vnic_wq_error_status(struct vnic_wq *wq) argument
158 vnic_wq_enable(struct vnic_wq *wq) argument
163 vnic_wq_disable(struct vnic_wq *wq) argument
182 vnic_wq_clean(struct vnic_wq *wq, void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)) argument
[all...]
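
Note: the vnic_wq_alloc_bufs() snippet above links each buffer control block to the next and points the last one back at wq->bufs[0], so the post/service paths walk a circular list instead of doing index arithmetic. A standalone sketch of that structure (the block-of-blocks allocation is omitted; names hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct buf {
	unsigned index;
	struct buf *next;	/* circular: last entry points at the first */
};

static struct buf *make_ring(unsigned count)
{
	struct buf *bufs = calloc(count, sizeof(*bufs));

	if (!bufs)
		return NULL;
	for (unsigned i = 0; i < count; i++) {
		bufs[i].index = i;
		bufs[i].next = &bufs[(i + 1) % count];	/* wrap on the last one */
	}
	return bufs;
}

int main(void)
{
	struct buf *ring = make_ring(4), *b = ring;

	/* Six steps cross the wrap point with no bounds check anywhere. */
	for (int i = 0; i < 6; i++, b = b->next)
		printf("buf %u\n", b->index);
	free(ring);
	return 0;
}
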
vnic_wq.h
86 struct vnic_wq wq; member in struct:devcmd2_controller
90 static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) argument
93 return wq->ring.desc_avail;
96 static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) argument
99 return wq->ring.desc_count - wq->ring.desc_avail - 1;
102 static inline void *vnic_wq_next_desc(struct vnic_wq *wq) argument
104 return wq->to_use->desc;
107 static inline void vnic_wq_doorbell(struct vnic_wq *wq) argument
115 iowrite32(wq
118 vnic_wq_post(struct vnic_wq *wq, void *os_buf, dma_addr_t dma_addr, unsigned int len, int sop, int eop, uint8_t desc_skip_cnt, uint8_t cq_entry, uint8_t compressed_send, uint64_t wrid) argument
141 vnic_wq_service(struct vnic_wq *wq, struct cq_desc *cq_desc, u16 completed_index, void (*buf_service)(struct vnic_wq *wq, struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque), void *opaque) argument
[all...]
/linux-master/drivers/edac/
wq.c
4 static struct workqueue_struct *wq; variable in typeref:struct:workqueue_struct
8 return queue_delayed_work(wq, work, delay);
14 return mod_delayed_work(wq, work, delay);
23 flush_workqueue(wq);
31 wq = alloc_ordered_workqueue("edac-poller", WQ_MEM_RECLAIM);
32 if (!wq)
40 destroy_workqueue(wq);
41 wq = NULL;
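
Note: this block shows the full lifecycle of a singleton workqueue: one ordered, WQ_MEM_RECLAIM queue allocated at init, fed with queue_delayed_work()/mod_delayed_work(), flushed and destroyed at teardown. A sketch of the same pattern as a self-contained module (the demo_* names are hypothetical, not the edac code):

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *demo_wq;
static void demo_poll(struct work_struct *work);
static DECLARE_DELAYED_WORK(demo_work, demo_poll);

/* Self-rearming poll job, serialized by the ordered queue. */
static void demo_poll(struct work_struct *work)
{
	pr_info("poll tick\n");
	queue_delayed_work(demo_wq, &demo_work, msecs_to_jiffies(1000));
}

static int __init demo_init(void)
{
	demo_wq = alloc_ordered_workqueue("demo-poller", WQ_MEM_RECLAIM);
	if (!demo_wq)
		return -ENOMEM;
	queue_delayed_work(demo_wq, &demo_work, msecs_to_jiffies(1000));
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_work);	/* stop the self-rearming job */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
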
/linux-master/fs/btrfs/
async-thread.c
50 struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct btrfs_workqueue *wq) argument
52 return wq->fs_info;
57 return work->wq->fs_info;
60 bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq) argument
63 * We could compare wq->pending with num_online_cpus()
68 if (wq->thresh == NO_THRESHOLD)
71 return atomic_read(&wq->pending) > wq->thresh * 2;
74 static void btrfs_init_workqueue(struct btrfs_workqueue *wq, argument
77 wq
155 thresh_queue_hook(struct btrfs_workqueue *wq) argument
167 thresh_exec_hook(struct btrfs_workqueue *wq) argument
210 run_ordered_work(struct btrfs_workqueue *wq, struct btrfs_work *self) argument
298 struct btrfs_workqueue *wq = work->wq; local
341 btrfs_queue_work(struct btrfs_workqueue *wq, struct btrfs_work *work) argument
356 btrfs_destroy_workqueue(struct btrfs_workqueue *wq) argument
365 btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active) argument
371 btrfs_flush_workqueue(struct btrfs_workqueue *wq) argument
[all...]
async-thread.h
28 struct btrfs_workqueue *wq; member in struct:btrfs_work
42 void btrfs_queue_work(struct btrfs_workqueue *wq,
44 void btrfs_destroy_workqueue(struct btrfs_workqueue *wq);
45 void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max);
47 struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct btrfs_workqueue *wq);
48 bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq);
49 void btrfs_flush_workqueue(struct btrfs_workqueue *wq);
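
Note: btrfs_workqueue_normal_congested() above reports congestion once pending work exceeds twice the queue's thresh, with NO_THRESHOLD opting out entirely. A standalone sketch of that heuristic (demo names; C11 atomics in place of the kernel's atomic_t):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NO_THRESHOLD (-1)

struct demo_wq {
	atomic_int pending;	/* works queued but not yet finished */
	int thresh;
};

/* Congested once the backlog exceeds 2x the threshold. */
static bool congested(struct demo_wq *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return false;	/* caller opted out of throttling */
	return atomic_load(&wq->pending) > wq->thresh * 2;
}

int main(void)
{
	struct demo_wq wq = { .thresh = 8 };

	atomic_store(&wq.pending, 17);
	printf("congested: %d\n", congested(&wq));	/* 17 > 16 -> 1 */
	return 0;
}
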
/linux-master/fs/autofs/
waitq.c
17 struct autofs_wait_queue *wq, *nwq; local
28 wq = sbi->queues;
30 while (wq) {
31 nwq = wq->next;
32 wq->status = -ENOENT; /* Magic is gone - report failure */
33 kfree(wq->name.name - wq->offset);
34 wq->name.name = NULL;
35 wake_up(&wq->queue);
36 if (!--wq
79 autofs_notify_daemon(struct autofs_sb_info *sbi, struct autofs_wait_queue *wq, int type) argument
182 struct autofs_wait_queue *wq; local
208 struct autofs_wait_queue *wq; local
307 struct autofs_wait_queue *wq; local
490 struct autofs_wait_queue *wq, **wql; local
[all...]
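
Note: the teardown path above ("Magic is gone") marks every queued wait with a failing status before calling wake_up(), so sleepers see the verdict when their condition check re-runs. A hedged kernel-style sketch of that publish-then-wake idiom (struct req is hypothetical, not the autofs type):

#include <linux/wait.h>
#include <linux/errno.h>

struct req {
	int status;			/* 0 = still pending */
	wait_queue_head_t waitq;
};

static void req_init(struct req *r)
{
	r->status = 0;
	init_waitqueue_head(&r->waitq);
}

/* Publish the failure first; wait_event()/wake_up() order the accesses. */
static void req_fail(struct req *r)
{
	r->status = -ENOENT;
	wake_up(&r->waitq);
}

static int req_wait(struct req *r)
{
	wait_event(r->waitq, r->status != 0);
	return r->status;
}
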
/linux-master/drivers/infiniband/hw/mana/
wq.c
15 struct mana_ib_wq *wq; local
25 "Failed to copy from udata for create wq, %d\n", err);
29 wq = kzalloc(sizeof(*wq), GFP_KERNEL);
30 if (!wq)
40 "Failed to get umem for create wq, err %d\n", err);
44 wq->umem = umem;
45 wq->wqe = init_attr->max_wr;
46 wq->wq_buf_size = ucmd.wq_buf_size;
47 wq
74 mana_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, u32 wq_attr_mask, struct ib_udata *udata) argument
83 struct mana_ib_wq *wq = container_of(ibwq, struct mana_ib_wq, ibwq); local
[all...]
/linux-master/include/linux/
swait.h
90 * @wq: the waitqueue to test for waiters
121 static inline int swait_active(struct swait_queue_head *wq) argument
123 return !list_empty(&wq->task_list);
128 * @wq: the waitqueue to test for waiters
130 * Returns true if @wq has waiting processes
134 static inline bool swq_has_sleeper(struct swait_queue_head *wq) argument
144 return swait_active(wq);
158 #define ___swait_event(wq, condition, state, ret, cmd) \
166 long __int = prepare_to_swait_event(&wq, &__wait, state);\
178 finish_swait(&wq,
[all...]
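
Note: swait is the stripped-down waitqueue shown above; it supports only exclusive, wake-one semantics, and per its kernel-doc swq_has_sleeper() is meant to be called after the condition has been updated. A minimal pairing, assuming the current swait_event_exclusive()/swake_up_one() names (the done flag is a hypothetical condition):

#include <linux/swait.h>
#include <linux/compiler.h>
#include <linux/types.h>

static DECLARE_SWAIT_QUEUE_HEAD(demo_swq);
static bool done;

static void waiter(void)
{
	/* Sleeps until done; the condition is rechecked after every wake. */
	swait_event_exclusive(demo_swq, READ_ONCE(done));
}

static void waker(void)
{
	WRITE_ONCE(done, true);		/* publish before waking */
	swake_up_one(&demo_swq);	/* wake exactly one sleeper */
}
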
/linux-master/drivers/net/ethernet/huawei/hinic/
hinic_hw_wq.c
34 #define WQ_SIZE(wq) ((wq)->q_depth * (wq)->wqebb_size)
44 #define WQ_BASE_VADDR(wqs, wq) \
45 ((void *)((wqs)->page_vaddr[(wq)->page_idx]) \
46 + (wq)->block_idx * WQ_BLOCK_SIZE)
48 #define WQ_BASE_PADDR(wqs, wq) \
49 ((wqs)->page_paddr[(wq)->page_idx] \
50 + (wq)->block_idx * WQ_BLOCK_SIZE)
52 #define WQ_BASE_ADDR(wqs, wq) \
81 WQE_PAGE_OFF(struct hinic_wq *wq, u16 idx) argument
87 WQE_PAGE_NUM(struct hinic_wq *wq, u16 idx) argument
376 alloc_wqes_shadow(struct hinic_wq *wq) argument
402 free_wqes_shadow(struct hinic_wq *wq) argument
417 free_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif, int num_q_pages) argument
444 alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif, int max_pages) argument
503 hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq, u16 wqebb_size, u32 wq_page_size, u16 q_depth, u16 max_wqe_size) argument
579 hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq) argument
599 hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages, struct hinic_wq *wq, struct hinic_hwif *hwif, int cmdq_blocks, u16 wqebb_size, u32 wq_page_size, u16 q_depth, u16 max_wqe_size) argument
688 hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages, struct hinic_wq *wq, int cmdq_blocks) argument
699 copy_wqe_to_shadow(struct hinic_wq *wq, void *shadow_addr, int num_wqebbs, u16 idx) argument
716 copy_wqe_from_shadow(struct hinic_wq *wq, void *shadow_addr, int num_wqebbs, u16 idx) argument
740 hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size, u16 *prod_idx) argument
789 hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size) argument
803 hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size) argument
821 hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size, u16 *cons_idx) argument
862 hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx) argument
874 wqe_shadow(struct hinic_wq *wq, struct hinic_hw_wqe *wqe) argument
888 hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe, unsigned int wqe_size) argument
[all...]
hinic_hw_wq.h
78 struct hinic_wq *wq, struct hinic_hwif *hwif,
83 struct hinic_wq *wq, int cmdq_blocks);
90 int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
94 void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq);
96 struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
99 void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size);
101 void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size);
103 struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
106 struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx);
108 void hinic_write_wqe(struct hinic_wq *wq, struc
[all...]
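
Note: per the WQ_SIZE() macro in this file's hits, a hinic work queue is q_depth fixed-size WQEBBs, and hinic_get_wqe() hands out WQEs that may span several consecutive WQEBBs (hence the shadow-copy helpers for entries that straddle a page). A standalone sketch of reserving in WQEBB units, assuming power-of-two depth and round-up sizing (names hypothetical):

#include <stdio.h>

struct hw_wq {
	unsigned q_depth;	/* number of WQEBBs, power of two */
	unsigned wqebb_size;	/* bytes per WQEBB */
	unsigned prod_idx;	/* free-running producer counter */
};

static unsigned wq_size(const struct hw_wq *wq)
{
	return wq->q_depth * wq->wqebb_size;	/* the WQ_SIZE() relation */
}

/* Round a WQE up to whole WQEBBs and reserve them at the producer index. */
static unsigned reserve_wqe(struct hw_wq *wq, unsigned wqe_size)
{
	unsigned n = (wqe_size + wq->wqebb_size - 1) / wq->wqebb_size;
	unsigned ix = wq->prod_idx & (wq->q_depth - 1);

	wq->prod_idx += n;
	return ix;	/* caller fills n WQEBBs from ix, wrapping as needed */
}

int main(void)
{
	struct hw_wq wq = { .q_depth = 256, .wqebb_size = 64 };

	printf("queue bytes: %u\n", wq_size(&wq));
	printf("wqe 1 at %u\n", reserve_wqe(&wq, 96));	/* takes 2 WQEBBs */
	printf("wqe 2 at %u\n", reserve_wqe(&wq, 64));	/* lands at index 2 */
	return 0;
}
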
/linux-master/io_uring/
io-wq.c
21 #include "io-wq.h"
35 IO_WQ_BIT_EXIT = 0, /* wq exiting */
43 * One for each thread in a wq pool
51 struct io_wq *wq; member in struct:io_worker
136 static bool create_io_worker(struct io_wq *wq, int index);
138 static bool io_acct_cancel_pending_work(struct io_wq *wq,
142 static void io_wq_cancel_tw_create(struct io_wq *wq);
155 static inline struct io_wq_acct *io_get_acct(struct io_wq *wq, bool bound) argument
157 return &wq->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
160 static inline struct io_wq_acct *io_work_get_acct(struct io_wq *wq, argument
171 io_worker_ref_put(struct io_wq *wq) argument
190 struct io_wq *wq = worker->wq; local
213 struct io_wq *wq = worker->wq; local
306 io_wq_create_worker(struct io_wq *wq, struct io_wq_acct *acct) argument
337 struct io_wq *wq; local
366 struct io_wq *wq = worker->wq; local
411 struct io_wq *wq = worker->wq; local
431 __io_worker_busy(struct io_wq *wq, struct io_worker *worker) argument
458 io_wait_on_hash(struct io_wq *wq, unsigned int hash) argument
482 struct io_wq *wq = worker->wq; variable in typeref:struct:io_wq
553 struct io_wq *wq = worker->wq; variable in typeref:struct:io_wq
630 struct io_wq *wq = worker->wq; local
725 io_init_new_worker(struct io_wq *wq, struct io_worker *worker, struct task_struct *tsk) argument
769 struct io_wq *wq; local
816 create_io_worker(struct io_wq *wq, int index) argument
861 io_wq_for_each_worker(struct io_wq *wq, bool (*func)(struct io_worker *, void *), void *data) argument
889 io_run_cancel(struct io_wq_work *work, struct io_wq *wq) argument
898 io_wq_insert_work(struct io_wq *wq, struct io_wq_work *work) argument
924 io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work) argument
1016 io_wq_remove_pending(struct io_wq *wq, struct io_wq_work *work, struct io_wq_work_node *prev) argument
1035 io_acct_cancel_pending_work(struct io_wq *wq, struct io_wq_acct *acct, struct io_cb_cancel_data *match) argument
1059 io_wq_cancel_pending_work(struct io_wq *wq, struct io_cb_cancel_data *match) argument
1075 io_wq_cancel_running_work(struct io_wq *wq, struct io_cb_cancel_data *match) argument
1083 io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel, void *data, bool cancel_all) argument
1125 struct io_wq *wq = container_of(wait, struct io_wq, wait); local
1144 struct io_wq *wq; local
1208 io_wq_exit_start(struct io_wq *wq) argument
1213 io_wq_cancel_tw_create(struct io_wq *wq) argument
1231 io_wq_exit_workers(struct io_wq *wq) argument
1252 io_wq_destroy(struct io_wq *wq) argument
1266 io_wq_put_and_exit(struct io_wq *wq) argument
1290 __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online) argument
1305 struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node); local
1312 struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node); local
1336 io_wq_max_workers(struct io_wq *wq, int *new_count) argument
[all...]
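
Note: io_get_acct() above picks one of exactly two accounting buckets, IO_WQ_ACCT_BOUND or IO_WQ_ACCT_UNBOUND, so bounded work (e.g. regular-file I/O) and unbounded work (e.g. sockets) get independent worker counts and caps. A standalone sketch of that two-bucket selection (demo names):

#include <stdio.h>

enum { ACCT_BOUND, ACCT_UNBOUND, ACCT_NR };

struct acct { int nr_workers, max_workers; };
struct demo_wq { struct acct acct[ACCT_NR]; };

/* Same shape as io_get_acct(): bound flag -> bucket. */
static struct acct *get_acct(struct demo_wq *wq, int bound)
{
	return &wq->acct[bound ? ACCT_BOUND : ACCT_UNBOUND];
}

int main(void)
{
	struct demo_wq wq = {
		.acct = {
			[ACCT_BOUND]   = { .max_workers = 4 },
			[ACCT_UNBOUND] = { .max_workers = 128 },
		},
	};

	printf("bound cap %d, unbound cap %d\n",
	       get_acct(&wq, 1)->max_workers,
	       get_acct(&wq, 0)->max_workers);
	return 0;
}
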
/linux-master/fs/jfs/
jfs_lock.h
22 #define __SLEEP_COND(wq, cond, lock_cmd, unlock_cmd) \
26 add_wait_queue(&wq, &__wait); \
36 remove_wait_queue(&wq, &__wait); \
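
Note: __SLEEP_COND() open-codes the classic sleep loop: add to the waitqueue, recheck the condition, drop the caller's lock across schedule(), reacquire, repeat. The same loop written with the prepare_to_wait()/finish_wait() helpers looks roughly like this (a sketch with hypothetical names; the cond callback is checked with *lock held):

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Called with *lock held; returns with it held and cond() true. */
static void sleep_on_cond(wait_queue_head_t *wq, spinlock_t *lock,
			  bool (*cond)(void))
{
	DEFINE_WAIT(wait);

	while (!cond()) {
		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		if (cond())			/* re-test after queueing */
			break;
		spin_unlock(lock);		/* the macro's unlock_cmd */
		schedule();
		spin_lock(lock);		/* the macro's lock_cmd */
	}
	finish_wait(wq, &wait);			/* also restores TASK_RUNNING */
}
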
/linux-master/drivers/gpu/drm/xe/
xe_sriov.c
63 destroy_workqueue(xe->sriov.wq);
64 xe->sriov.wq = NULL;
81 xe_assert(xe, !xe->sriov.wq);
82 xe->sriov.wq = alloc_workqueue("xe-sriov-wq", 0, 0);
83 if (!xe->sriov.wq)

