Searched refs:wq (Results 1 - 25 of 700) sorted by last modified time

/linux-master/net/mptcp/
protocol.c
3563 rcu_assign_pointer(sk->sk_wq, &parent->wq);
/linux-master/fs/btrfs/
ordered-data.c
364 struct btrfs_workqueue *wq = btrfs_is_free_space_inode(inode) ? local
368 btrfs_queue_work(wq, &ordered->work);
/linux-master/drivers/net/ethernet/broadcom/genet/
bcmmii.c
455 wait_event_timeout(priv->wq,
bcmgenet.h
605 wait_queue_head_t wq; member in struct:bcmgenet_priv
bcmgenet.c
3233 wake_up(&priv->wq);
4088 init_waitqueue_head(&priv->wq);
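
The genet hits above are the classic three-step waitqueue handshake: initialize once, sleep on a condition with a timeout, wake from the completion path. A minimal sketch of that pattern; struct my_priv, mdio_done, and the one-second timeout are hypothetical, not taken from bcmgenet:

    #include <linux/wait.h>
    #include <linux/errno.h>

    struct my_priv {
        wait_queue_head_t wq;           /* cf. bcmgenet.h 605 */
        bool mdio_done;
    };

    /* Initialize once, before any waiter or waker can run (cf. bcmgenet.c 4088). */
    static void my_setup(struct my_priv *priv)
    {
        init_waitqueue_head(&priv->wq);
    }

    /* Completion side, e.g. from an interrupt handler (cf. bcmgenet.c 3233). */
    static void my_complete(struct my_priv *priv)
    {
        priv->mdio_done = true;
        wake_up(&priv->wq);
    }

    /* Waiting side (cf. bcmmii.c 455): wait_event_timeout() returns 0 only
     * when the timeout elapsed with the condition still false. */
    static int my_wait(struct my_priv *priv)
    {
        if (!wait_event_timeout(priv->wq, priv->mdio_done, HZ))
            return -ETIMEDOUT;
        return 0;
    }
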
/linux-master/drivers/gpu/drm/amd/display/amdgpu_dm/
amdgpu_dm.c
1488 hpd_rx_offload_wq[i].wq =
1491 if (hpd_rx_offload_wq[i].wq == NULL) {
1503 if (hpd_rx_offload_wq[i].wq)
1504 destroy_workqueue(hpd_rx_offload_wq[i].wq);
1958 if (adev->dm.hpd_rx_offload_wq[i].wq) {
1959 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1960 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
2673 flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
3409 queue_work(offload_wq->wq, &offload_work->work);
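
The amdgpu_dm hits show the defensive per-device workqueue lifecycle: queue from the interrupt offload path, flush before suspend, and NULL the pointer after destroying so later teardown paths can test it. A hedged sketch with hypothetical names (my_ctx and the handlers are illustrative):

    #include <linux/workqueue.h>

    struct my_ctx {
        struct workqueue_struct *wq;
        struct work_struct work;
    };

    /* Defer heavy lifting out of interrupt context (cf. amdgpu_dm.c 3409). */
    static void my_irq_offload(struct my_ctx *ctx)
    {
        queue_work(ctx->wq, &ctx->work);
    }

    /* Wait for everything queued so far to finish (cf. amdgpu_dm.c 2673). */
    static void my_quiesce(struct my_ctx *ctx)
    {
        flush_workqueue(ctx->wq);
    }

    /* Guarded destroy, as at amdgpu_dm.c 1958-1960. */
    static void my_teardown(struct my_ctx *ctx)
    {
        if (ctx->wq) {
            destroy_workqueue(ctx->wq);
            ctx->wq = NULL;
        }
    }
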
/linux-master/kernel/
workqueue.c
163 * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
165 * PWR: wq_pool_mutex and wq->mutex protected for writes. Either or
168 * WQ: wq->mutex protected.
170 * WR: wq->mutex protected for writes. RCU protected for reads.
172 * WO: wq->mutex protected for writes. Updated with WRITE_ONCE() and can be read
258 struct workqueue_struct *wq; /* I: the owning workqueue */ member in struct:pool_workqueue
285 struct list_head pwqs_node; /* WR: node on wq->pwqs */
286 struct list_head mayday_node; /* MD: node on wq->maydays */
294 * grabbing wq->mutex.
336 struct list_head pwqs; /* WR: all pwqs of this wq */
721 unbound_pwq_slot(struct workqueue_struct *wq, int cpu) argument
730 unbound_pwq(struct workqueue_struct *wq, int cpu) argument
745 unbound_effective_cpumask(struct workqueue_struct *wq) argument
1561 wq_node_nr_active(struct workqueue_struct *wq, int node) argument
1582 wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu) argument
1748 struct workqueue_struct *wq = pwq->wq; local
1860 unplug_oldest_pwq(struct workqueue_struct *wq) argument
2273 is_chained_work(struct workqueue_struct *wq) argument
2313 __queue_work(int cpu, struct workqueue_struct *wq, struct work_struct *work) argument
2439 queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work) argument
2506 queue_work_node(int node, struct workqueue_struct *wq, struct work_struct *work) argument
2546 __queue_delayed_work(int cpu, struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay) argument
2597 queue_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay) argument
2635 mod_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay) argument
2676 queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork) argument
3017 struct workqueue_struct *wq = pwq->wq; local
3472 struct workqueue_struct *wq = rescuer->rescue_wq; local
3868 flush_workqueue_prep_pwqs(struct workqueue_struct *wq, int flush_color, int work_color) argument
3908 touch_wq_lockdep_map(struct workqueue_struct *wq) argument
3922 touch_work_lockdep_map(struct work_struct *work, struct workqueue_struct *wq) argument
3944 __flush_workqueue(struct workqueue_struct *wq) argument
4105 drain_workqueue(struct workqueue_struct *wq) argument
4155 struct workqueue_struct *wq; local
4653 wq_init_lockdep(struct workqueue_struct *wq) argument
4666 wq_unregister_lockdep(struct workqueue_struct *wq) argument
4671 wq_free_lockdep(struct workqueue_struct *wq) argument
4677 wq_init_lockdep(struct workqueue_struct *wq) argument
4681 wq_unregister_lockdep(struct workqueue_struct *wq) argument
4685 wq_free_lockdep(struct workqueue_struct *wq) argument
4744 struct workqueue_struct *wq = local
4927 struct workqueue_struct *wq = pwq->wq; local
4977 init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, struct worker_pool *pool) argument
4998 struct workqueue_struct *wq = pwq->wq; local
5014 alloc_unbound_pwq(struct workqueue_struct *wq, const struct workqueue_attrs *attrs) argument
5078 install_unbound_pwq(struct workqueue_struct *wq, int cpu, struct pool_workqueue *pwq) argument
5097 struct workqueue_struct *wq; /* target workqueue */ member in struct:apply_wqattrs_ctx
5122 apply_wqattrs_prepare(struct workqueue_struct *wq, const struct workqueue_attrs *attrs, const cpumask_var_t unbound_cpumask) argument
5217 apply_workqueue_attrs_locked(struct workqueue_struct *wq, const struct workqueue_attrs *attrs) argument
5254 apply_workqueue_attrs(struct workqueue_struct *wq, const struct workqueue_attrs *attrs) argument
5290 wq_update_pod(struct workqueue_struct *wq, int cpu, int hotplug_cpu, bool online) argument
5342 alloc_and_link_pwqs(struct workqueue_struct *wq) argument
5430 init_rescuer(struct workqueue_struct *wq) argument
5473 wq_adjust_max_active(struct workqueue_struct *wq) argument
5534 struct workqueue_struct *wq; local
5669 destroy_workqueue(struct workqueue_struct *wq) argument
5761 workqueue_set_max_active(struct workqueue_struct *wq, int max_active) argument
5798 workqueue_set_min_active(struct workqueue_struct *wq, int min_active) argument
5861 workqueue_congested(int cpu, struct workqueue_struct *wq) argument
5958 struct workqueue_struct *wq = NULL; local
6136 show_one_workqueue(struct workqueue_struct *wq) argument
6232 struct workqueue_struct *wq; local
6257 struct workqueue_struct *wq; local
6476 struct workqueue_struct *wq; local
6517 struct workqueue_struct *wq; local
6626 struct workqueue_struct *wq; local
6658 struct workqueue_struct *wq; local
6699 struct workqueue_struct *wq; local
6724 struct workqueue_struct *wq; local
6807 struct workqueue_struct *wq; local
6862 struct workqueue_struct *wq; member in struct:wq_device
6876 struct workqueue_struct *wq = dev_to_wq(dev); local
6885 struct workqueue_struct *wq = dev_to_wq(dev); local
6894 struct workqueue_struct *wq = dev_to_wq(dev); local
6928 struct workqueue_struct *wq = dev_to_wq(dev); local
6939 wq_sysfs_prep_attrs(struct workqueue_struct *wq) argument
6956 struct workqueue_struct *wq = dev_to_wq(dev); local
6981 struct workqueue_struct *wq = dev_to_wq(dev); local
6995 struct workqueue_struct *wq = dev_to_wq(dev); local
7018 struct workqueue_struct *wq = dev_to_wq(dev); local
7038 struct workqueue_struct *wq = dev_to_wq(dev); local
7060 struct workqueue_struct *wq = dev_to_wq(dev); local
7070 struct workqueue_struct *wq = dev_to_wq(dev); local
7240 workqueue_sysfs_register(struct workqueue_struct *wq) argument
7298 workqueue_sysfs_unregister(struct workqueue_struct *wq) argument
7309 workqueue_sysfs_unregister(struct workqueue_struct *wq) argument
7723 struct workqueue_struct *wq; local
7845 struct workqueue_struct *wq; local
[all...]
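
The locking legend at the top of workqueue.c (hits 163-172) is worth decoding: a "WO" field, for instance, is written under wq->mutex with WRITE_ONCE() so readers may load it locklessly with READ_ONCE(). A rough illustration on a hypothetical struct (mutex initialization elided):

    #include <linux/mutex.h>
    #include <linux/compiler.h>

    struct my_wq_like {
        struct mutex mutex;
        int max_active;                 /* WO: see the legend above */
    };

    static void my_set_max_active(struct my_wq_like *w, int v)
    {
        mutex_lock(&w->mutex);          /* writers serialize on the mutex */
        WRITE_ONCE(w->max_active, v);   /* publish without store tearing */
        mutex_unlock(&w->mutex);
    }

    static int my_read_max_active(struct my_wq_like *w)
    {
        return READ_ONCE(w->max_active);    /* lockless read is safe */
    }
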
/linux-master/fs/nfs/
inode.c
2401 struct workqueue_struct *wq; local
2403 wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
2404 if (wq == NULL)
2406 nfsiod_workqueue = wq;
2415 struct workqueue_struct *wq; local
2417 wq = nfsiod_workqueue;
2418 if (wq == NULL)
2421 destroy_workqueue(wq);
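
The nfsiod hits are a complete module-lifetime workqueue in miniature. A minimal sketch; the names are hypothetical, the flag semantics are from the workqueue API:

    #include <linux/workqueue.h>
    #include <linux/errno.h>

    static struct workqueue_struct *my_wq;

    static int my_start(void)
    {
        /* WQ_MEM_RECLAIM reserves a rescuer thread so queued work can make
         * forward progress under memory pressure; WQ_UNBOUND lets items run
         * on any CPU. A max_active of 0 picks the default limit. */
        my_wq = alloc_workqueue("my_iod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
        if (!my_wq)
            return -ENOMEM;
        return 0;
    }

    static void my_stop(void)
    {
        /* Safe even with work still queued: destroy_workqueue() drains first. */
        destroy_workqueue(my_wq);
        my_wq = NULL;
    }
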
/linux-master/drivers/dma/idxd/
irq.c
49 struct idxd_wq *wq = idxd->wqs[i]; local
51 rc = idxd_wq_enable(wq);
54 dev_warn(dev, "Unable to re-enable wq %s\n",
55 dev_name(wq_confdev(wq)));
73 struct idxd_wq *wq = ie_to_wq(ie); local
74 struct idxd_device *idxd = wq->idxd;
88 portal = idxd_wq_portal_addr(wq);
95 if (wq_dedicated(wq)) {
98 rc = idxd_enqcmds(wq, portal, &desc);
101 dev_warn(dev, "Failed to submit drain desc on wq
156 struct idxd_wq *wq = ie_to_wq(ie); local
225 struct idxd_wq *wq = fault->wq; local
336 struct idxd_wq *wq = idxd->wqs[entry_head->wq_idx]; local
417 struct idxd_wq *wq = idxd->wqs[id]; local
425 struct idxd_wq *wq = idxd->wqs[i]; local
518 struct idxd_wq *wq = desc->wq; local
545 struct idxd_wq *wq = desc->wq; local
[all...]
perfmon.c
342 flt_wq = flt_cfg.wq;
idxd.h
169 struct idxd_wq *wq; member in struct:idxd_cdev
192 struct idxd_wq *wq; member in struct:idxd_dma_chan
205 struct workqueue_struct *wq; member in struct:idxd_wq
309 struct idxd_wq *wq; member in struct:idxd_evl_fault
366 struct workqueue_struct *wq; member in struct:idxd_device
418 struct idxd_wq *wq; member in struct:idxd_desc
430 #define wq_confdev(wq) &wq->idxd_dev.conf_dev
440 static inline struct idxd_device_driver *wq_to_idxd_drv(struct idxd_wq *wq) argument
442 struct device *dev = wq_confdev(wq);
548 is_idxd_wq_dmaengine(struct idxd_wq *wq) argument
555 is_idxd_wq_user(struct idxd_wq *wq) argument
560 is_idxd_wq_kernel(struct idxd_wq *wq) argument
565 wq_dedicated(struct idxd_wq *wq) argument
570 wq_shared(struct idxd_wq *wq) argument
585 wq_pasid_enabled(struct idxd_wq *wq) argument
591 wq_shared_supported(struct idxd_wq *wq) argument
627 idxd_wq_portal_addr(struct idxd_wq *wq) argument
635 idxd_wq_get(struct idxd_wq *wq) argument
640 idxd_wq_put(struct idxd_wq *wq) argument
645 idxd_wq_refcount(struct idxd_wq *wq) argument
650 idxd_wq_set_private(struct idxd_wq *wq, void *private) argument
655 idxd_wq_get_private(struct idxd_wq *wq) argument
674 idxd_wq_set_max_batch_size(int idxd_type, struct idxd_wq *wq, u32 max_batch_size) argument
692 idxd_wq_driver_name_match(struct idxd_wq *wq, struct device *dev) argument
[all...]
init.c
150 struct idxd_wq *wq; local
166 wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
167 if (!wq) {
172 idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
173 conf_dev = wq_confdev(wq);
174 wq->id = i;
175 wq->idxd = idxd;
176 device_initialize(wq_confdev(wq));
180 rc = dev_set_name(conf_dev, "wq
792 struct idxd_wq *wq; local
[all...]
device.c
18 static void idxd_wq_disable_cleanup(struct idxd_wq *wq);
41 static void free_hw_descs(struct idxd_wq *wq) argument
45 for (i = 0; i < wq->num_descs; i++)
46 kfree(wq->hw_descs[i]);
48 kfree(wq->hw_descs);
51 static int alloc_hw_descs(struct idxd_wq *wq, int num) argument
53 struct device *dev = &wq->idxd->pdev->dev;
57 wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
59 if (!wq->hw_descs)
63 wq
74 free_descs(struct idxd_wq *wq) argument
84 alloc_descs(struct idxd_wq *wq, int num) argument
108 idxd_wq_alloc_resources(struct idxd_wq *wq) argument
166 idxd_wq_free_resources(struct idxd_wq *wq) argument
180 idxd_wq_enable(struct idxd_wq *wq) argument
205 idxd_wq_disable(struct idxd_wq *wq, bool reset_config) argument
234 idxd_wq_drain(struct idxd_wq *wq) argument
250 idxd_wq_reset(struct idxd_wq *wq) argument
266 idxd_wq_map_portal(struct idxd_wq *wq) argument
283 idxd_wq_unmap_portal(struct idxd_wq *wq) argument
297 struct idxd_wq *wq = idxd->wqs[i]; local
304 __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid) argument
320 idxd_wq_set_pasid(struct idxd_wq *wq, int pasid) argument
337 idxd_wq_disable_pasid(struct idxd_wq *wq) argument
363 idxd_wq_disable_cleanup(struct idxd_wq *wq) argument
382 idxd_wq_device_reset_cleanup(struct idxd_wq *wq) argument
392 struct idxd_wq *wq = container_of(ref, struct idxd_wq, wq_active); local
397 idxd_wq_init_percpu_ref(struct idxd_wq *wq) argument
412 __idxd_wq_quiesce(struct idxd_wq *wq) argument
422 idxd_wq_quiesce(struct idxd_wq *wq) argument
714 struct idxd_wq *wq = idxd->wqs[i]; local
911 idxd_wq_config_write(struct idxd_wq *wq) argument
1006 struct idxd_wq *wq = idxd->wqs[i]; local
1070 struct idxd_wq *wq; local
1132 idxd_wq_load_config(struct idxd_wq *wq) argument
1175 struct idxd_wq *wq; local
1239 struct idxd_wq *wq = idxd->wqs[i]; local
1305 idxd_wq_free_irq(struct idxd_wq *wq) argument
1323 idxd_wq_request_irq(struct idxd_wq *wq) argument
1365 idxd_drv_enable_wq(struct idxd_wq *wq) argument
1499 idxd_drv_disable_wq(struct idxd_wq *wq) argument
1586 struct idxd_wq *wq = idxd->wqs[i]; local
[all...]
cdev.c
43 struct idxd_wq *wq; member in struct:idxd_user_context
55 static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid);
101 struct idxd_wq *wq = ctx->wq; local
103 if (!wq_pasid_enabled(wq))
122 struct idxd_wq *wq = ctx->wq; local
123 struct idxd_device *idxd = wq->idxd;
131 if (wq_shared(wq)) {
135 /* The wq disabl
165 struct idxd_wq *wq = idxd_cdev->wq; local
193 struct idxd_wq *wq = ctx->wq; local
204 idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index) argument
225 struct idxd_wq *wq; local
333 idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid) argument
364 struct idxd_wq *wq = ctx->wq; local
376 check_vma(struct idxd_wq *wq, struct vm_area_struct *vma, const char *func) argument
395 struct idxd_wq *wq = ctx->wq; local
421 struct idxd_wq *wq = ctx->wq; local
447 idxd_wq_add_cdev(struct idxd_wq *wq) argument
498 idxd_wq_del_cdev(struct idxd_wq *wq) argument
512 struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); local
578 struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); local
646 idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, unsigned long addr, void *cr, int len) argument
[all...]
/linux-master/include/net/
tls.h
138 struct wait_queue_head wq; member in struct:tls_sw_context_rx
sock.h
86 wait_queue_head_t wq; member in struct:__anon124
1654 init_waitqueue_head(&sk->sk_lock.wq); \
2076 rcu_assign_pointer(sk->sk_wq, &parent->wq);
2312 * @wq: struct socket_wq
2329 * wq = rcu_dereference(sk->sk_wq);
2330 * if (wq && waitqueue_active(&wq->wait))
2331 * wake_up_interruptible(&wq->wait)
2341 static inline bool skwq_has_sleeper(struct socket_wq *wq) argument
2343 return wq
[all...]
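
The comment block at sock.h 2329-2331 spells out exactly the pattern skwq_has_sleeper() packages up. Assembled into a sketch (my_data_ready is a hypothetical callback; the body mirrors the documented idiom):

    #include <net/sock.h>

    static void my_data_ready(struct sock *sk)
    {
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        /* skwq_has_sleeper() folds the "wq && waitqueue_active(&wq->wait)"
         * test behind the memory barrier the lockless check requires. */
        if (skwq_has_sleeper(wq))
            wake_up_interruptible(&wq->wait);
        rcu_read_unlock();
    }
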
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
macsec.c
128 struct workqueue_struct *wq; member in struct:mlx5e_macsec
1577 WARN_ON(!queue_work(macsec->wq, &async_work->work));
1745 macsec->wq = alloc_ordered_workqueue("mlx5e_macsec_%s", 0, priv->netdev->name);
1746 if (!macsec->wq) {
1773 destroy_workqueue(macsec->wq);
1792 destroy_workqueue(macsec->wq);
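
macsec.c 1745 (and etnaviv_gpu.c 1756 below) use alloc_ordered_workqueue(), which creates a queue that runs at most one item at a time, in queueing order, so handlers on it need no locking against each other. A short sketch; the name and wrapper are hypothetical:

    #include <linux/workqueue.h>

    /* The first argument is a printf-style format, as in macsec.c 1745,
     * so one wrapper can name the queue after its device. */
    static struct workqueue_struct *my_serial_wq(const char *devname)
    {
        return alloc_ordered_workqueue("my_ordered_%s", 0, devname);
    }
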
/linux-master/drivers/net/ethernet/intel/iavf/
iavf_main.c
290 queue_work(adapter->wq, &adapter->reset_task);
302 mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
419 queue_work(adapter->wq, &adapter->adminq_task);
2058 queue_work(adapter->wq, &adapter->finish_config);
2740 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2746 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2752 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2758 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2764 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2783 queue_delayed_work(adapter->wq,
[all...]
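
The iavf hits cycle a single delayed_work item: queue_delayed_work() arms it only if idle, while mod_delayed_work() re-arms it unconditionally (at 302 with delay 0, to fire at once). A sketch under hypothetical names:

    #include <linux/workqueue.h>

    struct my_adapter {
        struct workqueue_struct *wq;
        struct delayed_work watchdog;
    };

    static void my_watchdog_fn(struct work_struct *work)
    {
        struct my_adapter *ad =
            container_of(work, struct my_adapter, watchdog.work);

        /* ... poll hardware state ... */

        /* Re-arm; a no-op if somehow already pending (cf. 2740-2783). */
        queue_delayed_work(ad->wq, &ad->watchdog, 2 * HZ);
    }

    static void my_setup(struct my_adapter *ad)
    {
        INIT_DELAYED_WORK(&ad->watchdog, my_watchdog_fn);
    }

    static void my_kick(struct my_adapter *ad)
    {
        /* Reschedules even when already pending (cf. 302). */
        mod_delayed_work(ad->wq, &ad->watchdog, 0);
    }
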
/linux-master/drivers/md/
dm.c
677 queue_work(md->wq, &md->work);
886 queue_work(md->wq, &md->requeue_work);
888 queue_work(md->wq, &md->work);
2008 if (md->wq)
2009 destroy_workqueue(md->wq);
2149 md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name);
2150 if (!md->wq)
2631 queue_work(md->wq, &md->work);
2759 * flush_workqueue(md->wq).
2766 * Stop md->queue before flushing md->wq i
[all...]
/linux-master/drivers/gpu/drm/xe/
xe_guc_ct.c
152 init_waitqueue_head(&ct->wq);
338 wake_up_all(&ct->wq);
680 if (!wait_event_timeout(ct->wq, !ct->g2h_outstanding ||
764 if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
/linux-master/drivers/gpu/drm/etnaviv/
etnaviv_gpu.h
114 struct workqueue_struct *wq; member in struct:etnaviv_gpu
etnaviv_gpu.c
1569 queue_work(gpu->wq, &gpu->sync_point_work);
1756 gpu->wq = alloc_ordered_workqueue(dev_name(dev), 0);
1757 if (!gpu->wq) {
1788 destroy_workqueue(gpu->wq);
1804 destroy_workqueue(gpu->wq);
/linux-master/block/
bdev.c
531 wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0); local
534 prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
537 finish_wait(wq, &wait);
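
bdev.c 531-537 open-codes a wait loop with prepare_to_wait()/finish_wait() instead of the wait_event() macros, and uses bit_waitqueue() to borrow a shared hashed wait queue keyed by (address, bit) rather than embedding a wait_queue_head_t. A sketch, assuming a hypothetical flags word whose bit 0 we wait to see cleared:

    #include <linux/wait_bit.h>
    #include <linux/sched.h>
    #include <linux/bitops.h>

    static void my_wait_for_bit_clear(unsigned long *word)
    {
        wait_queue_head_t *wq = bit_waitqueue(word, 0); /* cf. bdev.c 531 */
        DEFINE_WAIT(wait);

        while (test_bit(0, word)) {
            prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
            if (test_bit(0, word))      /* re-check after queueing */
                schedule();
            finish_wait(wq, &wait);
        }
        /* The clearing side pairs this with clear_bit() followed by
         * wake_up_bit(word, 0). */
    }
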
/linux-master/fs/bcachefs/
journal_io.c
1697 mod_delayed_work(j->wq, &j->write_work, max(0L, delta));
1768 continue_at(cl, journal_write_done, j->wq);
1782 continue_at(cl, journal_write_preflush, j->wq);
1799 continue_at(cl, journal_write_submit, j->wq);
2074 continue_at(cl, journal_write_preflush, j->wq);
2076 continue_at(cl, journal_write_submit, j->wq);
2079 continue_at(cl, journal_write_done, j->wq);
2083 continue_at(cl, journal_write_done, j->wq);
fs.c
1608 wait_queue_head_t *wq = bit_waitqueue(&inode->v.i_state, __I_NEW); local
1611 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
1615 finish_wait(wq, &wait.wq_entry);

