Lines matching refs:wq

43 	struct idxd_wq *wq;
55 static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid);
101 struct idxd_wq *wq = ctx->wq;
103 if (!wq_pasid_enabled(wq))
122 struct idxd_wq *wq = ctx->wq;
123 struct idxd_device *idxd = wq->idxd;
131 if (wq_shared(wq)) {
135 /* The wq disable in the disable pasid function will drain the wq */
136 rc = idxd_wq_disable_pasid(wq);
138 dev_err(dev, "wq disable pasid failed.\n");
140 idxd_wq_drain(wq);
145 idxd_cdev_evl_drain_pasid(wq, ctx->pasid);
150 mutex_lock(&wq->wq_lock);
151 idxd_wq_put(wq);
152 mutex_unlock(&wq->wq_lock);
165 struct idxd_wq *wq = idxd_cdev->wq;
167 cdev_ctx = &ictx[wq->idxd->data->type];
188 return idxd_cdev->wq;
193 struct idxd_wq *wq = ctx->wq;
196 mutex_lock(&wq->uc_lock);
197 ptr = xa_cmpxchg(&wq->upasid_xa, ctx->pasid, ctx, NULL, GFP_KERNEL);
199 dev_warn(&wq->idxd->pdev->dev, "xarray cmpxchg failed for pasid %u\n",
201 mutex_unlock(&wq->uc_lock);
204 void idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index)
211 mutex_lock(&wq->uc_lock);
212 ctx = xa_load(&wq->upasid_xa, pasid);
214 mutex_unlock(&wq->uc_lock);
218 mutex_unlock(&wq->uc_lock);
225 struct idxd_wq *wq;
232 wq = inode_wq(inode);
233 idxd = wq->idxd;
236 dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq));
242 mutex_lock(&wq->wq_lock);
244 if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) {
249 ctx->wq = wq;
271 mutex_lock(&wq->uc_lock);
272 rc = xa_insert(&wq->upasid_xa, pasid, ctx, GFP_KERNEL);
273 mutex_unlock(&wq->uc_lock);
277 if (wq_dedicated(wq)) {
278 rc = idxd_wq_set_pasid(wq, pasid);
280 dev_err(dev, "wq set pasid failed: %d\n", rc);
286 idxd_cdev = wq->idxd_cdev;
313 idxd_wq_get(wq);
314 mutex_unlock(&wq->wq_lock);
328 mutex_unlock(&wq->wq_lock);
333 static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
335 struct idxd_device *idxd = wq->idxd;
353 if (entry_head->pasid == pasid && entry_head->wq_idx == wq->id)
357 drain_workqueue(wq->wq);
364 struct idxd_wq *wq = ctx->wq;
365 struct idxd_device *idxd = wq->idxd;
376 static int check_vma(struct idxd_wq *wq, struct vm_area_struct *vma,
379 struct device *dev = &wq->idxd->pdev->dev;
395 struct idxd_wq *wq = ctx->wq;
396 struct idxd_device *idxd = wq->idxd;
403 rc = check_vma(wq, vma, __func__);
408 pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
421 struct idxd_wq *wq = ctx->wq;
422 struct idxd_device *idxd = wq->idxd;
425 poll_wait(filp, &wq->err_queue, wait);
447 int idxd_wq_add_cdev(struct idxd_wq *wq)
449 struct idxd_device *idxd = wq->idxd;
461 idxd_cdev->wq = wq;
464 cdev_ctx = &ictx[wq->idxd->data->type];
473 dev->parent = wq_confdev(wq);
478 rc = dev_set_name(dev, "%s/wq%u.%u", idxd->data->name_prefix, idxd->id, wq->id);
482 wq->idxd_cdev = idxd_cdev;
486 dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
494 wq->idxd_cdev = NULL;
498 void idxd_wq_del_cdev(struct idxd_wq *wq)
502 idxd_cdev = wq->idxd_cdev;
504 wq->idxd_cdev = NULL;
512 struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
513 struct idxd_device *idxd = wq->idxd;
537 mutex_lock(&wq->wq_lock);
539 if (!idxd_wq_driver_name_match(wq, dev)) {
545 wq->wq = create_workqueue(dev_name(wq_confdev(wq)));
546 if (!wq->wq) {
551 wq->type = IDXD_WQT_USER;
552 rc = idxd_drv_enable_wq(wq);
556 rc = idxd_wq_add_cdev(wq);
563 mutex_unlock(&wq->wq_lock);
567 idxd_drv_disable_wq(wq);
569 destroy_workqueue(wq->wq);
570 wq->type = IDXD_WQT_NONE;
572 mutex_unlock(&wq->wq_lock);
578 struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
580 mutex_lock(&wq->wq_lock);
581 idxd_wq_del_cdev(wq);
582 idxd_drv_disable_wq(wq);
583 wq->type = IDXD_WQT_NONE;
584 destroy_workqueue(wq->wq);
585 wq->wq = NULL;
586 mutex_unlock(&wq->wq_lock);
634 * idxd_copy_cr - copy completion record to user address space found by wq and
636 * @wq: work queue
646 int idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, unsigned long addr,
649 struct device *dev = &wq->idxd->pdev->dev;
654 mutex_lock(&wq->uc_lock);
656 ctx = xa_load(&wq->upasid_xa, pasid);
695 mutex_unlock(&wq->uc_lock);
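
The matched lines around idxd_user_counter_increment() (source lines 204-218) and idxd_copy_cr() (649-695) share one pattern: wq->upasid_xa is an xarray keyed by PASID that holds the opening process's user context, and every access to it is made under wq->uc_lock. The listing shows the same lock taken around the xa_insert() on open (271-273) and the xa_cmpxchg() removal on release (196-201). The sketch below only illustrates that lookup-under-lock pattern; it is not a drop-in patch. It assumes the driver's private types (struct idxd_wq, struct idxd_user_context) are in scope via the driver's own headers, and the 'counters[]' field is an assumption, not something visible in the listing.

	/*
	 * Sketch only: mirrors the PASID -> user-context lookup pattern the
	 * matched lines share.  Fields of struct idxd_user_context beyond
	 * what the listing shows (e.g. counters[]) are assumptions.
	 */
	static void example_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index)
	{
		struct idxd_user_context *ctx;

		mutex_lock(&wq->uc_lock);		/* serializes against open/release paths */
		ctx = xa_load(&wq->upasid_xa, pasid);	/* PASID -> user context */
		if (!ctx) {
			mutex_unlock(&wq->uc_lock);
			return;
		}
		ctx->counters[index]++;			/* 'counters[]' is an assumed field */
		mutex_unlock(&wq->uc_lock);
	}

Because insertion, removal, and lookup all happen under the same per-wq mutex, a caller that finds a context in the xarray can dereference it without additional reference counting for the duration of the critical section.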