Lines matching refs: uacce (uacce driver core, uacce.c)

8 #include <linux/uacce.h>
33 if (q->uacce->ops->start_queue) {
34 ret = q->uacce->ops->start_queue(q);
45 struct uacce_device *uacce = q->uacce;
47 if ((q->state == UACCE_Q_STARTED) && uacce->ops->stop_queue)
48 uacce->ops->stop_queue(q);
51 uacce->ops->put_queue)
52 uacce->ops->put_queue(q);
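The fragments above (lines 33-52) show the core invoking the parent driver's queue ops: start_queue() when user space starts a queue, and stop_queue()/put_queue() during teardown, guarded by the queue-state checks shown. A minimal sketch of such ops follows; every my_drv_* name is hypothetical, only the struct uacce_queue parameter and the int/void return types come from <linux/uacce.h>.

#include <linux/uacce.h>

static int my_drv_start_queue(struct uacce_queue *q)
{
	/* ... enable doorbells / start the hardware queue behind q ... */
	return 0;
}

static void my_drv_stop_queue(struct uacce_queue *q)
{
	/* ... drain and disable the hardware queue ... */
}

static void my_drv_put_queue(struct uacce_queue *q)
{
	/* ... release per-queue resources taken when the queue was set up ... */
}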
63 struct uacce_device *uacce = q->uacce;
67 * uacce->ops->ioctl() may take the mmap_lock when copying arg to/from
69 * gets called with mmap_lock held, by taking uacce->mutex instead of
72 * mmap_lock, while holding uacce->mutex.
74 mutex_lock(&uacce->mutex);
86 if (uacce->ops->ioctl)
87 ret = uacce->ops->ioctl(q, cmd, arg);
92 mutex_unlock(&uacce->mutex);
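The comment fragments at lines 67-72 explain the lock ordering in the ioctl path: ops->ioctl() may copy its argument to or from user memory, which can fault and take mmap_lock, so the core serializes against removal with uacce->mutex rather than with any lock that is also taken while mmap_lock is held. Below is a hedged sketch of a driver-side ioctl op doing such a copy; the my_drv_* names, the command number, and the config struct are made up for illustration.

#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/uacce.h>

/* Hypothetical per-queue configuration passed in from user space. */
struct my_drv_config {
	__u32 ring_depth;
};

/* Hypothetical driver-private command number. */
#define MY_DRV_CMD_CONFIG	_IOW('W', 2, struct my_drv_config)

static long my_drv_ioctl(struct uacce_queue *q, unsigned int cmd,
			 unsigned long arg)
{
	struct my_drv_config cfg;

	if (cmd != MY_DRV_CMD_CONFIG)
		return -EINVAL;

	/* copy_from_user() may fault and take current->mm's mmap_lock. */
	if (copy_from_user(&cfg, (void __user *)arg, sizeof(cfg)))
		return -EFAULT;

	/* ... program the hardware queue behind q from cfg ... */
	return 0;
}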
106 static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
111 if (!(uacce->flags & UACCE_DEV_SVA))
114 handle = iommu_sva_bind_device(uacce->parent, current->mm);
139 struct uacce_device *uacce;
143 uacce = xa_load(&uacce_xa, iminor(inode));
144 if (!uacce)
151 mutex_lock(&uacce->mutex);
153 if (!uacce->parent) {
158 ret = uacce_bind_queue(uacce, q);
162 q->uacce = uacce;
164 if (uacce->ops->get_queue) {
165 ret = uacce->ops->get_queue(uacce, q->pasid, q);
175 list_add(&q->list, &uacce->queues);
176 mutex_unlock(&uacce->mutex);
184 mutex_unlock(&uacce->mutex);
191 struct uacce_device *uacce = q->uacce;
193 mutex_lock(&uacce->mutex);
197 mutex_unlock(&uacce->mutex);
224 struct uacce_device *uacce = q->uacce;
257 if (!uacce->ops->mmap) {
262 ret = uacce->ops->mmap(q, vma, qfr);
286 struct uacce_device *uacce = q->uacce;
295 if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q))
320 struct uacce_device *uacce = to_uacce_device(dev);
322 return sysfs_emit(buf, "%s\n", uacce->api_ver);
328 struct uacce_device *uacce = to_uacce_device(dev);
330 return sysfs_emit(buf, "%u\n", uacce->flags);
337 struct uacce_device *uacce = to_uacce_device(dev);
339 if (!uacce->ops->get_available_instances)
343 uacce->ops->get_available_instances(uacce));
349 struct uacce_device *uacce = to_uacce_device(dev);
351 return sysfs_emit(buf, "%s\n", uacce->algs);
357 struct uacce_device *uacce = to_uacce_device(dev);
360 uacce->qf_pg_num[UACCE_QFRT_MMIO] << PAGE_SHIFT);
366 struct uacce_device *uacce = to_uacce_device(dev);
369 uacce->qf_pg_num[UACCE_QFRT_DUS] << PAGE_SHIFT);
375 struct uacce_device *uacce = to_uacce_device(dev);
377 return sysfs_emit(buf, "%d\n", uacce->ops->get_isolate_state(uacce));
382 struct uacce_device *uacce = to_uacce_device(dev);
385 val = uacce->ops->isolate_err_threshold_read(uacce);
393 struct uacce_device *uacce = to_uacce_device(dev);
403 ret = uacce->ops->isolate_err_threshold_write(uacce, val);
435 struct uacce_device *uacce = to_uacce_device(dev);
438 (!uacce->qf_pg_num[UACCE_QFRT_MMIO])) ||
440 (!uacce->qf_pg_num[UACCE_QFRT_DUS])))
444 (!uacce->ops->isolate_err_threshold_read &&
445 !uacce->ops->isolate_err_threshold_write))
448 if (attr == &dev_attr_isolate.attr && !uacce->ops->get_isolate_state)
463 struct uacce_device *uacce = to_uacce_device(dev);
465 kfree(uacce);
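The available_instances attribute (lines 337-343) only reports a value when the parent driver supplies the get_available_instances op. A small sketch of such an op, assuming hypothetical my_drv bookkeeping stored in uacce->priv:

#include <linux/uacce.h>

/* Hypothetical driver state hung off uacce->priv by the parent driver. */
struct my_drv {
	unsigned int total_queues;
	unsigned int used_queues;
};

static int my_drv_get_available_instances(struct uacce_device *uacce)
{
	struct my_drv *drv = uacce->priv;

	return drv->total_queues - drv->used_queues;
}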
493 static void uacce_disable_sva(struct uacce_device *uacce)
495 if (!(uacce->flags & UACCE_DEV_SVA))
498 iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
499 iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_IOPF);
504 * @parent: pointer of uacce parent device
507 Returns a uacce pointer on success, or an ERR_PTR() on failure
508 Callers must check the negotiated uacce->flags on the returned device
514 struct uacce_device *uacce;
517 uacce = kzalloc(sizeof(struct uacce_device), GFP_KERNEL);
518 if (!uacce)
523 uacce->parent = parent;
524 uacce->flags = flags;
525 uacce->ops = interface->ops;
527 ret = xa_alloc(&uacce_xa, &uacce->dev_id, uacce, xa_limit_32b,
532 INIT_LIST_HEAD(&uacce->queues);
533 mutex_init(&uacce->mutex);
534 device_initialize(&uacce->dev);
535 uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
536 uacce->dev.class = &uacce_class;
537 uacce->dev.groups = uacce_dev_groups;
538 uacce->dev.parent = uacce->parent;
539 uacce->dev.release = uacce_release;
540 dev_set_name(&uacce->dev, "%s-%d", interface->name, uacce->dev_id);
542 return uacce;
545 uacce_disable_sva(uacce);
546 kfree(uacce);
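Taken together, lines 504-546 describe how a parent driver obtains a uacce device: it fills a struct uacce_interface, calls uacce_alloc(), and then checks the negotiated flags, since UACCE_DEV_SVA may be cleared when the IOMMU cannot provide SVA. A hedged sketch of that allocation step; the my_drv_* names are hypothetical and the ops table is left empty.

#include <linux/err.h>
#include <linux/uacce.h>

/* Hypothetical ops table; a real driver fills in get_queue, start_queue, ... */
static const struct uacce_ops my_drv_uacce_ops = { /* .get_queue, ... */ };

static struct uacce_device *my_drv_alloc_uacce(struct device *parent)
{
	struct uacce_interface interface = {
		.name	= "my_drv",		/* device is named "%s-%d" (line 540) */
		.flags	= UACCE_DEV_SVA,	/* requested; may be cleared */
		.ops	= &my_drv_uacce_ops,
	};
	struct uacce_device *uacce;

	uacce = uacce_alloc(parent, &interface);
	if (IS_ERR(uacce))
		return uacce;

	/* Flags are negotiated: SVA is dropped if the IOMMU cannot provide it. */
	if (!(uacce->flags & UACCE_DEV_SVA))
		dev_info(parent, "uacce running without SVA\n");

	return uacce;
}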
553 * @uacce: The initialized uacce device
557 int uacce_register(struct uacce_device *uacce)
559 if (!uacce)
562 uacce->cdev = cdev_alloc();
563 if (!uacce->cdev)
566 uacce->cdev->ops = &uacce_fops;
567 uacce->cdev->owner = THIS_MODULE;
569 return cdev_device_add(uacce->cdev, &uacce->dev);
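uacce_register() (lines 557-569) then allocates the cdev and calls cdev_device_add(), which is what makes the character device (named "<interface name>-<dev_id>", line 540) visible to user space. A short hedged sketch of the registration step, reusing the hypothetical names above:

static int my_drv_expose_uacce(struct device *parent,
			       struct uacce_device *uacce)
{
	int ret;

	/* After this succeeds, user space can open the my_drv-<id> chrdev. */
	ret = uacce_register(uacce);
	if (ret)
		dev_err(parent, "uacce_register failed: %d\n", ret);

	return ret;
}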
575 * @uacce: the accelerator to remove
577 void uacce_remove(struct uacce_device *uacce)
581 if (!uacce)
586 * the cdev. Holding uacce->mutex ensures that open() does not obtain a
587 * removed uacce device.
589 mutex_lock(&uacce->mutex);
591 list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
594 * uacce->ops after the queue is disabled.
609 uacce_disable_sva(uacce);
611 if (uacce->cdev)
612 cdev_device_del(uacce->cdev, &uacce->dev);
613 xa_erase(&uacce_xa, uacce->dev_id);
615 * uacce exists as long as there are open fds, but ops will be freed
618 uacce->ops = NULL;
619 uacce->parent = NULL;
620 mutex_unlock(&uacce->mutex);
621 put_device(&uacce->dev);
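Finally, uacce_remove() (lines 577-621) walks the open queues, disables them, deletes the cdev, and clears uacce->ops and uacce->parent under uacce->mutex, so already-open file descriptors can no longer call back into the parent driver even though the struct uacce_device itself lives on until the last reference is dropped. A hedged sketch of the corresponding driver teardown, again with a hypothetical my_drv_ name:

#include <linux/uacce.h>

static void my_drv_teardown(struct uacce_device *uacce)
{
	/*
	 * Stops remaining queues, removes the cdev, and drops the core's
	 * reference; once this returns, ops/parent are cleared, so the
	 * parent driver's hardware can be shut down safely.
	 */
	uacce_remove(uacce);

	/* ... quiesce and release the hardware owned by the parent ... */
}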