Lines matching refs: tb (from the Linux Thunderbolt domain code, drivers/thunderbolt/domain.c)

17 #include "tb.h"
123 struct tb *tb = container_of(dev, struct tb, dev);
128 uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
132 pm_runtime_get_sync(&tb->dev);
134 if (mutex_lock_interruptible(&tb->lock)) {
138 ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
140 mutex_unlock(&tb->lock);
143 mutex_unlock(&tb->lock);
145 for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
149 ret += sysfs_emit_at(buf, ret, "%s", i < tb->nboot_acl - 1 ? "," : "\n");
153 pm_runtime_mark_last_busy(&tb->dev);
154 pm_runtime_put_autosuspend(&tb->dev);
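
The show handler above (lines 123-154) leans on two idioms worth calling out: container_of() to step from the embedded struct device back to the enclosing struct tb, and offset accumulation while emitting a comma-separated list into the sysfs buffer. The pm_runtime_get_sync()/put bracketing and the interruptible lock are kernel-only, so this minimal userspace sketch covers just the two idioms, with snprintf() standing in for sysfs_emit_at() and illustrative struct names:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct device { int id; };
    struct tb_like { unsigned int nboot_acl; struct device dev; };

    static void show(struct device *dev)
    {
            /* Recover the enclosing object from its embedded member. */
            struct tb_like *tb = container_of(dev, struct tb_like, dev);
            char buf[64];
            int ret = 0;

            for (unsigned int i = 0; i < tb->nboot_acl; i++)
                    ret += snprintf(buf + ret, sizeof(buf) - ret, "%u%s", i,
                                    i < tb->nboot_acl - 1 ? "," : "\n");
            fputs(buf, stdout);
    }

    int main(void)
    {
            struct tb_like tb = { .nboot_acl = 3 };
            show(&tb.dev);          /* prints "0,1,2" */
            return 0;
    }
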
163 struct tb *tb = container_of(dev, struct tb, dev);
170 * Make sure the value is not bigger than tb->nboot_acl * UUID
172 * string is tb->nboot_acl * ",".
174 if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1)
176 if (count < tb->nboot_acl - 1)
183 acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
190 while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
206 if (s || i < tb->nboot_acl) {
211 pm_runtime_get_sync(&tb->dev);
213 if (mutex_lock_interruptible(&tb->lock)) {
217 ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
220 kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
222 mutex_unlock(&tb->lock);
225 pm_runtime_mark_last_busy(&tb->dev);
226 pm_runtime_put_autosuspend(&tb->dev);
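
The store path (lines 163-230) bounds the input size both ways before parsing: no longer than nboot_acl UUID strings plus separators, and no shorter than the nboot_acl - 1 bare commas of an all-empty list. It then splits the writable copy with strsep(), consuming at most nboot_acl tokens and rejecting leftovers. A userspace sketch of that parse-and-validate loop, assuming a fixed slot count and plain tokens instead of UUIDs:

    #define _DEFAULT_SOURCE         /* for strsep() on glibc */
    #include <stdio.h>
    #include <string.h>

    #define NSLOTS 3                /* stands in for tb->nboot_acl */

    static int parse_list(char *str)
    {
            char *s;
            unsigned int i = 0;

            /* Consume at most NSLOTS tokens; an empty token clears a slot. */
            while ((s = strsep(&str, ",")) != NULL && i < NSLOTS) {
                    printf("slot %u: \"%s\"\n", i, s);
                    i++;
            }

            /* Tokens left over, or too few supplied: reject (-EINVAL). */
            if (s || i < NSLOTS)
                    return -1;
            return 0;
    }

    int main(void)
    {
            char ok[] = "aaaa,,cccc";       /* 3 slots, middle one cleared */
            char too_many[] = "a,b,c,d";

            printf("ok: %d\n", parse_list(ok));
            printf("too_many: %d\n", parse_list(too_many));
            return 0;
    }
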
240 const struct tb *tb = container_of(dev, struct tb, dev);
244 if (tb->security_level == TB_SECURITY_USER ||
245 tb->security_level == TB_SECURITY_SECURE)
246 deauthorization = !!tb->cm_ops->disapprove_switch;
256 struct tb *tb = container_of(dev, struct tb, dev);
258 return sysfs_emit(buf, "%d\n", tb->nhi->iommu_dma_protection);
265 struct tb *tb = container_of(dev, struct tb, dev);
268 if (tb->security_level < ARRAY_SIZE(tb_security_names))
269 name = tb_security_names[tb->security_level];
287 struct tb *tb = container_of(dev, struct tb, dev);
290 if (tb->nboot_acl &&
291 tb->cm_ops->get_boot_acl &&
292 tb->cm_ops->set_boot_acl)
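
Lines 240-292 implement conditional attribute visibility: deauthorization is exposed only for the user/secure security levels and only when the connection manager can actually disapprove a switch, while boot_acl appears only when the controller advertises ACL slots and both accessors exist. A sketch of that gating logic, with illustrative types in place of the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    struct cm_ops_like {
            int (*get_boot_acl)(void);
            int (*set_boot_acl)(void);
            int (*disapprove_switch)(void);
    };

    enum security { SEC_NONE, SEC_USER, SEC_SECURE };

    struct domain_like {
            unsigned int nboot_acl;
            enum security security_level;
            const struct cm_ops_like *cm_ops;
    };

    static bool boot_acl_visible(const struct domain_like *d)
    {
            /* Need ACL slots plus both accessors, as in lines 290-292. */
            return d->nboot_acl && d->cm_ops->get_boot_acl &&
                   d->cm_ops->set_boot_acl;
    }

    static bool deauthorization_visible(const struct domain_like *d)
    {
            /* Only user/secure levels can revoke, as in lines 244-246. */
            return (d->security_level == SEC_USER ||
                    d->security_level == SEC_SECURE) &&
                   d->cm_ops->disapprove_switch;
    }

    int main(void)
    {
            static const struct cm_ops_like ops;    /* all hooks NULL */
            struct domain_like d = { 0, SEC_USER, &ops };

            printf("boot_acl: %d deauth: %d\n",
                   boot_acl_visible(&d), deauthorization_visible(&d));
            return 0;
    }
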
320 struct tb *tb = container_of(dev, struct tb, dev);
322 tb_ctl_free(tb->ctl);
323 destroy_workqueue(tb->wq);
324 ida_free(&tb_domain_ida, tb->index);
325 mutex_destroy(&tb->lock);
326 kfree(tb);
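
tb_domain_release() (lines 320-326) is the device release callback: the object is not freed when it is unregistered but when its last reference is dropped, and only then are the control channel, workqueue, IDA slot, and mutex torn down. A standalone sketch of that refcount/release shape (names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
            int refs;
            void (*release)(struct obj *o);
    };

    static void obj_get(struct obj *o) { o->refs++; }

    static void obj_put(struct obj *o)
    {
            if (--o->refs == 0)
                    o->release(o);  /* last reference gone: free for real */
    }

    static void obj_release(struct obj *o)
    {
            puts("releasing");      /* tb_ctl_free(), destroy_workqueue(), ... */
            free(o);
    }

    int main(void)
    {
            struct obj *o = calloc(1, sizeof(*o));

            if (!o)
                    return 1;
            o->release = obj_release;
            obj_get(o);
            obj_get(o);             /* a second holder keeps it alive */
            obj_put(o);             /* "unregister": object survives */
            obj_put(o);             /* final put: release runs */
            return 0;
    }
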
337 struct tb *tb = data;
339 if (!tb->cm_ops->handle_event) {
340 tb_warn(tb, "domain does not have event handler\n");
348 return tb_xdomain_handle_request(tb, type, buf, size);
352 tb->cm_ops->handle_event(tb, type, buf, size);
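
tb_domain_event_cb() (lines 337-352) receives back the opaque pointer registered at tb_ctl_alloc() time, routes XDomain requests to tb_xdomain_handle_request(), and otherwise forwards the event through the cm_ops table, warning when no handler was installed. The dispatch shape, sketched in userspace with stand-in types:

    #include <stddef.h>
    #include <stdio.h>

    struct dom;

    struct cm_ops_like {
            void (*handle_event)(struct dom *d, int type,
                                 const void *buf, size_t size);
    };

    struct dom { const struct cm_ops_like *cm_ops; };

    static void event_cb(void *data, int type, const void *buf, size_t size)
    {
            struct dom *d = data;   /* opaque pointer handed over at alloc time */

            if (!d->cm_ops->handle_event) {
                    fprintf(stderr, "domain does not have event handler\n");
                    return;
            }
            d->cm_ops->handle_event(d, type, buf, size);
    }

    static void handler(struct dom *d, int type, const void *buf, size_t size)
    {
            (void)d; (void)buf;
            printf("event type %d, %zu bytes\n", type, size);
    }

    int main(void)
    {
            static const struct cm_ops_like ops = { .handle_event = handler };
            struct dom d = { &ops };

            event_cb(&d, 1, "x", 1);
            return 0;
    }
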
373 struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize)
375 struct tb *tb;
385 tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
386 if (!tb)
389 tb->nhi = nhi;
390 mutex_init(&tb->lock);
392 tb->index = ida_alloc(&tb_domain_ida, GFP_KERNEL);
393 if (tb->index < 0)
396 tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
397 if (!tb->wq)
400 tb->ctl = tb_ctl_alloc(nhi, tb->index, timeout_msec, tb_domain_event_cb, tb);
401 if (!tb->ctl)
404 tb->dev.parent = &nhi->pdev->dev;
405 tb->dev.bus = &tb_bus_type;
406 tb->dev.type = &tb_domain_type;
407 tb->dev.groups = domain_attr_groups;
408 dev_set_name(&tb->dev, "domain%d", tb->index);
409 device_initialize(&tb->dev);
411 return tb;
414 destroy_workqueue(tb->wq);
416 ida_free(&tb_domain_ida, tb->index);
418 kfree(tb);
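
tb_domain_alloc() (lines 373-418) acquires its resources in a fixed order (struct, IDA index, workqueue, control channel) and, on any failure, unwinds them in exactly the reverse order; the error labels at lines 414-418 are those unwind steps. The same goto-ladder shape with userspace stand-ins for the kernel allocators:

    #include <stdlib.h>

    struct obj {
            void *wq;       /* stands in for alloc_ordered_workqueue() */
            void *ctl;      /* stands in for tb_ctl_alloc() */
    };

    static struct obj *obj_alloc(void)
    {
            struct obj *o = calloc(1, sizeof(*o));
            if (!o)
                    return NULL;

            o->wq = malloc(64);
            if (!o->wq)
                    goto err_free;

            o->ctl = malloc(64);
            if (!o->ctl)
                    goto err_destroy_wq;

            return o;

            /* Each label undoes exactly one successful step, in reverse. */
    err_destroy_wq:
            free(o->wq);
    err_free:
            free(o);
            return NULL;
    }

    int main(void)
    {
            struct obj *o = obj_alloc();

            if (!o)
                    return 1;
            free(o->ctl);
            free(o->wq);
            free(o);
            return 0;
    }
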
425 * @tb: Domain to add
435 int tb_domain_add(struct tb *tb, bool reset)
439 if (WARN_ON(!tb->cm_ops))
442 mutex_lock(&tb->lock);
447 tb_ctl_start(tb->ctl);
449 if (tb->cm_ops->driver_ready) {
450 ret = tb->cm_ops->driver_ready(tb);
455 tb_dbg(tb, "security level set to %s\n",
456 tb_security_names[tb->security_level]);
458 ret = device_add(&tb->dev);
463 if (tb->cm_ops->start) {
464 ret = tb->cm_ops->start(tb, reset);
470 mutex_unlock(&tb->lock);
472 device_init_wakeup(&tb->dev, true);
474 pm_runtime_no_callbacks(&tb->dev);
475 pm_runtime_set_active(&tb->dev);
476 pm_runtime_enable(&tb->dev);
477 pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
478 pm_runtime_mark_last_busy(&tb->dev);
479 pm_runtime_use_autosuspend(&tb->dev);
484 device_del(&tb->dev);
486 tb_ctl_stop(tb->ctl);
487 mutex_unlock(&tb->lock);
494 * @tb: Domain to remove
499 void tb_domain_remove(struct tb *tb)
501 mutex_lock(&tb->lock);
502 if (tb->cm_ops->stop)
503 tb->cm_ops->stop(tb);
505 tb_ctl_stop(tb->ctl);
506 mutex_unlock(&tb->lock);
508 flush_workqueue(tb->wq);
510 if (tb->cm_ops->deinit)
511 tb->cm_ops->deinit(tb);
513 device_unregister(&tb->dev);
518 * @tb: Domain to suspend
522 int tb_domain_suspend_noirq(struct tb *tb)
531 mutex_lock(&tb->lock);
532 if (tb->cm_ops->suspend_noirq)
533 ret = tb->cm_ops->suspend_noirq(tb);
535 tb_ctl_stop(tb->ctl);
536 mutex_unlock(&tb->lock);
543 * @tb: Domain to resume
548 int tb_domain_resume_noirq(struct tb *tb)
552 mutex_lock(&tb->lock);
553 tb_ctl_start(tb->ctl);
554 if (tb->cm_ops->resume_noirq)
555 ret = tb->cm_ops->resume_noirq(tb);
556 mutex_unlock(&tb->lock);
561 int tb_domain_suspend(struct tb *tb)
563 return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
566 int tb_domain_freeze_noirq(struct tb *tb)
570 mutex_lock(&tb->lock);
571 if (tb->cm_ops->freeze_noirq)
572 ret = tb->cm_ops->freeze_noirq(tb);
574 tb_ctl_stop(tb->ctl);
575 mutex_unlock(&tb->lock);
580 int tb_domain_thaw_noirq(struct tb *tb)
584 mutex_lock(&tb->lock);
585 tb_ctl_start(tb->ctl);
586 if (tb->cm_ops->thaw_noirq)
587 ret = tb->cm_ops->thaw_noirq(tb);
588 mutex_unlock(&tb->lock);
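
The noirq suspend/freeze paths (lines 522-588) share one ordering rule: the optional connection-manager hook runs while the control channel is still up, so suspend and freeze call the hook first and then stop the channel, while resume and thaw restart the channel before calling the hook. The tb_domain_suspend() one-liner at line 563 shows the "call if present, else succeed" idiom in its purest form. Both patterns, sketched with illustrative types:

    #include <stdio.h>

    struct dom;

    struct pm_ops_like {
            int (*suspend_noirq)(struct dom *d);
            int (*resume_noirq)(struct dom *d);
    };

    struct dom {
            const struct pm_ops_like *ops;
            int ctl_running;
    };

    static int dom_suspend_noirq(struct dom *d)
    {
            int ret = 0;

            if (d->ops->suspend_noirq)      /* hook runs with channel up */
                    ret = d->ops->suspend_noirq(d);
            d->ctl_running = 0;             /* tb_ctl_stop() */
            return ret;
    }

    static int dom_resume_noirq(struct dom *d)
    {
            d->ctl_running = 1;             /* tb_ctl_start() first */
            return d->ops->resume_noirq ? d->ops->resume_noirq(d) : 0;
    }

    int main(void)
    {
            static const struct pm_ops_like ops;    /* both hooks optional */
            struct dom d = { &ops, 1 };

            printf("suspend: %d\n", dom_suspend_noirq(&d));
            printf("resume: %d\n", dom_resume_noirq(&d));
            return 0;
    }
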
593 void tb_domain_complete(struct tb *tb)
595 if (tb->cm_ops->complete)
596 tb->cm_ops->complete(tb);
599 int tb_domain_runtime_suspend(struct tb *tb)
601 if (tb->cm_ops->runtime_suspend) {
602 int ret = tb->cm_ops->runtime_suspend(tb);
606 tb_ctl_stop(tb->ctl);
610 int tb_domain_runtime_resume(struct tb *tb)
612 tb_ctl_start(tb->ctl);
613 if (tb->cm_ops->runtime_resume) {
614 int ret = tb->cm_ops->runtime_resume(tb);
623 * @tb: Domain the switch belongs to
630 int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw)
632 if (!tb->cm_ops->disapprove_switch)
635 return tb->cm_ops->disapprove_switch(tb, sw);
640 * @tb: Domain the switch belongs to
647 int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
651 if (!tb->cm_ops->approve_switch)
659 return tb->cm_ops->approve_switch(tb, sw);
664 * @tb: Domain the switch belongs to
673 int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
678 if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
686 ret = tb->cm_ops->add_switch_key(tb, sw);
690 return tb->cm_ops->approve_switch(tb, sw);
695 * @tb: Domain the switch belongs to
705 int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
715 if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
724 ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
759 return tb->cm_ops->approve_switch(tb, sw);
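
tb_domain_challenge_switch_key() (lines 705-759) is a challenge/response check: the domain sends the device a random challenge, computes the expected answer locally from the stored key (the kernel derives it with HMAC-SHA256 through the crypto API), and only calls approve_switch() when the device's response matches. The flow, sketched with a toy keyed mix that is NOT real crypto and stand-in names throughout:

    #include <stdlib.h>
    #include <string.h>

    #define CHAL_LEN 32

    /* Placeholder MAC; a real implementation must use HMAC-SHA256. */
    static void toy_mac(const unsigned char *key, const unsigned char *msg,
                        unsigned char *out)
    {
            for (int i = 0; i < CHAL_LEN; i++)
                    out[i] = key[i] ^ msg[i];
    }

    static int challenge_device(const unsigned char *key,
                                int (*respond)(const unsigned char *challenge,
                                               unsigned char *response))
    {
            unsigned char challenge[CHAL_LEN], response[CHAL_LEN];
            unsigned char expected[CHAL_LEN];
            int ret;

            for (int i = 0; i < CHAL_LEN; i++)
                    challenge[i] = rand() & 0xff;   /* get_random_bytes() */

            ret = respond(challenge, response);     /* challenge_switch_key op */
            if (ret)
                    return ret;

            toy_mac(key, challenge, expected);
            if (memcmp(response, expected, CHAL_LEN))
                    return -1;                      /* key rejected */
            return 0;                               /* then approve_switch() */
    }

    static const unsigned char test_key[CHAL_LEN] = { 0x42 };

    static int dev_respond(const unsigned char *challenge,
                           unsigned char *response)
    {
            toy_mac(test_key, challenge, response); /* an honest device */
            return 0;
    }

    int main(void)
    {
            return challenge_device(test_key, dev_respond);
    }
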
771 * @tb: Domain whose PCIe paths to disconnect
778 int tb_domain_disconnect_pcie_paths(struct tb *tb)
780 if (!tb->cm_ops->disconnect_pcie_paths)
783 return tb->cm_ops->disconnect_pcie_paths(tb);
788 * @tb: Domain enabling the DMA paths
802 int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
806 if (!tb->cm_ops->approve_xdomain_paths)
809 return tb->cm_ops->approve_xdomain_paths(tb, xd, transmit_path,
815 * @tb: Domain disabling the DMA paths
829 int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
833 if (!tb->cm_ops->disconnect_xdomain_paths)
836 return tb->cm_ops->disconnect_xdomain_paths(tb, xd, transmit_path,
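
The approve/disapprove and path-management entry points (lines 630-836) all follow one thin-wrapper shape: refuse up front when the connection manager does not implement the operation, otherwise delegate straight through. The exact errno varies by op in the kernel; this sketch just uses -EPERM:

    #include <errno.h>
    #include <stdio.h>

    struct dom;

    struct path_ops_like {
            int (*disconnect_pcie_paths)(struct dom *d);
    };

    struct dom { const struct path_ops_like *cm_ops; };

    static int dom_disconnect_pcie_paths(struct dom *d)
    {
            /* No op registered: the capability simply is not there. */
            if (!d->cm_ops->disconnect_pcie_paths)
                    return -EPERM;
            return d->cm_ops->disconnect_pcie_paths(d);
    }

    int main(void)
    {
            static const struct path_ops_like ops;  /* hook left NULL */
            struct dom d = { &ops };

            printf("%d\n", dom_disconnect_pcie_paths(&d));
            return 0;
    }
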
843 struct tb *tb = data;
847 if (xd && xd->tb == tb)
855 * @tb: Domain whose paths are disconnected
863 int tb_domain_disconnect_all_paths(struct tb *tb)
867 ret = tb_domain_disconnect_pcie_paths(tb);
871 return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
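
The closing lines (843-871) show the bus-iteration pattern: tb_domain_disconnect_all_paths() first tears down the PCIe paths, then walks every device on the bus with bus_for_each_dev(), handing the domain through the opaque data pointer so the disconnect_xdomain() callback can skip devices belonging to other domains. A userspace sketch of the walk, with plain arrays and an owner field standing in for the bus:

    #include <stdio.h>

    struct dev { int owner; int id; };

    static int for_each_dev(struct dev *devs, int n, void *data,
                            int (*fn)(struct dev *d, void *data))
    {
            for (int i = 0; i < n; i++) {
                    int ret = fn(&devs[i], data);
                    if (ret)
                            return ret;     /* stop early on error */
            }
            return 0;
    }

    static int disconnect(struct dev *d, void *data)
    {
            int *domain = data;

            if (d->owner == *domain)        /* the xd->tb == tb check */
                    printf("disconnecting dev %d\n", d->id);
            return 0;
    }

    int main(void)
    {
            struct dev devs[] = { { 1, 10 }, { 2, 11 }, { 1, 12 } };
            int domain = 1;

            return for_each_dev(devs, 3, &domain, disconnect);
    }
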