// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt bus support
 *
 * Copyright (C) 2017, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <crypto/hash.h>

#include "tb.h"

static DEFINE_IDA(tb_domain_ida);

static bool match_service_id(const struct tb_service_id *id,
			     const struct tb_service *svc)
{
	if (id->match_flags & TBSVC_MATCH_PROTOCOL_KEY) {
		if (strcmp(id->protocol_key, svc->key))
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_ID) {
		if (id->protocol_id != svc->prtcid)
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
		if (id->protocol_version != svc->prtcvers)
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) {
		if (id->protocol_revision != svc->prtcrevs)
			return false;
	}

	return true;
}

static const struct tb_service_id *__tb_service_match(struct device *dev,
						      struct device_driver *drv)
{
	struct tb_service_driver *driver;
	const struct tb_service_id *ids;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return NULL;

	driver = container_of(drv, struct tb_service_driver, driver);
	if (!driver->id_table)
		return NULL;

	for (ids = driver->id_table; ids->match_flags != 0; ids++) {
		if (match_service_id(ids, svc))
			return ids;
	}

	return NULL;
}

static int tb_service_match(struct device *dev, struct device_driver *drv)
{
	return !!__tb_service_match(dev, drv);
}

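/*
 * Example of a service driver matched by the above. A sketch only:
 * "my_probe"/"my_remove" are placeholder callbacks, and the "network"
 * key with protocol ID 1 is borrowed from thunderbolt-net. The
 * TB_SERVICE() macro sets TBSVC_MATCH_PROTOCOL_KEY |
 * TBSVC_MATCH_PROTOCOL_ID, so match_service_id() compares only the
 * key and prtcid:
 *
 *	static const struct tb_service_id my_ids[] = {
 *		{ TB_SERVICE("network", 1) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(tbsvc, my_ids);
 *
 *	static struct tb_service_driver my_driver = {
 *		.driver = {
 *			.name = "my-service",
 *		},
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.id_table = my_ids,
 *	};
 *	module_tb_service_driver(my_driver);
 */
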
static int tb_service_probe(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tb_service_driver *driver;
	const struct tb_service_id *id;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	id = __tb_service_match(dev, &driver->driver);

	return driver->probe(svc, id);
}

static void tb_service_remove(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tb_service_driver *driver;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	if (driver->remove)
		driver->remove(svc);
}

static void tb_service_shutdown(struct device *dev)
{
	struct tb_service_driver *driver;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc || !dev->driver)
		return;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	if (driver->shutdown)
		driver->shutdown(svc);
}

static const char * const tb_security_names[] = {
	[TB_SECURITY_NONE] = "none",
	[TB_SECURITY_USER] = "user",
	[TB_SECURITY_SECURE] = "secure",
	[TB_SECURITY_DPONLY] = "dponly",
	[TB_SECURITY_USBONLY] = "usbonly",
	[TB_SECURITY_NOPCIE] = "nopcie",
};

static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	uuid_t *uuids;
	ssize_t ret;
	int i;

	uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
	if (!uuids)
		return -ENOMEM;

	pm_runtime_get_sync(&tb->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out;
	}
	ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
	if (ret) {
		mutex_unlock(&tb->lock);
		goto out;
	}
	mutex_unlock(&tb->lock);

	for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
		if (!uuid_is_null(&uuids[i]))
			ret += sysfs_emit_at(buf, ret, "%pUb", &uuids[i]);

		ret += sysfs_emit_at(buf, ret, "%s", i < tb->nboot_acl - 1 ? "," : "\n");
	}

out:
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);
	kfree(uuids);

	return ret;
}

static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	char *str, *s, *uuid_str;
	ssize_t ret = 0;
	uuid_t *acl;
	int i = 0;

	/*
	 * Make sure the value is not bigger than tb->nboot_acl * UUID
	 * length + commas and optional "\n". Also the smallest allowable
	 * string is tb->nboot_acl - 1 commas.
	 */
	if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1)
		return -EINVAL;
	if (count < tb->nboot_acl - 1)
		return -EINVAL;

	str = kstrdup(buf, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
	if (!acl) {
		ret = -ENOMEM;
		goto err_free_str;
	}

	uuid_str = strim(str);
	while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
		size_t len = strlen(s);

		if (len) {
			if (len != UUID_STRING_LEN) {
				ret = -EINVAL;
				goto err_free_acl;
			}
			ret = uuid_parse(s, &acl[i]);
			if (ret)
				goto err_free_acl;
		}

		i++;
	}

	if (s || i < tb->nboot_acl) {
		ret = -EINVAL;
		goto err_free_acl;
	}

	pm_runtime_get_sync(&tb->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto err_rpm_put;
	}
	ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
	if (!ret) {
		/* Notify userspace about the change */
		kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
	}
	mutex_unlock(&tb->lock);

err_rpm_put:
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);
err_free_acl:
	kfree(acl);
err_free_str:
	kfree(str);

	return ret ?: count;
}
static DEVICE_ATTR_RW(boot_acl);

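/*
 * Example of the string format accepted by boot_acl_store() above,
 * assuming tb->nboot_acl == 3 (the UUID below is made up). An empty
 * entry between commas leaves that slot as a null UUID, so writing
 *
 *	",a4c6b7c9-32f1-4c9b-8d42-2a1b3c4d5e6f,"
 *
 * programs only slot 1 and clears slots 0 and 2.
 */
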
static ssize_t deauthorization_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	const struct tb *tb = container_of(dev, struct tb, dev);
	bool deauthorization = false;

	/* Only meaningful if authorization is supported */
	if (tb->security_level == TB_SECURITY_USER ||
	    tb->security_level == TB_SECURITY_SECURE)
		deauthorization = !!tb->cm_ops->disapprove_switch;

	return sysfs_emit(buf, "%d\n", deauthorization);
}
static DEVICE_ATTR_RO(deauthorization);

static ssize_t iommu_dma_protection_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct tb *tb = container_of(dev, struct tb, dev);

	return sysfs_emit(buf, "%d\n", tb->nhi->iommu_dma_protection);
}
static DEVICE_ATTR_RO(iommu_dma_protection);

static ssize_t security_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	const char *name = "unknown";

	if (tb->security_level < ARRAY_SIZE(tb_security_names))
		name = tb_security_names[tb->security_level];

	return sysfs_emit(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(security);

static struct attribute *domain_attrs[] = {
	&dev_attr_boot_acl.attr,
	&dev_attr_deauthorization.attr,
	&dev_attr_iommu_dma_protection.attr,
	&dev_attr_security.attr,
	NULL,
};

static umode_t domain_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tb *tb = container_of(dev, struct tb, dev);

	if (attr == &dev_attr_boot_acl.attr) {
		if (tb->nboot_acl &&
		    tb->cm_ops->get_boot_acl &&
		    tb->cm_ops->set_boot_acl)
			return attr->mode;
		return 0;
	}

	return attr->mode;
}

static const struct attribute_group domain_attr_group = {
	.is_visible = domain_attr_is_visible,
	.attrs = domain_attrs,
};

static const struct attribute_group *domain_attr_groups[] = {
	&domain_attr_group,
	NULL,
};

const struct bus_type tb_bus_type = {
	.name = "thunderbolt",
	.match = tb_service_match,
	.probe = tb_service_probe,
	.remove = tb_service_remove,
	.shutdown = tb_service_shutdown,
};

static void tb_domain_release(struct device *dev)
{
	struct tb *tb = container_of(dev, struct tb, dev);

	tb_ctl_free(tb->ctl);
	destroy_workqueue(tb->wq);
	ida_free(&tb_domain_ida, tb->index);
	mutex_destroy(&tb->lock);
	kfree(tb);
}

const struct device_type tb_domain_type = {
	.name = "thunderbolt_domain",
	.release = tb_domain_release,
};

static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size)
{
	struct tb *tb = data;

	if (!tb->cm_ops->handle_event) {
		tb_warn(tb, "domain does not have event handler\n");
		return true;
	}

	switch (type) {
	case TB_CFG_PKG_XDOMAIN_REQ:
	case TB_CFG_PKG_XDOMAIN_RESP:
		if (tb_is_xdomain_enabled())
			return tb_xdomain_handle_request(tb, type, buf, size);
		break;

	default:
		tb->cm_ops->handle_event(tb, type, buf, size);
	}

	return true;
}

/**
 * tb_domain_alloc() - Allocate a domain
 * @nhi: Pointer to the host controller
 * @timeout_msec: Control channel timeout for non-raw messages
 * @privsize: Size of the connection manager private data
 *
 * Allocates and initializes a new Thunderbolt domain. Connection
 * managers are expected to call this and then fill in @cm_ops
 * accordingly.
 *
 * Call tb_domain_put() to release the domain before it has been added
 * to the system.
 *
 * Return: allocated domain structure or %NULL in case of error
 */
struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize)
{
	struct tb *tb;

	/*
	 * Make sure the structure sizes match what the hardware expects
	 * because bit-fields are being used.
	 */
	BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);

	tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
	if (!tb)
		return NULL;

	tb->nhi = nhi;
	mutex_init(&tb->lock);

	tb->index = ida_alloc(&tb_domain_ida, GFP_KERNEL);
	if (tb->index < 0)
		goto err_free;

	tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
	if (!tb->wq)
		goto err_remove_ida;

	tb->ctl = tb_ctl_alloc(nhi, tb->index, timeout_msec, tb_domain_event_cb, tb);
	if (!tb->ctl)
		goto err_destroy_wq;

	tb->dev.parent = &nhi->pdev->dev;
	tb->dev.bus = &tb_bus_type;
	tb->dev.type = &tb_domain_type;
	tb->dev.groups = domain_attr_groups;
	dev_set_name(&tb->dev, "domain%d", tb->index);
	device_initialize(&tb->dev);

	return tb;

err_destroy_wq:
	destroy_workqueue(tb->wq);
err_remove_ida:
	ida_free(&tb_domain_ida, tb->index);
err_free:
	kfree(tb);

	return NULL;
}

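/*
 * Example of the expected connection manager usage of the above,
 * loosely following the in-tree connection managers (icm.c, tb.c).
 * A sketch only: "my_cm", "my_cm_ops" and the timeout value are
 * placeholders:
 *
 *	struct tb *tb;
 *	int ret;
 *
 *	tb = tb_domain_alloc(nhi, 1000, sizeof(struct my_cm));
 *	if (!tb)
 *		return -ENOMEM;
 *
 *	tb->security_level = TB_SECURITY_USER;
 *	tb->cm_ops = &my_cm_ops;
 *
 *	ret = tb_domain_add(tb, false);
 *	if (ret)
 *		tb_domain_put(tb);
 */
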
/**
 * tb_domain_add() - Add domain to the system
 * @tb: Domain to add
 * @reset: Issue reset to the host router
 *
 * Starts the domain and adds it to the system. Hotplugging devices will
 * work after this function has returned successfully. In order to remove
 * and release the domain after this function has been called, call
 * tb_domain_remove().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_domain_add(struct tb *tb, bool reset)
{
	int ret;

	if (WARN_ON(!tb->cm_ops))
		return -EINVAL;

	mutex_lock(&tb->lock);
	/*
	 * tb_schedule_hotplug_handler may be called as soon as the config
	 * channel is started. That's why we have to hold the lock here.
	 */
	tb_ctl_start(tb->ctl);

	if (tb->cm_ops->driver_ready) {
		ret = tb->cm_ops->driver_ready(tb);
		if (ret)
			goto err_ctl_stop;
	}

	tb_dbg(tb, "security level set to %s\n",
	       tb_security_names[tb->security_level]);

	ret = device_add(&tb->dev);
	if (ret)
		goto err_ctl_stop;

	/* Start the domain */
	if (tb->cm_ops->start) {
		ret = tb->cm_ops->start(tb, reset);
		if (ret)
			goto err_domain_del;
	}

	/* This starts event processing */
	mutex_unlock(&tb->lock);

	device_init_wakeup(&tb->dev, true);

	pm_runtime_no_callbacks(&tb->dev);
	pm_runtime_set_active(&tb->dev);
	pm_runtime_enable(&tb->dev);
	pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_use_autosuspend(&tb->dev);

	return 0;

err_domain_del:
	device_del(&tb->dev);
err_ctl_stop:
	tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	return ret;
}

/**
 * tb_domain_remove() - Removes and releases a domain
 * @tb: Domain to remove
 *
 * Stops the domain, removes it from the system and releases all
 * resources once the last reference has been released.
 */
void tb_domain_remove(struct tb *tb)
{
	mutex_lock(&tb->lock);
	if (tb->cm_ops->stop)
		tb->cm_ops->stop(tb);
	/* Stop the domain control traffic */
	tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	flush_workqueue(tb->wq);

	if (tb->cm_ops->deinit)
		tb->cm_ops->deinit(tb);

	device_unregister(&tb->dev);
}

/**
 * tb_domain_suspend_noirq() - Suspend a domain
 * @tb: Domain to suspend
 *
 * Suspends all devices in the domain and stops the control channel.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_domain_suspend_noirq(struct tb *tb)
{
	int ret = 0;

	/*
	 * The control channel interrupt is left enabled during suspend
	 * and taking the lock here prevents any events happening before
	 * we actually have stopped the domain and the control channel.
	 */
	mutex_lock(&tb->lock);
	if (tb->cm_ops->suspend_noirq)
		ret = tb->cm_ops->suspend_noirq(tb);
	if (!ret)
		tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	return ret;
}

/**
 * tb_domain_resume_noirq() - Resume a domain
 * @tb: Domain to resume
 *
 * Re-starts the control channel and resumes all devices connected to
 * the domain.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_domain_resume_noirq(struct tb *tb)
{
	int ret = 0;

	mutex_lock(&tb->lock);
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->resume_noirq)
		ret = tb->cm_ops->resume_noirq(tb);
	mutex_unlock(&tb->lock);

	return ret;
}

int tb_domain_suspend(struct tb *tb)
{
	return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
}

int tb_domain_freeze_noirq(struct tb *tb)
{
	int ret = 0;

	mutex_lock(&tb->lock);
	if (tb->cm_ops->freeze_noirq)
		ret = tb->cm_ops->freeze_noirq(tb);
	if (!ret)
		tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	return ret;
}

int tb_domain_thaw_noirq(struct tb *tb)
{
	int ret = 0;

	mutex_lock(&tb->lock);
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->thaw_noirq)
		ret = tb->cm_ops->thaw_noirq(tb);
	mutex_unlock(&tb->lock);

	return ret;
}

void tb_domain_complete(struct tb *tb)
{
	if (tb->cm_ops->complete)
		tb->cm_ops->complete(tb);
}

int tb_domain_runtime_suspend(struct tb *tb)
{
	if (tb->cm_ops->runtime_suspend) {
		int ret = tb->cm_ops->runtime_suspend(tb);
		if (ret)
			return ret;
	}
	tb_ctl_stop(tb->ctl);
	return 0;
}

int tb_domain_runtime_resume(struct tb *tb)
{
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->runtime_resume) {
		int ret = tb->cm_ops->runtime_resume(tb);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * tb_domain_disapprove_switch() - Disapprove switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to disapprove
 *
 * This will disconnect the PCIe tunnel from the parent to @sw.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw)
{
	if (!tb->cm_ops->disapprove_switch)
		return -EPERM;

	return tb->cm_ops->disapprove_switch(tb, sw);
}

/**
 * tb_domain_approve_switch() - Approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * This will approve the switch by connection manager specific means.
 * In case of success the connection manager will create a PCIe tunnel
 * from the parent to @sw.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent_sw;

	if (!tb->cm_ops->approve_switch)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	return tb->cm_ops->approve_switch(tb, sw);
}

/**
 * tb_domain_approve_switch_key() - Approve switch and add key
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function first adds
 * the key to the switch NVM using connection manager specific means.
 * If adding the key is successful, the switch is approved and
 * connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent_sw;
	int ret;

	if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	ret = tb->cm_ops->add_switch_key(tb, sw);
	if (ret)
		return ret;

	return tb->cm_ops->approve_switch(tb, sw);
}

/**
 * tb_domain_challenge_switch_key() - Challenge and approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function generates a
 * random challenge and sends it to the switch. The switch responds to
 * this and if the response matches our random challenge, the switch is
 * approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
{
	u8 challenge[TB_SWITCH_KEY_SIZE];
	u8 response[TB_SWITCH_KEY_SIZE];
	u8 hmac[TB_SWITCH_KEY_SIZE];
	struct tb_switch *parent_sw;
	struct crypto_shash *tfm;
	struct shash_desc *shash;
	int ret;

	if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	get_random_bytes(challenge, sizeof(challenge));
	ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
	if (ret)
		return ret;

	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
	if (ret)
		goto err_free_tfm;

	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
			GFP_KERNEL);
	if (!shash) {
		ret = -ENOMEM;
		goto err_free_tfm;
	}

	shash->tfm = tfm;

	memset(hmac, 0, sizeof(hmac));
	ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
	if (ret)
		goto err_free_shash;

	/* The returned HMAC must match the one we calculated */
	if (memcmp(response, hmac, sizeof(hmac))) {
		ret = -EKEYREJECTED;
		goto err_free_shash;
	}

	crypto_free_shash(tfm);
	kfree(shash);

	return tb->cm_ops->approve_switch(tb, sw);

err_free_shash:
	kfree(shash);
err_free_tfm:
	crypto_free_shash(tfm);

	return ret;
}

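/*
 * Note: the challenge flow above is normally reached from userspace
 * through the switch "key" and "authorized" sysfs attributes: the
 * stored key is written to "key" and then "2" to "authorized", which
 * ends up calling tb_domain_challenge_switch_key() (the attribute
 * handling lives in switch.c).
 */
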
/**
 * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
 * @tb: Domain whose PCIe paths to disconnect
 *
 * This needs to be called in preparation for NVM upgrade of the host
 * controller. Makes sure all PCIe paths are disconnected.
 *
 * Return: %0 on success and negative errno in case of error.
 */
int tb_domain_disconnect_pcie_paths(struct tb *tb)
{
	if (!tb->cm_ops->disconnect_pcie_paths)
		return -EPERM;

	return tb->cm_ops->disconnect_pcie_paths(tb);
}

/**
 * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
 * @tb: Domain enabling the DMA paths
 * @xd: XDomain DMA paths are created to
 * @transmit_path: HopID we are using to send out packets
 * @transmit_ring: DMA ring used to send out packets
 * @receive_path: HopID the other end is using to send packets to us
 * @receive_ring: DMA ring used to receive packets from @receive_path
 *
 * Calls connection manager specific method to enable DMA paths to the
 * XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				    int transmit_path, int transmit_ring,
				    int receive_path, int receive_ring)
{
	if (!tb->cm_ops->approve_xdomain_paths)
		return -ENOTSUPP;

	return tb->cm_ops->approve_xdomain_paths(tb, xd, transmit_path,
			transmit_ring, receive_path, receive_ring);
}

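/*
 * Service drivers do not call this directly; they use
 * tb_xdomain_enable_paths(), which ends up here. A sketch of such a
 * call, roughly following thunderbolt-net (the path and ring variables
 * are placeholders for values negotiated with the remote host):
 *
 *	ret = tb_xdomain_enable_paths(xd, local_transmit_path,
 *				      tx_ring->hop, remote_transmit_path,
 *				      rx_ring->hop);
 */
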
/**
 * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
 * @tb: Domain disabling the DMA paths
 * @xd: XDomain whose DMA paths are disconnected
 * @transmit_path: HopID we are using to send out packets
 * @transmit_ring: DMA ring used to send out packets
 * @receive_path: HopID the other end is using to send packets to us
 * @receive_ring: DMA ring used to receive packets from @receive_path
 *
 * Calls connection manager specific method to disconnect DMA paths to
 * the XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				       int transmit_path, int transmit_ring,
				       int receive_path, int receive_ring)
{
	if (!tb->cm_ops->disconnect_xdomain_paths)
		return -ENOTSUPP;

	return tb->cm_ops->disconnect_xdomain_paths(tb, xd, transmit_path,
			transmit_ring, receive_path, receive_ring);
}

static int disconnect_xdomain(struct device *dev, void *data)
{
	struct tb_xdomain *xd;
	struct tb *tb = data;
	int ret = 0;

	xd = tb_to_xdomain(dev);
	if (xd && xd->tb == tb)
		ret = tb_xdomain_disable_all_paths(xd);

	return ret;
}

/**
 * tb_domain_disconnect_all_paths() - Disconnect all paths for the domain
 * @tb: Domain whose paths are disconnected
 *
 * This function can be used to disconnect all paths (PCIe, XDomain) for
 * example in preparation for host NVM firmware upgrade. After this is
 * called the paths cannot be established without resetting the switch.
 *
 * Return: %0 in case of success and negative errno otherwise.
 */
int tb_domain_disconnect_all_paths(struct tb *tb)
{
	int ret;

	ret = tb_domain_disconnect_pcie_paths(tb);
	if (ret)
		return ret;

	return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
}

int tb_domain_init(void)
{
	int ret;

	tb_debugfs_init();
	tb_acpi_init();

	ret = tb_xdomain_init();
	if (ret)
		goto err_acpi;
	ret = bus_register(&tb_bus_type);
	if (ret)
		goto err_xdomain;

	return 0;

err_xdomain:
	tb_xdomain_exit();
err_acpi:
	tb_acpi_exit();
	tb_debugfs_exit();

	return ret;
}

void tb_domain_exit(void)
{
	bus_unregister(&tb_bus_type);
	ida_destroy(&tb_domain_ida);
	tb_nvm_exit();
	tb_xdomain_exit();
	tb_acpi_exit();
	tb_debugfs_exit();
}