// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus connections
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 */

#include <linux/workqueue.h>
#include <linux/greybus.h>

#include "greybus_trace.h"

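/*
 * Timeout (in milliseconds, as interpreted by the host driver) passed to the
 * host driver's cport_quiesce callback during connection shutdown.
 */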
#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT	1000

static void gb_connection_kref_release(struct kref *kref);

static DEFINE_SPINLOCK(gb_connections_lock);
static DEFINE_MUTEX(gb_connection_mutex);

/* Caller holds gb_connection_mutex. */
static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
{
	struct gb_host_device *hd = intf->hd;
	struct gb_connection *connection;

	list_for_each_entry(connection, &hd->connections, hd_links) {
		if (connection->intf == intf &&
		    connection->intf_cport_id == cport_id)
			return true;
	}

	return false;
}

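/*
 * Reference counting: a connection is freed from
 * gb_connection_kref_release() once the last reference has been dropped
 * through gb_connection_put().
 */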
static void gb_connection_get(struct gb_connection *connection)
{
	kref_get(&connection->kref);

	trace_gb_connection_get(connection);
}

static void gb_connection_put(struct gb_connection *connection)
{
	trace_gb_connection_put(connection);

	kref_put(&connection->kref, gb_connection_kref_release);
}

/*
 * Returns a reference-counted pointer to the connection if found.
 */
static struct gb_connection *
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
{
	struct gb_connection *connection;
	unsigned long flags;

	spin_lock_irqsave(&gb_connections_lock, flags);
	list_for_each_entry(connection, &hd->connections, hd_links)
		if (connection->hd_cport_id == cport_id) {
			gb_connection_get(connection);
			goto found;
		}
	connection = NULL;
found:
	spin_unlock_irqrestore(&gb_connections_lock, flags);

	return connection;
}

/*
 * Callback from the host driver to let us know that data has been
 * received on a CPort.
 */
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
		       u8 *data, size_t length)
{
	struct gb_connection *connection;

	trace_gb_hd_in(hd);

	connection = gb_connection_hd_find(hd, cport_id);
	if (!connection) {
		dev_err(&hd->dev,
			"nonexistent connection (%zu bytes dropped)\n", length);
		return;
	}
	gb_connection_recv(connection, data, length);
	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);

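/* The last reference to the connection has been dropped; free it. */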
static void gb_connection_kref_release(struct kref *kref)
{
	struct gb_connection *connection;

	connection = container_of(kref, struct gb_connection, kref);

	trace_gb_connection_release(connection);

	kfree(connection);
}

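/*
 * Derive the connection name "<hd_cport_id>/<intf_id>:<cport_id>" used in
 * diagnostics. Static connections (no interface) use 0 for both remote ids.
 */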
static void gb_connection_init_name(struct gb_connection *connection)
{
	u16 hd_cport_id = connection->hd_cport_id;
	u16 cport_id = 0;
	u8 intf_id = 0;

	if (connection->intf) {
		intf_id = connection->intf->interface_id;
		cport_id = connection->intf_cport_id;
	}

	snprintf(connection->name, sizeof(connection->name),
		 "%u/%u:%u", hd_cport_id, intf_id, cport_id);
}

/*
 * _gb_connection_create() - create a Greybus connection
 * @hd:			host device of the connection
 * @hd_cport_id:	host-device cport id, or -1 for dynamic allocation
 * @intf:		remote interface, or NULL for static connections
 * @bundle:		remote-interface bundle (may be NULL)
 * @cport_id:		remote-interface cport id, or 0 for static connections
 * @handler:		request handler (may be NULL)
 * @flags:		connection flags
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
		      struct gb_interface *intf,
		      struct gb_bundle *bundle, int cport_id,
		      gb_request_handler_t handler,
		      unsigned long flags)
{
	struct gb_connection *connection;
	int ret;

	mutex_lock(&gb_connection_mutex);

	if (intf && gb_connection_cport_in_use(intf, cport_id)) {
		dev_err(&intf->dev, "cport %u already in use\n", cport_id);
		ret = -EBUSY;
		goto err_unlock;
	}

	ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
	if (ret < 0) {
		dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
		goto err_unlock;
	}
	hd_cport_id = ret;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection) {
		ret = -ENOMEM;
		goto err_hd_cport_release;
	}

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;
	connection->intf = intf;
	connection->bundle = bundle;
	connection->handler = handler;
	connection->flags = flags;
	if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
		connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	mutex_init(&connection->mutex);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	connection->wq = alloc_ordered_workqueue("%s:%d", 0, dev_name(&hd->dev),
						 hd_cport_id);
	if (!connection->wq) {
		ret = -ENOMEM;
		goto err_free_connection;
	}

	kref_init(&connection->kref);

	gb_connection_init_name(connection);

	spin_lock_irq(&gb_connections_lock);
	list_add(&connection->hd_links, &hd->connections);

	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);

	spin_unlock_irq(&gb_connections_lock);

	mutex_unlock(&gb_connection_mutex);

	trace_gb_connection_create(connection);

	return connection;

err_free_connection:
	kfree(connection);
err_hd_cport_release:
	gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
	mutex_unlock(&gb_connection_mutex);

	return ERR_PTR(ret);
}

struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
			    gb_request_handler_t handler)
{
	return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
	return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
				     GB_CONNECTION_FLAG_CONTROL |
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
		     gb_request_handler_t handler)
{
	struct gb_interface *intf = bundle->intf;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, 0);
}
EXPORT_SYMBOL_GPL(gb_connection_create);

struct gb_connection *
gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
			   gb_request_handler_t handler,
			   unsigned long flags)
{
	struct gb_interface *intf = bundle->intf;

	if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
		flags &= ~GB_CONNECTION_FLAG_CORE_MASK;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_flags);

struct gb_connection *
gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
			       unsigned long flags)
{
	flags |= GB_CONNECTION_FLAG_OFFLOADED;

	return gb_connection_create_flags(bundle, cport_id, NULL, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);

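/*
 * Host-device CPort helpers. All host-driver CPort callbacks are optional;
 * each helper below is a no-op when the host driver does not implement the
 * corresponding operation.
 */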
static int gb_connection_hd_cport_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_enable)
		return 0;

	ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
				       connection->flags);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static void gb_connection_hd_cport_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_disable)
		return;

	ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
			connection->name, ret);
	}
}

static int gb_connection_hd_cport_connected(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_connected)
		return 0;

	ret = hd->driver->cport_connected(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to set connected state: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static int gb_connection_hd_cport_flush(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_flush)
		return 0;

	ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	size_t peer_space;
	int ret;

	if (!hd->driver->cport_quiesce)
		return 0;

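	/*
	 * Reserve peer buffer space for the shutdown request and, when a
	 * mode switch is pending, for one additional message header.
	 */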
	peer_space = sizeof(struct gb_operation_msg_hdr) +
			sizeof(struct gb_cport_shutdown_request);

	if (connection->mode_switch)
		peer_space += sizeof(struct gb_operation_msg_hdr);

	ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
					peer_space,
					GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static int gb_connection_hd_cport_clear(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_clear)
		return 0;

	ret = hd->driver->cport_clear(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to clear host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

/*
 * Request that the SVC create a connection from the AP's CPort to the
 * interface's CPort.
 */
static int
gb_connection_svc_connection_create(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_interface *intf;
	u8 cport_flags;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	intf = connection->intf;

	/*
	 * Enable either E2EFC or CSD, unless no flow control is requested.
	 */
	cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
	if (gb_connection_flow_control_disabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
	} else if (gb_connection_e2efc_enabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
				GB_SVC_CPORT_FLAG_E2EFC;
	}

	ret = gb_svc_connection_create(hd->svc,
				       hd->svc->ap_intf_id,
				       connection->hd_cport_id,
				       intf->interface_id,
				       connection->intf_cport_id,
				       cport_flags);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to create svc connection: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

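/* Tear down the SVC-created connection; a no-op for static connections. */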
static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
	if (gb_connection_is_static(connection))
		return;

	gb_svc_connection_destroy(connection->hd->svc,
				  connection->hd->svc->ap_intf_id,
				  connection->hd_cport_id,
				  connection->intf->interface_id,
				  connection->intf_cport_id);
}

/* Inform the remote interface that one of its CPorts is now connected. */
static int gb_connection_control_connected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_control(connection))
		return 0;

	control = connection->intf->control;

	ret = gb_control_connected_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->bundle->dev,
			"failed to connect cport: %d\n", ret);
		return ret;
	}

	return 0;
}

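/*
 * Notify the interface that a CPort is about to be disconnected. Errors are
 * logged but otherwise ignored, as teardown proceeds regardless.
 */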
static void
gb_connection_control_disconnecting(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	ret = gb_control_disconnecting_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send disconnecting: %d\n",
			connection->name, ret);
	}
}

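/*
 * Notify the interface that a CPort has been disconnected. For the control
 * connection itself, this instead sends the mode-switch request when a mode
 * switch is pending.
 */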
static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	if (gb_connection_is_control(connection)) {
		if (connection->mode_switch) {
			ret = gb_control_mode_switch_operation(control);
			if (ret) {
				/*
				 * Allow mode switch to time out waiting for
				 * mailbox event.
				 */
				return;
			}
		}

		return;
	}

	ret = gb_control_disconnected_operation(control, cport_id);
	if (ret) {
		dev_warn(&connection->bundle->dev,
			 "failed to disconnect cport: %d\n", ret);
	}
}

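/*
 * Send a CPort shutdown request for the given phase over the connection
 * itself and wait for it to complete.
 */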
static int gb_connection_shutdown_operation(struct gb_connection *connection,
					    u8 phase)
{
	struct gb_cport_shutdown_request *req;
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_core(connection,
					     GB_REQUEST_TYPE_CPORT_SHUTDOWN,
					     sizeof(*req), 0, 0,
					     GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	req = operation->request->payload;
	req->phase = phase;

	ret = gb_operation_request_send_sync(operation);

	gb_operation_put(operation);

	return ret;
}

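/*
 * Shut down the CPort: offloaded connections delegate to the host driver's
 * cport_shutdown callback, while all others send the shutdown request over
 * the connection themselves.
 */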
static int gb_connection_cport_shutdown(struct gb_connection *connection,
					u8 phase)
{
	struct gb_host_device *hd = connection->hd;
	const struct gb_hd_driver *drv = hd->driver;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_offloaded(connection)) {
		if (!drv->cport_shutdown)
			return 0;

		ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase,
					  GB_OPERATION_TIMEOUT_DEFAULT);
	} else {
		ret = gb_connection_shutdown_operation(connection, phase);
	}

	if (ret) {
		dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n",
			connection->name, phase, ret);
		return ret;
	}

	return 0;
}

static int
gb_connection_cport_shutdown_phase_1(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 1);
}

static int
gb_connection_cport_shutdown_phase_2(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 2);
}

/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED or
 * DISCONNECTING.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
					    int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;

	while (!list_empty(&connection->operations)) {
		operation = list_last_entry(&connection->operations,
					    struct gb_operation, links);
		gb_operation_get(operation);
		spin_unlock_irq(&connection->lock);

		if (gb_operation_is_incoming(operation))
			gb_operation_cancel_incoming(operation, errno);
		else
			gb_operation_cancel(operation, errno);

		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}

/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
					int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;
	bool incoming;

	while (!list_empty(&connection->operations)) {
		incoming = false;
		list_for_each_entry(operation, &connection->operations,
				    links) {
			if (gb_operation_is_incoming(operation)) {
				gb_operation_get(operation);
				incoming = true;
				break;
			}
		}

		if (!incoming)
			break;

		spin_unlock_irq(&connection->lock);

		/* FIXME: flush, not cancel? */
		gb_operation_cancel_incoming(operation, errno);
		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}

/*
 * _gb_connection_enable() - enable a connection
 * @connection:		connection to enable
 * @rx:			whether to enable incoming requests
 *
 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
 * ENABLED_TX->ENABLED state transitions.
 *
 * Locking: Caller holds connection->mutex.
 */
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
	int ret;

	/* Handle ENABLED_TX -> ENABLED transitions. */
	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
		if (!(connection->handler && rx))
			return 0;

		spin_lock_irq(&connection->lock);
		connection->state = GB_CONNECTION_STATE_ENABLED;
		spin_unlock_irq(&connection->lock);

		return 0;
	}

	ret = gb_connection_hd_cport_enable(connection);
	if (ret)
		return ret;

	ret = gb_connection_svc_connection_create(connection);
	if (ret)
		goto err_hd_cport_clear;

	ret = gb_connection_hd_cport_connected(connection);
	if (ret)
		goto err_svc_connection_destroy;

	spin_lock_irq(&connection->lock);
	if (connection->handler && rx)
		connection->state = GB_CONNECTION_STATE_ENABLED;
	else
		connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	spin_unlock_irq(&connection->lock);

	ret = gb_connection_control_connected(connection);
	if (ret)
		goto err_control_disconnecting;

	return 0;

err_control_disconnecting:
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	/* Transmit queue should already be empty. */
	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);
	connection->state = GB_CONNECTION_STATE_DISABLED;
err_svc_connection_destroy:
	gb_connection_svc_connection_destroy(connection);
err_hd_cport_clear:
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	return ret;
}

int gb_connection_enable(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED)
		goto out_unlock;

	ret = _gb_connection_enable(connection, true);
	if (!ret)
		trace_gb_connection_enable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable);

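/* Enable a connection for outgoing requests only. */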
int gb_connection_enable_tx(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
		goto out_unlock;

	ret = _gb_connection_enable(connection, false);
	if (!ret)
		trace_gb_connection_enable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable_tx);

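/*
 * Stop accepting incoming requests on an enabled connection, flushing any
 * incoming operations already in flight.
 */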
void gb_connection_disable_rx(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	spin_lock_irq(&connection->lock);
	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
		spin_unlock_irq(&connection->lock);
		goto out_unlock;
	}
	connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	trace_gb_connection_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_rx);

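/*
 * Mark the connection so that gb_connection_disable() defers tearing down
 * the SVC connection and host CPort until
 * gb_connection_mode_switch_complete() is called.
 */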
void gb_connection_mode_switch_prepare(struct gb_connection *connection)
{
	connection->mode_switch = true;
}

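/* Perform the connection teardown deferred by a preceding mode switch. */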
void gb_connection_mode_switch_complete(struct gb_connection *connection)
{
	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	connection->mode_switch = false;
}

void gb_connection_disable(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);

	connection->state = GB_CONNECTION_STATE_DISABLED;

	/* control-connection teardown is deferred when mode switching */
	if (!connection->mode_switch) {
		gb_connection_svc_connection_destroy(connection);
		gb_connection_hd_cport_clear(connection);

		gb_connection_hd_cport_disable(connection);
	}

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);

/* Disable a connection without communicating with the remote end. */
void gb_connection_disable_forced(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);
out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_forced);

/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
	if (!connection)
		return;

	if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
		gb_connection_disable(connection);

	mutex_lock(&gb_connection_mutex);

	spin_lock_irq(&gb_connections_lock);
	list_del(&connection->bundle_links);
	list_del(&connection->hd_links);
	spin_unlock_irq(&gb_connections_lock);

	destroy_workqueue(connection->wq);

	gb_hd_cport_release(connection->hd, connection->hd_cport_id);
	connection->hd_cport_id = CPORT_ID_BAD;

	mutex_unlock(&gb_connection_mutex);

	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);

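/*
 * Ask the host driver to start latency-tagging traffic on this connection's
 * CPort, if it supports doing so.
 */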
void gb_connection_latency_tag_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->latency_tag_enable)
		return;

	ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to enable latency tag: %d\n",
			connection->name, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);

void gb_connection_latency_tag_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->latency_tag_disable)
		return;

	ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to disable latency tag: %d\n",
			connection->name, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);