// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus "AP" USB driver for "ES2" controller chips
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */
#include <linux/kthread.h>
#include <linux/sizes.h>
#include <linux/usb.h>
#include <linux/kfifo.h>
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/greybus.h>
#include <asm/unaligned.h>

#include "arpc.h"
#include "greybus_trace.h"


/* Default timeout for USB vendor requests. */
#define ES2_USB_CTRL_TIMEOUT	500

/* Default timeout for ARPC CPort requests */
#define ES2_ARPC_CPORT_TIMEOUT	500

/* Fixed CPort numbers */
#define ES2_CPORT_CDSI0		16
#define ES2_CPORT_CDSI1		17

/* Memory sizes for the buffers sent to/from the ES2 controller */
#define ES2_GBUF_MSG_SIZE_MAX	2048

/* Memory sizes for the ARPC buffers */
#define ARPC_OUT_SIZE_MAX	U16_MAX
#define ARPC_IN_SIZE_MAX	128

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(0x18d1, 0x1eaf) },
	{ },
};
MODULE_DEVICE_TABLE(usb, id_table);

#define APB1_LOG_SIZE		SZ_16K

/*
 * Number of CPort IN urbs in flight at any point in time.
 * Adjust if we are having stalls in the USB buffer due to not enough urbs in
 * flight.
 */
#define NUM_CPORT_IN_URB	4

/*
 * Number of CPort OUT urbs in flight at any point in time.
 * Adjust if we get messages saying we are out of urbs in the system log.
 */
#define NUM_CPORT_OUT_URB	8

/*
 * Number of ARPC in urbs in flight at any point in time.
 */
#define NUM_ARPC_IN_URB		2

/*
 * @endpoint: bulk in endpoint for CPort data
 * @urb: array of urbs for the CPort in messages
 * @buffer: array of buffers for the @cport_in_urb urbs
 */
struct es2_cport_in {
	__u8 endpoint;
	struct urb *urb[NUM_CPORT_IN_URB];
	u8 *buffer[NUM_CPORT_IN_URB];
};

/**
 * struct es2_ap_dev - ES2 USB Bridge to AP structure
 * @usb_dev: pointer to the USB device we are bound to.
 * @usb_intf: pointer to the USB interface we are bound to.
 * @hd: pointer to our gb_host_device structure
 *
 * @cport_in: endpoint, urbs and buffer for cport in messages
 * @cport_out_endpoint: endpoint for cport out messages
 * @cport_out_urb: array of urbs for the CPort out messages
 * @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or
 *			not.
 * @cport_out_urb_cancelled: array of flags indicating whether the
 *			corresponding @cport_out_urb is being cancelled
 * @cport_out_urb_lock: locks the @cport_out_urb_busy "list"
 * @cdsi1_in_use: true if cport CDSI1 is in use
 * @apb_log_task: task pointer for logging thread
 * @apb_log_dentry: file system entry for the log file interface
 * @apb_log_enable_dentry: file system entry for enabling logging
 * @apb_log_fifo: kernel FIFO to carry logged data
 * @arpc_urb: array of urbs for the ARPC in messages
 * @arpc_buffer: array of buffers for the @arpc_urb urbs
 * @arpc_endpoint_in: bulk in endpoint for APBridgeA RPC
 * @arpc_id_cycle: gives a unique id to each ARPC
 * @arpc_lock: locks ARPC list
 * @arpcs: list of in progress ARPCs
 */
struct es2_ap_dev {
	struct usb_device *usb_dev;
	struct usb_interface *usb_intf;
	struct gb_host_device *hd;

	struct es2_cport_in cport_in;
	__u8 cport_out_endpoint;
	struct urb *cport_out_urb[NUM_CPORT_OUT_URB];
	bool cport_out_urb_busy[NUM_CPORT_OUT_URB];
	bool cport_out_urb_cancelled[NUM_CPORT_OUT_URB];
	spinlock_t cport_out_urb_lock;

	bool cdsi1_in_use;

	struct task_struct *apb_log_task;
	struct dentry *apb_log_dentry;
	struct dentry *apb_log_enable_dentry;
	DECLARE_KFIFO(apb_log_fifo, char, APB1_LOG_SIZE);

	__u8 arpc_endpoint_in;
	struct urb *arpc_urb[NUM_ARPC_IN_URB];
	u8 *arpc_buffer[NUM_ARPC_IN_URB];

	int arpc_id_cycle;
	spinlock_t arpc_lock;
	struct list_head arpcs;
};

struct arpc {
	struct list_head list;
	struct arpc_request_message *req;
	struct arpc_response_message *resp;
	struct completion response_received;
	bool active;
};

static inline struct es2_ap_dev *hd_to_es2(struct gb_host_device *hd)
{
	return (struct es2_ap_dev *)&hd->hd_priv;
}

static void cport_out_callback(struct urb *urb);
static void usb_log_enable(struct es2_ap_dev *es2);
static void usb_log_disable(struct es2_ap_dev *es2);
static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
		     size_t size, int *result, unsigned int timeout);

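/*
 * Send a vendor-specific control request to the bridge synchronously.
 * The caller's buffer is duplicated first so that a DMA-able buffer is
 * handed to usb_control_msg().
 */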
static int output_sync(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
{
	struct usb_device *udev = es2->usb_dev;
	u8 *data;
	int retval;

	data = kmemdup(req, size, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 cmd,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE,
				 0, 0, data, size, ES2_USB_CTRL_TIMEOUT);
	if (retval < 0)
		dev_err(&udev->dev, "%s: return error %d\n", __func__, retval);
	else
		retval = 0;

	kfree(data);
	return retval;
}

static void ap_urb_complete(struct urb *urb)
{
	struct usb_ctrlrequest *dr = urb->context;

	kfree(dr);
	usb_free_urb(urb);
}

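/*
 * Asynchronous variant of output_sync(): the setup packet and payload are
 * allocated in a single GFP_ATOMIC allocation and sent as a control urb,
 * with ap_urb_complete() freeing both once the transfer finishes.
 */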
static int output_async(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
{
	struct usb_device *udev = es2->usb_dev;
	struct urb *urb;
	struct usb_ctrlrequest *dr;
	u8 *buf;
	int retval;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	dr = kmalloc(sizeof(*dr) + size, GFP_ATOMIC);
	if (!dr) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	buf = (u8 *)dr + sizeof(*dr);
	memcpy(buf, req, size);

	dr->bRequest = cmd;
	dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
	dr->wValue = 0;
	dr->wIndex = 0;
	dr->wLength = cpu_to_le16(size);

	usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0),
			     (unsigned char *)dr, buf, size,
			     ap_urb_complete, dr);
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval) {
		usb_free_urb(urb);
		kfree(dr);
	}
	return retval;
}

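/* Host-device .output callback: dispatch to the sync or async helper. */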
static int output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
		  bool async)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);

	if (async)
		return output_async(es2, req, size, cmd);

	return output_sync(es2, req, size, cmd);
}

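/*
 * Submit all pre-allocated CPort IN urbs so that incoming messages can be
 * received; on error, kill the urbs submitted so far.
 */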
static int es2_cport_in_enable(struct es2_ap_dev *es2,
			       struct es2_cport_in *cport_in)
{
	struct urb *urb;
	int ret;
	int i;

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		urb = cport_in->urb[i];

		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			dev_err(&es2->usb_dev->dev,
				"failed to submit in-urb: %d\n", ret);
			goto err_kill_urbs;
		}
	}

	return 0;

err_kill_urbs:
	for (--i; i >= 0; --i) {
		urb = cport_in->urb[i];
		usb_kill_urb(urb);
	}

	return ret;
}

static void es2_cport_in_disable(struct es2_ap_dev *es2,
				 struct es2_cport_in *cport_in)
{
	struct urb *urb;
	int i;

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		urb = cport_in->urb[i];
		usb_kill_urb(urb);
	}
}

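/* Submit the ARPC IN urbs, unwinding on the first submission failure. */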
static int es2_arpc_in_enable(struct es2_ap_dev *es2)
{
	struct urb *urb;
	int ret;
	int i;

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		urb = es2->arpc_urb[i];

		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			dev_err(&es2->usb_dev->dev,
				"failed to submit arpc in-urb: %d\n", ret);
			goto err_kill_urbs;
		}
	}

	return 0;

err_kill_urbs:
	for (--i; i >= 0; --i) {
		urb = es2->arpc_urb[i];
		usb_kill_urb(urb);
	}

	return ret;
}

static void es2_arpc_in_disable(struct es2_ap_dev *es2)
{
	struct urb *urb;
	int i;

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		urb = es2->arpc_urb[i];
		usb_kill_urb(urb);
	}
}

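/*
 * Find a free CPort OUT urb in the pre-allocated pool, or fall back to
 * allocating one dynamically if the pool is exhausted.
 */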
static struct urb *next_free_urb(struct es2_ap_dev *es2, gfp_t gfp_mask)
{
	struct urb *urb = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);

	/* Look in our pool of allocated urbs first, as that's the "fastest" */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (!es2->cport_out_urb_busy[i] &&
		    !es2->cport_out_urb_cancelled[i]) {
			es2->cport_out_urb_busy[i] = true;
			urb = es2->cport_out_urb[i];
			break;
		}
	}
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
	if (urb)
		return urb;

	/*
	 * Crap, pool is empty, complain to the syslog and go allocate one
	 * dynamically as we have to succeed.
	 */
	dev_dbg(&es2->usb_dev->dev,
		"No free CPort OUT urbs, having to dynamically allocate one!\n");
	return usb_alloc_urb(0, gfp_mask);
}

static void free_urb(struct es2_ap_dev *es2, struct urb *urb)
{
	unsigned long flags;
	int i;
	/*
	 * See if this was an urb in our pool, if so mark it "free", otherwise
	 * we need to free it ourselves.
	 */
	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (urb == es2->cport_out_urb[i]) {
			es2->cport_out_urb_busy[i] = false;
			urb = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/* If urb is not NULL, then we need to free this urb */
	usb_free_urb(urb);
}

/*
 * We (ab)use the operation-message header pad bytes to transfer the
 * cport id in order to minimise overhead.
 */
static void
gb_message_cport_pack(struct gb_operation_msg_hdr *header, u16 cport_id)
{
	header->pad[0] = cport_id;
}

/* Clear the pad bytes used for the CPort id */
static void gb_message_cport_clear(struct gb_operation_msg_hdr *header)
{
	header->pad[0] = 0;
}

/* Extract the CPort id packed into the header, and clear it */
static u16 gb_message_cport_unpack(struct gb_operation_msg_hdr *header)
{
	u16 cport_id = header->pad[0];

	gb_message_cport_clear(header);

	return cport_id;
}

/*
 * Returns zero if the message was successfully queued, or a negative errno
 * otherwise.
 */
static int message_send(struct gb_host_device *hd, u16 cport_id,
			struct gb_message *message, gfp_t gfp_mask)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	size_t buffer_size;
	int retval;
	struct urb *urb;
	unsigned long flags;

	/*
	 * The data actually transferred will include an indication
	 * of where the data should be sent.  Do one last check of
	 * the target CPort id before filling it in.
	 */
	if (!cport_id_valid(hd, cport_id)) {
		dev_err(&udev->dev, "invalid cport %u\n", cport_id);
		return -EINVAL;
	}

	/* Find a free urb */
	urb = next_free_urb(es2, gfp_mask);
	if (!urb)
		return -ENOMEM;

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	message->hcpriv = urb;
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/* Pack the cport id into the message header */
	gb_message_cport_pack(message->header, cport_id);

	buffer_size = sizeof(*message->header) + message->payload_size;

	usb_fill_bulk_urb(urb, udev,
			  usb_sndbulkpipe(udev,
					  es2->cport_out_endpoint),
			  message->buffer, buffer_size,
			  cport_out_callback, message);
	urb->transfer_flags |= URB_ZERO_PACKET;

	trace_gb_message_submit(message);

	retval = usb_submit_urb(urb, gfp_mask);
	if (retval) {
		dev_err(&udev->dev, "failed to submit out-urb: %d\n", retval);

		spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
		message->hcpriv = NULL;
		spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

		free_urb(es2, urb);
		gb_message_cport_clear(message->header);

		return retval;
	}

	return 0;
}

/*
 * Cannot be called in atomic context.
 */
static void message_cancel(struct gb_message *message)
{
	struct gb_host_device *hd = message->operation->connection->hd;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct urb *urb;
	int i;

	might_sleep();

	spin_lock_irq(&es2->cport_out_urb_lock);
	urb = message->hcpriv;

	/* Prevent dynamically allocated urb from being deallocated. */
	usb_get_urb(urb);

	/* Prevent pre-allocated urb from being reused. */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (urb == es2->cport_out_urb[i]) {
			es2->cport_out_urb_cancelled[i] = true;
			break;
		}
	}
	spin_unlock_irq(&es2->cport_out_urb_lock);

	usb_kill_urb(urb);

	if (i < NUM_CPORT_OUT_URB) {
		spin_lock_irq(&es2->cport_out_urb_lock);
		es2->cport_out_urb_cancelled[i] = false;
		spin_unlock_irq(&es2->cport_out_urb_lock);
	}

	usb_free_urb(urb);
}

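/*
 * Allocate a CPort id. The CDSI CPorts are reserved; offloaded CDSI1
 * connections get the fixed ES2_CPORT_CDSI1 id, everything else comes from
 * the host-device IDA (optionally pinned to the requested id).
 */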
static int es2_cport_allocate(struct gb_host_device *hd, int cport_id,
			      unsigned long flags)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct ida *id_map = &hd->cport_id_map;
	int ida_start, ida_end;

	switch (cport_id) {
	case ES2_CPORT_CDSI0:
	case ES2_CPORT_CDSI1:
		dev_err(&hd->dev, "cport %d not available\n", cport_id);
		return -EBUSY;
	}

	if (flags & GB_CONNECTION_FLAG_OFFLOADED &&
	    flags & GB_CONNECTION_FLAG_CDSI1) {
		if (es2->cdsi1_in_use) {
			dev_err(&hd->dev, "CDSI1 already in use\n");
			return -EBUSY;
		}

		es2->cdsi1_in_use = true;

		return ES2_CPORT_CDSI1;
	}

	if (cport_id < 0) {
		ida_start = 0;
		ida_end = hd->num_cports - 1;
	} else if (cport_id < hd->num_cports) {
		ida_start = cport_id;
		ida_end = cport_id;
	} else {
		dev_err(&hd->dev, "cport %d not available\n", cport_id);
		return -EINVAL;
	}

	return ida_alloc_range(id_map, ida_start, ida_end, GFP_KERNEL);
}

static void es2_cport_release(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);

	switch (cport_id) {
	case ES2_CPORT_CDSI1:
		es2->cdsi1_in_use = false;
		return;
	}

	ida_free(&hd->cport_id_map, cport_id);
}

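/*
 * Program the APBridgeA CPort flags (control, high-priority) for a CPort
 * via a vendor control request.
 */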
static int cport_enable(struct gb_host_device *hd, u16 cport_id,
			unsigned long flags)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	struct gb_apb_request_cport_flags *req;
	u32 connection_flags;
	int ret;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	connection_flags = 0;
	if (flags & GB_CONNECTION_FLAG_CONTROL)
		connection_flags |= GB_APB_CPORT_FLAG_CONTROL;
	if (flags & GB_CONNECTION_FLAG_HIGH_PRIO)
		connection_flags |= GB_APB_CPORT_FLAG_HIGH_PRIO;

	req->flags = cpu_to_le32(connection_flags);

	dev_dbg(&hd->dev, "%s - cport = %u, flags = %02x\n", __func__,
		cport_id, connection_flags);

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			      GB_APB_REQUEST_CPORT_FLAGS,
			      USB_DIR_OUT | USB_TYPE_VENDOR |
			      USB_RECIP_INTERFACE, cport_id, 0,
			      req, sizeof(*req), ES2_USB_CTRL_TIMEOUT);
	if (ret < 0) {
		dev_err(&udev->dev, "failed to set cport flags for port %d\n",
			cport_id);
		goto out;
	}

	ret = 0;
out:
	kfree(req);

	return ret;
}

static int es2_cport_connected(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_connected_req req;
	int ret;

	req.cport_id = cpu_to_le16(cport_id);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_CONNECTED, &req, sizeof(req),
			NULL, ES2_ARPC_CPORT_TIMEOUT);
	if (ret) {
		dev_err(dev, "failed to set connected state for cport %u: %d\n",
			cport_id, ret);
		return ret;
	}

	return 0;
}

static int es2_cport_flush(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_flush_req req;
	int ret;

	req.cport_id = cpu_to_le16(cport_id);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_FLUSH, &req, sizeof(req),
			NULL, ES2_ARPC_CPORT_TIMEOUT);
	if (ret) {
		dev_err(dev, "failed to flush cport %u: %d\n", cport_id, ret);
		return ret;
	}

	return 0;
}

static int es2_cport_shutdown(struct gb_host_device *hd, u16 cport_id,
			      u8 phase, unsigned int timeout)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_shutdown_req req;
	int result;
	int ret;

	if (timeout > U16_MAX)
		return -EINVAL;

	req.cport_id = cpu_to_le16(cport_id);
	req.timeout = cpu_to_le16(timeout);
	req.phase = phase;
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_SHUTDOWN, &req, sizeof(req),
			&result, ES2_ARPC_CPORT_TIMEOUT + timeout);
	if (ret) {
		dev_err(dev, "failed to send shutdown over cport %u: %d (%d)\n",
			cport_id, ret, result);
		return ret;
	}

	return 0;
}

static int es2_cport_quiesce(struct gb_host_device *hd, u16 cport_id,
			     size_t peer_space, unsigned int timeout)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_quiesce_req req;
	int result;
	int ret;

	if (peer_space > U16_MAX)
		return -EINVAL;

	if (timeout > U16_MAX)
		return -EINVAL;

	req.cport_id = cpu_to_le16(cport_id);
	req.peer_space = cpu_to_le16(peer_space);
	req.timeout = cpu_to_le16(timeout);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_QUIESCE, &req, sizeof(req),
			&result, ES2_ARPC_CPORT_TIMEOUT + timeout);
	if (ret) {
		dev_err(dev, "failed to quiesce cport %u: %d (%d)\n",
			cport_id, ret, result);
		return ret;
	}

	return 0;
}

static int es2_cport_clear(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_clear_req req;
	int ret;

	req.cport_id = cpu_to_le16(cport_id);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_CLEAR, &req, sizeof(req),
			NULL, ES2_ARPC_CPORT_TIMEOUT);
	if (ret) {
		dev_err(dev, "failed to clear cport %u: %d\n", cport_id, ret);
		return ret;
	}

	return 0;
}

static int latency_tag_enable(struct gb_host_device *hd, u16 cport_id)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_LATENCY_TAG_EN,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, cport_id, 0, NULL,
				 0, ES2_USB_CTRL_TIMEOUT);

	if (retval < 0)
		dev_err(&udev->dev, "Cannot enable latency tag for cport %d\n",
			cport_id);
	return retval;
}

static int latency_tag_disable(struct gb_host_device *hd, u16 cport_id)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_LATENCY_TAG_DIS,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, cport_id, 0, NULL,
				 0, ES2_USB_CTRL_TIMEOUT);

	if (retval < 0)
		dev_err(&udev->dev, "Cannot disable latency tag for cport %d\n",
			cport_id);
	return retval;
}

static struct gb_hd_driver es2_driver = {
	.hd_priv_size			= sizeof(struct es2_ap_dev),
	.message_send			= message_send,
	.message_cancel			= message_cancel,
	.cport_allocate			= es2_cport_allocate,
	.cport_release			= es2_cport_release,
	.cport_enable			= cport_enable,
	.cport_connected		= es2_cport_connected,
	.cport_flush			= es2_cport_flush,
	.cport_shutdown			= es2_cport_shutdown,
	.cport_quiesce			= es2_cport_quiesce,
	.cport_clear			= es2_cport_clear,
	.latency_tag_enable		= latency_tag_enable,
	.latency_tag_disable		= latency_tag_disable,
	.output				= output,
};

/* Common function to report consistent warnings based on URB status */
static int check_urb_status(struct urb *urb)
{
	struct device *dev = &urb->dev->dev;
	int status = urb->status;

	switch (status) {
	case 0:
		return 0;

	case -EOVERFLOW:
		dev_err(dev, "%s: overflow actual length is %d\n",
			__func__, urb->actual_length);
		fallthrough;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
	case -EILSEQ:
	case -EPROTO:
		/* device is gone, stop sending */
		return status;
	}
	dev_err(dev, "%s: unknown status %d\n", __func__, status);

	return -EAGAIN;
}

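/*
 * Release all resources owned by the host-device private data: log thread
 * and debugfs entries, urbs and buffers, the reserved CDSI CPorts and the
 * USB device reference.
 */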
static void es2_destroy(struct es2_ap_dev *es2)
{
	struct usb_device *udev;
	struct urb *urb;
	int i;

	debugfs_remove(es2->apb_log_enable_dentry);
	usb_log_disable(es2);

	/* Tear down everything! */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		urb = es2->cport_out_urb[i];
		usb_kill_urb(urb);
		usb_free_urb(urb);
		es2->cport_out_urb[i] = NULL;
		es2->cport_out_urb_busy[i] = false;	/* just to be anal */
	}

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		usb_free_urb(es2->arpc_urb[i]);
		kfree(es2->arpc_buffer[i]);
		es2->arpc_buffer[i] = NULL;
	}

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		usb_free_urb(es2->cport_in.urb[i]);
		kfree(es2->cport_in.buffer[i]);
		es2->cport_in.buffer[i] = NULL;
	}

	/* release reserved CDSI0 and CDSI1 cports */
	gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI1);
	gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI0);

	udev = es2->usb_dev;
	gb_hd_put(es2->hd);

	usb_put_dev(udev);
}

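/*
 * Completion handler for CPort IN urbs: hand received messages to greybus
 * core and resubmit the urb.
 */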
static void cport_in_callback(struct urb *urb)
{
	struct gb_host_device *hd = urb->context;
	struct device *dev = &urb->dev->dev;
	struct gb_operation_msg_hdr *header;
	int status = check_urb_status(urb);
	int retval;
	u16 cport_id;

	if (status) {
		if ((status == -EAGAIN) || (status == -EPROTO))
			goto exit;

		/* The urb is being unlinked */
		if (status == -ENOENT || status == -ESHUTDOWN)
			return;

		dev_err(dev, "urb cport in error %d (dropped)\n", status);
		return;
	}

	if (urb->actual_length < sizeof(*header)) {
		dev_err(dev, "short message received\n");
		goto exit;
	}

	/* Extract the CPort id, which is packed in the message header */
	header = urb->transfer_buffer;
	cport_id = gb_message_cport_unpack(header);

	if (cport_id_valid(hd, cport_id)) {
		greybus_data_rcvd(hd, cport_id, urb->transfer_buffer,
				  urb->actual_length);
	} else {
		dev_err(dev, "invalid cport id %u received\n", cport_id);
	}
exit:
	/* put our urb back in the request pool */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(dev, "failed to resubmit in-urb: %d\n", retval);
}

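/*
 * Completion handler for CPort OUT urbs: report the send status to greybus
 * core and return the urb to the pool.
 */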
static void cport_out_callback(struct urb *urb)
{
	struct gb_message *message = urb->context;
	struct gb_host_device *hd = message->operation->connection->hd;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	int status = check_urb_status(urb);
	unsigned long flags;

	gb_message_cport_clear(message->header);

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	message->hcpriv = NULL;
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/*
	 * Tell the submitter that the message send (attempt) is
	 * complete, and report the status.
	 */
	greybus_message_sent(hd, message, status);

	free_urb(es2, urb);
}

static struct arpc *arpc_alloc(void *payload, u16 size, u8 type)
{
	struct arpc *rpc;

	if (size + sizeof(*rpc->req) > ARPC_OUT_SIZE_MAX)
		return NULL;

	rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
	if (!rpc)
		return NULL;

	INIT_LIST_HEAD(&rpc->list);
	rpc->req = kzalloc(sizeof(*rpc->req) + size, GFP_KERNEL);
	if (!rpc->req)
		goto err_free_rpc;

	rpc->resp = kzalloc(sizeof(*rpc->resp), GFP_KERNEL);
	if (!rpc->resp)
		goto err_free_req;

	rpc->req->type = type;
	rpc->req->size = cpu_to_le16(sizeof(*rpc->req) + size);
	memcpy(rpc->req->data, payload, size);

	init_completion(&rpc->response_received);

	return rpc;

err_free_req:
	kfree(rpc->req);
err_free_rpc:
	kfree(rpc);

	return NULL;
}

static void arpc_free(struct arpc *rpc)
{
	kfree(rpc->req);
	kfree(rpc->resp);
	kfree(rpc);
}

static struct arpc *arpc_find(struct es2_ap_dev *es2, __le16 id)
{
	struct arpc *rpc;

	list_for_each_entry(rpc, &es2->arpcs, list) {
		if (rpc->req->id == id)
			return rpc;
	}

	return NULL;
}

static void arpc_add(struct es2_ap_dev *es2, struct arpc *rpc)
{
	rpc->active = true;
	rpc->req->id = cpu_to_le16(es2->arpc_id_cycle++);
	list_add_tail(&rpc->list, &es2->arpcs);
}

static void arpc_del(struct es2_ap_dev *es2, struct arpc *rpc)
{
	if (rpc->active) {
		rpc->active = false;
		list_del(&rpc->list);
	}
}

static int arpc_send(struct es2_ap_dev *es2, struct arpc *rpc, int timeout)
{
	struct usb_device *udev = es2->usb_dev;
	int retval;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_ARPC_RUN,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE,
				 0, 0,
				 rpc->req, le16_to_cpu(rpc->req->size),
				 ES2_USB_CTRL_TIMEOUT);
	if (retval < 0) {
		dev_err(&udev->dev,
			"failed to send ARPC request %d: %d\n",
			rpc->req->type, retval);
		return retval;
	}

	return 0;
}

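/*
 * Issue an APBridgeA RPC and wait for its response (or a timeout). The
 * remote result code, if any, is returned via @result and mapped to
 * -EREMOTEIO.
 */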
static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
		     size_t size, int *result, unsigned int timeout)
{
	struct arpc *rpc;
	unsigned long flags;
	int retval;

	if (result)
		*result = 0;

	rpc = arpc_alloc(payload, size, type);
	if (!rpc)
		return -ENOMEM;

	spin_lock_irqsave(&es2->arpc_lock, flags);
	arpc_add(es2, rpc);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);

	retval = arpc_send(es2, rpc, timeout);
	if (retval)
		goto out_arpc_del;

	retval = wait_for_completion_interruptible_timeout(
						&rpc->response_received,
						msecs_to_jiffies(timeout));
	if (retval <= 0) {
		if (!retval)
			retval = -ETIMEDOUT;
		goto out_arpc_del;
	}

	if (rpc->resp->result) {
		retval = -EREMOTEIO;
		if (result)
			*result = rpc->resp->result;
	} else {
		retval = 0;
	}

out_arpc_del:
	spin_lock_irqsave(&es2->arpc_lock, flags);
	arpc_del(es2, rpc);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);
	arpc_free(rpc);

	if (retval < 0 && retval != -EREMOTEIO) {
		dev_err(&es2->usb_dev->dev,
			"failed to execute ARPC: %d\n", retval);
	}

	return retval;
}

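/*
 * Completion handler for ARPC IN urbs: match the response to a pending
 * request by id, complete the waiter and resubmit the urb.
 */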
static void arpc_in_callback(struct urb *urb)
{
	struct es2_ap_dev *es2 = urb->context;
	struct device *dev = &urb->dev->dev;
	int status = check_urb_status(urb);
	struct arpc *rpc;
	struct arpc_response_message *resp;
	unsigned long flags;
	int retval;

	if (status) {
		if ((status == -EAGAIN) || (status == -EPROTO))
			goto exit;

		/* The urb is being unlinked */
		if (status == -ENOENT || status == -ESHUTDOWN)
			return;

		dev_err(dev, "arpc in-urb error %d (dropped)\n", status);
		return;
	}

	if (urb->actual_length < sizeof(*resp)) {
		dev_err(dev, "short arpc response received\n");
		goto exit;
	}

	resp = urb->transfer_buffer;
	spin_lock_irqsave(&es2->arpc_lock, flags);
	rpc = arpc_find(es2, resp->id);
	if (!rpc) {
		dev_err(dev, "invalid arpc response id received: %u\n",
			le16_to_cpu(resp->id));
		spin_unlock_irqrestore(&es2->arpc_lock, flags);
		goto exit;
	}

	arpc_del(es2, rpc);
	memcpy(rpc->resp, resp, sizeof(*resp));
	complete(&rpc->response_received);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);

exit:
	/* put our urb back in the request pool */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(dev, "failed to resubmit arpc in-urb: %d\n", retval);
}

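/*
 * APB log support: a kthread periodically drains the bridge log over a
 * vendor control request into a kfifo exposed through debugfs.
 */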
#define APB1_LOG_MSG_SIZE	64
static void apb_log_get(struct es2_ap_dev *es2, char *buf)
{
	int retval;

	do {
		retval = usb_control_msg(es2->usb_dev,
					 usb_rcvctrlpipe(es2->usb_dev, 0),
					 GB_APB_REQUEST_LOG,
					 USB_DIR_IN | USB_TYPE_VENDOR |
					 USB_RECIP_INTERFACE,
					 0x00, 0x00,
					 buf,
					 APB1_LOG_MSG_SIZE,
					 ES2_USB_CTRL_TIMEOUT);
		if (retval > 0)
			kfifo_in(&es2->apb_log_fifo, buf, retval);
	} while (retval > 0);
}

static int apb_log_poll(void *data)
{
	struct es2_ap_dev *es2 = data;
	char *buf;

	buf = kmalloc(APB1_LOG_MSG_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	while (!kthread_should_stop()) {
		msleep(1000);
		apb_log_get(es2, buf);
	}

	kfree(buf);

	return 0;
}

static ssize_t apb_log_read(struct file *f, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct es2_ap_dev *es2 = file_inode(f)->i_private;
	ssize_t ret;
	size_t copied;
	char *tmp_buf;

	if (count > APB1_LOG_SIZE)
		count = APB1_LOG_SIZE;

	tmp_buf = kmalloc(count, GFP_KERNEL);
	if (!tmp_buf)
		return -ENOMEM;

	copied = kfifo_out(&es2->apb_log_fifo, tmp_buf, count);
	ret = simple_read_from_buffer(buf, count, ppos, tmp_buf, copied);

	kfree(tmp_buf);

	return ret;
}

static const struct file_operations apb_log_fops = {
	.read	= apb_log_read,
};

static void usb_log_enable(struct es2_ap_dev *es2)
{
	if (!IS_ERR_OR_NULL(es2->apb_log_task))
		return;

	/* get log from APB1 */
	es2->apb_log_task = kthread_run(apb_log_poll, es2, "apb_log");
	if (IS_ERR(es2->apb_log_task))
		return;
	/* XXX We will need to rename this per APB */
	es2->apb_log_dentry = debugfs_create_file("apb_log", 0444,
						  gb_debugfs_get(), es2,
						  &apb_log_fops);
}

static void usb_log_disable(struct es2_ap_dev *es2)
{
	if (IS_ERR_OR_NULL(es2->apb_log_task))
		return;

	debugfs_remove(es2->apb_log_dentry);
	es2->apb_log_dentry = NULL;

	kthread_stop(es2->apb_log_task);
	es2->apb_log_task = NULL;
}

static ssize_t apb_log_enable_read(struct file *f, char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct es2_ap_dev *es2 = file_inode(f)->i_private;
	int enable = !IS_ERR_OR_NULL(es2->apb_log_task);
	char tmp_buf[3];

	sprintf(tmp_buf, "%d\n", enable);
	return simple_read_from_buffer(buf, count, ppos, tmp_buf, 2);
}

static ssize_t apb_log_enable_write(struct file *f, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	int enable;
	ssize_t retval;
	struct es2_ap_dev *es2 = file_inode(f)->i_private;

	retval = kstrtoint_from_user(buf, count, 10, &enable);
	if (retval)
		return retval;

	if (enable)
		usb_log_enable(es2);
	else
		usb_log_disable(es2);

	return count;
}

static const struct file_operations apb_log_enable_fops = {
	.read	= apb_log_enable_read,
	.write	= apb_log_enable_write,
};

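/*
 * Query how many CPorts the bridge supports; the count is capped at U8_MAX
 * as the CPort id is carried in a single header pad byte.
 */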
static int apb_get_cport_count(struct usb_device *udev)
{
	int retval;
	__le16 *cport_count;

	cport_count = kzalloc(sizeof(*cport_count), GFP_KERNEL);
	if (!cport_count)
		return -ENOMEM;

	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				 GB_APB_REQUEST_CPORT_COUNT,
				 USB_DIR_IN | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, 0, 0, cport_count,
				 sizeof(*cport_count), ES2_USB_CTRL_TIMEOUT);
	if (retval != sizeof(*cport_count)) {
		dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
			retval);

		if (retval >= 0)
			retval = -EIO;

		goto out;
	}

	retval = le16_to_cpu(*cport_count);

	/* We need to fit a CPort ID in one byte of a message header */
	if (retval > U8_MAX) {
		retval = U8_MAX;
		dev_warn(&udev->dev, "Limiting number of CPorts to U8_MAX\n");
	}

out:
	kfree(cport_count);
	return retval;
}

/*
 * The ES2 USB Bridge device has 15 endpoints
 * 1 Control - usual USB stuff + AP -> APBridgeA messages
 * 7 Bulk IN - CPort data in
 * 7 Bulk OUT - CPort data out
 */
static int ap_probe(struct usb_interface *interface,
		    const struct usb_device_id *id)
{
	struct es2_ap_dev *es2;
	struct gb_host_device *hd;
	struct usb_device *udev;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	__u8 ep_addr;
	int retval;
	int i;
	int num_cports;
	bool bulk_out_found = false;
	bool bulk_in_found = false;
	bool arpc_in_found = false;

	udev = usb_get_dev(interface_to_usbdev(interface));

	num_cports = apb_get_cport_count(udev);
	if (num_cports < 0) {
		usb_put_dev(udev);
		dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
			num_cports);
		return num_cports;
	}

	hd = gb_hd_create(&es2_driver, &udev->dev, ES2_GBUF_MSG_SIZE_MAX,
			  num_cports);
	if (IS_ERR(hd)) {
		usb_put_dev(udev);
		return PTR_ERR(hd);
	}

	es2 = hd_to_es2(hd);
	es2->hd = hd;
	es2->usb_intf = interface;
	es2->usb_dev = udev;
	spin_lock_init(&es2->cport_out_urb_lock);
	INIT_KFIFO(es2->apb_log_fifo);
	usb_set_intfdata(interface, es2);

	/*
	 * Reserve the CDSI0 and CDSI1 CPorts so they won't be allocated
	 * dynamically.
	 */
	retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI0);
	if (retval)
		goto error;
	retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI1);
	if (retval)
		goto error;

	/* find all bulk endpoints */
	iface_desc = interface->cur_altsetting;
	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;
		ep_addr = endpoint->bEndpointAddress;

		if (usb_endpoint_is_bulk_in(endpoint)) {
			if (!bulk_in_found) {
				es2->cport_in.endpoint = ep_addr;
				bulk_in_found = true;
			} else if (!arpc_in_found) {
				es2->arpc_endpoint_in = ep_addr;
				arpc_in_found = true;
			} else {
				dev_warn(&udev->dev,
					 "Unused bulk IN endpoint found: 0x%02x\n",
					 ep_addr);
			}
			continue;
		}
		if (usb_endpoint_is_bulk_out(endpoint)) {
			if (!bulk_out_found) {
				es2->cport_out_endpoint = ep_addr;
				bulk_out_found = true;
			} else {
				dev_warn(&udev->dev,
					 "Unused bulk OUT endpoint found: 0x%02x\n",
					 ep_addr);
			}
			continue;
		}
		dev_warn(&udev->dev,
			 "Unknown endpoint type found, address 0x%02x\n",
			 ep_addr);
	}
	if (!bulk_in_found || !arpc_in_found || !bulk_out_found) {
		dev_err(&udev->dev, "Not enough endpoints found in device, aborting!\n");
		retval = -ENODEV;
		goto error;
	}

	/* Allocate buffers for our cport in messages */
	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		struct urb *urb;
		u8 *buffer;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			retval = -ENOMEM;
			goto error;
		}
		es2->cport_in.urb[i] = urb;

		buffer = kmalloc(ES2_GBUF_MSG_SIZE_MAX, GFP_KERNEL);
		if (!buffer) {
			retval = -ENOMEM;
			goto error;
		}

		usb_fill_bulk_urb(urb, udev,
				  usb_rcvbulkpipe(udev, es2->cport_in.endpoint),
				  buffer, ES2_GBUF_MSG_SIZE_MAX,
				  cport_in_callback, hd);

		es2->cport_in.buffer[i] = buffer;
	}

	/* Allocate buffers for ARPC in messages */
	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		struct urb *urb;
		u8 *buffer;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			retval = -ENOMEM;
			goto error;
		}
		es2->arpc_urb[i] = urb;

		buffer = kmalloc(ARPC_IN_SIZE_MAX, GFP_KERNEL);
		if (!buffer) {
			retval = -ENOMEM;
			goto error;
		}

		usb_fill_bulk_urb(urb, udev,
				  usb_rcvbulkpipe(udev,
						  es2->arpc_endpoint_in),
				  buffer, ARPC_IN_SIZE_MAX,
				  arpc_in_callback, es2);

		es2->arpc_buffer[i] = buffer;
	}

	/* Allocate urbs for our CPort OUT messages */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		struct urb *urb;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			retval = -ENOMEM;
			goto error;
		}

		es2->cport_out_urb[i] = urb;
		es2->cport_out_urb_busy[i] = false;	/* just to be anal */
	}

	/* XXX We will need to rename this per APB */
	es2->apb_log_enable_dentry = debugfs_create_file("apb_log_enable",
							 0644,
							 gb_debugfs_get(), es2,
							 &apb_log_enable_fops);

	INIT_LIST_HEAD(&es2->arpcs);
	spin_lock_init(&es2->arpc_lock);

	retval = es2_arpc_in_enable(es2);
	if (retval)
		goto error;

	retval = gb_hd_add(hd);
	if (retval)
		goto err_disable_arpc_in;

	retval = es2_cport_in_enable(es2, &es2->cport_in);
	if (retval)
		goto err_hd_del;

	return 0;

err_hd_del:
	gb_hd_del(hd);
err_disable_arpc_in:
	es2_arpc_in_disable(es2);
error:
	es2_destroy(es2);

	return retval;
}

static void ap_disconnect(struct usb_interface *interface)
{
	struct es2_ap_dev *es2 = usb_get_intfdata(interface);

	gb_hd_del(es2->hd);

	es2_cport_in_disable(es2, &es2->cport_in);
	es2_arpc_in_disable(es2);

	es2_destroy(es2);
}

static struct usb_driver es2_ap_driver = {
	.name =		"es2_ap_driver",
	.probe =	ap_probe,
	.disconnect =	ap_disconnect,
	.id_table =	id_table,
	.soft_unbind =	1,
};

module_usb_driver(es2_ap_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");