1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2016-2017, Linaro Ltd
4 */
5
6#include <linux/idr.h>
7#include <linux/interrupt.h>
8#include <linux/io.h>
9#include <linux/list.h>
10#include <linux/mfd/syscon.h>
11#include <linux/module.h>
12#include <linux/of.h>
13#include <linux/of_address.h>
14#include <linux/platform_device.h>
15#include <linux/regmap.h>
16#include <linux/rpmsg.h>
17#include <linux/sizes.h>
18#include <linux/slab.h>
19#include <linux/wait.h>
20#include <linux/workqueue.h>
21#include <linux/mailbox_client.h>
22
23#include "rpmsg_internal.h"
24#include "qcom_glink_native.h"
25
26#define GLINK_NAME_SIZE		32
27#define GLINK_VERSION_1		1
28
29#define RPM_GLINK_CID_MIN	1
30#define RPM_GLINK_CID_MAX	65536
31
32struct glink_msg {
33	__le16 cmd;
34	__le16 param1;
35	__le32 param2;
36	u8 data[];
37} __packed;
38
39/**
40 * struct glink_defer_cmd - deferred incoming control message
41 * @node:	list node
42 * @msg:	message header
43 * @data:	payload of the message
44 *
45 * Copy of a received control message, to be added to @rx_queue and processed
46 * by @rx_work of @qcom_glink.
47 */
48struct glink_defer_cmd {
49	struct list_head node;
50
51	struct glink_msg msg;
52	u8 data[];
53};
54
/**
 * struct glink_core_rx_intent - RX intent
 * @data: pointer to the data (may be NULL for zero-copy)
 * @id: remote or local intent ID
 * @size: size of the original intent (do not modify)
 * @reuse: To mark if the intent can be reused after first use
 * @in_use: To mark if intent is already in use for the channel
 * @offset: next write offset (initially 0)
 * @node:	list node
 */
67struct glink_core_rx_intent {
68	void *data;
69	u32 id;
70	size_t size;
71	bool reuse;
72	bool in_use;
73	u32 offset;
74
75	struct list_head node;
76};
77
78/**
79 * struct qcom_glink - driver context, relates to one remote subsystem
80 * @dev:	reference to the associated struct device
81 * @rx_pipe:	pipe object for receive FIFO
82 * @tx_pipe:	pipe object for transmit FIFO
83 * @rx_work:	worker for handling received control messages
84 * @rx_lock:	protects the @rx_queue
85 * @rx_queue:	queue of received control messages to be processed in @rx_work
86 * @tx_lock:	synchronizes operations on the tx fifo
87 * @idr_lock:	synchronizes @lcids and @rcids modifications
88 * @lcids:	idr of all channels with a known local channel id
89 * @rcids:	idr of all channels with a known remote channel id
90 * @features:	remote features
 * @intentless:	flag to indicate that the transport does not use rx intents
 * @tx_avail_notify: waitqueue for senders waiting for tx FIFO space
 * @sent_read_notify: flag indicating that a READ_NOTIF request has been sent
94 * @abort_tx:	flag indicating that all tx attempts should fail
95 */
96struct qcom_glink {
97	struct device *dev;
98
99	struct qcom_glink_pipe *rx_pipe;
100	struct qcom_glink_pipe *tx_pipe;
101
102	struct work_struct rx_work;
103	spinlock_t rx_lock;
104	struct list_head rx_queue;
105
106	spinlock_t tx_lock;
107
108	spinlock_t idr_lock;
109	struct idr lcids;
110	struct idr rcids;
111	unsigned long features;
112
113	bool intentless;
114	wait_queue_head_t tx_avail_notify;
115	bool sent_read_notify;
116
117	bool abort_tx;
118};
119
120enum {
121	GLINK_STATE_CLOSED,
122	GLINK_STATE_OPENING,
123	GLINK_STATE_OPEN,
124	GLINK_STATE_CLOSING,
125};
126
127/**
128 * struct glink_channel - internal representation of a channel
129 * @rpdev:	rpdev reference, only used for primary endpoints
130 * @ept:	rpmsg endpoint this channel is associated with
131 * @glink:	qcom_glink context handle
132 * @refcount:	refcount for the channel object
133 * @recv_lock:	guard for @ept.cb
134 * @name:	unique channel name/identifier
135 * @lcid:	channel id, in local space
136 * @rcid:	channel id, in remote space
137 * @intent_lock: lock for protection of @liids, @riids
138 * @liids:	idr of all local intents
139 * @riids:	idr of all remote intents
140 * @intent_work: worker responsible for transmitting rx_done packets
 * @done_intents: list of intents that need to be announced as rx_done
142 * @buf:	receive buffer, for gathering fragments
143 * @buf_offset:	write offset in @buf
144 * @buf_size:	size of current @buf
145 * @open_ack:	completed once remote has acked the open-request
146 * @open_req:	completed once open-request has been received
147 * @intent_req_lock: Synchronises multiple intent requests
148 * @intent_req_result: Result of intent request
149 * @intent_received: flag indicating that an intent has been received
150 * @intent_req_wq: wait queue for intent_req signalling
151 */
152struct glink_channel {
153	struct rpmsg_endpoint ept;
154
155	struct rpmsg_device *rpdev;
156	struct qcom_glink *glink;
157
158	struct kref refcount;
159
160	spinlock_t recv_lock;
161
162	char *name;
163	unsigned int lcid;
164	unsigned int rcid;
165
166	spinlock_t intent_lock;
167	struct idr liids;
168	struct idr riids;
169	struct work_struct intent_work;
170	struct list_head done_intents;
171
172	struct glink_core_rx_intent *buf;
173	int buf_offset;
174	int buf_size;
175
176	struct completion open_ack;
177	struct completion open_req;
178
179	struct mutex intent_req_lock;
180	int intent_req_result;
181	bool intent_received;
182	wait_queue_head_t intent_req_wq;
183};
184
185#define to_glink_channel(_ept) container_of(_ept, struct glink_channel, ept)
186
187static const struct rpmsg_endpoint_ops glink_endpoint_ops;
188
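/* G-Link command ids, carried in the cmd field of struct glink_msg */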
189#define GLINK_CMD_VERSION		0
190#define GLINK_CMD_VERSION_ACK		1
191#define GLINK_CMD_OPEN			2
192#define GLINK_CMD_CLOSE			3
193#define GLINK_CMD_OPEN_ACK		4
194#define GLINK_CMD_INTENT		5
195#define GLINK_CMD_RX_DONE		6
196#define GLINK_CMD_RX_INTENT_REQ		7
197#define GLINK_CMD_RX_INTENT_REQ_ACK	8
198#define GLINK_CMD_TX_DATA		9
199#define GLINK_CMD_CLOSE_ACK		11
200#define GLINK_CMD_TX_DATA_CONT		12
201#define GLINK_CMD_READ_NOTIF		13
202#define GLINK_CMD_RX_DONE_W_REUSE	14
203#define GLINK_CMD_SIGNALS		15
204
205#define GLINK_FEATURE_INTENTLESS	BIT(1)
206
207#define NATIVE_DTR_SIG			NATIVE_DSR_SIG
208#define NATIVE_DSR_SIG			BIT(31)
209#define NATIVE_RTS_SIG			NATIVE_CTS_SIG
210#define NATIVE_CTS_SIG			BIT(30)
211
212static void qcom_glink_rx_done_work(struct work_struct *work);
213
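/**
 * qcom_glink_alloc_channel() - allocate and initialize a glink_channel
 * @glink:	transport the channel is created on
 * @name:	name of the channel
 *
 * The channel is returned with a single reference, held by the caller; it is
 * not yet announced to the remote and holds no local or remote channel ids.
 *
 * Return: channel pointer on success, ERR_PTR(-ENOMEM) on allocation failure.
 */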
214static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink,
215						      const char *name)
216{
217	struct glink_channel *channel;
218
219	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
220	if (!channel)
221		return ERR_PTR(-ENOMEM);
222
	/* Set up the channel's internal state */
224	spin_lock_init(&channel->recv_lock);
225	spin_lock_init(&channel->intent_lock);
226	mutex_init(&channel->intent_req_lock);
227
228	channel->glink = glink;
229	channel->name = kstrdup(name, GFP_KERNEL);
230	if (!channel->name) {
231		kfree(channel);
232		return ERR_PTR(-ENOMEM);
233	}
234
235	init_completion(&channel->open_req);
236	init_completion(&channel->open_ack);
237	init_waitqueue_head(&channel->intent_req_wq);
238
239	INIT_LIST_HEAD(&channel->done_intents);
240	INIT_WORK(&channel->intent_work, qcom_glink_rx_done_work);
241
242	idr_init(&channel->liids);
243	idr_init(&channel->riids);
244	kref_init(&channel->refcount);
245
246	return channel;
247}
248
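/*
 * kref release callback for a channel; runs once the last reference has been
 * dropped. Makes sure the rx_done worker has finished, then frees any intents
 * still tracked by the channel, the channel name and the channel itself.
 */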
249static void qcom_glink_channel_release(struct kref *ref)
250{
251	struct glink_channel *channel = container_of(ref, struct glink_channel,
252						     refcount);
253	struct glink_core_rx_intent *intent;
254	struct glink_core_rx_intent *tmp;
255	unsigned long flags;
256	int iid;
257
258	/* cancel pending rx_done work */
259	cancel_work_sync(&channel->intent_work);
260
261	spin_lock_irqsave(&channel->intent_lock, flags);
262	/* Free all non-reuse intents pending rx_done work */
263	list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) {
264		if (!intent->reuse) {
265			kfree(intent->data);
266			kfree(intent);
267		}
268	}
269
270	idr_for_each_entry(&channel->liids, tmp, iid) {
271		kfree(tmp->data);
272		kfree(tmp);
273	}
274	idr_destroy(&channel->liids);
275
276	idr_for_each_entry(&channel->riids, tmp, iid)
277		kfree(tmp);
278	idr_destroy(&channel->riids);
279	spin_unlock_irqrestore(&channel->intent_lock, flags);
280
281	kfree(channel->name);
282	kfree(channel);
283}
284
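/* Thin wrappers around the transport specific FIFO pipe operations */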
285static size_t qcom_glink_rx_avail(struct qcom_glink *glink)
286{
287	return glink->rx_pipe->avail(glink->rx_pipe);
288}
289
290static void qcom_glink_rx_peek(struct qcom_glink *glink,
291			       void *data, unsigned int offset, size_t count)
292{
293	glink->rx_pipe->peek(glink->rx_pipe, data, offset, count);
294}
295
296static void qcom_glink_rx_advance(struct qcom_glink *glink, size_t count)
297{
298	glink->rx_pipe->advance(glink->rx_pipe, count);
299}
300
301static size_t qcom_glink_tx_avail(struct qcom_glink *glink)
302{
303	return glink->tx_pipe->avail(glink->tx_pipe);
304}
305
306static void qcom_glink_tx_write(struct qcom_glink *glink,
307				const void *hdr, size_t hlen,
308				const void *data, size_t dlen)
309{
310	glink->tx_pipe->write(glink->tx_pipe, hdr, hlen, data, dlen);
311}
312
313static void qcom_glink_tx_kick(struct qcom_glink *glink)
314{
315	glink->tx_pipe->kick(glink->tx_pipe);
316}
317
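/*
 * Ask the remote to signal us once it has consumed data from the tx FIFO, so
 * that writers blocked on a full FIFO in qcom_glink_tx() can make progress.
 */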
318static void qcom_glink_send_read_notify(struct qcom_glink *glink)
319{
320	struct glink_msg msg;
321
322	msg.cmd = cpu_to_le16(GLINK_CMD_READ_NOTIF);
323	msg.param1 = 0;
324	msg.param2 = 0;
325
326	qcom_glink_tx_write(glink, &msg, sizeof(msg), NULL, 0);
327
328	qcom_glink_tx_kick(glink);
329}
330
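/**
 * qcom_glink_tx() - write a command and optional payload to the tx FIFO
 * @glink:	the transport to transmit on
 * @hdr:	command header
 * @hlen:	length of @hdr
 * @data:	optional payload
 * @dlen:	length of @data
 * @wait:	sleep until FIFO space is available instead of failing
 *
 * Return: 0 on success, -EINVAL if the message is larger than the FIFO,
 * -EIO if tx has been aborted, or -EAGAIN if @wait is false and there is
 * not enough space available.
 */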
331static int qcom_glink_tx(struct qcom_glink *glink,
332			 const void *hdr, size_t hlen,
333			 const void *data, size_t dlen, bool wait)
334{
335	unsigned int tlen = hlen + dlen;
336	unsigned long flags;
337	int ret = 0;
338
339	/* Reject packets that are too big */
340	if (tlen >= glink->tx_pipe->length)
341		return -EINVAL;
342
343	spin_lock_irqsave(&glink->tx_lock, flags);
344
345	if (glink->abort_tx) {
346		ret = -EIO;
347		goto out;
348	}
349
350	while (qcom_glink_tx_avail(glink) < tlen) {
351		if (!wait) {
352			ret = -EAGAIN;
353			goto out;
354		}
355
356		if (glink->abort_tx) {
357			ret = -EIO;
358			goto out;
359		}
360
361		if (!glink->sent_read_notify) {
362			glink->sent_read_notify = true;
363			qcom_glink_send_read_notify(glink);
364		}
365
366		/* Wait without holding the tx_lock */
367		spin_unlock_irqrestore(&glink->tx_lock, flags);
368
369		wait_event_timeout(glink->tx_avail_notify,
370				   qcom_glink_tx_avail(glink) >= tlen, 10 * HZ);
371
372		spin_lock_irqsave(&glink->tx_lock, flags);
373
374		if (qcom_glink_tx_avail(glink) >= tlen)
375			glink->sent_read_notify = false;
376	}
377
378	qcom_glink_tx_write(glink, hdr, hlen, data, dlen);
379	qcom_glink_tx_kick(glink);
380
381out:
382	spin_unlock_irqrestore(&glink->tx_lock, flags);
383
384	return ret;
385}
386
387static int qcom_glink_send_version(struct qcom_glink *glink)
388{
389	struct glink_msg msg;
390
391	msg.cmd = cpu_to_le16(GLINK_CMD_VERSION);
392	msg.param1 = cpu_to_le16(GLINK_VERSION_1);
393	msg.param2 = cpu_to_le32(glink->features);
394
395	return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
396}
397
398static void qcom_glink_send_version_ack(struct qcom_glink *glink)
399{
400	struct glink_msg msg;
401
402	msg.cmd = cpu_to_le16(GLINK_CMD_VERSION_ACK);
403	msg.param1 = cpu_to_le16(GLINK_VERSION_1);
404	msg.param2 = cpu_to_le32(glink->features);
405
406	qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
407}
408
409static void qcom_glink_send_open_ack(struct qcom_glink *glink,
410				     struct glink_channel *channel)
411{
412	struct glink_msg msg;
413
414	msg.cmd = cpu_to_le16(GLINK_CMD_OPEN_ACK);
415	msg.param1 = cpu_to_le16(channel->rcid);
416	msg.param2 = cpu_to_le32(0);
417
418	qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
419}
420
421static void qcom_glink_handle_intent_req_ack(struct qcom_glink *glink,
422					     unsigned int cid, bool granted)
423{
424	struct glink_channel *channel;
425	unsigned long flags;
426
427	spin_lock_irqsave(&glink->idr_lock, flags);
428	channel = idr_find(&glink->rcids, cid);
429	spin_unlock_irqrestore(&glink->idr_lock, flags);
430	if (!channel) {
431		dev_err(glink->dev, "unable to find channel\n");
432		return;
433	}
434
435	WRITE_ONCE(channel->intent_req_result, granted);
436	wake_up_all(&channel->intent_req_wq);
437}
438
439static void qcom_glink_intent_req_abort(struct glink_channel *channel)
440{
441	WRITE_ONCE(channel->intent_req_result, 0);
442	wake_up_all(&channel->intent_req_wq);
443}
444
445/**
446 * qcom_glink_send_open_req() - send a GLINK_CMD_OPEN request to the remote
447 * @glink: Ptr to the glink edge
 * @channel: Ptr to the channel for which the open request is sent
449 *
450 * Allocates a local channel id and sends a GLINK_CMD_OPEN message to the remote.
451 * Will return with refcount held, regardless of outcome.
452 *
453 * Return: 0 on success, negative errno otherwise.
454 */
455static int qcom_glink_send_open_req(struct qcom_glink *glink,
456				    struct glink_channel *channel)
457{
458	struct {
459		struct glink_msg msg;
460		u8 name[GLINK_NAME_SIZE];
461	} __packed req;
462	int name_len = strlen(channel->name) + 1;
463	int req_len = ALIGN(sizeof(req.msg) + name_len, 8);
464	int ret;
465	unsigned long flags;
466
467	kref_get(&channel->refcount);
468
469	spin_lock_irqsave(&glink->idr_lock, flags);
470	ret = idr_alloc_cyclic(&glink->lcids, channel,
471			       RPM_GLINK_CID_MIN, RPM_GLINK_CID_MAX,
472			       GFP_ATOMIC);
473	spin_unlock_irqrestore(&glink->idr_lock, flags);
474	if (ret < 0)
475		return ret;
476
477	channel->lcid = ret;
478
479	req.msg.cmd = cpu_to_le16(GLINK_CMD_OPEN);
480	req.msg.param1 = cpu_to_le16(channel->lcid);
481	req.msg.param2 = cpu_to_le32(name_len);
482	strcpy(req.name, channel->name);
483
484	ret = qcom_glink_tx(glink, &req, req_len, NULL, 0, true);
485	if (ret)
486		goto remove_idr;
487
488	return 0;
489
490remove_idr:
491	spin_lock_irqsave(&glink->idr_lock, flags);
492	idr_remove(&glink->lcids, channel->lcid);
493	channel->lcid = 0;
494	spin_unlock_irqrestore(&glink->idr_lock, flags);
495
496	return ret;
497}
498
499static void qcom_glink_send_close_req(struct qcom_glink *glink,
500				      struct glink_channel *channel)
501{
502	struct glink_msg req;
503
504	req.cmd = cpu_to_le16(GLINK_CMD_CLOSE);
505	req.param1 = cpu_to_le16(channel->lcid);
506	req.param2 = 0;
507
508	qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
509}
510
511static void qcom_glink_send_close_ack(struct qcom_glink *glink,
512				      unsigned int rcid)
513{
514	struct glink_msg req;
515
516	req.cmd = cpu_to_le16(GLINK_CMD_CLOSE_ACK);
517	req.param1 = cpu_to_le16(rcid);
518	req.param2 = 0;
519
520	qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
521}
522
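/*
 * Worker draining @done_intents; sends an RX_DONE or RX_DONE_W_REUSE command
 * to the remote for each completed intent and frees non-reusable intents.
 */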
523static void qcom_glink_rx_done_work(struct work_struct *work)
524{
525	struct glink_channel *channel = container_of(work, struct glink_channel,
526						     intent_work);
527	struct qcom_glink *glink = channel->glink;
528	struct glink_core_rx_intent *intent, *tmp;
529	struct {
530		u16 id;
531		u16 lcid;
532		u32 liid;
533	} __packed cmd;
534
535	unsigned int cid = channel->lcid;
536	unsigned int iid;
537	bool reuse;
538	unsigned long flags;
539
540	spin_lock_irqsave(&channel->intent_lock, flags);
541	list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) {
542		list_del(&intent->node);
543		spin_unlock_irqrestore(&channel->intent_lock, flags);
544		iid = intent->id;
545		reuse = intent->reuse;
546
547		cmd.id = reuse ? GLINK_CMD_RX_DONE_W_REUSE : GLINK_CMD_RX_DONE;
548		cmd.lcid = cid;
549		cmd.liid = iid;
550
551		qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
552		if (!reuse) {
553			kfree(intent->data);
554			kfree(intent);
555		}
556		spin_lock_irqsave(&channel->intent_lock, flags);
557	}
558	spin_unlock_irqrestore(&channel->intent_lock, flags);
559}
560
561static void qcom_glink_rx_done(struct qcom_glink *glink,
562			       struct glink_channel *channel,
563			       struct glink_core_rx_intent *intent)
564{
565	/* We don't send RX_DONE to intentless systems */
566	if (glink->intentless) {
567		kfree(intent->data);
568		kfree(intent);
569		return;
570	}
571
572	/* Take it off the tree of receive intents */
573	if (!intent->reuse) {
574		spin_lock(&channel->intent_lock);
575		idr_remove(&channel->liids, intent->id);
576		spin_unlock(&channel->intent_lock);
577	}
578
579	/* Schedule the sending of a rx_done indication */
580	spin_lock(&channel->intent_lock);
581	list_add_tail(&intent->node, &channel->done_intents);
582	spin_unlock(&channel->intent_lock);
583
584	schedule_work(&channel->intent_work);
585}
586
587/**
588 * qcom_glink_receive_version() - receive version/features from remote system
589 *
590 * @glink:	pointer to transport interface
591 * @version:	remote version
592 * @features:	remote features
593 *
594 * This function is called in response to a remote-initiated version/feature
595 * negotiation sequence.
596 */
597static void qcom_glink_receive_version(struct qcom_glink *glink,
598				       u32 version,
599				       u32 features)
600{
601	switch (version) {
602	case 0:
603		break;
604	case GLINK_VERSION_1:
605		glink->features &= features;
606		fallthrough;
607	default:
608		qcom_glink_send_version_ack(glink);
609		break;
610	}
611}
612
613/**
614 * qcom_glink_receive_version_ack() - receive negotiation ack from remote system
615 *
616 * @glink:	pointer to transport interface
617 * @version:	remote version response
618 * @features:	remote features response
619 *
620 * This function is called in response to a local-initiated version/feature
621 * negotiation sequence and is the counter-offer from the remote side based
622 * upon the initial version and feature set requested.
623 */
624static void qcom_glink_receive_version_ack(struct qcom_glink *glink,
625					   u32 version,
626					   u32 features)
627{
628	switch (version) {
629	case 0:
630		/* Version negotiation failed */
631		break;
632	case GLINK_VERSION_1:
633		if (features == glink->features)
634			break;
635
636		glink->features &= features;
637		fallthrough;
638	default:
639		qcom_glink_send_version(glink);
640		break;
641	}
642}
643
644/**
645 * qcom_glink_send_intent_req_ack() - convert an rx intent request ack cmd to
646 * 	wire format and transmit
647 * @glink:	The transport to transmit on.
648 * @channel:	The glink channel
649 * @granted:	The request response to encode.
650 *
651 * Return: 0 on success or standard Linux error code.
652 */
653static int qcom_glink_send_intent_req_ack(struct qcom_glink *glink,
654					  struct glink_channel *channel,
655					  bool granted)
656{
657	struct glink_msg msg;
658
659	msg.cmd = cpu_to_le16(GLINK_CMD_RX_INTENT_REQ_ACK);
660	msg.param1 = cpu_to_le16(channel->lcid);
661	msg.param2 = cpu_to_le32(granted);
662
663	qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
664
665	return 0;
666}
667
668/**
669 * qcom_glink_advertise_intent - convert an rx intent cmd to wire format and
670 *			   transmit
671 * @glink:	The transport to transmit on.
672 * @channel:	The local channel
673 * @intent:	The intent to pass on to remote.
674 *
675 * Return: 0 on success or standard Linux error code.
676 */
677static int qcom_glink_advertise_intent(struct qcom_glink *glink,
678				       struct glink_channel *channel,
679				       struct glink_core_rx_intent *intent)
680{
681	struct command {
682		__le16 id;
683		__le16 lcid;
684		__le32 count;
685		__le32 size;
686		__le32 liid;
687	} __packed;
688	struct command cmd;
689
690	cmd.id = cpu_to_le16(GLINK_CMD_INTENT);
691	cmd.lcid = cpu_to_le16(channel->lcid);
692	cmd.count = cpu_to_le32(1);
693	cmd.size = cpu_to_le32(intent->size);
694	cmd.liid = cpu_to_le32(intent->id);
695
696	qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
697
698	return 0;
699}
700
701static struct glink_core_rx_intent *
702qcom_glink_alloc_intent(struct qcom_glink *glink,
703			struct glink_channel *channel,
704			size_t size,
705			bool reuseable)
706{
707	struct glink_core_rx_intent *intent;
708	int ret;
709	unsigned long flags;
710
711	intent = kzalloc(sizeof(*intent), GFP_KERNEL);
712	if (!intent)
713		return NULL;
714
715	intent->data = kzalloc(size, GFP_KERNEL);
716	if (!intent->data)
717		goto free_intent;
718
719	spin_lock_irqsave(&channel->intent_lock, flags);
720	ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC);
721	if (ret < 0) {
722		spin_unlock_irqrestore(&channel->intent_lock, flags);
723		goto free_data;
724	}
725	spin_unlock_irqrestore(&channel->intent_lock, flags);
726
727	intent->id = ret;
728	intent->size = size;
729	intent->reuse = reuseable;
730
731	return intent;
732
733free_data:
734	kfree(intent->data);
735free_intent:
736	kfree(intent);
737	return NULL;
738}
739
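/*
 * Handle an RX_DONE or RX_DONE_W_REUSE command: the remote has finished with
 * the rx intent identified by @iid, so mark it available for reuse or remove
 * and free it.
 */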
740static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
741				      u32 cid, uint32_t iid,
742				      bool reuse)
743{
744	struct glink_core_rx_intent *intent;
745	struct glink_channel *channel;
746	unsigned long flags;
747
748	spin_lock_irqsave(&glink->idr_lock, flags);
749	channel = idr_find(&glink->rcids, cid);
750	spin_unlock_irqrestore(&glink->idr_lock, flags);
751	if (!channel) {
752		dev_err(glink->dev, "invalid channel id received\n");
753		return;
754	}
755
756	spin_lock_irqsave(&channel->intent_lock, flags);
757	intent = idr_find(&channel->riids, iid);
758
759	if (!intent) {
760		spin_unlock_irqrestore(&channel->intent_lock, flags);
761		dev_err(glink->dev, "invalid intent id received\n");
762		return;
763	}
764
765	intent->in_use = false;
766
767	if (!reuse) {
768		idr_remove(&channel->riids, intent->id);
769		kfree(intent);
770	}
771	spin_unlock_irqrestore(&channel->intent_lock, flags);
772
773	if (reuse) {
774		WRITE_ONCE(channel->intent_received, true);
775		wake_up_all(&channel->intent_req_wq);
776	}
777}
778
779/**
780 * qcom_glink_handle_intent_req() - Receive a request for rx_intent
781 *					    from remote side
782 * @glink:      Pointer to the transport interface
783 * @cid:	Remote channel ID
784 * @size:	size of the intent
785 *
 * The function searches for the local channel to which the request for
 * an rx_intent has arrived, allocates an intent if possible and reports the
 * result back to the remote.
788 */
789static void qcom_glink_handle_intent_req(struct qcom_glink *glink,
790					 u32 cid, size_t size)
791{
792	struct glink_core_rx_intent *intent;
793	struct glink_channel *channel;
794	unsigned long flags;
795
796	spin_lock_irqsave(&glink->idr_lock, flags);
797	channel = idr_find(&glink->rcids, cid);
798	spin_unlock_irqrestore(&glink->idr_lock, flags);
799
800	if (!channel) {
801		pr_err("%s channel not found for cid %d\n", __func__, cid);
802		return;
803	}
804
805	intent = qcom_glink_alloc_intent(glink, channel, size, false);
806	if (intent)
807		qcom_glink_advertise_intent(glink, channel, intent);
808
809	qcom_glink_send_intent_req_ack(glink, channel, !!intent);
810}
811
812static int qcom_glink_rx_defer(struct qcom_glink *glink, size_t extra)
813{
814	struct glink_defer_cmd *dcmd;
815
816	extra = ALIGN(extra, 8);
817
818	if (qcom_glink_rx_avail(glink) < sizeof(struct glink_msg) + extra) {
		dev_dbg(glink->dev, "Insufficient data in rx fifo\n");
820		return -ENXIO;
821	}
822
823	dcmd = kzalloc(struct_size(dcmd, data, extra), GFP_ATOMIC);
824	if (!dcmd)
825		return -ENOMEM;
826
827	INIT_LIST_HEAD(&dcmd->node);
828
829	qcom_glink_rx_peek(glink, &dcmd->msg, 0, sizeof(dcmd->msg) + extra);
830
831	spin_lock(&glink->rx_lock);
832	list_add_tail(&dcmd->node, &glink->rx_queue);
833	spin_unlock(&glink->rx_lock);
834
835	schedule_work(&glink->rx_work);
836	qcom_glink_rx_advance(glink, sizeof(dcmd->msg) + extra);
837
838	return 0;
839}
840
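/*
 * Handle a TX_DATA or TX_DATA_CONT command: copy the chunk from the rx FIFO
 * into the matching local intent (or, on intentless transports, into an
 * ad-hoc buffer) and deliver the message to the endpoint callback once the
 * last fragment has been received.
 */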
841static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
842{
843	struct glink_core_rx_intent *intent;
844	struct glink_channel *channel;
845	struct {
846		struct glink_msg msg;
847		__le32 chunk_size;
848		__le32 left_size;
849	} __packed hdr;
850	unsigned int chunk_size;
851	unsigned int left_size;
852	unsigned int rcid;
853	unsigned int liid;
854	int ret = 0;
855	unsigned long flags;
856
857	if (avail < sizeof(hdr)) {
858		dev_dbg(glink->dev, "Not enough data in fifo\n");
859		return -EAGAIN;
860	}
861
862	qcom_glink_rx_peek(glink, &hdr, 0, sizeof(hdr));
863	chunk_size = le32_to_cpu(hdr.chunk_size);
864	left_size = le32_to_cpu(hdr.left_size);
865
866	if (avail < sizeof(hdr) + chunk_size) {
867		dev_dbg(glink->dev, "Payload not yet in fifo\n");
868		return -EAGAIN;
869	}
870
871	rcid = le16_to_cpu(hdr.msg.param1);
872	spin_lock_irqsave(&glink->idr_lock, flags);
873	channel = idr_find(&glink->rcids, rcid);
874	spin_unlock_irqrestore(&glink->idr_lock, flags);
875	if (!channel) {
876		dev_dbg(glink->dev, "Data on non-existing channel\n");
877
878		/* Drop the message */
879		goto advance_rx;
880	}
881
882	if (glink->intentless) {
883		/* Might have an ongoing, fragmented, message to append */
884		if (!channel->buf) {
885			intent = kzalloc(sizeof(*intent), GFP_ATOMIC);
886			if (!intent)
887				return -ENOMEM;
888
889			intent->data = kmalloc(chunk_size + left_size,
890					       GFP_ATOMIC);
891			if (!intent->data) {
892				kfree(intent);
893				return -ENOMEM;
894			}
895
896			intent->id = 0xbabababa;
897			intent->size = chunk_size + left_size;
898			intent->offset = 0;
899
900			channel->buf = intent;
901		} else {
902			intent = channel->buf;
903		}
904	} else {
905		liid = le32_to_cpu(hdr.msg.param2);
906
907		spin_lock_irqsave(&channel->intent_lock, flags);
908		intent = idr_find(&channel->liids, liid);
909		spin_unlock_irqrestore(&channel->intent_lock, flags);
910
911		if (!intent) {
912			dev_err(glink->dev,
				"no intent found for channel %s intent %d\n",
914				channel->name, liid);
915			ret = -ENOENT;
916			goto advance_rx;
917		}
918	}
919
920	if (intent->size - intent->offset < chunk_size) {
921		dev_err(glink->dev, "Insufficient space in intent\n");
922
923		/* The packet header lied, drop payload */
924		goto advance_rx;
925	}
926
927	qcom_glink_rx_peek(glink, intent->data + intent->offset,
928			   sizeof(hdr), chunk_size);
929	intent->offset += chunk_size;
930
931	/* Handle message when no fragments remain to be received */
932	if (!left_size) {
933		spin_lock(&channel->recv_lock);
934		if (channel->ept.cb) {
935			channel->ept.cb(channel->ept.rpdev,
936					intent->data,
937					intent->offset,
938					channel->ept.priv,
939					RPMSG_ADDR_ANY);
940		}
941		spin_unlock(&channel->recv_lock);
942
943		intent->offset = 0;
944		channel->buf = NULL;
945
946		qcom_glink_rx_done(glink, channel, intent);
947	}
948
949advance_rx:
950	qcom_glink_rx_advance(glink, ALIGN(sizeof(hdr) + chunk_size, 8));
951
952	return ret;
953}
954
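/*
 * Handle an INTENT command: record the rx intents advertised by the remote
 * in @riids so they can be used by subsequent transmissions, and wake any
 * sender waiting in qcom_glink_request_intent().
 */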
955static void qcom_glink_handle_intent(struct qcom_glink *glink,
956				     unsigned int cid,
957				     unsigned int count,
958				     size_t avail)
959{
960	struct glink_core_rx_intent *intent;
961	struct glink_channel *channel;
962	struct intent_pair {
963		__le32 size;
964		__le32 iid;
965	};
966
967	struct {
968		struct glink_msg msg;
969		struct intent_pair intents[];
970	} __packed * msg;
971
972	const size_t msglen = struct_size(msg, intents, count);
973	int ret;
974	int i;
975	unsigned long flags;
976
977	if (avail < msglen) {
978		dev_dbg(glink->dev, "Not enough data in fifo\n");
979		return;
980	}
981
982	spin_lock_irqsave(&glink->idr_lock, flags);
983	channel = idr_find(&glink->rcids, cid);
984	spin_unlock_irqrestore(&glink->idr_lock, flags);
985	if (!channel) {
986		dev_err(glink->dev, "intents for non-existing channel\n");
987		qcom_glink_rx_advance(glink, ALIGN(msglen, 8));
988		return;
989	}
990
991	msg = kmalloc(msglen, GFP_ATOMIC);
992	if (!msg)
993		return;
994
995	qcom_glink_rx_peek(glink, msg, 0, msglen);
996
997	for (i = 0; i < count; ++i) {
998		intent = kzalloc(sizeof(*intent), GFP_ATOMIC);
999		if (!intent)
1000			break;
1001
1002		intent->id = le32_to_cpu(msg->intents[i].iid);
1003		intent->size = le32_to_cpu(msg->intents[i].size);
1004
1005		spin_lock_irqsave(&channel->intent_lock, flags);
1006		ret = idr_alloc(&channel->riids, intent,
1007				intent->id, intent->id + 1, GFP_ATOMIC);
1008		spin_unlock_irqrestore(&channel->intent_lock, flags);
1009
1010		if (ret < 0)
1011			dev_err(glink->dev, "failed to store remote intent\n");
1012	}
1013
1014	WRITE_ONCE(channel->intent_received, true);
1015	wake_up_all(&channel->intent_req_wq);
1016
1017	kfree(msg);
1018	qcom_glink_rx_advance(glink, ALIGN(msglen, 8));
1019}
1020
1021static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid)
1022{
1023	struct glink_channel *channel;
1024
1025	spin_lock(&glink->idr_lock);
1026	channel = idr_find(&glink->lcids, lcid);
1027	spin_unlock(&glink->idr_lock);
1028	if (!channel) {
1029		dev_err(glink->dev, "Invalid open ack packet\n");
1030		return -EINVAL;
1031	}
1032
1033	complete_all(&channel->open_ack);
1034
1035	return 0;
1036}
1037
1038/**
1039 * qcom_glink_set_flow_control() - convert a signal cmd to wire format and transmit
1040 * @ept:	Rpmsg endpoint for channel.
1041 * @pause:	Pause transmission
1042 * @dst:	destination address of the endpoint
1043 *
1044 * Return: 0 on success or standard Linux error code.
1045 */
1046static int qcom_glink_set_flow_control(struct rpmsg_endpoint *ept, bool pause, u32 dst)
1047{
1048	struct glink_channel *channel = to_glink_channel(ept);
1049	struct qcom_glink *glink = channel->glink;
1050	struct glink_msg msg;
1051	u32 sigs = 0;
1052
1053	if (pause)
1054		sigs |= NATIVE_DTR_SIG | NATIVE_RTS_SIG;
1055
1056	msg.cmd = cpu_to_le16(GLINK_CMD_SIGNALS);
1057	msg.param1 = cpu_to_le16(channel->lcid);
1058	msg.param2 = cpu_to_le32(sigs);
1059
1060	return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
1061}
1062
1063static void qcom_glink_handle_signals(struct qcom_glink *glink,
1064				      unsigned int rcid, unsigned int sigs)
1065{
1066	struct glink_channel *channel;
1067	unsigned long flags;
1068	bool enable;
1069
1070	spin_lock_irqsave(&glink->idr_lock, flags);
1071	channel = idr_find(&glink->rcids, rcid);
1072	spin_unlock_irqrestore(&glink->idr_lock, flags);
1073	if (!channel) {
1074		dev_err(glink->dev, "signal for non-existing channel\n");
1075		return;
1076	}
1077
1078	enable = sigs & NATIVE_DSR_SIG || sigs & NATIVE_CTS_SIG;
1079
1080	if (channel->ept.flow_cb)
1081		channel->ept.flow_cb(channel->ept.rpdev, channel->ept.priv, enable);
1082}
1083
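/**
 * qcom_glink_native_rx() - process incoming messages from the rx FIFO
 * @glink:	the transport to process
 *
 * Called by the transport specific code when the remote signals that new data
 * is available. Commands that are safe to handle here are dispatched
 * directly; the remaining control commands are queued for the rx_work worker.
 */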
1084void qcom_glink_native_rx(struct qcom_glink *glink)
1085{
1086	struct glink_msg msg;
1087	unsigned int param1;
1088	unsigned int param2;
1089	unsigned int avail;
1090	unsigned int cmd;
1091	int ret = 0;
1092
1093	/* To wakeup any blocking writers */
1094	wake_up_all(&glink->tx_avail_notify);
1095
1096	for (;;) {
1097		avail = qcom_glink_rx_avail(glink);
1098		if (avail < sizeof(msg))
1099			break;
1100
1101		qcom_glink_rx_peek(glink, &msg, 0, sizeof(msg));
1102
1103		cmd = le16_to_cpu(msg.cmd);
1104		param1 = le16_to_cpu(msg.param1);
1105		param2 = le32_to_cpu(msg.param2);
1106
1107		switch (cmd) {
1108		case GLINK_CMD_VERSION:
1109		case GLINK_CMD_VERSION_ACK:
1110		case GLINK_CMD_CLOSE:
1111		case GLINK_CMD_CLOSE_ACK:
1112		case GLINK_CMD_RX_INTENT_REQ:
1113			ret = qcom_glink_rx_defer(glink, 0);
1114			break;
1115		case GLINK_CMD_OPEN_ACK:
1116			ret = qcom_glink_rx_open_ack(glink, param1);
1117			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
1118			break;
1119		case GLINK_CMD_OPEN:
1120			ret = qcom_glink_rx_defer(glink, param2);
1121			break;
1122		case GLINK_CMD_TX_DATA:
1123		case GLINK_CMD_TX_DATA_CONT:
1124			ret = qcom_glink_rx_data(glink, avail);
1125			break;
1126		case GLINK_CMD_READ_NOTIF:
1127			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
1128			qcom_glink_tx_kick(glink);
1129			break;
1130		case GLINK_CMD_INTENT:
1131			qcom_glink_handle_intent(glink, param1, param2, avail);
1132			break;
1133		case GLINK_CMD_RX_DONE:
1134			qcom_glink_handle_rx_done(glink, param1, param2, false);
1135			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
1136			break;
1137		case GLINK_CMD_RX_DONE_W_REUSE:
1138			qcom_glink_handle_rx_done(glink, param1, param2, true);
1139			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
1140			break;
1141		case GLINK_CMD_RX_INTENT_REQ_ACK:
1142			qcom_glink_handle_intent_req_ack(glink, param1, param2);
1143			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
1144			break;
1145		case GLINK_CMD_SIGNALS:
1146			qcom_glink_handle_signals(glink, param1, param2);
1147			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
1148			break;
1149		default:
1150			dev_err(glink->dev, "unhandled rx cmd: %d\n", cmd);
1151			ret = -EINVAL;
1152			break;
1153		}
1154
1155		if (ret)
1156			break;
1157	}
1158}
1159EXPORT_SYMBOL(qcom_glink_native_rx);
1160
1161/* Locally initiated rpmsg_create_ept */
1162static struct glink_channel *qcom_glink_create_local(struct qcom_glink *glink,
1163						     const char *name)
1164{
1165	struct glink_channel *channel;
1166	int ret;
1167	unsigned long flags;
1168
1169	channel = qcom_glink_alloc_channel(glink, name);
1170	if (IS_ERR(channel))
1171		return ERR_CAST(channel);
1172
1173	ret = qcom_glink_send_open_req(glink, channel);
1174	if (ret)
1175		goto release_channel;
1176
1177	ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ);
1178	if (!ret)
1179		goto err_timeout;
1180
1181	ret = wait_for_completion_timeout(&channel->open_req, 5 * HZ);
1182	if (!ret)
1183		goto err_timeout;
1184
1185	qcom_glink_send_open_ack(glink, channel);
1186
1187	return channel;
1188
1189err_timeout:
	/* qcom_glink_send_open_req() did register the channel in lcids */
1191	spin_lock_irqsave(&glink->idr_lock, flags);
1192	idr_remove(&glink->lcids, channel->lcid);
1193	spin_unlock_irqrestore(&glink->idr_lock, flags);
1194
1195release_channel:
1196	/* Release qcom_glink_send_open_req() reference */
1197	kref_put(&channel->refcount, qcom_glink_channel_release);
1198	/* Release qcom_glink_alloc_channel() reference */
1199	kref_put(&channel->refcount, qcom_glink_channel_release);
1200
1201	return ERR_PTR(-ETIMEDOUT);
1202}
1203
1204/* Remote initiated rpmsg_create_ept */
1205static int qcom_glink_create_remote(struct qcom_glink *glink,
1206				    struct glink_channel *channel)
1207{
1208	int ret;
1209
1210	qcom_glink_send_open_ack(glink, channel);
1211
1212	ret = qcom_glink_send_open_req(glink, channel);
1213	if (ret)
1214		goto close_link;
1215
1216	ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ);
1217	if (!ret) {
1218		ret = -ETIMEDOUT;
1219		goto close_link;
1220	}
1221
1222	return 0;
1223
1224close_link:
1225	/*
1226	 * Send a close request to "undo" our open-ack. The close-ack will
	 * release the qcom_glink_send_open_req() reference and the last
	 * reference will be released after receiving remote_close or on
	 * transport unregister by qcom_glink_native_remove().
1230	 */
1231	qcom_glink_send_close_req(glink, channel);
1232
1233	return ret;
1234}
1235
1236static struct rpmsg_endpoint *qcom_glink_create_ept(struct rpmsg_device *rpdev,
1237						    rpmsg_rx_cb_t cb,
1238						    void *priv,
1239						    struct rpmsg_channel_info
1240									chinfo)
1241{
1242	struct glink_channel *parent = to_glink_channel(rpdev->ept);
1243	struct glink_channel *channel;
1244	struct qcom_glink *glink = parent->glink;
1245	struct rpmsg_endpoint *ept;
1246	const char *name = chinfo.name;
1247	int cid;
1248	int ret;
1249	unsigned long flags;
1250
1251	spin_lock_irqsave(&glink->idr_lock, flags);
1252	idr_for_each_entry(&glink->rcids, channel, cid) {
1253		if (!strcmp(channel->name, name))
1254			break;
1255	}
1256	spin_unlock_irqrestore(&glink->idr_lock, flags);
1257
1258	if (!channel) {
1259		channel = qcom_glink_create_local(glink, name);
1260		if (IS_ERR(channel))
1261			return NULL;
1262	} else {
1263		ret = qcom_glink_create_remote(glink, channel);
1264		if (ret)
1265			return NULL;
1266	}
1267
1268	ept = &channel->ept;
1269	ept->rpdev = rpdev;
1270	ept->cb = cb;
1271	ept->priv = priv;
1272	ept->ops = &glink_endpoint_ops;
1273
1274	return ept;
1275}
1276
1277static int qcom_glink_announce_create(struct rpmsg_device *rpdev)
1278{
1279	struct glink_channel *channel = to_glink_channel(rpdev->ept);
1280	struct device_node *np = rpdev->dev.of_node;
1281	struct qcom_glink *glink = channel->glink;
1282	struct glink_core_rx_intent *intent;
1283	const struct property *prop = NULL;
1284	__be32 defaults[] = { cpu_to_be32(SZ_1K), cpu_to_be32(5) };
1285	int num_intents;
1286	int num_groups = 1;
1287	__be32 *val = defaults;
1288	int size;
1289
1290	if (glink->intentless || !completion_done(&channel->open_ack))
1291		return 0;
1292
1293	prop = of_find_property(np, "qcom,intents", NULL);
1294	if (prop) {
1295		val = prop->value;
1296		num_groups = prop->length / sizeof(u32) / 2;
1297	}
1298
1299	/* Channel is now open, advertise base set of intents */
1300	while (num_groups--) {
1301		size = be32_to_cpup(val++);
1302		num_intents = be32_to_cpup(val++);
1303		while (num_intents--) {
1304			intent = qcom_glink_alloc_intent(glink, channel, size,
1305							 true);
1306			if (!intent)
1307				break;
1308
1309			qcom_glink_advertise_intent(glink, channel, intent);
1310		}
1311	}
1312	return 0;
1313}
1314
1315static void qcom_glink_destroy_ept(struct rpmsg_endpoint *ept)
1316{
1317	struct glink_channel *channel = to_glink_channel(ept);
1318	struct qcom_glink *glink = channel->glink;
1319	unsigned long flags;
1320
1321	spin_lock_irqsave(&channel->recv_lock, flags);
1322	channel->ept.cb = NULL;
1323	spin_unlock_irqrestore(&channel->recv_lock, flags);
1324
1325	/* Decouple the potential rpdev from the channel */
1326	channel->rpdev = NULL;
1327
1328	qcom_glink_send_close_req(glink, channel);
1329}
1330
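/*
 * Ask the remote to queue an rx intent of at least @size bytes and wait for
 * either a matching intent advertisement or a negative acknowledgment.
 */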
1331static int qcom_glink_request_intent(struct qcom_glink *glink,
1332				     struct glink_channel *channel,
1333				     size_t size)
1334{
1335	struct {
1336		u16 id;
1337		u16 cid;
1338		u32 size;
1339	} __packed cmd;
1340
1341	int ret;
1342
1343	mutex_lock(&channel->intent_req_lock);
1344
1345	WRITE_ONCE(channel->intent_req_result, -1);
1346	WRITE_ONCE(channel->intent_received, false);
1347
1348	cmd.id = GLINK_CMD_RX_INTENT_REQ;
1349	cmd.cid = channel->lcid;
1350	cmd.size = size;
1351
1352	ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
1353	if (ret)
1354		goto unlock;
1355
1356	ret = wait_event_timeout(channel->intent_req_wq,
1357				 READ_ONCE(channel->intent_req_result) >= 0 &&
1358				 READ_ONCE(channel->intent_received),
1359				 10 * HZ);
1360	if (!ret) {
1361		dev_err(glink->dev, "intent request timed out\n");
1362		ret = -ETIMEDOUT;
1363	} else {
1364		ret = READ_ONCE(channel->intent_req_result) ? 0 : -ECANCELED;
1365	}
1366
1367unlock:
1368	mutex_unlock(&channel->intent_req_lock);
1369	return ret;
1370}
1371
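/*
 * Transmit @data on @channel, split into TX_DATA/TX_DATA_CONT chunks. On
 * intent-based transports a suitable remote rx intent is reserved first,
 * requesting one from the remote if none is available and @wait is set.
 */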
1372static int __qcom_glink_send(struct glink_channel *channel,
1373			     void *data, int len, bool wait)
1374{
1375	struct qcom_glink *glink = channel->glink;
1376	struct glink_core_rx_intent *intent = NULL;
1377	struct glink_core_rx_intent *tmp;
1378	int iid = 0;
1379	struct {
1380		struct glink_msg msg;
1381		__le32 chunk_size;
1382		__le32 left_size;
1383	} __packed req;
1384	int ret;
1385	unsigned long flags;
1386	int chunk_size = len;
1387	size_t offset = 0;
1388
1389	if (!glink->intentless) {
1390		while (!intent) {
1391			spin_lock_irqsave(&channel->intent_lock, flags);
1392			idr_for_each_entry(&channel->riids, tmp, iid) {
1393				if (tmp->size >= len && !tmp->in_use) {
1394					if (!intent)
1395						intent = tmp;
1396					else if (intent->size > tmp->size)
1397						intent = tmp;
1398					if (intent->size == len)
1399						break;
1400				}
1401			}
1402			if (intent)
1403				intent->in_use = true;
1404			spin_unlock_irqrestore(&channel->intent_lock, flags);
1405
1406			/* We found an available intent */
1407			if (intent)
1408				break;
1409
1410			if (!wait)
1411				return -EBUSY;
1412
1413			ret = qcom_glink_request_intent(glink, channel, len);
1414			if (ret < 0)
1415				return ret;
1416		}
1417
1418		iid = intent->id;
1419	}
1420
1421	while (offset < len) {
1422		chunk_size = len - offset;
1423		if (chunk_size > SZ_8K && wait)
1424			chunk_size = SZ_8K;
1425
1426		req.msg.cmd = cpu_to_le16(offset == 0 ? GLINK_CMD_TX_DATA : GLINK_CMD_TX_DATA_CONT);
1427		req.msg.param1 = cpu_to_le16(channel->lcid);
1428		req.msg.param2 = cpu_to_le32(iid);
1429		req.chunk_size = cpu_to_le32(chunk_size);
1430		req.left_size = cpu_to_le32(len - offset - chunk_size);
1431
1432		ret = qcom_glink_tx(glink, &req, sizeof(req), data + offset, chunk_size, wait);
1433		if (ret) {
1434			/* Mark intent available if we failed */
1435			if (intent)
1436				intent->in_use = false;
1437			return ret;
1438		}
1439
1440		offset += chunk_size;
1441	}
1442
1443	return 0;
1444}
1445
1446static int qcom_glink_send(struct rpmsg_endpoint *ept, void *data, int len)
1447{
1448	struct glink_channel *channel = to_glink_channel(ept);
1449
1450	return __qcom_glink_send(channel, data, len, true);
1451}
1452
1453static int qcom_glink_trysend(struct rpmsg_endpoint *ept, void *data, int len)
1454{
1455	struct glink_channel *channel = to_glink_channel(ept);
1456
1457	return __qcom_glink_send(channel, data, len, false);
1458}
1459
1460static int qcom_glink_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
1461{
1462	struct glink_channel *channel = to_glink_channel(ept);
1463
1464	return __qcom_glink_send(channel, data, len, true);
1465}
1466
1467static int qcom_glink_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
1468{
1469	struct glink_channel *channel = to_glink_channel(ept);
1470
1471	return __qcom_glink_send(channel, data, len, false);
1472}
1473
1474/*
1475 * Finds the device_node for the glink child interested in this channel.
1476 */
1477static struct device_node *qcom_glink_match_channel(struct device_node *node,
1478						    const char *channel)
1479{
1480	struct device_node *child;
1481	const char *name;
1482	const char *key;
1483	int ret;
1484
1485	for_each_available_child_of_node(node, child) {
1486		key = "qcom,glink-channels";
1487		ret = of_property_read_string(child, key, &name);
1488		if (ret)
1489			continue;
1490
1491		if (strcmp(name, channel) == 0)
1492			return child;
1493	}
1494
1495	return NULL;
1496}
1497
1498static const struct rpmsg_device_ops glink_device_ops = {
1499	.create_ept = qcom_glink_create_ept,
1500	.announce_create = qcom_glink_announce_create,
1501};
1502
1503static const struct rpmsg_endpoint_ops glink_endpoint_ops = {
1504	.destroy_ept = qcom_glink_destroy_ept,
1505	.send = qcom_glink_send,
1506	.sendto = qcom_glink_sendto,
1507	.trysend = qcom_glink_trysend,
1508	.trysendto = qcom_glink_trysendto,
1509	.set_flow_control = qcom_glink_set_flow_control,
1510};
1511
1512static void qcom_glink_rpdev_release(struct device *dev)
1513{
1514	struct rpmsg_device *rpdev = to_rpmsg_device(dev);
1515
1516	kfree(rpdev->driver_override);
1517	kfree(rpdev);
1518}
1519
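/*
 * Handle a remote initiated GLINK_CMD_OPEN: look up (or allocate) the local
 * channel with the given name, record the remote channel id and, for purely
 * remote initiated channels, register a new rpmsg device.
 */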
1520static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
1521			      char *name)
1522{
1523	struct glink_channel *channel;
1524	struct rpmsg_device *rpdev;
1525	bool create_device = false;
1526	struct device_node *node;
1527	int lcid;
1528	int ret;
1529	unsigned long flags;
1530
1531	spin_lock_irqsave(&glink->idr_lock, flags);
1532	idr_for_each_entry(&glink->lcids, channel, lcid) {
1533		if (!strcmp(channel->name, name))
1534			break;
1535	}
1536	spin_unlock_irqrestore(&glink->idr_lock, flags);
1537
1538	if (!channel) {
1539		channel = qcom_glink_alloc_channel(glink, name);
1540		if (IS_ERR(channel))
1541			return PTR_ERR(channel);
1542
1543		/* The opening dance was initiated by the remote */
1544		create_device = true;
1545	}
1546
1547	spin_lock_irqsave(&glink->idr_lock, flags);
1548	ret = idr_alloc(&glink->rcids, channel, rcid, rcid + 1, GFP_ATOMIC);
1549	if (ret < 0) {
1550		dev_err(glink->dev, "Unable to insert channel into rcid list\n");
1551		spin_unlock_irqrestore(&glink->idr_lock, flags);
1552		goto free_channel;
1553	}
1554	channel->rcid = ret;
1555	spin_unlock_irqrestore(&glink->idr_lock, flags);
1556
1557	complete_all(&channel->open_req);
1558
1559	if (create_device) {
1560		rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
1561		if (!rpdev) {
1562			ret = -ENOMEM;
1563			goto rcid_remove;
1564		}
1565
1566		rpdev->ept = &channel->ept;
1567		strscpy_pad(rpdev->id.name, name, RPMSG_NAME_SIZE);
1568		rpdev->src = RPMSG_ADDR_ANY;
1569		rpdev->dst = RPMSG_ADDR_ANY;
1570		rpdev->ops = &glink_device_ops;
1571
1572		node = qcom_glink_match_channel(glink->dev->of_node, name);
1573		rpdev->dev.of_node = node;
1574		rpdev->dev.parent = glink->dev;
1575		rpdev->dev.release = qcom_glink_rpdev_release;
1576
1577		ret = rpmsg_register_device(rpdev);
1578		if (ret)
1579			goto rcid_remove;
1580
1581		channel->rpdev = rpdev;
1582	}
1583
1584	return 0;
1585
1586rcid_remove:
1587	spin_lock_irqsave(&glink->idr_lock, flags);
1588	idr_remove(&glink->rcids, channel->rcid);
1589	channel->rcid = 0;
1590	spin_unlock_irqrestore(&glink->idr_lock, flags);
1591free_channel:
1592	/* Release the reference, iff we took it */
1593	if (create_device)
1594		kref_put(&channel->refcount, qcom_glink_channel_release);
1595
1596	return ret;
1597}
1598
1599static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
1600{
1601	struct rpmsg_channel_info chinfo;
1602	struct glink_channel *channel;
1603	unsigned long flags;
1604
1605	spin_lock_irqsave(&glink->idr_lock, flags);
1606	channel = idr_find(&glink->rcids, rcid);
1607	spin_unlock_irqrestore(&glink->idr_lock, flags);
1608	if (WARN(!channel, "close request on unknown channel\n"))
1609		return;
1610
1611	/* cancel pending rx_done work */
1612	cancel_work_sync(&channel->intent_work);
1613
1614	if (channel->rpdev) {
1615		strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name));
1616		chinfo.src = RPMSG_ADDR_ANY;
1617		chinfo.dst = RPMSG_ADDR_ANY;
1618
1619		rpmsg_unregister_device(glink->dev, &chinfo);
1620	}
1621	channel->rpdev = NULL;
1622
1623	qcom_glink_send_close_ack(glink, channel->rcid);
1624
1625	spin_lock_irqsave(&glink->idr_lock, flags);
1626	idr_remove(&glink->rcids, channel->rcid);
1627	channel->rcid = 0;
1628	spin_unlock_irqrestore(&glink->idr_lock, flags);
1629
1630	kref_put(&channel->refcount, qcom_glink_channel_release);
1631}
1632
1633static void qcom_glink_rx_close_ack(struct qcom_glink *glink, unsigned int lcid)
1634{
1635	struct rpmsg_channel_info chinfo;
1636	struct glink_channel *channel;
1637	unsigned long flags;
1638
1639	/* To wakeup any blocking writers */
1640	wake_up_all(&glink->tx_avail_notify);
1641
1642	spin_lock_irqsave(&glink->idr_lock, flags);
1643	channel = idr_find(&glink->lcids, lcid);
1644	if (WARN(!channel, "close ack on unknown channel\n")) {
1645		spin_unlock_irqrestore(&glink->idr_lock, flags);
1646		return;
1647	}
1648
1649	idr_remove(&glink->lcids, channel->lcid);
1650	channel->lcid = 0;
1651	spin_unlock_irqrestore(&glink->idr_lock, flags);
1652
1653	/* Decouple the potential rpdev from the channel */
1654	if (channel->rpdev) {
1655		strscpy(chinfo.name, channel->name, sizeof(chinfo.name));
1656		chinfo.src = RPMSG_ADDR_ANY;
1657		chinfo.dst = RPMSG_ADDR_ANY;
1658
1659		rpmsg_unregister_device(glink->dev, &chinfo);
1660	}
1661	channel->rpdev = NULL;
1662
1663	kref_put(&channel->refcount, qcom_glink_channel_release);
1664}
1665
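/*
 * Worker draining @rx_queue; handles the control commands that were deferred
 * from qcom_glink_native_rx() because they cannot run in atomic context.
 */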
1666static void qcom_glink_work(struct work_struct *work)
1667{
1668	struct qcom_glink *glink = container_of(work, struct qcom_glink,
1669						rx_work);
1670	struct glink_defer_cmd *dcmd;
1671	struct glink_msg *msg;
1672	unsigned long flags;
1673	unsigned int param1;
1674	unsigned int param2;
1675	unsigned int cmd;
1676
1677	for (;;) {
1678		spin_lock_irqsave(&glink->rx_lock, flags);
1679		if (list_empty(&glink->rx_queue)) {
1680			spin_unlock_irqrestore(&glink->rx_lock, flags);
1681			break;
1682		}
1683		dcmd = list_first_entry(&glink->rx_queue,
1684					struct glink_defer_cmd, node);
1685		list_del(&dcmd->node);
1686		spin_unlock_irqrestore(&glink->rx_lock, flags);
1687
1688		msg = &dcmd->msg;
1689		cmd = le16_to_cpu(msg->cmd);
1690		param1 = le16_to_cpu(msg->param1);
1691		param2 = le32_to_cpu(msg->param2);
1692
1693		switch (cmd) {
1694		case GLINK_CMD_VERSION:
1695			qcom_glink_receive_version(glink, param1, param2);
1696			break;
1697		case GLINK_CMD_VERSION_ACK:
1698			qcom_glink_receive_version_ack(glink, param1, param2);
1699			break;
1700		case GLINK_CMD_OPEN:
1701			qcom_glink_rx_open(glink, param1, msg->data);
1702			break;
1703		case GLINK_CMD_CLOSE:
1704			qcom_glink_rx_close(glink, param1);
1705			break;
1706		case GLINK_CMD_CLOSE_ACK:
1707			qcom_glink_rx_close_ack(glink, param1);
1708			break;
1709		case GLINK_CMD_RX_INTENT_REQ:
1710			qcom_glink_handle_intent_req(glink, param1, param2);
1711			break;
1712		default:
1713			WARN(1, "Unknown defer object %d\n", cmd);
1714			break;
1715		}
1716
1717		kfree(dcmd);
1718	}
1719}
1720
1721static void qcom_glink_cancel_rx_work(struct qcom_glink *glink)
1722{
1723	struct glink_defer_cmd *dcmd;
1724	struct glink_defer_cmd *tmp;
1725
1726	/* cancel any pending deferred rx_work */
1727	cancel_work_sync(&glink->rx_work);
1728
1729	list_for_each_entry_safe(dcmd, tmp, &glink->rx_queue, node)
1730		kfree(dcmd);
1731}
1732
1733static ssize_t rpmsg_name_show(struct device *dev,
1734			       struct device_attribute *attr, char *buf)
1735{
1736	int ret = 0;
1737	const char *name;
1738
1739	ret = of_property_read_string(dev->of_node, "label", &name);
1740	if (ret < 0)
1741		name = dev->of_node->name;
1742
1743	return sysfs_emit(buf, "%s\n", name);
1744}
1745static DEVICE_ATTR_RO(rpmsg_name);
1746
1747static struct attribute *qcom_glink_attrs[] = {
1748	&dev_attr_rpmsg_name.attr,
1749	NULL
1750};
1751ATTRIBUTE_GROUPS(qcom_glink);
1752
1753static void qcom_glink_device_release(struct device *dev)
1754{
1755	struct rpmsg_device *rpdev = to_rpmsg_device(dev);
1756	struct glink_channel *channel = to_glink_channel(rpdev->ept);
1757
1758	/* Release qcom_glink_alloc_channel() reference */
1759	kref_put(&channel->refcount, qcom_glink_channel_release);
1760	kfree(rpdev->driver_override);
1761	kfree(rpdev);
1762}
1763
1764static int qcom_glink_create_chrdev(struct qcom_glink *glink)
1765{
1766	struct rpmsg_device *rpdev;
1767	struct glink_channel *channel;
1768
1769	rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
1770	if (!rpdev)
1771		return -ENOMEM;
1772
1773	channel = qcom_glink_alloc_channel(glink, "rpmsg_chrdev");
1774	if (IS_ERR(channel)) {
1775		kfree(rpdev);
1776		return PTR_ERR(channel);
1777	}
1778	channel->rpdev = rpdev;
1779
1780	rpdev->ept = &channel->ept;
1781	rpdev->ops = &glink_device_ops;
1782	rpdev->dev.parent = glink->dev;
1783	rpdev->dev.release = qcom_glink_device_release;
1784
1785	return rpmsg_ctrldev_register_device(rpdev);
1786}
1787
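/**
 * qcom_glink_native_probe() - set up a glink transport instance
 * @dev:	device handling the transport
 * @features:	features to advertise during version negotiation
 * @rx:		pipe object for the receive FIFO
 * @tx:		pipe object for the transmit FIFO
 * @intentless:	true if the transport does not use rx intents
 *
 * Initializes the common glink state, starts version negotiation with the
 * remote and registers the rpmsg control (chrdev) device.
 *
 * Return: glink context on success, ERR_PTR() on failure.
 */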
1788struct qcom_glink *qcom_glink_native_probe(struct device *dev,
1789					   unsigned long features,
1790					   struct qcom_glink_pipe *rx,
1791					   struct qcom_glink_pipe *tx,
1792					   bool intentless)
1793{
1794	int ret;
1795	struct qcom_glink *glink;
1796
1797	glink = devm_kzalloc(dev, sizeof(*glink), GFP_KERNEL);
1798	if (!glink)
1799		return ERR_PTR(-ENOMEM);
1800
1801	glink->dev = dev;
1802	glink->tx_pipe = tx;
1803	glink->rx_pipe = rx;
1804
1805	glink->features = features;
1806	glink->intentless = intentless;
1807
1808	spin_lock_init(&glink->tx_lock);
1809	spin_lock_init(&glink->rx_lock);
1810	INIT_LIST_HEAD(&glink->rx_queue);
1811	INIT_WORK(&glink->rx_work, qcom_glink_work);
1812	init_waitqueue_head(&glink->tx_avail_notify);
1813
1814	spin_lock_init(&glink->idr_lock);
1815	idr_init(&glink->lcids);
1816	idr_init(&glink->rcids);
1817
1818	glink->dev->groups = qcom_glink_groups;
1819
1820	ret = device_add_groups(dev, qcom_glink_groups);
1821	if (ret)
1822		dev_err(dev, "failed to add groups\n");
1823
1824	ret = qcom_glink_send_version(glink);
1825	if (ret)
1826		return ERR_PTR(ret);
1827
1828	ret = qcom_glink_create_chrdev(glink);
1829	if (ret)
1830		dev_err(glink->dev, "failed to register chrdev\n");
1831
1832	return glink;
1833}
1834EXPORT_SYMBOL_GPL(qcom_glink_native_probe);
1835
1836static int qcom_glink_remove_device(struct device *dev, void *data)
1837{
1838	device_unregister(dev);
1839
1840	return 0;
1841}
1842
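/**
 * qcom_glink_native_remove() - tear down a glink transport instance
 * @glink:	glink context returned by qcom_glink_native_probe()
 *
 * Aborts pending transmissions and intent requests, unregisters all child
 * rpmsg devices and drops the references on any channels still tracked by
 * the transport.
 */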
1843void qcom_glink_native_remove(struct qcom_glink *glink)
1844{
1845	struct glink_channel *channel;
1846	unsigned long flags;
1847	int cid;
1848	int ret;
1849
1850	qcom_glink_cancel_rx_work(glink);
1851
1852	/* Fail all attempts at sending messages */
1853	spin_lock_irqsave(&glink->tx_lock, flags);
1854	glink->abort_tx = true;
1855	wake_up_all(&glink->tx_avail_notify);
1856	spin_unlock_irqrestore(&glink->tx_lock, flags);
1857
1858	/* Abort any senders waiting for intent requests */
1859	spin_lock_irqsave(&glink->idr_lock, flags);
1860	idr_for_each_entry(&glink->lcids, channel, cid)
1861		qcom_glink_intent_req_abort(channel);
1862	spin_unlock_irqrestore(&glink->idr_lock, flags);
1863
1864	ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device);
1865	if (ret)
1866		dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret);
1867
1868	/* Release any defunct local channels, waiting for close-ack */
1869	idr_for_each_entry(&glink->lcids, channel, cid)
1870		kref_put(&channel->refcount, qcom_glink_channel_release);
1871
1872	/* Release any defunct local channels, waiting for close-req */
1873	idr_for_each_entry(&glink->rcids, channel, cid)
1874		kref_put(&channel->refcount, qcom_glink_channel_release);
1875
1876	idr_destroy(&glink->lcids);
1877	idr_destroy(&glink->rcids);
1878}
1879EXPORT_SYMBOL_GPL(qcom_glink_native_remove);
1880
1881MODULE_DESCRIPTION("Qualcomm GLINK driver");
1882MODULE_LICENSE("GPL v2");
1883