1// SPDX-License-Identifier: GPL-2.0
2/*
3 * System Control and Management Interface (SCMI) Message Protocol driver
4 *
5 * SCMI Message Protocol is used between the System Control Processor(SCP)
6 * and the Application Processors(AP). The Message Handling Unit(MHU)
7 * provides a mechanism for inter-processor communication between SCP's
8 * Cortex M3 and AP.
9 *
10 * SCP offers control and management of the core/cluster power states,
11 * various power domain DVFS including the core/cluster, certain system
12 * clocks configuration, thermal sensors and many others.
13 *
14 * Copyright (C) 2018-2021 ARM Ltd.
15 */
16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19#include <linux/bitmap.h>
20#include <linux/debugfs.h>
21#include <linux/device.h>
22#include <linux/export.h>
23#include <linux/idr.h>
24#include <linux/io.h>
25#include <linux/io-64-nonatomic-hi-lo.h>
26#include <linux/kernel.h>
27#include <linux/ktime.h>
28#include <linux/hashtable.h>
29#include <linux/list.h>
30#include <linux/module.h>
31#include <linux/of.h>
32#include <linux/platform_device.h>
33#include <linux/processor.h>
34#include <linux/refcount.h>
35#include <linux/slab.h>
36
37#include "common.h"
38#include "notify.h"
39
40#include "raw_mode.h"
41
42#define CREATE_TRACE_POINTS
43#include <trace/events/scmi.h>
44
45static DEFINE_IDA(scmi_id);
46
47static DEFINE_IDR(scmi_protocols);
48static DEFINE_SPINLOCK(protocol_lock);
49
50/* List of all SCMI devices active in system */
51static LIST_HEAD(scmi_list);
52/* Protection for the entire list */
53static DEFINE_MUTEX(scmi_list_mutex);
54/* Track the unique id for the transfers for debug & profiling purpose */
55static atomic_t transfer_last_id;
56
57static struct dentry *scmi_top_dentry;
58
59/**
60 * struct scmi_xfers_info - Structure to manage transfer information
61 *
62 * @xfer_alloc_table: Bitmap table for allocated messages.
63 *	Index of this bitmap table is also used for message
64 *	sequence identifier.
65 * @xfer_lock: Protection for message allocation
66 * @max_msg: Maximum number of messages that can be pending
67 * @free_xfers: A free list for available to use xfers. It is initialized with
68 *		a number of xfers equal to the maximum allowed in-flight
69 *		messages.
70 * @pending_xfers: An hashtable, indexed by msg_hdr.seq, used to keep all the
71 *		   currently in-flight messages.
72 */
73struct scmi_xfers_info {
74	unsigned long *xfer_alloc_table;
75	spinlock_t xfer_lock;
76	int max_msg;
77	struct hlist_head free_xfers;
78	DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ);
79};
80
81/**
82 * struct scmi_protocol_instance  - Describe an initialized protocol instance.
83 * @handle: Reference to the SCMI handle associated to this protocol instance.
84 * @proto: A reference to the protocol descriptor.
85 * @gid: A reference for per-protocol devres management.
86 * @users: A refcount to track effective users of this protocol.
87 * @priv: Reference for optional protocol private data.
88 * @version: Protocol version supported by the platform as detected at runtime.
89 * @negotiated_version: When the platform supports a newer protocol version,
90 *			the agent will try to negotiate with the platform the
91 *			usage of the newest version known to it, since
92 *			backward compatibility is NOT automatically assured.
93 *			This field is NON-zero when a successful negotiation
94 *			has completed.
95 * @ph: An embedded protocol handle that will be passed down to protocol
96 *	initialization code to identify this instance.
97 *
98 * Each protocol is initialized independently once for each SCMI platform in
99 * which is defined by DT and implemented by the SCMI server fw.
100 */
101struct scmi_protocol_instance {
102	const struct scmi_handle	*handle;
103	const struct scmi_protocol	*proto;
104	void				*gid;
105	refcount_t			users;
106	void				*priv;
107	unsigned int			version;
108	unsigned int			negotiated_version;
109	struct scmi_protocol_handle	ph;
110};
111
112#define ph_to_pi(h)	container_of(h, struct scmi_protocol_instance, ph)
113
114/**
115 * struct scmi_debug_info  - Debug common info
116 * @top_dentry: A reference to the top debugfs dentry
117 * @name: Name of this SCMI instance
118 * @type: Type of this SCMI instance
119 * @is_atomic: Flag to state if the transport of this instance is atomic
120 */
121struct scmi_debug_info {
122	struct dentry *top_dentry;
123	const char *name;
124	const char *type;
125	bool is_atomic;
126};
127
128/**
129 * struct scmi_info - Structure representing a SCMI instance
130 *
131 * @id: A sequence number starting from zero identifying this instance
132 * @dev: Device pointer
133 * @desc: SoC description for this instance
134 * @version: SCMI revision information containing protocol version,
135 *	implementation version and (sub-)vendor identification.
136 * @handle: Instance of SCMI handle to send to clients
137 * @tx_minfo: Universal Transmit Message management info
138 * @rx_minfo: Universal Receive Message management info
139 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
140 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
141 * @protocols: IDR for protocols' instance descriptors initialized for
142 *	       this SCMI instance: populated on protocol's first attempted
143 *	       usage.
144 * @protocols_mtx: A mutex to protect protocols instances initialization.
145 * @protocols_imp: List of protocols implemented, currently maximum of
146 *		   scmi_revision_info.num_protocols elements allocated by the
147 *		   base protocol
148 * @active_protocols: IDR storing device_nodes for protocols actually defined
149 *		      in the DT and confirmed as implemented by fw.
150 * @atomic_threshold: Optional system wide DT-configured threshold, expressed
151 *		      in microseconds, for atomic operations.
152 *		      Only SCMI synchronous commands reported by the platform
153 *		      to have an execution latency lesser-equal to the threshold
154 *		      should be considered for atomic mode operation: such
155 *		      decision is finally left up to the SCMI drivers.
156 * @notify_priv: Pointer to private data structure specific to notifications.
157 * @node: List head
158 * @users: Number of users of this instance
159 * @bus_nb: A notifier to listen for device bind/unbind on the scmi bus
160 * @dev_req_nb: A notifier to listen for device request/unrequest on the scmi
161 *		bus
162 * @devreq_mtx: A mutex to serialize device creation for this SCMI instance
163 * @dbg: A pointer to debugfs related data (if any)
164 * @raw: An opaque reference handle used by SCMI Raw mode.
165 */
166struct scmi_info {
167	int id;
168	struct device *dev;
169	const struct scmi_desc *desc;
170	struct scmi_revision_info version;
171	struct scmi_handle handle;
172	struct scmi_xfers_info tx_minfo;
173	struct scmi_xfers_info rx_minfo;
174	struct idr tx_idr;
175	struct idr rx_idr;
176	struct idr protocols;
177	/* Ensure mutual exclusive access to protocols instance array */
178	struct mutex protocols_mtx;
179	u8 *protocols_imp;
180	struct idr active_protocols;
181	unsigned int atomic_threshold;
182	void *notify_priv;
183	struct list_head node;
184	int users;
185	struct notifier_block bus_nb;
186	struct notifier_block dev_req_nb;
187	/* Serialize device creation process for this instance */
188	struct mutex devreq_mtx;
189	struct scmi_debug_info *dbg;
190	void *raw;
191};
192
193#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)
194#define bus_nb_to_scmi_info(nb)	container_of(nb, struct scmi_info, bus_nb)
195#define req_nb_to_scmi_info(nb)	container_of(nb, struct scmi_info, dev_req_nb)
196
197static const struct scmi_protocol *scmi_protocol_get(int protocol_id)
198{
199	const struct scmi_protocol *proto;
200
201	proto = idr_find(&scmi_protocols, protocol_id);
202	if (!proto || !try_module_get(proto->owner)) {
203		pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
204		return NULL;
205	}
206
207	pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
208
209	return proto;
210}
211
212static void scmi_protocol_put(int protocol_id)
213{
214	const struct scmi_protocol *proto;
215
216	proto = idr_find(&scmi_protocols, protocol_id);
217	if (proto)
218		module_put(proto->owner);
219}
220
221int scmi_protocol_register(const struct scmi_protocol *proto)
222{
223	int ret;
224
225	if (!proto) {
226		pr_err("invalid protocol\n");
227		return -EINVAL;
228	}
229
230	if (!proto->instance_init) {
231		pr_err("missing init for protocol 0x%x\n", proto->id);
232		return -EINVAL;
233	}
234
235	spin_lock(&protocol_lock);
236	ret = idr_alloc(&scmi_protocols, (void *)proto,
237			proto->id, proto->id + 1, GFP_ATOMIC);
238	spin_unlock(&protocol_lock);
239	if (ret != proto->id) {
240		pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n",
241		       proto->id, ret);
242		return ret;
243	}
244
245	pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);
246
247	return 0;
248}
249EXPORT_SYMBOL_GPL(scmi_protocol_register);
250
251void scmi_protocol_unregister(const struct scmi_protocol *proto)
252{
253	spin_lock(&protocol_lock);
254	idr_remove(&scmi_protocols, proto->id);
255	spin_unlock(&protocol_lock);
256
257	pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
258}
259EXPORT_SYMBOL_GPL(scmi_protocol_unregister);
260
261/**
262 * scmi_create_protocol_devices  - Create devices for all pending requests for
263 * this SCMI instance.
264 *
265 * @np: The device node describing the protocol
266 * @info: The SCMI instance descriptor
267 * @prot_id: The protocol ID
268 * @name: The optional name of the device to be created: if not provided this
269 *	  call will lead to the creation of all the devices currently requested
270 *	  for the specified protocol.
271 */
272static void scmi_create_protocol_devices(struct device_node *np,
273					 struct scmi_info *info,
274					 int prot_id, const char *name)
275{
276	struct scmi_device *sdev;
277
278	mutex_lock(&info->devreq_mtx);
279	sdev = scmi_device_create(np, info->dev, prot_id, name);
280	if (name && !sdev)
281		dev_err(info->dev,
282			"failed to create device for protocol 0x%X (%s)\n",
283			prot_id, name);
284	mutex_unlock(&info->devreq_mtx);
285}
286
287static void scmi_destroy_protocol_devices(struct scmi_info *info,
288					  int prot_id, const char *name)
289{
290	mutex_lock(&info->devreq_mtx);
291	scmi_device_destroy(info->dev, prot_id, name);
292	mutex_unlock(&info->devreq_mtx);
293}
294
295void scmi_notification_instance_data_set(const struct scmi_handle *handle,
296					 void *priv)
297{
298	struct scmi_info *info = handle_to_scmi_info(handle);
299
300	info->notify_priv = priv;
301	/* Ensure updated protocol private date are visible */
302	smp_wmb();
303}
304
305void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
306{
307	struct scmi_info *info = handle_to_scmi_info(handle);
308
309	/* Ensure protocols_private_data has been updated */
310	smp_rmb();
311	return info->notify_priv;
312}
313
314/**
315 * scmi_xfer_token_set  - Reserve and set new token for the xfer at hand
316 *
317 * @minfo: Pointer to Tx/Rx Message management info based on channel type
318 * @xfer: The xfer to act upon
319 *
320 * Pick the next unused monotonically increasing token and set it into
321 * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
322 * reuse of freshly completed or timed-out xfers, thus mitigating the risk
323 * of incorrect association of a late and expired xfer with a live in-flight
324 * transaction, both happening to re-use the same token identifier.
325 *
326 * Since platform is NOT required to answer our request in-order we should
327 * account for a few rare but possible scenarios:
328 *
329 *  - exactly 'next_token' may be NOT available so pick xfer_id >= next_token
330 *    using find_next_zero_bit() starting from candidate next_token bit
331 *
332 *  - all tokens ahead upto (MSG_TOKEN_ID_MASK - 1) are used in-flight but we
333 *    are plenty of free tokens at start, so try a second pass using
334 *    find_next_zero_bit() and starting from 0.
335 *
336 *  X = used in-flight
337 *
338 * Normal
339 * ------
340 *
341 *		|- xfer_id picked
342 *   -----------+----------------------------------------------------------
343 *   | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X|
344 *   ----------------------------------------------------------------------
345 *		^
346 *		|- next_token
347 *
348 * Out-of-order pending at start
349 * -----------------------------
350 *
351 *	  |- xfer_id picked, last_token fixed
352 *   -----+----------------------------------------------------------------
353 *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... ...|X| |
354 *   ----------------------------------------------------------------------
355 *    ^
356 *    |- next_token
357 *
358 *
359 * Out-of-order pending at end
360 * ---------------------------
361 *
362 *	  |- xfer_id picked, last_token fixed
363 *   -----+----------------------------------------------------------------
364 *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X|
365 *   ----------------------------------------------------------------------
366 *								^
367 *								|- next_token
368 *
369 * Context: Assumes to be called with @xfer_lock already acquired.
370 *
371 * Return: 0 on Success or error
372 */
373static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
374			       struct scmi_xfer *xfer)
375{
376	unsigned long xfer_id, next_token;
377
378	/*
379	 * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
380	 * using the pre-allocated transfer_id as a base.
381	 * Note that the global transfer_id is shared across all message types
382	 * so there could be holes in the allocated set of monotonic sequence
383	 * numbers, but that is going to limit the effectiveness of the
384	 * mitigation only in very rare limit conditions.
385	 */
386	next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));
387
388	/* Pick the next available xfer_id >= next_token */
389	xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
390				     MSG_TOKEN_MAX, next_token);
391	if (xfer_id == MSG_TOKEN_MAX) {
392		/*
393		 * After heavily out-of-order responses, there are no free
394		 * tokens ahead, but only at start of xfer_alloc_table so
395		 * try again from the beginning.
396		 */
397		xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
398					     MSG_TOKEN_MAX, 0);
399		/*
400		 * Something is wrong if we got here since there can be a
401		 * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages
402		 * but we have not found any free token [0, MSG_TOKEN_MAX - 1].
403		 */
404		if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX))
405			return -ENOMEM;
406	}
407
408	/* Update +/- last_token accordingly if we skipped some hole */
409	if (xfer_id != next_token)
410		atomic_add((int)(xfer_id - next_token), &transfer_last_id);
411
412	xfer->hdr.seq = (u16)xfer_id;
413
414	return 0;
415}
416
417/**
418 * scmi_xfer_token_clear  - Release the token
419 *
420 * @minfo: Pointer to Tx/Rx Message management info based on channel type
421 * @xfer: The xfer to act upon
422 */
423static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
424					 struct scmi_xfer *xfer)
425{
426	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
427}
428
429/**
430 * scmi_xfer_inflight_register_unlocked  - Register the xfer as in-flight
431 *
432 * @xfer: The xfer to register
433 * @minfo: Pointer to Tx/Rx Message management info based on channel type
434 *
435 * Note that this helper assumes that the xfer to be registered as in-flight
436 * had been built using an xfer sequence number which still corresponds to a
437 * free slot in the xfer_alloc_table.
438 *
439 * Context: Assumes to be called with @xfer_lock already acquired.
440 */
441static inline void
442scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
443				     struct scmi_xfers_info *minfo)
444{
445	/* Set in-flight */
446	set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
447	hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
448	xfer->pending = true;
449}
450
451/**
452 * scmi_xfer_inflight_register  - Try to register an xfer as in-flight
453 *
454 * @xfer: The xfer to register
455 * @minfo: Pointer to Tx/Rx Message management info based on channel type
456 *
457 * Note that this helper does NOT assume anything about the sequence number
458 * that was baked into the provided xfer, so it checks at first if it can
459 * be mapped to a free slot and fails with an error if another xfer with the
460 * same sequence number is currently still registered as in-flight.
461 *
462 * Return: 0 on Success or -EBUSY if sequence number embedded in the xfer
463 *	   could not rbe mapped to a free slot in the xfer_alloc_table.
464 */
465static int scmi_xfer_inflight_register(struct scmi_xfer *xfer,
466				       struct scmi_xfers_info *minfo)
467{
468	int ret = 0;
469	unsigned long flags;
470
471	spin_lock_irqsave(&minfo->xfer_lock, flags);
472	if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table))
473		scmi_xfer_inflight_register_unlocked(xfer, minfo);
474	else
475		ret = -EBUSY;
476	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
477
478	return ret;
479}
480
481/**
482 * scmi_xfer_raw_inflight_register  - An helper to register the given xfer as in
483 * flight on the TX channel, if possible.
484 *
485 * @handle: Pointer to SCMI entity handle
486 * @xfer: The xfer to register
487 *
488 * Return: 0 on Success, error otherwise
489 */
490int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
491				    struct scmi_xfer *xfer)
492{
493	struct scmi_info *info = handle_to_scmi_info(handle);
494
495	return scmi_xfer_inflight_register(xfer, &info->tx_minfo);
496}
497
498/**
499 * scmi_xfer_pending_set  - Pick a proper sequence number and mark the xfer
500 * as pending in-flight
501 *
502 * @xfer: The xfer to act upon
503 * @minfo: Pointer to Tx/Rx Message management info based on channel type
504 *
505 * Return: 0 on Success or error otherwise
506 */
507static inline int scmi_xfer_pending_set(struct scmi_xfer *xfer,
508					struct scmi_xfers_info *minfo)
509{
510	int ret;
511	unsigned long flags;
512
513	spin_lock_irqsave(&minfo->xfer_lock, flags);
514	/* Set a new monotonic token as the xfer sequence number */
515	ret = scmi_xfer_token_set(minfo, xfer);
516	if (!ret)
517		scmi_xfer_inflight_register_unlocked(xfer, minfo);
518	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
519
520	return ret;
521}
522
523/**
524 * scmi_xfer_get() - Allocate one message
525 *
526 * @handle: Pointer to SCMI entity handle
527 * @minfo: Pointer to Tx/Rx Message management info based on channel type
528 *
529 * Helper function which is used by various message functions that are
530 * exposed to clients of this driver for allocating a message traffic event.
531 *
532 * Picks an xfer from the free list @free_xfers (if any available) and perform
533 * a basic initialization.
534 *
535 * Note that, at this point, still no sequence number is assigned to the
536 * allocated xfer, nor it is registered as a pending transaction.
537 *
538 * The successfully initialized xfer is refcounted.
539 *
540 * Context: Holds @xfer_lock while manipulating @free_xfers.
541 *
542 * Return: An initialized xfer if all went fine, else pointer error.
543 */
544static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
545				       struct scmi_xfers_info *minfo)
546{
547	unsigned long flags;
548	struct scmi_xfer *xfer;
549
550	spin_lock_irqsave(&minfo->xfer_lock, flags);
551	if (hlist_empty(&minfo->free_xfers)) {
552		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
553		return ERR_PTR(-ENOMEM);
554	}
555
556	/* grab an xfer from the free_list */
557	xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
558	hlist_del_init(&xfer->node);
559
560	/*
561	 * Allocate transfer_id early so that can be used also as base for
562	 * monotonic sequence number generation if needed.
563	 */
564	xfer->transfer_id = atomic_inc_return(&transfer_last_id);
565
566	refcount_set(&xfer->users, 1);
567	atomic_set(&xfer->busy, SCMI_XFER_FREE);
568	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
569
570	return xfer;
571}
572
573/**
574 * scmi_xfer_raw_get  - Helper to get a bare free xfer from the TX channel
575 *
576 * @handle: Pointer to SCMI entity handle
577 *
578 * Note that xfer is taken from the TX channel structures.
579 *
580 * Return: A valid xfer on Success, or an error-pointer otherwise
581 */
582struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle)
583{
584	struct scmi_xfer *xfer;
585	struct scmi_info *info = handle_to_scmi_info(handle);
586
587	xfer = scmi_xfer_get(handle, &info->tx_minfo);
588	if (!IS_ERR(xfer))
589		xfer->flags |= SCMI_XFER_FLAG_IS_RAW;
590
591	return xfer;
592}
593
594/**
595 * scmi_xfer_raw_channel_get  - Helper to get a reference to the proper channel
596 * to use for a specific protocol_id Raw transaction.
597 *
598 * @handle: Pointer to SCMI entity handle
599 * @protocol_id: Identifier of the protocol
600 *
601 * Note that in a regular SCMI stack, usually, a protocol has to be defined in
602 * the DT to have an associated channel and be usable; but in Raw mode any
603 * protocol in range is allowed, re-using the Base channel, so as to enable
604 * fuzzing on any protocol without the need of a fully compiled DT.
605 *
606 * Return: A reference to the channel to use, or an ERR_PTR
607 */
608struct scmi_chan_info *
609scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id)
610{
611	struct scmi_chan_info *cinfo;
612	struct scmi_info *info = handle_to_scmi_info(handle);
613
614	cinfo = idr_find(&info->tx_idr, protocol_id);
615	if (!cinfo) {
616		if (protocol_id == SCMI_PROTOCOL_BASE)
617			return ERR_PTR(-EINVAL);
618		/* Use Base channel for protocols not defined for DT */
619		cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
620		if (!cinfo)
621			return ERR_PTR(-EINVAL);
622		dev_warn_once(handle->dev,
623			      "Using Base channel for protocol 0x%X\n",
624			      protocol_id);
625	}
626
627	return cinfo;
628}
629
630/**
631 * __scmi_xfer_put() - Release a message
632 *
633 * @minfo: Pointer to Tx/Rx Message management info based on channel type
634 * @xfer: message that was reserved by scmi_xfer_get
635 *
636 * After refcount check, possibly release an xfer, clearing the token slot,
637 * removing xfer from @pending_xfers and putting it back into free_xfers.
638 *
639 * This holds a spinlock to maintain integrity of internal data structures.
640 */
641static void
642__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
643{
644	unsigned long flags;
645
646	spin_lock_irqsave(&minfo->xfer_lock, flags);
647	if (refcount_dec_and_test(&xfer->users)) {
648		if (xfer->pending) {
649			scmi_xfer_token_clear(minfo, xfer);
650			hash_del(&xfer->node);
651			xfer->pending = false;
652		}
653		hlist_add_head(&xfer->node, &minfo->free_xfers);
654	}
655	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
656}
657
658/**
659 * scmi_xfer_raw_put  - Release an xfer that was taken by @scmi_xfer_raw_get
660 *
661 * @handle: Pointer to SCMI entity handle
662 * @xfer: A reference to the xfer to put
663 *
664 * Note that as with other xfer_put() handlers the xfer is really effectively
665 * released only if there are no more users on the system.
666 */
667void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
668{
669	struct scmi_info *info = handle_to_scmi_info(handle);
670
671	xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
672	xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
673	return __scmi_xfer_put(&info->tx_minfo, xfer);
674}
675
676/**
677 * scmi_xfer_lookup_unlocked  -  Helper to lookup an xfer_id
678 *
679 * @minfo: Pointer to Tx/Rx Message management info based on channel type
680 * @xfer_id: Token ID to lookup in @pending_xfers
681 *
682 * Refcounting is untouched.
683 *
684 * Context: Assumes to be called with @xfer_lock already acquired.
685 *
686 * Return: A valid xfer on Success or error otherwise
687 */
688static struct scmi_xfer *
689scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
690{
691	struct scmi_xfer *xfer = NULL;
692
693	if (test_bit(xfer_id, minfo->xfer_alloc_table))
694		xfer = XFER_FIND(minfo->pending_xfers, xfer_id);
695
696	return xfer ?: ERR_PTR(-EINVAL);
697}
698
699/**
700 * scmi_msg_response_validate  - Validate message type against state of related
701 * xfer
702 *
703 * @cinfo: A reference to the channel descriptor.
704 * @msg_type: Message type to check
705 * @xfer: A reference to the xfer to validate against @msg_type
706 *
707 * This function checks if @msg_type is congruent with the current state of
708 * a pending @xfer; if an asynchronous delayed response is received before the
709 * related synchronous response (Out-of-Order Delayed Response) the missing
710 * synchronous response is assumed to be OK and completed, carrying on with the
711 * Delayed Response: this is done to address the case in which the underlying
712 * SCMI transport can deliver such out-of-order responses.
713 *
714 * Context: Assumes to be called with xfer->lock already acquired.
715 *
716 * Return: 0 on Success, error otherwise
717 */
718static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo,
719					     u8 msg_type,
720					     struct scmi_xfer *xfer)
721{
722	/*
723	 * Even if a response was indeed expected on this slot at this point,
724	 * a buggy platform could wrongly reply feeding us an unexpected
725	 * delayed response we're not prepared to handle: bail-out safely
726	 * blaming firmware.
727	 */
728	if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
729		dev_err(cinfo->dev,
730			"Delayed Response for %d not expected! Buggy F/W ?\n",
731			xfer->hdr.seq);
732		return -EINVAL;
733	}
734
735	switch (xfer->state) {
736	case SCMI_XFER_SENT_OK:
737		if (msg_type == MSG_TYPE_DELAYED_RESP) {
738			/*
739			 * Delayed Response expected but delivered earlier.
740			 * Assume message RESPONSE was OK and skip state.
741			 */
742			xfer->hdr.status = SCMI_SUCCESS;
743			xfer->state = SCMI_XFER_RESP_OK;
744			complete(&xfer->done);
745			dev_warn(cinfo->dev,
746				 "Received valid OoO Delayed Response for %d\n",
747				 xfer->hdr.seq);
748		}
749		break;
750	case SCMI_XFER_RESP_OK:
751		if (msg_type != MSG_TYPE_DELAYED_RESP)
752			return -EINVAL;
753		break;
754	case SCMI_XFER_DRESP_OK:
755		/* No further message expected once in SCMI_XFER_DRESP_OK */
756		return -EINVAL;
757	}
758
759	return 0;
760}
761
762/**
763 * scmi_xfer_state_update  - Update xfer state
764 *
765 * @xfer: A reference to the xfer to update
766 * @msg_type: Type of message being processed.
767 *
768 * Note that this message is assumed to have been already successfully validated
769 * by @scmi_msg_response_validate(), so here we just update the state.
770 *
771 * Context: Assumes to be called on an xfer exclusively acquired using the
772 *	    busy flag.
773 */
774static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type)
775{
776	xfer->hdr.type = msg_type;
777
778	/* Unknown command types were already discarded earlier */
779	if (xfer->hdr.type == MSG_TYPE_COMMAND)
780		xfer->state = SCMI_XFER_RESP_OK;
781	else
782		xfer->state = SCMI_XFER_DRESP_OK;
783}
784
785static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
786{
787	int ret;
788
789	ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);
790
791	return ret == SCMI_XFER_FREE;
792}
793
794/**
795 * scmi_xfer_command_acquire  -  Helper to lookup and acquire a command xfer
796 *
797 * @cinfo: A reference to the channel descriptor.
798 * @msg_hdr: A message header to use as lookup key
799 *
800 * When a valid xfer is found for the sequence number embedded in the provided
801 * msg_hdr, reference counting is properly updated and exclusive access to this
802 * xfer is granted till released with @scmi_xfer_command_release.
803 *
804 * Return: A valid @xfer on Success or error otherwise.
805 */
806static inline struct scmi_xfer *
807scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
808{
809	int ret;
810	unsigned long flags;
811	struct scmi_xfer *xfer;
812	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
813	struct scmi_xfers_info *minfo = &info->tx_minfo;
814	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
815	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
816
817	/* Are we even expecting this? */
818	spin_lock_irqsave(&minfo->xfer_lock, flags);
819	xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
820	if (IS_ERR(xfer)) {
821		dev_err(cinfo->dev,
822			"Message for %d type %d is not expected!\n",
823			xfer_id, msg_type);
824		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
825		return xfer;
826	}
827	refcount_inc(&xfer->users);
828	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
829
830	spin_lock_irqsave(&xfer->lock, flags);
831	ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
832	/*
833	 * If a pending xfer was found which was also in a congruent state with
834	 * the received message, acquire exclusive access to it setting the busy
835	 * flag.
836	 * Spins only on the rare limit condition of concurrent reception of
837	 * RESP and DRESP for the same xfer.
838	 */
839	if (!ret) {
840		spin_until_cond(scmi_xfer_acquired(xfer));
841		scmi_xfer_state_update(xfer, msg_type);
842	}
843	spin_unlock_irqrestore(&xfer->lock, flags);
844
845	if (ret) {
846		dev_err(cinfo->dev,
847			"Invalid message type:%d for %d - HDR:0x%X  state:%d\n",
848			msg_type, xfer_id, msg_hdr, xfer->state);
849		/* On error the refcount incremented above has to be dropped */
850		__scmi_xfer_put(minfo, xfer);
851		xfer = ERR_PTR(-EINVAL);
852	}
853
854	return xfer;
855}
856
857static inline void scmi_xfer_command_release(struct scmi_info *info,
858					     struct scmi_xfer *xfer)
859{
860	atomic_set(&xfer->busy, SCMI_XFER_FREE);
861	__scmi_xfer_put(&info->tx_minfo, xfer);
862}
863
864static inline void scmi_clear_channel(struct scmi_info *info,
865				      struct scmi_chan_info *cinfo)
866{
867	if (info->desc->ops->clear_channel)
868		info->desc->ops->clear_channel(cinfo);
869}
870
871static void scmi_handle_notification(struct scmi_chan_info *cinfo,
872				     u32 msg_hdr, void *priv)
873{
874	struct scmi_xfer *xfer;
875	struct device *dev = cinfo->dev;
876	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
877	struct scmi_xfers_info *minfo = &info->rx_minfo;
878	ktime_t ts;
879
880	ts = ktime_get_boottime();
881	xfer = scmi_xfer_get(cinfo->handle, minfo);
882	if (IS_ERR(xfer)) {
883		dev_err(dev, "failed to get free message slot (%ld)\n",
884			PTR_ERR(xfer));
885		scmi_clear_channel(info, cinfo);
886		return;
887	}
888
889	unpack_scmi_header(msg_hdr, &xfer->hdr);
890	if (priv)
891		/* Ensure order between xfer->priv store and following ops */
892		smp_store_mb(xfer->priv, priv);
893	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
894					    xfer);
895
896	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
897			    xfer->hdr.id, "NOTI", xfer->hdr.seq,
898			    xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
899
900	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
901		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
902
903	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
904			   xfer->hdr.protocol_id, xfer->hdr.seq,
905			   MSG_TYPE_NOTIFICATION);
906
907	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
908		xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
909		scmi_raw_message_report(info->raw, xfer, SCMI_RAW_NOTIF_QUEUE,
910					cinfo->id);
911	}
912
913	__scmi_xfer_put(minfo, xfer);
914
915	scmi_clear_channel(info, cinfo);
916}
917
918static void scmi_handle_response(struct scmi_chan_info *cinfo,
919				 u32 msg_hdr, void *priv)
920{
921	struct scmi_xfer *xfer;
922	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
923
924	xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
925	if (IS_ERR(xfer)) {
926		if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
927			scmi_raw_error_report(info->raw, cinfo, msg_hdr, priv);
928
929		if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
930			scmi_clear_channel(info, cinfo);
931		return;
932	}
933
934	/* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
935	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
936		xfer->rx.len = info->desc->max_msg_size;
937
938	if (priv)
939		/* Ensure order between xfer->priv store and following ops */
940		smp_store_mb(xfer->priv, priv);
941	info->desc->ops->fetch_response(cinfo, xfer);
942
943	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
944			    xfer->hdr.id,
945			    xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
946			    (!SCMI_XFER_IS_RAW(xfer) ? "DLYD" : "dlyd") :
947			    (!SCMI_XFER_IS_RAW(xfer) ? "RESP" : "resp"),
948			    xfer->hdr.seq, xfer->hdr.status,
949			    xfer->rx.buf, xfer->rx.len);
950
951	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
952			   xfer->hdr.protocol_id, xfer->hdr.seq,
953			   xfer->hdr.type);
954
955	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
956		scmi_clear_channel(info, cinfo);
957		complete(xfer->async_done);
958	} else {
959		complete(&xfer->done);
960	}
961
962	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
963		/*
964		 * When in polling mode avoid to queue the Raw xfer on the IRQ
965		 * RX path since it will be already queued at the end of the TX
966		 * poll loop.
967		 */
968		if (!xfer->hdr.poll_completion)
969			scmi_raw_message_report(info->raw, xfer,
970						SCMI_RAW_REPLY_QUEUE,
971						cinfo->id);
972	}
973
974	scmi_xfer_command_release(info, xfer);
975}
976
977/**
978 * scmi_rx_callback() - callback for receiving messages
979 *
980 * @cinfo: SCMI channel info
981 * @msg_hdr: Message header
982 * @priv: Transport specific private data.
983 *
984 * Processes one received message to appropriate transfer information and
985 * signals completion of the transfer.
986 *
987 * NOTE: This function will be invoked in IRQ context, hence should be
988 * as optimal as possible.
989 */
990void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv)
991{
992	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
993
994	switch (msg_type) {
995	case MSG_TYPE_NOTIFICATION:
996		scmi_handle_notification(cinfo, msg_hdr, priv);
997		break;
998	case MSG_TYPE_COMMAND:
999	case MSG_TYPE_DELAYED_RESP:
1000		scmi_handle_response(cinfo, msg_hdr, priv);
1001		break;
1002	default:
1003		WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
1004		break;
1005	}
1006}
1007
1008/**
1009 * xfer_put() - Release a transmit message
1010 *
1011 * @ph: Pointer to SCMI protocol handle
1012 * @xfer: message that was reserved by xfer_get_init
1013 */
1014static void xfer_put(const struct scmi_protocol_handle *ph,
1015		     struct scmi_xfer *xfer)
1016{
1017	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1018	struct scmi_info *info = handle_to_scmi_info(pi->handle);
1019
1020	__scmi_xfer_put(&info->tx_minfo, xfer);
1021}
1022
1023static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
1024				      struct scmi_xfer *xfer, ktime_t stop)
1025{
1026	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1027
1028	/*
1029	 * Poll also on xfer->done so that polling can be forcibly terminated
1030	 * in case of out-of-order receptions of delayed responses
1031	 */
1032	return info->desc->ops->poll_done(cinfo, xfer) ||
1033	       try_wait_for_completion(&xfer->done) ||
1034	       ktime_after(ktime_get(), stop);
1035}
1036
1037static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
1038			       struct scmi_chan_info *cinfo,
1039			       struct scmi_xfer *xfer, unsigned int timeout_ms)
1040{
1041	int ret = 0;
1042
1043	if (xfer->hdr.poll_completion) {
1044		/*
1045		 * Real polling is needed only if transport has NOT declared
1046		 * itself to support synchronous commands replies.
1047		 */
1048		if (!desc->sync_cmds_completed_on_ret) {
1049			/*
1050			 * Poll on xfer using transport provided .poll_done();
1051			 * assumes no completion interrupt was available.
1052			 */
1053			ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);
1054
1055			spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
1056								  xfer, stop));
1057			if (ktime_after(ktime_get(), stop)) {
1058				dev_err(dev,
1059					"timed out in resp(caller: %pS) - polling\n",
1060					(void *)_RET_IP_);
1061				ret = -ETIMEDOUT;
1062			}
1063		}
1064
1065		if (!ret) {
1066			unsigned long flags;
1067			struct scmi_info *info =
1068				handle_to_scmi_info(cinfo->handle);
1069
1070			/*
1071			 * Do not fetch_response if an out-of-order delayed
1072			 * response is being processed.
1073			 */
1074			spin_lock_irqsave(&xfer->lock, flags);
1075			if (xfer->state == SCMI_XFER_SENT_OK) {
1076				desc->ops->fetch_response(cinfo, xfer);
1077				xfer->state = SCMI_XFER_RESP_OK;
1078			}
1079			spin_unlock_irqrestore(&xfer->lock, flags);
1080
1081			/* Trace polled replies. */
1082			trace_scmi_msg_dump(info->id, cinfo->id,
1083					    xfer->hdr.protocol_id, xfer->hdr.id,
1084					    !SCMI_XFER_IS_RAW(xfer) ?
1085					    "RESP" : "resp",
1086					    xfer->hdr.seq, xfer->hdr.status,
1087					    xfer->rx.buf, xfer->rx.len);
1088
1089			if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
1090				struct scmi_info *info =
1091					handle_to_scmi_info(cinfo->handle);
1092
1093				scmi_raw_message_report(info->raw, xfer,
1094							SCMI_RAW_REPLY_QUEUE,
1095							cinfo->id);
1096			}
1097		}
1098	} else {
1099		/* And we wait for the response. */
1100		if (!wait_for_completion_timeout(&xfer->done,
1101						 msecs_to_jiffies(timeout_ms))) {
1102			dev_err(dev, "timed out in resp(caller: %pS)\n",
1103				(void *)_RET_IP_);
1104			ret = -ETIMEDOUT;
1105		}
1106	}
1107
1108	return ret;
1109}
1110
1111/**
1112 * scmi_wait_for_message_response  - An helper to group all the possible ways of
1113 * waiting for a synchronous message response.
1114 *
1115 * @cinfo: SCMI channel info
1116 * @xfer: Reference to the transfer being waited for.
1117 *
1118 * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
1119 * configuration flags like xfer->hdr.poll_completion.
1120 *
1121 * Return: 0 on Success, error otherwise.
1122 */
1123static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
1124					  struct scmi_xfer *xfer)
1125{
1126	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1127	struct device *dev = info->dev;
1128
1129	trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
1130				      xfer->hdr.protocol_id, xfer->hdr.seq,
1131				      info->desc->max_rx_timeout_ms,
1132				      xfer->hdr.poll_completion);
1133
1134	return scmi_wait_for_reply(dev, info->desc, cinfo, xfer,
1135				   info->desc->max_rx_timeout_ms);
1136}
1137
1138/**
1139 * scmi_xfer_raw_wait_for_message_response  - An helper to wait for a message
1140 * reply to an xfer raw request on a specific channel for the required timeout.
1141 *
1142 * @cinfo: SCMI channel info
1143 * @xfer: Reference to the transfer being waited for.
1144 * @timeout_ms: The maximum timeout in milliseconds
1145 *
1146 * Return: 0 on Success, error otherwise.
1147 */
1148int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
1149					    struct scmi_xfer *xfer,
1150					    unsigned int timeout_ms)
1151{
1152	int ret;
1153	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1154	struct device *dev = info->dev;
1155
1156	ret = scmi_wait_for_reply(dev, info->desc, cinfo, xfer, timeout_ms);
1157	if (ret)
1158		dev_dbg(dev, "timed out in RAW response - HDR:%08X\n",
1159			pack_scmi_header(&xfer->hdr));
1160
1161	return ret;
1162}
1163
1164/**
1165 * do_xfer() - Do one transfer
1166 *
1167 * @ph: Pointer to SCMI protocol handle
1168 * @xfer: Transfer to initiate and wait for response
1169 *
1170 * Return: -ETIMEDOUT in case of no response, if transmit error,
1171 *	return corresponding error, else if all goes well,
1172 *	return 0.
1173 */
1174static int do_xfer(const struct scmi_protocol_handle *ph,
1175		   struct scmi_xfer *xfer)
1176{
1177	int ret;
1178	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1179	struct scmi_info *info = handle_to_scmi_info(pi->handle);
1180	struct device *dev = info->dev;
1181	struct scmi_chan_info *cinfo;
1182
1183	/* Check for polling request on custom command xfers at first */
1184	if (xfer->hdr.poll_completion &&
1185	    !is_transport_polling_capable(info->desc)) {
1186		dev_warn_once(dev,
1187			      "Polling mode is not supported by transport.\n");
1188		return -EINVAL;
1189	}
1190
1191	cinfo = idr_find(&info->tx_idr, pi->proto->id);
1192	if (unlikely(!cinfo))
1193		return -EINVAL;
1194
1195	/* True ONLY if also supported by transport. */
1196	if (is_polling_enabled(cinfo, info->desc))
1197		xfer->hdr.poll_completion = true;
1198
1199	/*
1200	 * Initialise protocol id now from protocol handle to avoid it being
1201	 * overridden by mistake (or malice) by the protocol code mangling with
1202	 * the scmi_xfer structure prior to this.
1203	 */
1204	xfer->hdr.protocol_id = pi->proto->id;
1205	reinit_completion(&xfer->done);
1206
1207	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
1208			      xfer->hdr.protocol_id, xfer->hdr.seq,
1209			      xfer->hdr.poll_completion);
1210
1211	/* Clear any stale status */
1212	xfer->hdr.status = SCMI_SUCCESS;
1213	xfer->state = SCMI_XFER_SENT_OK;
1214	/*
1215	 * Even though spinlocking is not needed here since no race is possible
1216	 * on xfer->state due to the monotonically increasing tokens allocation,
1217	 * we must anyway ensure xfer->state initialization is not re-ordered
1218	 * after the .send_message() to be sure that on the RX path an early
1219	 * ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
1220	 */
1221	smp_mb();
1222
1223	ret = info->desc->ops->send_message(cinfo, xfer);
1224	if (ret < 0) {
1225		dev_dbg(dev, "Failed to send message %d\n", ret);
1226		return ret;
1227	}
1228
1229	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
1230			    xfer->hdr.id, "CMND", xfer->hdr.seq,
1231			    xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
1232
1233	ret = scmi_wait_for_message_response(cinfo, xfer);
1234	if (!ret && xfer->hdr.status)
1235		ret = scmi_to_linux_errno(xfer->hdr.status);
1236
1237	if (info->desc->ops->mark_txdone)
1238		info->desc->ops->mark_txdone(cinfo, ret, xfer);
1239
1240	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
1241			    xfer->hdr.protocol_id, xfer->hdr.seq, ret);
1242
1243	return ret;
1244}
1245
1246static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
1247			      struct scmi_xfer *xfer)
1248{
1249	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1250	struct scmi_info *info = handle_to_scmi_info(pi->handle);
1251
1252	xfer->rx.len = info->desc->max_msg_size;
1253}
1254
1255/**
1256 * do_xfer_with_response() - Do one transfer and wait until the delayed
1257 *	response is received
1258 *
1259 * @ph: Pointer to SCMI protocol handle
1260 * @xfer: Transfer to initiate and wait for response
1261 *
1262 * Using asynchronous commands in atomic/polling mode should be avoided since
1263 * it could cause long busy-waiting here, so ignore polling for the delayed
1264 * response and WARN if it was requested for this command transaction since
1265 * upper layers should refrain from issuing such kind of requests.
1266 *
1267 * The only other option would have been to refrain from using any asynchronous
1268 * command even if made available, when an atomic transport is detected, and
1269 * instead forcibly use the synchronous version (thing that can be easily
1270 * attained at the protocol layer), but this would also have led to longer
1271 * stalls of the channel for synchronous commands and possibly timeouts.
1272 * (in other words there is usually a good reason if a platform provides an
1273 *  asynchronous version of a command and we should prefer to use it...just not
1274 *  when using atomic/polling mode)
1275 *
1276 * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
1277 *	return corresponding error, else if all goes well, return 0.
1278 */
1279static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
1280				 struct scmi_xfer *xfer)
1281{
1282	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
1283	DECLARE_COMPLETION_ONSTACK(async_response);
1284
1285	xfer->async_done = &async_response;
1286
1287	/*
1288	 * Delayed responses should not be polled, so an async command should
1289	 * not have been used when requiring an atomic/poll context; WARN and
1290	 * perform instead a sleeping wait.
1291	 * (Note Async + IgnoreDelayedResponses are sent via do_xfer)
1292	 */
1293	WARN_ON_ONCE(xfer->hdr.poll_completion);
1294
1295	ret = do_xfer(ph, xfer);
1296	if (!ret) {
1297		if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
1298			dev_err(ph->dev,
1299				"timed out in delayed resp(caller: %pS)\n",
1300				(void *)_RET_IP_);
1301			ret = -ETIMEDOUT;
1302		} else if (xfer->hdr.status) {
1303			ret = scmi_to_linux_errno(xfer->hdr.status);
1304		}
1305	}
1306
1307	xfer->async_done = NULL;
1308	return ret;
1309}
1310
1311/**
1312 * xfer_get_init() - Allocate and initialise one message for transmit
1313 *
1314 * @ph: Pointer to SCMI protocol handle
1315 * @msg_id: Message identifier
1316 * @tx_size: transmit message size
1317 * @rx_size: receive message size
1318 * @p: pointer to the allocated and initialised message
1319 *
1320 * This function allocates the message using @scmi_xfer_get and
1321 * initialise the header.
1322 *
1323 * Return: 0 if all went fine with @p pointing to message, else
1324 *	corresponding error.
1325 */
1326static int xfer_get_init(const struct scmi_protocol_handle *ph,
1327			 u8 msg_id, size_t tx_size, size_t rx_size,
1328			 struct scmi_xfer **p)
1329{
1330	int ret;
1331	struct scmi_xfer *xfer;
1332	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1333	struct scmi_info *info = handle_to_scmi_info(pi->handle);
1334	struct scmi_xfers_info *minfo = &info->tx_minfo;
1335	struct device *dev = info->dev;
1336
1337	/* Ensure we have sane transfer sizes */
1338	if (rx_size > info->desc->max_msg_size ||
1339	    tx_size > info->desc->max_msg_size)
1340		return -ERANGE;
1341
1342	xfer = scmi_xfer_get(pi->handle, minfo);
1343	if (IS_ERR(xfer)) {
1344		ret = PTR_ERR(xfer);
1345		dev_err(dev, "failed to get free message slot(%d)\n", ret);
1346		return ret;
1347	}
1348
1349	/* Pick a sequence number and register this xfer as in-flight */
1350	ret = scmi_xfer_pending_set(xfer, minfo);
1351	if (ret) {
1352		dev_err(pi->handle->dev,
1353			"Failed to get monotonic token %d\n", ret);
1354		__scmi_xfer_put(minfo, xfer);
1355		return ret;
1356	}
1357
1358	xfer->tx.len = tx_size;
1359	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
1360	xfer->hdr.type = MSG_TYPE_COMMAND;
1361	xfer->hdr.id = msg_id;
1362	xfer->hdr.poll_completion = false;
1363
1364	*p = xfer;
1365
1366	return 0;
1367}
1368
1369/**
1370 * version_get() - command to get the revision of the SCMI entity
1371 *
1372 * @ph: Pointer to SCMI protocol handle
1373 * @version: Holds returned version of protocol.
1374 *
1375 * Updates the SCMI information in the internal data structure.
1376 *
1377 * Return: 0 if all went fine, else return appropriate error.
1378 */
1379static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
1380{
1381	int ret;
1382	__le32 *rev_info;
1383	struct scmi_xfer *t;
1384
1385	ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
1386	if (ret)
1387		return ret;
1388
1389	ret = do_xfer(ph, t);
1390	if (!ret) {
1391		rev_info = t->rx.buf;
1392		*version = le32_to_cpu(*rev_info);
1393	}
1394
1395	xfer_put(ph, t);
1396	return ret;
1397}
1398
1399/**
1400 * scmi_set_protocol_priv  - Set protocol specific data at init time
1401 *
1402 * @ph: A reference to the protocol handle.
1403 * @priv: The private data to set.
1404 * @version: The detected protocol version for the core to register.
1405 *
1406 * Return: 0 on Success
1407 */
1408static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
1409				  void *priv, u32 version)
1410{
1411	struct scmi_protocol_instance *pi = ph_to_pi(ph);
1412
1413	pi->priv = priv;
1414	pi->version = version;
1415
1416	return 0;
1417}
1418
1419/**
1420 * scmi_get_protocol_priv  - Set protocol specific data at init time
1421 *
1422 * @ph: A reference to the protocol handle.
1423 *
1424 * Return: Protocol private data if any was set.
1425 */
1426static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
1427{
1428	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1429
1430	return pi->priv;
1431}
1432
1433static const struct scmi_xfer_ops xfer_ops = {
1434	.version_get = version_get,
1435	.xfer_get_init = xfer_get_init,
1436	.reset_rx_to_maxsz = reset_rx_to_maxsz,
1437	.do_xfer = do_xfer,
1438	.do_xfer_with_response = do_xfer_with_response,
1439	.xfer_put = xfer_put,
1440};
1441
1442struct scmi_msg_resp_domain_name_get {
1443	__le32 flags;
1444	u8 name[SCMI_MAX_STR_SIZE];
1445};
1446
1447/**
1448 * scmi_common_extended_name_get  - Common helper to get extended resources name
1449 * @ph: A protocol handle reference.
1450 * @cmd_id: The specific command ID to use.
1451 * @res_id: The specific resource ID to use.
1452 * @flags: A pointer to specific flags to use, if any.
1453 * @name: A pointer to the preallocated area where the retrieved name will be
1454 *	  stored as a NULL terminated string.
1455 * @len: The len in bytes of the @name char array.
1456 *
1457 * Return: 0 on Succcess
1458 */
1459static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph,
1460					 u8 cmd_id, u32 res_id, u32 *flags,
1461					 char *name, size_t len)
1462{
1463	int ret;
1464	size_t txlen;
1465	struct scmi_xfer *t;
1466	struct scmi_msg_resp_domain_name_get *resp;
1467
1468	txlen = !flags ? sizeof(res_id) : sizeof(res_id) + sizeof(*flags);
1469	ret = ph->xops->xfer_get_init(ph, cmd_id, txlen, sizeof(*resp), &t);
1470	if (ret)
1471		goto out;
1472
1473	put_unaligned_le32(res_id, t->tx.buf);
1474	if (flags)
1475		put_unaligned_le32(*flags, t->tx.buf + sizeof(res_id));
1476	resp = t->rx.buf;
1477
1478	ret = ph->xops->do_xfer(ph, t);
1479	if (!ret)
1480		strscpy(name, resp->name, len);
1481
1482	ph->xops->xfer_put(ph, t);
1483out:
1484	if (ret)
1485		dev_warn(ph->dev,
1486			 "Failed to get extended name - id:%u (ret:%d). Using %s\n",
1487			 res_id, ret, name);
1488	return ret;
1489}
1490
1491/**
1492 * struct scmi_iterator  - Iterator descriptor
1493 * @msg: A reference to the message TX buffer; filled by @prepare_message with
1494 *	 a proper custom command payload for each multi-part command request.
1495 * @resp: A reference to the response RX buffer; used by @update_state and
1496 *	  @process_response to parse the multi-part replies.
1497 * @t: A reference to the underlying xfer initialized and used transparently by
1498 *     the iterator internal routines.
1499 * @ph: A reference to the associated protocol handle to be used.
1500 * @ops: A reference to the custom provided iterator operations.
1501 * @state: The current iterator state; used and updated in turn by the iterators
1502 *	   internal routines and by the caller-provided @scmi_iterator_ops.
1503 * @priv: A reference to optional private data as provided by the caller and
1504 *	  passed back to the @@scmi_iterator_ops.
1505 */
1506struct scmi_iterator {
1507	void *msg;
1508	void *resp;
1509	struct scmi_xfer *t;
1510	const struct scmi_protocol_handle *ph;
1511	struct scmi_iterator_ops *ops;
1512	struct scmi_iterator_state state;
1513	void *priv;
1514};
1515
1516static void *scmi_iterator_init(const struct scmi_protocol_handle *ph,
1517				struct scmi_iterator_ops *ops,
1518				unsigned int max_resources, u8 msg_id,
1519				size_t tx_size, void *priv)
1520{
1521	int ret;
1522	struct scmi_iterator *i;
1523
1524	i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL);
1525	if (!i)
1526		return ERR_PTR(-ENOMEM);
1527
1528	i->ph = ph;
1529	i->ops = ops;
1530	i->priv = priv;
1531
1532	ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t);
1533	if (ret) {
1534		devm_kfree(ph->dev, i);
1535		return ERR_PTR(ret);
1536	}
1537
1538	i->state.max_resources = max_resources;
1539	i->msg = i->t->tx.buf;
1540	i->resp = i->t->rx.buf;
1541
1542	return i;
1543}
1544
1545static int scmi_iterator_run(void *iter)
1546{
1547	int ret = -EINVAL;
1548	struct scmi_iterator_ops *iops;
1549	const struct scmi_protocol_handle *ph;
1550	struct scmi_iterator_state *st;
1551	struct scmi_iterator *i = iter;
1552
1553	if (!i || !i->ops || !i->ph)
1554		return ret;
1555
1556	iops = i->ops;
1557	ph = i->ph;
1558	st = &i->state;
1559
1560	do {
1561		iops->prepare_message(i->msg, st->desc_index, i->priv);
1562		ret = ph->xops->do_xfer(ph, i->t);
1563		if (ret)
1564			break;
1565
1566		st->rx_len = i->t->rx.len;
1567		ret = iops->update_state(st, i->resp, i->priv);
1568		if (ret)
1569			break;
1570
1571		if (st->num_returned > st->max_resources - st->desc_index) {
1572			dev_err(ph->dev,
1573				"No. of resources can't exceed %d\n",
1574				st->max_resources);
1575			ret = -EINVAL;
1576			break;
1577		}
1578
1579		for (st->loop_idx = 0; st->loop_idx < st->num_returned;
1580		     st->loop_idx++) {
1581			ret = iops->process_response(ph, i->resp, st, i->priv);
1582			if (ret)
1583				goto out;
1584		}
1585
1586		st->desc_index += st->num_returned;
1587		ph->xops->reset_rx_to_maxsz(ph, i->t);
1588		/*
1589		 * check for both returned and remaining to avoid infinite
1590		 * loop due to buggy firmware
1591		 */
1592	} while (st->num_returned && st->num_remaining);
1593
1594out:
1595	/* Finalize and destroy iterator */
1596	ph->xops->xfer_put(ph, i->t);
1597	devm_kfree(ph->dev, i);
1598
1599	return ret;
1600}
1601
1602struct scmi_msg_get_fc_info {
1603	__le32 domain;
1604	__le32 message_id;
1605};
1606
1607struct scmi_msg_resp_desc_fc {
1608	__le32 attr;
1609#define SUPPORTS_DOORBELL(x)		((x) & BIT(0))
1610#define DOORBELL_REG_WIDTH(x)		FIELD_GET(GENMASK(2, 1), (x))
1611	__le32 rate_limit;
1612	__le32 chan_addr_low;
1613	__le32 chan_addr_high;
1614	__le32 chan_size;
1615	__le32 db_addr_low;
1616	__le32 db_addr_high;
1617	__le32 db_set_lmask;
1618	__le32 db_set_hmask;
1619	__le32 db_preserve_lmask;
1620	__le32 db_preserve_hmask;
1621};
1622
1623static void
1624scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
1625			     u8 describe_id, u32 message_id, u32 valid_size,
1626			     u32 domain, void __iomem **p_addr,
1627			     struct scmi_fc_db_info **p_db, u32 *rate_limit)
1628{
1629	int ret;
1630	u32 flags;
1631	u64 phys_addr;
1632	u8 size;
1633	void __iomem *addr;
1634	struct scmi_xfer *t;
1635	struct scmi_fc_db_info *db = NULL;
1636	struct scmi_msg_get_fc_info *info;
1637	struct scmi_msg_resp_desc_fc *resp;
1638	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1639
1640	if (!p_addr) {
1641		ret = -EINVAL;
1642		goto err_out;
1643	}
1644
1645	ret = ph->xops->xfer_get_init(ph, describe_id,
1646				      sizeof(*info), sizeof(*resp), &t);
1647	if (ret)
1648		goto err_out;
1649
1650	info = t->tx.buf;
1651	info->domain = cpu_to_le32(domain);
1652	info->message_id = cpu_to_le32(message_id);
1653
1654	/*
1655	 * Bail out on error leaving fc_info addresses zeroed; this includes
1656	 * the case in which the requested domain/message_id does NOT support
1657	 * fastchannels at all.
1658	 */
1659	ret = ph->xops->do_xfer(ph, t);
1660	if (ret)
1661		goto err_xfer;
1662
1663	resp = t->rx.buf;
1664	flags = le32_to_cpu(resp->attr);
1665	size = le32_to_cpu(resp->chan_size);
1666	if (size != valid_size) {
1667		ret = -EINVAL;
1668		goto err_xfer;
1669	}
1670
1671	if (rate_limit)
1672		*rate_limit = le32_to_cpu(resp->rate_limit) & GENMASK(19, 0);
1673
1674	phys_addr = le32_to_cpu(resp->chan_addr_low);
1675	phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
1676	addr = devm_ioremap(ph->dev, phys_addr, size);
1677	if (!addr) {
1678		ret = -EADDRNOTAVAIL;
1679		goto err_xfer;
1680	}
1681
1682	*p_addr = addr;
1683
1684	if (p_db && SUPPORTS_DOORBELL(flags)) {
1685		db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
1686		if (!db) {
1687			ret = -ENOMEM;
1688			goto err_db;
1689		}
1690
1691		size = 1 << DOORBELL_REG_WIDTH(flags);
1692		phys_addr = le32_to_cpu(resp->db_addr_low);
1693		phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
1694		addr = devm_ioremap(ph->dev, phys_addr, size);
1695		if (!addr) {
1696			ret = -EADDRNOTAVAIL;
1697			goto err_db_mem;
1698		}
1699
1700		db->addr = addr;
1701		db->width = size;
1702		db->set = le32_to_cpu(resp->db_set_lmask);
1703		db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
1704		db->mask = le32_to_cpu(resp->db_preserve_lmask);
1705		db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
1706
1707		*p_db = db;
1708	}
1709
1710	ph->xops->xfer_put(ph, t);
1711
1712	dev_dbg(ph->dev,
1713		"Using valid FC for protocol %X [MSG_ID:%u / RES_ID:%u]\n",
1714		pi->proto->id, message_id, domain);
1715
1716	return;
1717
1718err_db_mem:
1719	devm_kfree(ph->dev, db);
1720
1721err_db:
1722	*p_addr = NULL;
1723
1724err_xfer:
1725	ph->xops->xfer_put(ph, t);
1726
1727err_out:
1728	dev_warn(ph->dev,
1729		 "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n",
1730		 pi->proto->id, message_id, domain, ret);
1731}
1732
1733#define SCMI_PROTO_FC_RING_DB(w)			\
1734do {							\
1735	u##w val = 0;					\
1736							\
1737	if (db->mask)					\
1738		val = ioread##w(db->addr) & db->mask;	\
1739	iowrite##w((u##w)db->set | val, db->addr);	\
1740} while (0)
1741
1742static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
1743{
1744	if (!db || !db->addr)
1745		return;
1746
1747	if (db->width == 1)
1748		SCMI_PROTO_FC_RING_DB(8);
1749	else if (db->width == 2)
1750		SCMI_PROTO_FC_RING_DB(16);
1751	else if (db->width == 4)
1752		SCMI_PROTO_FC_RING_DB(32);
1753	else /* db->width == 8 */
1754#ifdef CONFIG_64BIT
1755		SCMI_PROTO_FC_RING_DB(64);
1756#else
1757	{
1758		u64 val = 0;
1759
1760		if (db->mask)
1761			val = ioread64_hi_lo(db->addr) & db->mask;
1762		iowrite64_hi_lo(db->set | val, db->addr);
1763	}
1764#endif
1765}
1766
1767/**
1768 * scmi_protocol_msg_check  - Check protocol message attributes
1769 *
1770 * @ph: A reference to the protocol handle.
1771 * @message_id: The ID of the message to check.
1772 * @attributes: A parameter to optionally return the retrieved message
1773 *		attributes, in case of Success.
1774 *
1775 * An helper to check protocol message attributes for a specific protocol
1776 * and message pair.
1777 *
1778 * Return: 0 on SUCCESS
1779 */
1780static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph,
1781				   u32 message_id, u32 *attributes)
1782{
1783	int ret;
1784	struct scmi_xfer *t;
1785
1786	ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES,
1787			    sizeof(__le32), 0, &t);
1788	if (ret)
1789		return ret;
1790
1791	put_unaligned_le32(message_id, t->tx.buf);
1792	ret = do_xfer(ph, t);
1793	if (!ret && attributes)
1794		*attributes = get_unaligned_le32(t->rx.buf);
1795	xfer_put(ph, t);
1796
1797	return ret;
1798}
1799
1800static const struct scmi_proto_helpers_ops helpers_ops = {
1801	.extended_name_get = scmi_common_extended_name_get,
1802	.iter_response_init = scmi_iterator_init,
1803	.iter_response_run = scmi_iterator_run,
1804	.protocol_msg_check = scmi_protocol_msg_check,
1805	.fastchannel_init = scmi_common_fastchannel_init,
1806	.fastchannel_db_ring = scmi_common_fastchannel_db_ring,
1807};
1808
1809/**
1810 * scmi_revision_area_get  - Retrieve version memory area.
1811 *
1812 * @ph: A reference to the protocol handle.
1813 *
1814 * A helper to grab the version memory area reference during SCMI Base protocol
1815 * initialization.
1816 *
1817 * Return: A reference to the version memory area associated to the SCMI
1818 *	   instance underlying this protocol handle.
1819 */
1820struct scmi_revision_info *
1821scmi_revision_area_get(const struct scmi_protocol_handle *ph)
1822{
1823	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1824
1825	return pi->handle->version;
1826}
1827
1828/**
1829 * scmi_protocol_version_negotiate  - Negotiate protocol version
1830 *
1831 * @ph: A reference to the protocol handle.
1832 *
1833 * An helper to negotiate a protocol version different from the latest
1834 * advertised as supported from the platform: on Success backward
1835 * compatibility is assured by the platform.
1836 *
1837 * Return: 0 on Success
1838 */
1839static int scmi_protocol_version_negotiate(struct scmi_protocol_handle *ph)
1840{
1841	int ret;
1842	struct scmi_xfer *t;
1843	struct scmi_protocol_instance *pi = ph_to_pi(ph);
1844
1845	/* At first check if NEGOTIATE_PROTOCOL_VERSION is supported ... */
1846	ret = scmi_protocol_msg_check(ph, NEGOTIATE_PROTOCOL_VERSION, NULL);
1847	if (ret)
1848		return ret;
1849
1850	/* ... then attempt protocol version negotiation */
1851	ret = xfer_get_init(ph, NEGOTIATE_PROTOCOL_VERSION,
1852			    sizeof(__le32), 0, &t);
1853	if (ret)
1854		return ret;
1855
1856	put_unaligned_le32(pi->proto->supported_version, t->tx.buf);
1857	ret = do_xfer(ph, t);
1858	if (!ret)
1859		pi->negotiated_version = pi->proto->supported_version;
1860
1861	xfer_put(ph, t);
1862
1863	return ret;
1864}
1865
1866/**
1867 * scmi_alloc_init_protocol_instance  - Allocate and initialize a protocol
1868 * instance descriptor.
1869 * @info: The reference to the related SCMI instance.
1870 * @proto: The protocol descriptor.
1871 *
1872 * Allocate a new protocol instance descriptor, using the provided @proto
1873 * description, against the specified SCMI instance @info, and initialize it;
1874 * all resources management is handled via a dedicated per-protocol devres
1875 * group.
1876 *
1877 * Context: Assumes to be called with @protocols_mtx already acquired.
1878 * Return: A reference to a freshly allocated and initialized protocol instance
1879 *	   or ERR_PTR on failure. On failure the @proto reference is at first
1880 *	   put using @scmi_protocol_put() before releasing all the devres group.
1881 */
1882static struct scmi_protocol_instance *
1883scmi_alloc_init_protocol_instance(struct scmi_info *info,
1884				  const struct scmi_protocol *proto)
1885{
1886	int ret = -ENOMEM;
1887	void *gid;
1888	struct scmi_protocol_instance *pi;
1889	const struct scmi_handle *handle = &info->handle;
1890
1891	/* Protocol specific devres group */
1892	gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
1893	if (!gid) {
1894		scmi_protocol_put(proto->id);
1895		goto out;
1896	}
1897
1898	pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
1899	if (!pi)
1900		goto clean;
1901
1902	pi->gid = gid;
1903	pi->proto = proto;
1904	pi->handle = handle;
1905	pi->ph.dev = handle->dev;
1906	pi->ph.xops = &xfer_ops;
1907	pi->ph.hops = &helpers_ops;
1908	pi->ph.set_priv = scmi_set_protocol_priv;
1909	pi->ph.get_priv = scmi_get_protocol_priv;
1910	refcount_set(&pi->users, 1);
1911	/* proto->init is assured NON NULL by scmi_protocol_register */
1912	ret = pi->proto->instance_init(&pi->ph);
1913	if (ret)
1914		goto clean;
1915
1916	ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
1917			GFP_KERNEL);
1918	if (ret != proto->id)
1919		goto clean;
1920
1921	/*
1922	 * Warn but ignore events registration errors since we do not want
1923	 * to skip whole protocols if their notifications are messed up.
1924	 */
1925	if (pi->proto->events) {
1926		ret = scmi_register_protocol_events(handle, pi->proto->id,
1927						    &pi->ph,
1928						    pi->proto->events);
1929		if (ret)
1930			dev_warn(handle->dev,
1931				 "Protocol:%X - Events Registration Failed - err:%d\n",
1932				 pi->proto->id, ret);
1933	}
1934
1935	devres_close_group(handle->dev, pi->gid);
1936	dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);
1937
1938	if (pi->version > proto->supported_version) {
1939		ret = scmi_protocol_version_negotiate(&pi->ph);
1940		if (!ret) {
1941			dev_info(handle->dev,
1942				 "Protocol 0x%X successfully negotiated version 0x%X\n",
1943				 proto->id, pi->negotiated_version);
1944		} else {
1945			dev_warn(handle->dev,
1946				 "Detected UNSUPPORTED higher version 0x%X for protocol 0x%X.\n",
1947				 pi->version, pi->proto->id);
1948			dev_warn(handle->dev,
1949				 "Trying version 0x%X. Backward compatibility is NOT assured.\n",
1950				 pi->proto->supported_version);
1951		}
1952	}
1953
1954	return pi;
1955
1956clean:
1957	/* Take care to put the protocol module's owner before releasing all */
1958	scmi_protocol_put(proto->id);
1959	devres_release_group(handle->dev, gid);
1960out:
1961	return ERR_PTR(ret);
1962}
1963
1964/**
1965 * scmi_get_protocol_instance  - Protocol initialization helper.
1966 * @handle: A reference to the SCMI platform instance.
1967 * @protocol_id: The protocol being requested.
1968 *
1969 * In case the required protocol has never been requested before for this
1970 * instance, allocate and initialize all the needed structures while handling
1971 * resource allocation with a dedicated per-protocol devres subgroup.
1972 *
1973 * Return: A reference to an initialized protocol instance or error on failure:
1974 *	   in particular returns -EPROBE_DEFER when the desired protocol could
1975 *	   NOT be found.
1976 */
1977static struct scmi_protocol_instance * __must_check
1978scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
1979{
1980	struct scmi_protocol_instance *pi;
1981	struct scmi_info *info = handle_to_scmi_info(handle);
1982
1983	mutex_lock(&info->protocols_mtx);
1984	pi = idr_find(&info->protocols, protocol_id);
1985
1986	if (pi) {
1987		refcount_inc(&pi->users);
1988	} else {
1989		const struct scmi_protocol *proto;
1990
1991		/* Fails if protocol not registered on bus */
1992		proto = scmi_protocol_get(protocol_id);
1993		if (proto)
1994			pi = scmi_alloc_init_protocol_instance(info, proto);
1995		else
1996			pi = ERR_PTR(-EPROBE_DEFER);
1997	}
1998	mutex_unlock(&info->protocols_mtx);
1999
2000	return pi;
2001}
2002
2003/**
2004 * scmi_protocol_acquire  - Protocol acquire
2005 * @handle: A reference to the SCMI platform instance.
2006 * @protocol_id: The protocol being requested.
2007 *
2008 * Register a new user for the requested protocol on the specified SCMI
2009 * platform instance, possibly triggering its initialization on first user.
2010 *
2011 * Return: 0 if protocol was acquired successfully.
2012 */
2013int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
2014{
2015	return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
2016}
2017
2018/**
2019 * scmi_protocol_release  - Protocol de-initialization helper.
2020 * @handle: A reference to the SCMI platform instance.
2021 * @protocol_id: The protocol being requested.
2022 *
2023 * Remove one user for the specified protocol and triggers de-initialization
2024 * and resources de-allocation once the last user has gone.
2025 */
2026void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
2027{
2028	struct scmi_info *info = handle_to_scmi_info(handle);
2029	struct scmi_protocol_instance *pi;
2030
2031	mutex_lock(&info->protocols_mtx);
2032	pi = idr_find(&info->protocols, protocol_id);
2033	if (WARN_ON(!pi))
2034		goto out;
2035
2036	if (refcount_dec_and_test(&pi->users)) {
2037		void *gid = pi->gid;
2038
2039		if (pi->proto->events)
2040			scmi_deregister_protocol_events(handle, protocol_id);
2041
2042		if (pi->proto->instance_deinit)
2043			pi->proto->instance_deinit(&pi->ph);
2044
2045		idr_remove(&info->protocols, protocol_id);
2046
2047		scmi_protocol_put(protocol_id);
2048
2049		devres_release_group(handle->dev, gid);
2050		dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
2051			protocol_id);
2052	}
2053
2054out:
2055	mutex_unlock(&info->protocols_mtx);
2056}
2057
2058void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
2059				     u8 *prot_imp)
2060{
2061	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
2062	struct scmi_info *info = handle_to_scmi_info(pi->handle);
2063
2064	info->protocols_imp = prot_imp;
2065}
2066
2067static bool
2068scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
2069{
2070	int i;
2071	struct scmi_info *info = handle_to_scmi_info(handle);
2072	struct scmi_revision_info *rev = handle->version;
2073
2074	if (!info->protocols_imp)
2075		return false;
2076
2077	for (i = 0; i < rev->num_protocols; i++)
2078		if (info->protocols_imp[i] == prot_id)
2079			return true;
2080	return false;
2081}
2082
2083struct scmi_protocol_devres {
2084	const struct scmi_handle *handle;
2085	u8 protocol_id;
2086};
2087
2088static void scmi_devm_release_protocol(struct device *dev, void *res)
2089{
2090	struct scmi_protocol_devres *dres = res;
2091
2092	scmi_protocol_release(dres->handle, dres->protocol_id);
2093}
2094
2095static struct scmi_protocol_instance __must_check *
2096scmi_devres_protocol_instance_get(struct scmi_device *sdev, u8 protocol_id)
2097{
2098	struct scmi_protocol_instance *pi;
2099	struct scmi_protocol_devres *dres;
2100
2101	dres = devres_alloc(scmi_devm_release_protocol,
2102			    sizeof(*dres), GFP_KERNEL);
2103	if (!dres)
2104		return ERR_PTR(-ENOMEM);
2105
2106	pi = scmi_get_protocol_instance(sdev->handle, protocol_id);
2107	if (IS_ERR(pi)) {
2108		devres_free(dres);
2109		return pi;
2110	}
2111
2112	dres->handle = sdev->handle;
2113	dres->protocol_id = protocol_id;
2114	devres_add(&sdev->dev, dres);
2115
2116	return pi;
2117}
2118
2119/**
2120 * scmi_devm_protocol_get  - Devres managed get protocol operations and handle
2121 * @sdev: A reference to an scmi_device whose embedded struct device is to
2122 *	  be used for devres accounting.
2123 * @protocol_id: The protocol being requested.
2124 * @ph: A pointer reference used to pass back the associated protocol handle.
2125 *
2126 * Get hold of a protocol accounting for its usage, eventually triggering its
2127 * initialization, and returning the protocol specific operations and related
2128 * protocol handle which will be used as first argument in most of the
2129 * protocols operations methods.
2130 * Being a devres based managed method, protocol hold will be automatically
2131 * released, and possibly de-initialized on last user, once the SCMI driver
2132 * owning the scmi_device is unbound from it.
2133 *
2134 * Return: A reference to the requested protocol operations or error.
2135 *	   Must be checked for errors by caller.
2136 */
2137static const void __must_check *
2138scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
2139		       struct scmi_protocol_handle **ph)
2140{
2141	struct scmi_protocol_instance *pi;
2142
2143	if (!ph)
2144		return ERR_PTR(-EINVAL);
2145
2146	pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
2147	if (IS_ERR(pi))
2148		return pi;
2149
2150	*ph = &pi->ph;
2151
2152	return pi->proto->ops;
2153}
2154
2155/**
2156 * scmi_devm_protocol_acquire  - Devres managed helper to get hold of a protocol
2157 * @sdev: A reference to an scmi_device whose embedded struct device is to
2158 *	  be used for devres accounting.
2159 * @protocol_id: The protocol being requested.
2160 *
2161 * Get hold of a protocol accounting for its usage, possibly triggering its
2162 * initialization but without getting access to its protocol specific operations
2163 * and handle.
2164 *
2165 * Being a devres based managed method, protocol hold will be automatically
2166 * released, and possibly de-initialized on last user, once the SCMI driver
2167 * owning the scmi_device is unbound from it.
2168 *
2169 * Return: 0 on SUCCESS
2170 */
2171static int __must_check scmi_devm_protocol_acquire(struct scmi_device *sdev,
2172						   u8 protocol_id)
2173{
2174	struct scmi_protocol_instance *pi;
2175
2176	pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
2177	if (IS_ERR(pi))
2178		return PTR_ERR(pi);
2179
2180	return 0;
2181}
2182
2183static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
2184{
2185	struct scmi_protocol_devres *dres = res;
2186
2187	if (WARN_ON(!dres || !data))
2188		return 0;
2189
2190	return dres->protocol_id == *((u8 *)data);
2191}
2192
2193/**
2194 * scmi_devm_protocol_put  - Devres managed put protocol operations and handle
2195 * @sdev: A reference to an scmi_device whose embedded struct device is to
2196 *	  be used for devres accounting.
2197 * @protocol_id: The protocol being requested.
2198 *
2199 * Explicitly release a protocol hold previously obtained calling the above
2200 * @scmi_devm_protocol_get.
2201 */
2202static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
2203{
2204	int ret;
2205
2206	ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
2207			     scmi_devm_protocol_match, &protocol_id);
2208	WARN_ON(ret);
2209}
2210
2211/**
2212 * scmi_is_transport_atomic  - Method to check if underlying transport for an
2213 * SCMI instance is configured as atomic.
2214 *
2215 * @handle: A reference to the SCMI platform instance.
2216 * @atomic_threshold: An optional return value for the system wide currently
2217 *		      configured threshold for atomic operations.
2218 *
2219 * Return: True if transport is configured as atomic
2220 */
2221static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
2222				     unsigned int *atomic_threshold)
2223{
2224	bool ret;
2225	struct scmi_info *info = handle_to_scmi_info(handle);
2226
2227	ret = info->desc->atomic_enabled &&
2228		is_transport_polling_capable(info->desc);
2229	if (ret && atomic_threshold)
2230		*atomic_threshold = info->atomic_threshold;
2231
2232	return ret;
2233}
2234
2235/**
2236 * scmi_handle_get() - Get the SCMI handle for a device
2237 *
2238 * @dev: pointer to device for which we want SCMI handle
2239 *
2240 * NOTE: The function does not track individual clients of the framework
2241 * and is expected to be maintained by caller of SCMI protocol library.
2242 * scmi_handle_put must be balanced with successful scmi_handle_get
2243 *
2244 * Return: pointer to handle if successful, NULL on error
2245 */
2246static struct scmi_handle *scmi_handle_get(struct device *dev)
2247{
2248	struct list_head *p;
2249	struct scmi_info *info;
2250	struct scmi_handle *handle = NULL;
2251
2252	mutex_lock(&scmi_list_mutex);
2253	list_for_each(p, &scmi_list) {
2254		info = list_entry(p, struct scmi_info, node);
2255		if (dev->parent == info->dev) {
2256			info->users++;
2257			handle = &info->handle;
2258			break;
2259		}
2260	}
2261	mutex_unlock(&scmi_list_mutex);
2262
2263	return handle;
2264}
2265
2266/**
2267 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
2268 *
2269 * @handle: handle acquired by scmi_handle_get
2270 *
2271 * NOTE: The function does not track individual clients of the framework
2272 * and is expected to be maintained by caller of SCMI protocol library.
2273 * scmi_handle_put must be balanced with successful scmi_handle_get
2274 *
2275 * Return: 0 is successfully released
2276 *	if null was passed, it returns -EINVAL;
2277 */
2278static int scmi_handle_put(const struct scmi_handle *handle)
2279{
2280	struct scmi_info *info;
2281
2282	if (!handle)
2283		return -EINVAL;
2284
2285	info = handle_to_scmi_info(handle);
2286	mutex_lock(&scmi_list_mutex);
2287	if (!WARN_ON(!info->users))
2288		info->users--;
2289	mutex_unlock(&scmi_list_mutex);
2290
2291	return 0;
2292}
2293
2294static void scmi_device_link_add(struct device *consumer,
2295				 struct device *supplier)
2296{
2297	struct device_link *link;
2298
2299	link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER);
2300
2301	WARN_ON(!link);
2302}
2303
2304static void scmi_set_handle(struct scmi_device *scmi_dev)
2305{
2306	scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
2307	if (scmi_dev->handle)
2308		scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev);
2309}
2310
2311static int __scmi_xfer_info_init(struct scmi_info *sinfo,
2312				 struct scmi_xfers_info *info)
2313{
2314	int i;
2315	struct scmi_xfer *xfer;
2316	struct device *dev = sinfo->dev;
2317	const struct scmi_desc *desc = sinfo->desc;
2318
2319	/* Pre-allocated messages, no more than what hdr.seq can support */
2320	if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) {
2321		dev_err(dev,
2322			"Invalid maximum messages %d, not in range [1 - %lu]\n",
2323			info->max_msg, MSG_TOKEN_MAX);
2324		return -EINVAL;
2325	}
2326
2327	hash_init(info->pending_xfers);
2328
2329	/* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
2330	info->xfer_alloc_table = devm_bitmap_zalloc(dev, MSG_TOKEN_MAX,
2331						    GFP_KERNEL);
2332	if (!info->xfer_alloc_table)
2333		return -ENOMEM;
2334
2335	/*
2336	 * Preallocate a number of xfers equal to max inflight messages,
2337	 * pre-initialize the buffer pointer to pre-allocated buffers and
2338	 * attach all of them to the free list
2339	 */
2340	INIT_HLIST_HEAD(&info->free_xfers);
2341	for (i = 0; i < info->max_msg; i++) {
2342		xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
2343		if (!xfer)
2344			return -ENOMEM;
2345
2346		xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
2347					    GFP_KERNEL);
2348		if (!xfer->rx.buf)
2349			return -ENOMEM;
2350
2351		xfer->tx.buf = xfer->rx.buf;
2352		init_completion(&xfer->done);
2353		spin_lock_init(&xfer->lock);
2354
2355		/* Add initialized xfer to the free list */
2356		hlist_add_head(&xfer->node, &info->free_xfers);
2357	}
2358
2359	spin_lock_init(&info->xfer_lock);
2360
2361	return 0;
2362}
2363
2364static int scmi_channels_max_msg_configure(struct scmi_info *sinfo)
2365{
2366	const struct scmi_desc *desc = sinfo->desc;
2367
2368	if (!desc->ops->get_max_msg) {
2369		sinfo->tx_minfo.max_msg = desc->max_msg;
2370		sinfo->rx_minfo.max_msg = desc->max_msg;
2371	} else {
2372		struct scmi_chan_info *base_cinfo;
2373
2374		base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
2375		if (!base_cinfo)
2376			return -EINVAL;
2377		sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo);
2378
2379		/* RX channel is optional so can be skipped */
2380		base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
2381		if (base_cinfo)
2382			sinfo->rx_minfo.max_msg =
2383				desc->ops->get_max_msg(base_cinfo);
2384	}
2385
2386	return 0;
2387}
2388
2389static int scmi_xfer_info_init(struct scmi_info *sinfo)
2390{
2391	int ret;
2392
2393	ret = scmi_channels_max_msg_configure(sinfo);
2394	if (ret)
2395		return ret;
2396
2397	ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
2398	if (!ret && !idr_is_empty(&sinfo->rx_idr))
2399		ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
2400
2401	return ret;
2402}
2403
2404static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node,
2405			   int prot_id, bool tx)
2406{
2407	int ret, idx;
2408	char name[32];
2409	struct scmi_chan_info *cinfo;
2410	struct idr *idr;
2411	struct scmi_device *tdev = NULL;
2412
2413	/* Transmit channel is first entry i.e. index 0 */
2414	idx = tx ? 0 : 1;
2415	idr = tx ? &info->tx_idr : &info->rx_idr;
2416
2417	if (!info->desc->ops->chan_available(of_node, idx)) {
2418		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
2419		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
2420			return -EINVAL;
2421		goto idr_alloc;
2422	}
2423
2424	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
2425	if (!cinfo)
2426		return -ENOMEM;
2427
2428	cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
2429
2430	/* Create a unique name for this transport device */
2431	snprintf(name, 32, "__scmi_transport_device_%s_%02X",
2432		 idx ? "rx" : "tx", prot_id);
2433	/* Create a uniquely named, dedicated transport device for this chan */
2434	tdev = scmi_device_create(of_node, info->dev, prot_id, name);
2435	if (!tdev) {
2436		dev_err(info->dev,
2437			"failed to create transport device (%s)\n", name);
2438		devm_kfree(info->dev, cinfo);
2439		return -EINVAL;
2440	}
2441	of_node_get(of_node);
2442
2443	cinfo->id = prot_id;
2444	cinfo->dev = &tdev->dev;
2445	ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
2446	if (ret) {
2447		of_node_put(of_node);
2448		scmi_device_destroy(info->dev, prot_id, name);
2449		devm_kfree(info->dev, cinfo);
2450		return ret;
2451	}
2452
2453	if (tx && is_polling_required(cinfo, info->desc)) {
2454		if (is_transport_polling_capable(info->desc))
2455			dev_info(&tdev->dev,
2456				 "Enabled polling mode TX channel - prot_id:%d\n",
2457				 prot_id);
2458		else
2459			dev_warn(&tdev->dev,
2460				 "Polling mode NOT supported by transport.\n");
2461	}
2462
2463idr_alloc:
2464	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
2465	if (ret != prot_id) {
2466		dev_err(info->dev,
2467			"unable to allocate SCMI idr slot err %d\n", ret);
2468		/* Destroy channel and device only if created by this call. */
2469		if (tdev) {
2470			of_node_put(of_node);
2471			scmi_device_destroy(info->dev, prot_id, name);
2472			devm_kfree(info->dev, cinfo);
2473		}
2474		return ret;
2475	}
2476
2477	cinfo->handle = &info->handle;
2478	return 0;
2479}
2480
2481static inline int
2482scmi_txrx_setup(struct scmi_info *info, struct device_node *of_node,
2483		int prot_id)
2484{
2485	int ret = scmi_chan_setup(info, of_node, prot_id, true);
2486
2487	if (!ret) {
2488		/* Rx is optional, report only memory errors */
2489		ret = scmi_chan_setup(info, of_node, prot_id, false);
2490		if (ret && ret != -ENOMEM)
2491			ret = 0;
2492	}
2493
2494	return ret;
2495}
2496
2497/**
2498 * scmi_channels_setup  - Helper to initialize all required channels
2499 *
2500 * @info: The SCMI instance descriptor.
2501 *
2502 * Initialize all the channels found described in the DT against the underlying
2503 * configured transport using custom defined dedicated devices instead of
2504 * borrowing devices from the SCMI drivers; this way channels are initialized
2505 * upfront during core SCMI stack probing and are no more coupled with SCMI
2506 * devices used by SCMI drivers.
2507 *
2508 * Note that, even though a pair of TX/RX channels is associated to each
2509 * protocol defined in the DT, a distinct freshly initialized channel is
2510 * created only if the DT node for the protocol at hand describes a dedicated
2511 * channel: in all the other cases the common BASE protocol channel is reused.
2512 *
2513 * Return: 0 on Success
2514 */
2515static int scmi_channels_setup(struct scmi_info *info)
2516{
2517	int ret;
2518	struct device_node *child, *top_np = info->dev->of_node;
2519
2520	/* Initialize a common generic channel at first */
2521	ret = scmi_txrx_setup(info, top_np, SCMI_PROTOCOL_BASE);
2522	if (ret)
2523		return ret;
2524
2525	for_each_available_child_of_node(top_np, child) {
2526		u32 prot_id;
2527
2528		if (of_property_read_u32(child, "reg", &prot_id))
2529			continue;
2530
2531		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
2532			dev_err(info->dev,
2533				"Out of range protocol %d\n", prot_id);
2534
2535		ret = scmi_txrx_setup(info, child, prot_id);
2536		if (ret) {
2537			of_node_put(child);
2538			return ret;
2539		}
2540	}
2541
2542	return 0;
2543}
2544
2545static int scmi_chan_destroy(int id, void *p, void *idr)
2546{
2547	struct scmi_chan_info *cinfo = p;
2548
2549	if (cinfo->dev) {
2550		struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
2551		struct scmi_device *sdev = to_scmi_dev(cinfo->dev);
2552
2553		of_node_put(cinfo->dev->of_node);
2554		scmi_device_destroy(info->dev, id, sdev->name);
2555		cinfo->dev = NULL;
2556	}
2557
2558	idr_remove(idr, id);
2559
2560	return 0;
2561}
2562
2563static void scmi_cleanup_channels(struct scmi_info *info, struct idr *idr)
2564{
2565	/* At first free all channels at the transport layer ... */
2566	idr_for_each(idr, info->desc->ops->chan_free, idr);
2567
2568	/* ...then destroy all underlying devices */
2569	idr_for_each(idr, scmi_chan_destroy, idr);
2570
2571	idr_destroy(idr);
2572}
2573
2574static void scmi_cleanup_txrx_channels(struct scmi_info *info)
2575{
2576	scmi_cleanup_channels(info, &info->tx_idr);
2577
2578	scmi_cleanup_channels(info, &info->rx_idr);
2579}
2580
2581static int scmi_bus_notifier(struct notifier_block *nb,
2582			     unsigned long action, void *data)
2583{
2584	struct scmi_info *info = bus_nb_to_scmi_info(nb);
2585	struct scmi_device *sdev = to_scmi_dev(data);
2586
2587	/* Skip transport devices and devices of different SCMI instances */
2588	if (!strncmp(sdev->name, "__scmi_transport_device", 23) ||
2589	    sdev->dev.parent != info->dev)
2590		return NOTIFY_DONE;
2591
2592	switch (action) {
2593	case BUS_NOTIFY_BIND_DRIVER:
2594		/* setup handle now as the transport is ready */
2595		scmi_set_handle(sdev);
2596		break;
2597	case BUS_NOTIFY_UNBOUND_DRIVER:
2598		scmi_handle_put(sdev->handle);
2599		sdev->handle = NULL;
2600		break;
2601	default:
2602		return NOTIFY_DONE;
2603	}
2604
2605	dev_dbg(info->dev, "Device %s (%s) is now %s\n", dev_name(&sdev->dev),
2606		sdev->name, action == BUS_NOTIFY_BIND_DRIVER ?
2607		"about to be BOUND." : "UNBOUND.");
2608
2609	return NOTIFY_OK;
2610}
2611
2612static int scmi_device_request_notifier(struct notifier_block *nb,
2613					unsigned long action, void *data)
2614{
2615	struct device_node *np;
2616	struct scmi_device_id *id_table = data;
2617	struct scmi_info *info = req_nb_to_scmi_info(nb);
2618
2619	np = idr_find(&info->active_protocols, id_table->protocol_id);
2620	if (!np)
2621		return NOTIFY_DONE;
2622
2623	dev_dbg(info->dev, "%sRequested device (%s) for protocol 0x%x\n",
2624		action == SCMI_BUS_NOTIFY_DEVICE_REQUEST ? "" : "UN-",
2625		id_table->name, id_table->protocol_id);
2626
2627	switch (action) {
2628	case SCMI_BUS_NOTIFY_DEVICE_REQUEST:
2629		scmi_create_protocol_devices(np, info, id_table->protocol_id,
2630					     id_table->name);
2631		break;
2632	case SCMI_BUS_NOTIFY_DEVICE_UNREQUEST:
2633		scmi_destroy_protocol_devices(info, id_table->protocol_id,
2634					      id_table->name);
2635		break;
2636	default:
2637		return NOTIFY_DONE;
2638	}
2639
2640	return NOTIFY_OK;
2641}
2642
2643static void scmi_debugfs_common_cleanup(void *d)
2644{
2645	struct scmi_debug_info *dbg = d;
2646
2647	if (!dbg)
2648		return;
2649
2650	debugfs_remove_recursive(dbg->top_dentry);
2651	kfree(dbg->name);
2652	kfree(dbg->type);
2653}
2654
2655static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info)
2656{
2657	char top_dir[16];
2658	struct dentry *trans, *top_dentry;
2659	struct scmi_debug_info *dbg;
2660	const char *c_ptr = NULL;
2661
2662	dbg = devm_kzalloc(info->dev, sizeof(*dbg), GFP_KERNEL);
2663	if (!dbg)
2664		return NULL;
2665
2666	dbg->name = kstrdup(of_node_full_name(info->dev->of_node), GFP_KERNEL);
2667	if (!dbg->name) {
2668		devm_kfree(info->dev, dbg);
2669		return NULL;
2670	}
2671
2672	of_property_read_string(info->dev->of_node, "compatible", &c_ptr);
2673	dbg->type = kstrdup(c_ptr, GFP_KERNEL);
2674	if (!dbg->type) {
2675		kfree(dbg->name);
2676		devm_kfree(info->dev, dbg);
2677		return NULL;
2678	}
2679
2680	snprintf(top_dir, 16, "%d", info->id);
2681	top_dentry = debugfs_create_dir(top_dir, scmi_top_dentry);
2682	trans = debugfs_create_dir("transport", top_dentry);
2683
2684	dbg->is_atomic = info->desc->atomic_enabled &&
2685				is_transport_polling_capable(info->desc);
2686
2687	debugfs_create_str("instance_name", 0400, top_dentry,
2688			   (char **)&dbg->name);
2689
2690	debugfs_create_u32("atomic_threshold_us", 0400, top_dentry,
2691			   &info->atomic_threshold);
2692
2693	debugfs_create_str("type", 0400, trans, (char **)&dbg->type);
2694
2695	debugfs_create_bool("is_atomic", 0400, trans, &dbg->is_atomic);
2696
2697	debugfs_create_u32("max_rx_timeout_ms", 0400, trans,
2698			   (u32 *)&info->desc->max_rx_timeout_ms);
2699
2700	debugfs_create_u32("max_msg_size", 0400, trans,
2701			   (u32 *)&info->desc->max_msg_size);
2702
2703	debugfs_create_u32("tx_max_msg", 0400, trans,
2704			   (u32 *)&info->tx_minfo.max_msg);
2705
2706	debugfs_create_u32("rx_max_msg", 0400, trans,
2707			   (u32 *)&info->rx_minfo.max_msg);
2708
2709	dbg->top_dentry = top_dentry;
2710
2711	if (devm_add_action_or_reset(info->dev,
2712				     scmi_debugfs_common_cleanup, dbg)) {
2713		scmi_debugfs_common_cleanup(dbg);
2714		return NULL;
2715	}
2716
2717	return dbg;
2718}
2719
2720static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
2721{
2722	int id, num_chans = 0, ret = 0;
2723	struct scmi_chan_info *cinfo;
2724	u8 channels[SCMI_MAX_CHANNELS] = {};
2725	DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {};
2726
2727	if (!info->dbg)
2728		return -EINVAL;
2729
2730	/* Enumerate all channels to collect their ids */
2731	idr_for_each_entry(&info->tx_idr, cinfo, id) {
2732		/*
2733		 * Cannot happen, but be defensive.
2734		 * Zero as num_chans is ok, warn and carry on.
2735		 */
2736		if (num_chans >= SCMI_MAX_CHANNELS || !cinfo) {
2737			dev_warn(info->dev,
2738				 "SCMI RAW - Error enumerating channels\n");
2739			break;
2740		}
2741
2742		if (!test_bit(cinfo->id, protos)) {
2743			channels[num_chans++] = cinfo->id;
2744			set_bit(cinfo->id, protos);
2745		}
2746	}
2747
2748	info->raw = scmi_raw_mode_init(&info->handle, info->dbg->top_dentry,
2749				       info->id, channels, num_chans,
2750				       info->desc, info->tx_minfo.max_msg);
2751	if (IS_ERR(info->raw)) {
2752		dev_err(info->dev, "Failed to initialize SCMI RAW Mode !\n");
2753		ret = PTR_ERR(info->raw);
2754		info->raw = NULL;
2755	}
2756
2757	return ret;
2758}
2759
2760static int scmi_probe(struct platform_device *pdev)
2761{
2762	int ret;
2763	struct scmi_handle *handle;
2764	const struct scmi_desc *desc;
2765	struct scmi_info *info;
2766	bool coex = IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX);
2767	struct device *dev = &pdev->dev;
2768	struct device_node *child, *np = dev->of_node;
2769
2770	desc = of_device_get_match_data(dev);
2771	if (!desc)
2772		return -EINVAL;
2773
2774	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
2775	if (!info)
2776		return -ENOMEM;
2777
2778	info->id = ida_alloc_min(&scmi_id, 0, GFP_KERNEL);
2779	if (info->id < 0)
2780		return info->id;
2781
2782	info->dev = dev;
2783	info->desc = desc;
2784	info->bus_nb.notifier_call = scmi_bus_notifier;
2785	info->dev_req_nb.notifier_call = scmi_device_request_notifier;
2786	INIT_LIST_HEAD(&info->node);
2787	idr_init(&info->protocols);
2788	mutex_init(&info->protocols_mtx);
2789	idr_init(&info->active_protocols);
2790	mutex_init(&info->devreq_mtx);
2791
2792	platform_set_drvdata(pdev, info);
2793	idr_init(&info->tx_idr);
2794	idr_init(&info->rx_idr);
2795
2796	handle = &info->handle;
2797	handle->dev = info->dev;
2798	handle->version = &info->version;
2799	handle->devm_protocol_acquire = scmi_devm_protocol_acquire;
2800	handle->devm_protocol_get = scmi_devm_protocol_get;
2801	handle->devm_protocol_put = scmi_devm_protocol_put;
2802
2803	/* System wide atomic threshold for atomic ops .. if any */
2804	if (!of_property_read_u32(np, "atomic-threshold-us",
2805				  &info->atomic_threshold))
2806		dev_info(dev,
2807			 "SCMI System wide atomic threshold set to %d us\n",
2808			 info->atomic_threshold);
2809	handle->is_transport_atomic = scmi_is_transport_atomic;
2810
2811	if (desc->ops->link_supplier) {
2812		ret = desc->ops->link_supplier(dev);
2813		if (ret)
2814			goto clear_ida;
2815	}
2816
2817	/* Setup all channels described in the DT at first */
2818	ret = scmi_channels_setup(info);
2819	if (ret)
2820		goto clear_ida;
2821
2822	ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb);
2823	if (ret)
2824		goto clear_txrx_setup;
2825
2826	ret = blocking_notifier_chain_register(&scmi_requested_devices_nh,
2827					       &info->dev_req_nb);
2828	if (ret)
2829		goto clear_bus_notifier;
2830
2831	ret = scmi_xfer_info_init(info);
2832	if (ret)
2833		goto clear_dev_req_notifier;
2834
2835	if (scmi_top_dentry) {
2836		info->dbg = scmi_debugfs_common_setup(info);
2837		if (!info->dbg)
2838			dev_warn(dev, "Failed to setup SCMI debugfs.\n");
2839
2840		if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
2841			ret = scmi_debugfs_raw_mode_setup(info);
2842			if (!coex) {
2843				if (ret)
2844					goto clear_dev_req_notifier;
2845
2846				/* Bail out anyway when coex disabled. */
2847				return 0;
2848			}
2849
2850			/* Coex enabled, carry on in any case. */
2851			dev_info(dev, "SCMI RAW Mode COEX enabled !\n");
2852		}
2853	}
2854
2855	if (scmi_notification_init(handle))
2856		dev_err(dev, "SCMI Notifications NOT available.\n");
2857
2858	if (info->desc->atomic_enabled &&
2859	    !is_transport_polling_capable(info->desc))
2860		dev_err(dev,
2861			"Transport is not polling capable. Atomic mode not supported.\n");
2862
2863	/*
2864	 * Trigger SCMI Base protocol initialization.
2865	 * It's mandatory and won't be ever released/deinit until the
2866	 * SCMI stack is shutdown/unloaded as a whole.
2867	 */
2868	ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
2869	if (ret) {
2870		dev_err(dev, "unable to communicate with SCMI\n");
2871		if (coex)
2872			return 0;
2873		goto notification_exit;
2874	}
2875
2876	mutex_lock(&scmi_list_mutex);
2877	list_add_tail(&info->node, &scmi_list);
2878	mutex_unlock(&scmi_list_mutex);
2879
2880	for_each_available_child_of_node(np, child) {
2881		u32 prot_id;
2882
2883		if (of_property_read_u32(child, "reg", &prot_id))
2884			continue;
2885
2886		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
2887			dev_err(dev, "Out of range protocol %d\n", prot_id);
2888
2889		if (!scmi_is_protocol_implemented(handle, prot_id)) {
2890			dev_err(dev, "SCMI protocol %d not implemented\n",
2891				prot_id);
2892			continue;
2893		}
2894
2895		/*
2896		 * Save this valid DT protocol descriptor amongst
2897		 * @active_protocols for this SCMI instance/
2898		 */
2899		ret = idr_alloc(&info->active_protocols, child,
2900				prot_id, prot_id + 1, GFP_KERNEL);
2901		if (ret != prot_id) {
2902			dev_err(dev, "SCMI protocol %d already activated. Skip\n",
2903				prot_id);
2904			continue;
2905		}
2906
2907		of_node_get(child);
2908		scmi_create_protocol_devices(child, info, prot_id, NULL);
2909	}
2910
2911	return 0;
2912
2913notification_exit:
2914	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
2915		scmi_raw_mode_cleanup(info->raw);
2916	scmi_notification_exit(&info->handle);
2917clear_dev_req_notifier:
2918	blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
2919					   &info->dev_req_nb);
2920clear_bus_notifier:
2921	bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
2922clear_txrx_setup:
2923	scmi_cleanup_txrx_channels(info);
2924clear_ida:
2925	ida_free(&scmi_id, info->id);
2926	return ret;
2927}
2928
2929static void scmi_remove(struct platform_device *pdev)
2930{
2931	int id;
2932	struct scmi_info *info = platform_get_drvdata(pdev);
2933	struct device_node *child;
2934
2935	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
2936		scmi_raw_mode_cleanup(info->raw);
2937
2938	mutex_lock(&scmi_list_mutex);
2939	if (info->users)
2940		dev_warn(&pdev->dev,
2941			 "Still active SCMI users will be forcibly unbound.\n");
2942	list_del(&info->node);
2943	mutex_unlock(&scmi_list_mutex);
2944
2945	scmi_notification_exit(&info->handle);
2946
2947	mutex_lock(&info->protocols_mtx);
2948	idr_destroy(&info->protocols);
2949	mutex_unlock(&info->protocols_mtx);
2950
2951	idr_for_each_entry(&info->active_protocols, child, id)
2952		of_node_put(child);
2953	idr_destroy(&info->active_protocols);
2954
2955	blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
2956					   &info->dev_req_nb);
2957	bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
2958
2959	/* Safe to free channels since no more users */
2960	scmi_cleanup_txrx_channels(info);
2961
2962	ida_free(&scmi_id, info->id);
2963}
2964
2965static ssize_t protocol_version_show(struct device *dev,
2966				     struct device_attribute *attr, char *buf)
2967{
2968	struct scmi_info *info = dev_get_drvdata(dev);
2969
2970	return sprintf(buf, "%u.%u\n", info->version.major_ver,
2971		       info->version.minor_ver);
2972}
2973static DEVICE_ATTR_RO(protocol_version);
2974
2975static ssize_t firmware_version_show(struct device *dev,
2976				     struct device_attribute *attr, char *buf)
2977{
2978	struct scmi_info *info = dev_get_drvdata(dev);
2979
2980	return sprintf(buf, "0x%x\n", info->version.impl_ver);
2981}
2982static DEVICE_ATTR_RO(firmware_version);
2983
2984static ssize_t vendor_id_show(struct device *dev,
2985			      struct device_attribute *attr, char *buf)
2986{
2987	struct scmi_info *info = dev_get_drvdata(dev);
2988
2989	return sprintf(buf, "%s\n", info->version.vendor_id);
2990}
2991static DEVICE_ATTR_RO(vendor_id);
2992
2993static ssize_t sub_vendor_id_show(struct device *dev,
2994				  struct device_attribute *attr, char *buf)
2995{
2996	struct scmi_info *info = dev_get_drvdata(dev);
2997
2998	return sprintf(buf, "%s\n", info->version.sub_vendor_id);
2999}
3000static DEVICE_ATTR_RO(sub_vendor_id);
3001
3002static struct attribute *versions_attrs[] = {
3003	&dev_attr_firmware_version.attr,
3004	&dev_attr_protocol_version.attr,
3005	&dev_attr_vendor_id.attr,
3006	&dev_attr_sub_vendor_id.attr,
3007	NULL,
3008};
3009ATTRIBUTE_GROUPS(versions);
3010
3011/* Each compatible listed below must have descriptor associated with it */
3012static const struct of_device_id scmi_of_match[] = {
3013#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
3014	{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
3015#endif
3016#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
3017	{ .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc },
3018#endif
3019#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
3020	{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
3021	{ .compatible = "arm,scmi-smc-param", .data = &scmi_smc_desc},
3022	{ .compatible = "qcom,scmi-smc", .data = &scmi_smc_desc},
3023#endif
3024#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
3025	{ .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc},
3026#endif
3027	{ /* Sentinel */ },
3028};
3029
3030MODULE_DEVICE_TABLE(of, scmi_of_match);
3031
3032static struct platform_driver scmi_driver = {
3033	.driver = {
3034		   .name = "arm-scmi",
3035		   .suppress_bind_attrs = true,
3036		   .of_match_table = scmi_of_match,
3037		   .dev_groups = versions_groups,
3038		   },
3039	.probe = scmi_probe,
3040	.remove_new = scmi_remove,
3041};
3042
3043/**
3044 * __scmi_transports_setup  - Common helper to call transport-specific
3045 * .init/.exit code if provided.
3046 *
3047 * @init: A flag to distinguish between init and exit.
3048 *
3049 * Note that, if provided, we invoke .init/.exit functions for all the
3050 * transports currently compiled in.
3051 *
3052 * Return: 0 on Success.
3053 */
3054static inline int __scmi_transports_setup(bool init)
3055{
3056	int ret = 0;
3057	const struct of_device_id *trans;
3058
3059	for (trans = scmi_of_match; trans->data; trans++) {
3060		const struct scmi_desc *tdesc = trans->data;
3061
3062		if ((init && !tdesc->transport_init) ||
3063		    (!init && !tdesc->transport_exit))
3064			continue;
3065
3066		if (init)
3067			ret = tdesc->transport_init();
3068		else
3069			tdesc->transport_exit();
3070
3071		if (ret) {
3072			pr_err("SCMI transport %s FAILED initialization!\n",
3073			       trans->compatible);
3074			break;
3075		}
3076	}
3077
3078	return ret;
3079}
3080
3081static int __init scmi_transports_init(void)
3082{
3083	return __scmi_transports_setup(true);
3084}
3085
3086static void __exit scmi_transports_exit(void)
3087{
3088	__scmi_transports_setup(false);
3089}
3090
3091static struct dentry *scmi_debugfs_init(void)
3092{
3093	struct dentry *d;
3094
3095	d = debugfs_create_dir("scmi", NULL);
3096	if (IS_ERR(d)) {
3097		pr_err("Could NOT create SCMI top dentry.\n");
3098		return NULL;
3099	}
3100
3101	return d;
3102}
3103
3104static int __init scmi_driver_init(void)
3105{
3106	int ret;
3107
3108	/* Bail out if no SCMI transport was configured */
3109	if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
3110		return -EINVAL;
3111
3112	/* Initialize any compiled-in transport which provided an init/exit */
3113	ret = scmi_transports_init();
3114	if (ret)
3115		return ret;
3116
3117	if (IS_ENABLED(CONFIG_ARM_SCMI_NEED_DEBUGFS))
3118		scmi_top_dentry = scmi_debugfs_init();
3119
3120	scmi_base_register();
3121
3122	scmi_clock_register();
3123	scmi_perf_register();
3124	scmi_power_register();
3125	scmi_reset_register();
3126	scmi_sensors_register();
3127	scmi_voltage_register();
3128	scmi_system_register();
3129	scmi_powercap_register();
3130
3131	return platform_driver_register(&scmi_driver);
3132}
3133module_init(scmi_driver_init);
3134
3135static void __exit scmi_driver_exit(void)
3136{
3137	scmi_base_unregister();
3138
3139	scmi_clock_unregister();
3140	scmi_perf_unregister();
3141	scmi_power_unregister();
3142	scmi_reset_unregister();
3143	scmi_sensors_unregister();
3144	scmi_voltage_unregister();
3145	scmi_system_unregister();
3146	scmi_powercap_unregister();
3147
3148	scmi_transports_exit();
3149
3150	platform_driver_unregister(&scmi_driver);
3151
3152	debugfs_remove_recursive(scmi_top_dentry);
3153}
3154module_exit(scmi_driver_exit);
3155
3156MODULE_ALIAS("platform:arm-scmi");
3157MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
3158MODULE_DESCRIPTION("ARM SCMI protocol driver");
3159MODULE_LICENSE("GPL v2");
3160