// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/socinfo.h>

/*
 * The Qualcomm shared memory system is an allocate-only heap structure that
 * consists of one or more memory areas that can be accessed by the processors
 * in the SoC.
 *
 * All systems contain a global heap, accessible by all processors in the SoC,
 * with a table of contents data structure (@smem_header) at the beginning of
 * the main shared memory block.
 *
 * The global header contains metadata for allocations as well as a fixed list
 * of 512 entries (@smem_global_entry) that can be initialized to reference
 * parts of the shared memory space.
 *
 * In addition to this global heap a set of "private" heaps can be set up at
 * boot time with access restrictions so that only certain processor pairs can
 * access the data.
 *
 * These partitions are referenced from an optional partition table
 * (@smem_ptable), that is found 4kB from the end of the main smem region. The
 * partition table entries (@smem_ptable_entry) list the involved processors
 * (or hosts) and their location in the main shared memory region.
 *
 * Each partition starts with a header (@smem_partition_header) that identifies
 * the partition and holds properties for the two internal memory regions. The
 * two regions are cached and non-cached memory respectively. Each region
 * contains a linked list of allocation headers (@smem_private_entry) followed
 * by their data.
 *
 * Items in the non-cached region are allocated from the start of the partition
 * while items in the cached region are allocated from the end. The free area
 * is hence the region between the cached and non-cached offsets. The header of
 * cached items comes after the data.
 *
 * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
 * for the global heap. A new global partition is created from the global heap
 * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
 * set by the bootloader.
 *
 * To synchronize allocations in the shared memory heaps a remote spinlock must
 * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
 * platforms.
 */
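
/*
 * An illustrative sketch of the private partition layout described above;
 * the proportions and entry counts are examples only, not taken from any
 * particular platform:
 *
 *	+------------------------------+  <- start of partition
 *	| smem_partition_header        |
 *	+------------------------------+
 *	| hdr | data | hdr | data | .. |  uncached items, allocated upwards
 *	+------------------------------+  <- offset_free_uncached
 *	|          free space          |
 *	+------------------------------+  <- offset_free_cached
 *	| .. | data | hdr | data | hdr |  cached items, allocated downwards
 *	+------------------------------+  <- start of partition + size
 */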

/*
 * The version member of the smem header contains an array of versions for the
 * various software components in the SoC. We verify that the boot loader
 * version is valid as a sanity check.
 */
#define SMEM_MASTER_SBL_VERSION_INDEX	7
#define SMEM_GLOBAL_HEAP_VERSION	11
#define SMEM_GLOBAL_PART_VERSION	12

/*
 * The first 8 items are only to be allocated by the boot loader while
 * initializing the heap.
 */
#define SMEM_ITEM_LAST_FIXED	8

/* Highest accepted item number, for both global and private heaps */
#define SMEM_ITEM_COUNT		512

/* Processor/host identifier for the application processor */
#define SMEM_HOST_APPS		0

/* Processor/host identifier for the global partition */
#define SMEM_GLOBAL_HOST	0xfffe

/* Max number of processors/hosts in a system */
#define SMEM_HOST_COUNT		20

/**
 * struct smem_proc_comm - proc_comm communication struct (legacy)
 * @command:	current command to be executed
 * @status:	status of the currently requested command
 * @params:	parameters to the command
 */
struct smem_proc_comm {
	__le32 command;
	__le32 status;
	__le32 params[2];
};

/**
 * struct smem_global_entry - entry to reference smem items on the heap
 * @allocated:	boolean to indicate if this entry is used
 * @offset:	offset to the allocated space
 * @size:	size of the allocated space, 8 byte aligned
 * @aux_base:	base address for the memory region used by this unit, or 0 for
 *		the default region. bits 0,1 are reserved
 */
struct smem_global_entry {
	__le32 allocated;
	__le32 offset;
	__le32 size;
	__le32 aux_base; /* bits 1:0 reserved */
};
#define AUX_BASE_MASK		0xfffffffc

/**
 * struct smem_header - header found in beginning of primary smem region
 * @proc_comm:		proc_comm communication interface (legacy)
 * @version:		array of versions for the various subsystems
 * @initialized:	boolean to indicate that smem is initialized
 * @free_offset:	index of the first unallocated byte in smem
 * @available:		number of bytes available for allocation
 * @reserved:		reserved field, must be 0
 * @toc:		array of references to items
 */
struct smem_header {
	struct smem_proc_comm proc_comm[4];
	__le32 version[32];
	__le32 initialized;
	__le32 free_offset;
	__le32 available;
	__le32 reserved;
	struct smem_global_entry toc[SMEM_ITEM_COUNT];
};

/**
 * struct smem_ptable_entry - one entry in the @smem_ptable list
 * @offset:	offset, within the main shared memory region, of the partition
 * @size:	size of the partition
 * @flags:	flags for the partition (currently unused)
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @cacheline:	alignment for "cached" entries
 * @reserved:	reserved entries for later use
 */
struct smem_ptable_entry {
	__le32 offset;
	__le32 size;
	__le32 flags;
	__le16 host0;
	__le16 host1;
	__le32 cacheline;
	__le32 reserved[7];
};

/**
 * struct smem_ptable - partition table for the private partitions
 * @magic:	magic number, must be SMEM_PTABLE_MAGIC
 * @version:	version of the partition table
 * @num_entries: number of partitions in the table
 * @reserved:	for now reserved entries
 * @entry:	list of @smem_ptable_entry for the @num_entries partitions
 */
struct smem_ptable {
	u8 magic[4];
	__le32 version;
	__le32 num_entries;
	__le32 reserved[5];
	struct smem_ptable_entry entry[];
};

static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */

/**
 * struct smem_partition_header - header of the partitions
 * @magic:	magic number, must be SMEM_PART_MAGIC
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @size:	size of the partition
 * @offset_free_uncached: offset to the first free byte of uncached memory in
 *		this partition
 * @offset_free_cached: offset to the first free byte of cached memory in this
 *		partition
 * @reserved:	for now reserved entries
 */
struct smem_partition_header {
	u8 magic[4];
	__le16 host0;
	__le16 host1;
	__le32 size;
	__le32 offset_free_uncached;
	__le32 offset_free_cached;
	__le32 reserved[3];
};

/**
 * struct smem_partition - describes smem partition
 * @virt_base:	starting virtual address of partition
 * @phys_base:	starting physical address of partition
 * @cacheline:	alignment for "cached" entries
 * @size:	size of partition
 */
struct smem_partition {
	void __iomem *virt_base;
	phys_addr_t phys_base;
	size_t cacheline;
	size_t size;
};

static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 }; /* "$PRT" */

/**
 * struct smem_private_entry - header of each item in the private partition
 * @canary:	magic number, must be SMEM_PRIVATE_CANARY
 * @item:	identifying number of the smem item
 * @size:	size of the data, including padding bytes
 * @padding_data: number of bytes of padding of data
 * @padding_hdr: number of bytes of padding between the header and the data
 * @reserved:	for now reserved entry
 */
struct smem_private_entry {
	u16 canary; /* bytes are the same so no swapping needed */
	__le16 item;
	__le32 size; /* includes padding bytes */
	__le16 padding_data;
	__le16 padding_hdr;
	__le32 reserved;
};
#define SMEM_PRIVATE_CANARY	0xa5a5

/**
 * struct smem_info - smem region info located after the table of contents
 * @magic:	magic number, must be SMEM_INFO_MAGIC
 * @size:	size of the smem region
 * @base_addr:	base address of the smem region
 * @reserved:	for now reserved entry
 * @num_items:	highest accepted item number
 */
struct smem_info {
	u8 magic[4];
	__le32 size;
	__le32 base_addr;
	__le32 reserved;
	__le16 num_items;
};

static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* SIII */

/**
 * struct smem_region - representation of a chunk of memory used for smem
 * @aux_base:	identifier of aux_mem base
 * @virt_base:	virtual base address of memory with this aux_mem identifier
 * @size:	size of the memory region
 */
struct smem_region {
	phys_addr_t aux_base;
	void __iomem *virt_base;
	size_t size;
};

/**
 * struct qcom_smem - device data for the smem device
 * @dev:	device pointer
 * @hwlock:	reference to a hwspinlock
 * @ptable: virtual base of partition table
 * @global_partition: describes the global partition, when in use
 * @partitions: list of partitions of current processor/host
 * @item_count: max accepted item number
 * @socinfo:	platform device pointer
 * @num_regions: number of @regions
 * @regions:	list of the memory regions defining the shared memory
 */
struct qcom_smem {
	struct device *dev;

	struct hwspinlock *hwlock;

	u32 item_count;
	struct platform_device *socinfo;
	struct smem_ptable *ptable;
	struct smem_partition global_partition;
	struct smem_partition partitions[SMEM_HOST_COUNT];

	unsigned num_regions;
	struct smem_region regions[] __counted_by(num_regions);
};

static void *
phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_uncached);
}

static struct smem_private_entry *
phdr_to_first_cached_entry(struct smem_partition_header *phdr,
					size_t cacheline)
{
	void *p = phdr;
	struct smem_private_entry *e;

	return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);
}

static void *
phdr_to_last_cached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_cached);
}

static struct smem_private_entry *
phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + sizeof(*phdr);
}

static struct smem_private_entry *
uncached_entry_next(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
	       le32_to_cpu(e->size);
}

static struct smem_private_entry *
cached_entry_next(struct smem_private_entry *e, size_t cacheline)
{
	void *p = e;

	return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
}

static void *uncached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
}

static void *cached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p - le32_to_cpu(e->size);
}

/* Pointer to the one and only smem handle */
static struct qcom_smem *__smem;

/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT	1000

/**
 * qcom_smem_is_available() - Check if SMEM is available
 *
 * Return: true if SMEM is available, false otherwise.
 */
bool qcom_smem_is_available(void)
{
	return !!__smem;
}
EXPORT_SYMBOL_GPL(qcom_smem_is_available);

static int qcom_smem_alloc_private(struct qcom_smem *smem,
				   struct smem_partition *part,
				   unsigned item,
				   size_t size)
{
	struct smem_private_entry *hdr, *end;
	struct smem_partition_header *phdr;
	size_t alloc_size;
	void *cached;
	void *p_end;

	phdr = (struct smem_partition_header __force *)part->virt_base;
	p_end = (void *)phdr + part->size;

	hdr = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);
	cached = phdr_to_last_cached_entry(phdr);

	if (WARN_ON((void *)end > p_end || cached > p_end))
		return -EINVAL;

	while (hdr < end) {
		if (hdr->canary != SMEM_PRIVATE_CANARY)
			goto bad_canary;
		if (le16_to_cpu(hdr->item) == item)
			return -EEXIST;

		hdr = uncached_entry_next(hdr);
	}

	if (WARN_ON((void *)hdr > p_end))
		return -EINVAL;

	/* Check that we don't grow into the cached region */
	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
	if ((void *)hdr + alloc_size > cached) {
		dev_err(smem->dev, "Out of memory\n");
		return -ENOSPC;
	}

	hdr->canary = SMEM_PRIVATE_CANARY;
	hdr->item = cpu_to_le16(item);
	hdr->size = cpu_to_le32(ALIGN(size, 8));
	hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
	hdr->padding_hdr = 0;

	/*
	 * Ensure the header is written before we advance the free offset, so
	 * that remote processors that do not take the remote spinlock still
	 * get a consistent view of the linked list.
	 */
	wmb();
	le32_add_cpu(&phdr->offset_free_uncached, alloc_size);

	return 0;
bad_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
		le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

	return -EINVAL;
}

static int qcom_smem_alloc_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t size)
{
	struct smem_global_entry *entry;
	struct smem_header *header;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (entry->allocated)
		return -EEXIST;

	size = ALIGN(size, 8);
	if (WARN_ON(size > le32_to_cpu(header->available)))
		return -ENOMEM;

	entry->offset = header->free_offset;
	entry->size = cpu_to_le32(size);

	/*
	 * Ensure the header is consistent before we mark the item allocated,
	 * so that remote processors will get a consistent view of the item
	 * even though they do not take the spinlock on read.
	 */
	wmb();
	entry->allocated = cpu_to_le32(1);

	le32_add_cpu(&header->free_offset, size);
	le32_add_cpu(&header->available, -size);

	return 0;
}

/**
 * qcom_smem_alloc() - allocate space for a smem item
 * @host:	remote processor id, or -1
 * @item:	smem item handle
 * @size:	number of bytes to be allocated
 *
 * Allocate space for a given smem item of size @size, given that the item is
 * not yet allocated.
 */
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
	struct smem_partition *part;
	unsigned long flags;
	int ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (item < SMEM_ITEM_LAST_FIXED) {
		dev_err(__smem->dev,
			"Rejecting allocation of static entry %d\n", item);
		return -EINVAL;
	}

	if (WARN_ON(item >= __smem->item_count))
		return -EINVAL;

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ret;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
		part = &__smem->partitions[host];
		ret = qcom_smem_alloc_private(__smem, part, item, size);
	} else if (__smem->global_partition.virt_base) {
		part = &__smem->global_partition;
		ret = qcom_smem_alloc_private(__smem, part, item, size);
	} else {
		ret = qcom_smem_alloc_global(__smem, item, size);
	}

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_smem_alloc);
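
/*
 * Usage sketch (illustrative only, not part of this driver): a client
 * typically allocates its item once and tolerates -EEXIST, since the remote
 * side may have allocated it first. The item number 600 and struct my_state
 * below are hypothetical.
 *
 *	ret = qcom_smem_alloc(remote_host, 600, sizeof(struct my_state));
 *	if (ret < 0 && ret != -EEXIST)
 *		return ret;
 */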

static void *qcom_smem_get_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t *size)
{
	struct smem_header *header;
	struct smem_region *region;
	struct smem_global_entry *entry;
	u64 entry_offset;
	u32 e_size;
	u32 aux_base;
	unsigned i;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (!entry->allocated)
		return ERR_PTR(-ENXIO);

	aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;

	for (i = 0; i < smem->num_regions; i++) {
		region = &smem->regions[i];

		if ((u32)region->aux_base == aux_base || !aux_base) {
			e_size = le32_to_cpu(entry->size);
			entry_offset = le32_to_cpu(entry->offset);

			if (WARN_ON(e_size + entry_offset > region->size))
				return ERR_PTR(-EINVAL);

			if (size != NULL)
				*size = e_size;

			return region->virt_base + entry_offset;
		}
	}

	return ERR_PTR(-ENOENT);
}

static void *qcom_smem_get_private(struct qcom_smem *smem,
				   struct smem_partition *part,
				   unsigned item,
				   size_t *size)
{
	struct smem_private_entry *e, *end;
	struct smem_partition_header *phdr;
	void *item_ptr, *p_end;
	u32 padding_data;
	u32 e_size;

	phdr = (struct smem_partition_header __force *)part->virt_base;
	p_end = (void *)phdr + part->size;

	e = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);

	while (e < end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL) {
				e_size = le32_to_cpu(e->size);
				padding_data = le16_to_cpu(e->padding_data);

				if (WARN_ON(e_size > part->size || padding_data > e_size))
					return ERR_PTR(-EINVAL);

				*size = e_size - padding_data;
			}

			item_ptr = uncached_entry_to_item(e);
			if (WARN_ON(item_ptr > p_end))
				return ERR_PTR(-EINVAL);

			return item_ptr;
		}

		e = uncached_entry_next(e);
	}

	if (WARN_ON((void *)e > p_end))
		return ERR_PTR(-EINVAL);

	/* Item was not found in the uncached list, search the cached list */

	e = phdr_to_first_cached_entry(phdr, part->cacheline);
	end = phdr_to_last_cached_entry(phdr);

	if (WARN_ON((void *)e < (void *)phdr || (void *)end > p_end))
		return ERR_PTR(-EINVAL);

	while (e > end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL) {
				e_size = le32_to_cpu(e->size);
				padding_data = le16_to_cpu(e->padding_data);

				if (WARN_ON(e_size > part->size || padding_data > e_size))
					return ERR_PTR(-EINVAL);

				*size = e_size - padding_data;
			}

			item_ptr = cached_entry_to_item(e);
			if (WARN_ON(item_ptr < (void *)phdr))
				return ERR_PTR(-EINVAL);

			return item_ptr;
		}

		e = cached_entry_next(e, part->cacheline);
	}

	if (WARN_ON((void *)e < (void *)phdr))
		return ERR_PTR(-EINVAL);

	return ERR_PTR(-ENOENT);

invalid_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
			le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

	return ERR_PTR(-EINVAL);
}

/**
 * qcom_smem_get() - resolve the pointer and size of a smem item
 * @host:	the remote processor, or -1
 * @item:	smem item handle
 * @size:	pointer to be filled out with size of the item
 *
 * Looks up the smem item and returns a pointer to it. The size of the smem
 * item is returned in @size.
 */
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
	struct smem_partition *part;
	void *ptr = ERR_PTR(-EPROBE_DEFER);

	if (!__smem)
		return ptr;

	if (WARN_ON(item >= __smem->item_count))
		return ERR_PTR(-EINVAL);

	if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
		part = &__smem->partitions[host];
		ptr = qcom_smem_get_private(__smem, part, item, size);
	} else if (__smem->global_partition.virt_base) {
		part = &__smem->global_partition;
		ptr = qcom_smem_get_private(__smem, part, item, size);
	} else {
		ptr = qcom_smem_get_global(__smem, item, size);
	}

	return ptr;
}
EXPORT_SYMBOL_GPL(qcom_smem_get);
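
/*
 * Usage sketch (illustrative only): retrieving the hypothetical item from
 * the qcom_smem_alloc() example above. The returned pointer refers directly
 * to shared memory, so the item's layout must be agreed upon with the
 * remote side.
 *
 *	size_t size;
 *	struct my_state *state;
 *
 *	state = qcom_smem_get(remote_host, 600, &size);
 *	if (IS_ERR(state))
 *		return PTR_ERR(state);
 */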

/**
 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
 * @host:	the remote processor identifying a partition, or -1
 *
 * To be used by smem clients as a quick way to determine if any new
 * allocations have been made.
 */
int qcom_smem_get_free_space(unsigned host)
{
	struct smem_partition *part;
	struct smem_partition_header *phdr;
	struct smem_header *header;
	unsigned ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
		part = &__smem->partitions[host];
		phdr = part->virt_base;
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);

		if (ret > part->size)
			return -EINVAL;
	} else if (__smem->global_partition.virt_base) {
		part = &__smem->global_partition;
		phdr = part->virt_base;
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);

		if (ret > part->size)
			return -EINVAL;
	} else {
		header = __smem->regions[0].virt_base;
		ret = le32_to_cpu(header->available);

		if (ret > __smem->regions[0].size)
			return -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_smem_get_free_space);
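
/*
 * Usage sketch (illustrative only): a client may snapshot the free space
 * and compare it later as a cheap check for new allocations in the
 * partition shared with remote_host; rescan_items() is a hypothetical
 * client callback.
 *
 *	int before = qcom_smem_get_free_space(remote_host);
 *	...
 *	if (qcom_smem_get_free_space(remote_host) != before)
 *		rescan_items();
 */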

static bool addr_in_range(void __iomem *base, size_t size, void *addr)
{
	return base && ((void __iomem *)addr >= base && (void __iomem *)addr < base + size);
}

/**
 * qcom_smem_virt_to_phys() - return the physical address associated
 * with an smem item pointer (previously returned by qcom_smem_get())
 * @p:	the virtual address to convert
 *
 * Returns 0 if the pointer provided is not within any smem region.
 */
phys_addr_t qcom_smem_virt_to_phys(void *p)
{
	struct smem_partition *part;
	struct smem_region *area;
	u64 offset;
	u32 i;

	for (i = 0; i < SMEM_HOST_COUNT; i++) {
		part = &__smem->partitions[i];

		if (addr_in_range(part->virt_base, part->size, p)) {
			offset = p - part->virt_base;

			return (phys_addr_t)part->phys_base + offset;
		}
	}

	part = &__smem->global_partition;

	if (addr_in_range(part->virt_base, part->size, p)) {
		offset = p - part->virt_base;

		return (phys_addr_t)part->phys_base + offset;
	}

	for (i = 0; i < __smem->num_regions; i++) {
		area = &__smem->regions[i];

		if (addr_in_range(area->virt_base, area->size, p)) {
			offset = p - area->virt_base;

			return (phys_addr_t)area->aux_base + offset;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_smem_virt_to_phys);
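
/*
 * Usage sketch (illustrative only, error handling elided): converting an
 * item pointer to a physical address, e.g. to hand a buffer location to a
 * remote processor. The item number 600 is hypothetical; a return value of
 * 0 means the pointer was not within any smem region.
 *
 *	void *buf = qcom_smem_get(remote_host, 600, NULL);
 *	phys_addr_t pa = qcom_smem_virt_to_phys(buf);
 */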

/**
 * qcom_smem_get_soc_id() - return the SoC ID
 * @id:	On success, we return the SoC ID here.
 *
 * Look up SoC ID from HW/SW build ID and return it.
 *
 * Return: 0 on success, negative errno on failure.
 */
int qcom_smem_get_soc_id(u32 *id)
{
	struct socinfo *info;

	info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HW_SW_BUILD_ID, NULL);
	if (IS_ERR(info))
		return PTR_ERR(info);

	*id = __le32_to_cpu(info->id);

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_smem_get_soc_id);
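
/*
 * Usage sketch (illustrative only):
 *
 *	u32 soc_id;
 *	int ret;
 *
 *	ret = qcom_smem_get_soc_id(&soc_id);
 *	if (ret)
 *		return ret;
 */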

static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
{
	struct smem_header *header;
	__le32 *versions;

	header = smem->regions[0].virt_base;
	versions = header->version;

	return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
}

static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
{
	struct smem_ptable *ptable;
	u32 version;

	ptable = smem->ptable;
	if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
		return ERR_PTR(-ENOENT);

	version = le32_to_cpu(ptable->version);
	if (version != 1) {
		dev_err(smem->dev,
			"Unsupported partition header version %d\n", version);
		return ERR_PTR(-EINVAL);
	}
	return ptable;
}

static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
{
	struct smem_ptable *ptable;
	struct smem_info *info;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR_OR_NULL(ptable))
		return SMEM_ITEM_COUNT;

	info = (struct smem_info *)&ptable->entry[le32_to_cpu(ptable->num_entries)];
	if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
		return SMEM_ITEM_COUNT;

	return le16_to_cpu(info->num_items);
}

/*
 * Validate the partition header for a partition whose partition
 * table entry is supplied.  Returns a pointer to its header if
 * valid, or a null pointer otherwise.
 */
static struct smem_partition_header *
qcom_smem_partition_header(struct qcom_smem *smem,
		struct smem_ptable_entry *entry, u16 host0, u16 host1)
{
	struct smem_partition_header *header;
	u32 phys_addr;
	u32 size;

	phys_addr = smem->regions[0].aux_base + le32_to_cpu(entry->offset);
	header = devm_ioremap_wc(smem->dev, phys_addr, le32_to_cpu(entry->size));

	if (!header)
		return NULL;

	if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
		dev_err(smem->dev, "bad partition magic %4ph\n", header->magic);
		return NULL;
	}

	if (host0 != le16_to_cpu(header->host0)) {
		dev_err(smem->dev, "bad host0 (%hu != %hu)\n",
				host0, le16_to_cpu(header->host0));
		return NULL;
	}
	if (host1 != le16_to_cpu(header->host1)) {
		dev_err(smem->dev, "bad host1 (%hu != %hu)\n",
				host1, le16_to_cpu(header->host1));
		return NULL;
	}

	size = le32_to_cpu(header->size);
	if (size != le32_to_cpu(entry->size)) {
		dev_err(smem->dev, "bad partition size (%u != %u)\n",
			size, le32_to_cpu(entry->size));
		return NULL;
	}

	if (le32_to_cpu(header->offset_free_uncached) > size) {
		dev_err(smem->dev, "bad partition free uncached (%u > %u)\n",
			le32_to_cpu(header->offset_free_uncached), size);
		return NULL;
	}

	return header;
}

static int qcom_smem_set_global_partition(struct qcom_smem *smem)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	bool found = false;
	int i;

	if (smem->global_partition.virt_base) {
		dev_err(smem->dev, "Already found the global partition\n");
		return -EINVAL;
	}

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		if (!le32_to_cpu(entry->offset))
			continue;
		if (!le32_to_cpu(entry->size))
			continue;

		if (le16_to_cpu(entry->host0) != SMEM_GLOBAL_HOST)
			continue;

		if (le16_to_cpu(entry->host1) == SMEM_GLOBAL_HOST) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_err(smem->dev, "Missing entry for global partition\n");
		return -EINVAL;
	}

	header = qcom_smem_partition_header(smem, entry,
				SMEM_GLOBAL_HOST, SMEM_GLOBAL_HOST);
	if (!header)
		return -EINVAL;

	smem->global_partition.virt_base = (void __iomem *)header;
	smem->global_partition.phys_base = smem->regions[0].aux_base +
								le32_to_cpu(entry->offset);
	smem->global_partition.size = le32_to_cpu(entry->size);
	smem->global_partition.cacheline = le32_to_cpu(entry->cacheline);

	return 0;
}

static int
qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	u16 remote_host;
	u16 host0, host1;
	int i;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		if (!le32_to_cpu(entry->offset))
			continue;
		if (!le32_to_cpu(entry->size))
			continue;

		host0 = le16_to_cpu(entry->host0);
		host1 = le16_to_cpu(entry->host1);
		if (host0 == local_host)
			remote_host = host1;
		else if (host1 == local_host)
			remote_host = host0;
		else
			continue;

		if (remote_host >= SMEM_HOST_COUNT) {
			dev_err(smem->dev, "bad host %u\n", remote_host);
			return -EINVAL;
		}

		if (smem->partitions[remote_host].virt_base) {
			dev_err(smem->dev, "duplicate host %u\n", remote_host);
			return -EINVAL;
		}

		header = qcom_smem_partition_header(smem, entry, host0, host1);
		if (!header)
			return -EINVAL;

		smem->partitions[remote_host].virt_base = (void __iomem *)header;
		smem->partitions[remote_host].phys_base = smem->regions[0].aux_base +
										le32_to_cpu(entry->offset);
		smem->partitions[remote_host].size = le32_to_cpu(entry->size);
		smem->partitions[remote_host].cacheline = le32_to_cpu(entry->cacheline);
	}

	return 0;
}

static int qcom_smem_map_toc(struct qcom_smem *smem, struct smem_region *region)
{
	u32 ptable_start;

	/* map starting 4K for smem header */
	region->virt_base = devm_ioremap_wc(smem->dev, region->aux_base, SZ_4K);
	ptable_start = region->aux_base + region->size - SZ_4K;
	/* map last 4k for toc */
	smem->ptable = devm_ioremap_wc(smem->dev, ptable_start, SZ_4K);

	if (!region->virt_base || !smem->ptable)
		return -ENOMEM;

	return 0;
}

static int qcom_smem_map_global(struct qcom_smem *smem, u32 size)
{
	u32 phys_addr;

	phys_addr = smem->regions[0].aux_base;

	smem->regions[0].size = size;
	smem->regions[0].virt_base = devm_ioremap_wc(smem->dev, phys_addr, size);

	if (!smem->regions[0].virt_base)
		return -ENOMEM;

	return 0;
}

static int qcom_smem_resolve_mem(struct qcom_smem *smem, const char *name,
				 struct smem_region *region)
{
	struct device *dev = smem->dev;
	struct device_node *np;
	struct resource r;
	int ret;

	np = of_parse_phandle(dev->of_node, name, 0);
	if (!np) {
		dev_err(dev, "No %s specified\n", name);
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &r);
	of_node_put(np);
	if (ret)
		return ret;

	region->aux_base = r.start;
	region->size = resource_size(&r);

	return 0;
}

static int qcom_smem_probe(struct platform_device *pdev)
{
	struct smem_header *header;
	struct reserved_mem *rmem;
	struct qcom_smem *smem;
	unsigned long flags;
	int num_regions;
	int hwlock_id;
	u32 version;
	u32 size;
	int ret;
	int i;

	num_regions = 1;
	if (of_property_present(pdev->dev.of_node, "qcom,rpm-msg-ram"))
		num_regions++;

	smem = devm_kzalloc(&pdev->dev, struct_size(smem, regions, num_regions),
			    GFP_KERNEL);
	if (!smem)
		return -ENOMEM;

	smem->dev = &pdev->dev;
	smem->num_regions = num_regions;

	rmem = of_reserved_mem_lookup(pdev->dev.of_node);
	if (rmem) {
		smem->regions[0].aux_base = rmem->base;
		smem->regions[0].size = rmem->size;
	} else {
		/*
		 * Fall back to the memory-region reference, if we're not a
		 * reserved-memory node.
		 */
		ret = qcom_smem_resolve_mem(smem, "memory-region", &smem->regions[0]);
		if (ret)
			return ret;
	}

	if (num_regions > 1) {
		ret = qcom_smem_resolve_mem(smem, "qcom,rpm-msg-ram", &smem->regions[1]);
		if (ret)
			return ret;
	}

	ret = qcom_smem_map_toc(smem, &smem->regions[0]);
	if (ret)
		return ret;

	for (i = 1; i < num_regions; i++) {
		smem->regions[i].virt_base = devm_ioremap_wc(&pdev->dev,
							     smem->regions[i].aux_base,
							     smem->regions[i].size);
		if (!smem->regions[i].virt_base) {
			dev_err(&pdev->dev, "failed to remap %pa\n", &smem->regions[i].aux_base);
			return -ENOMEM;
		}
	}

	header = smem->regions[0].virt_base;
	if (le32_to_cpu(header->initialized) != 1 ||
	    le32_to_cpu(header->reserved)) {
		dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
		return -EINVAL;
	}

	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
	if (hwlock_id < 0) {
		if (hwlock_id != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to retrieve hwlock\n");
		return hwlock_id;
	}

	smem->hwlock = hwspin_lock_request_specific(hwlock_id);
	if (!smem->hwlock)
		return -ENXIO;

	ret = hwspin_lock_timeout_irqsave(smem->hwlock, HWSPINLOCK_TIMEOUT, &flags);
	if (ret)
		return ret;
	size = readl_relaxed(&header->available) + readl_relaxed(&header->free_offset);
	hwspin_unlock_irqrestore(smem->hwlock, &flags);

	version = qcom_smem_get_sbl_version(smem);
	/*
	 * The smem header mapping is required only in the heap version scheme,
	 * so unmap it here. It will be remapped in qcom_smem_map_global() when
	 * the whole region is mapped again.
	 */
	devm_iounmap(smem->dev, smem->regions[0].virt_base);
	switch (version >> 16) {
	case SMEM_GLOBAL_PART_VERSION:
		ret = qcom_smem_set_global_partition(smem);
		if (ret < 0)
			return ret;
		smem->item_count = qcom_smem_get_item_count(smem);
		break;
	case SMEM_GLOBAL_HEAP_VERSION:
		ret = qcom_smem_map_global(smem, size);
		if (ret < 0)
			return ret;
		smem->item_count = SMEM_ITEM_COUNT;
		break;
	default:
		dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
		return -EINVAL;
	}

	BUILD_BUG_ON(SMEM_HOST_APPS >= SMEM_HOST_COUNT);
	ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
	if (ret < 0 && ret != -ENOENT)
		return ret;

	__smem = smem;

	smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo",
						      PLATFORM_DEVID_NONE, NULL,
						      0);
	if (IS_ERR(smem->socinfo))
		dev_dbg(&pdev->dev, "failed to register socinfo device\n");

	return 0;
}

static void qcom_smem_remove(struct platform_device *pdev)
{
	platform_device_unregister(__smem->socinfo);

	hwspin_lock_free(__smem->hwlock);
	__smem = NULL;
}

static const struct of_device_id qcom_smem_of_match[] = {
	{ .compatible = "qcom,smem" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smem_of_match);

static struct platform_driver qcom_smem_driver = {
	.probe = qcom_smem_probe,
	.remove_new = qcom_smem_remove,
	.driver  = {
		.name = "qcom-smem",
		.of_match_table = qcom_smem_of_match,
		.suppress_bind_attrs = true,
	},
};

static int __init qcom_smem_init(void)
{
	return platform_driver_register(&qcom_smem_driver);
}
arch_initcall(qcom_smem_init);

static void __exit qcom_smem_exit(void)
{
	platform_driver_unregister(&qcom_smem_driver);
}
module_exit(qcom_smem_exit);

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
MODULE_LICENSE("GPL v2");