// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017 Linaro Ltd.
 */

#include <linux/kernel.h>
#include <linux/cdev.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/firmware/qcom/qcom_scm.h>

#define QCOM_RMTFS_MEM_DEV_MAX	(MINORMASK + 1)
#define NUM_MAX_VMIDS		2

static dev_t qcom_rmtfs_mem_major;

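/*
 * Per-region state: the character device exposed to userspace, the
 * memremapped alias of the carveout, its physical address and size, the
 * DT-provided client id and the ownership bitmap used when reassigning
 * the region via qcom_scm_assign_mem().
 */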
struct qcom_rmtfs_mem {
	struct device dev;
	struct cdev cdev;

	void *base;
	phys_addr_t addr;
	phys_addr_t size;

	unsigned int client_id;

	u64 perms;
};

static ssize_t qcom_rmtfs_mem_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf);

static DEVICE_ATTR(phys_addr, 0444, qcom_rmtfs_mem_show, NULL);
static DEVICE_ATTR(size, 0444, qcom_rmtfs_mem_show, NULL);
static DEVICE_ATTR(client_id, 0444, qcom_rmtfs_mem_show, NULL);

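/* Shared show() handler for the phys_addr, size and client_id attributes. */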
static ssize_t qcom_rmtfs_mem_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct qcom_rmtfs_mem *rmtfs_mem = container_of(dev,
							struct qcom_rmtfs_mem,
							dev);

	if (attr == &dev_attr_phys_addr)
		return sprintf(buf, "%pa\n", &rmtfs_mem->addr);
	if (attr == &dev_attr_size)
		return sprintf(buf, "%pa\n", &rmtfs_mem->size);
	if (attr == &dev_attr_client_id)
		return sprintf(buf, "%d\n", rmtfs_mem->client_id);

	return -EINVAL;
}

static struct attribute *qcom_rmtfs_mem_attrs[] = {
	&dev_attr_phys_addr.attr,
	&dev_attr_size.attr,
	&dev_attr_client_id.attr,
	NULL
};
ATTRIBUTE_GROUPS(qcom_rmtfs_mem);

static int qcom_rmtfs_mem_open(struct inode *inode, struct file *filp)
{
	struct qcom_rmtfs_mem *rmtfs_mem = container_of(inode->i_cdev,
							struct qcom_rmtfs_mem,
							cdev);

	get_device(&rmtfs_mem->dev);
	filp->private_data = rmtfs_mem;

	return 0;
}
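
/*
 * read() and write() go through the memremapped alias of the carveout,
 * with each transfer clamped to the end of the region.
 */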
static ssize_t qcom_rmtfs_mem_read(struct file *filp,
			      char __user *buf, size_t count, loff_t *f_pos)
{
	struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data;

	if (*f_pos >= rmtfs_mem->size)
		return 0;

	if (*f_pos + count >= rmtfs_mem->size)
		count = rmtfs_mem->size - *f_pos;

	if (copy_to_user(buf, rmtfs_mem->base + *f_pos, count))
		return -EFAULT;

	*f_pos += count;
	return count;
}

static ssize_t qcom_rmtfs_mem_write(struct file *filp,
			       const char __user *buf, size_t count,
			       loff_t *f_pos)
{
	struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data;

	if (*f_pos >= rmtfs_mem->size)
		return 0;

	if (*f_pos + count >= rmtfs_mem->size)
		count = rmtfs_mem->size - *f_pos;

	if (copy_from_user(rmtfs_mem->base + *f_pos, buf, count))
		return -EFAULT;

	*f_pos += count;
	return count;
}

static int qcom_rmtfs_mem_release(struct inode *inode, struct file *filp)
{
	struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data;

	put_device(&rmtfs_mem->dev);

	return 0;
}

static struct class rmtfs_class = {
	.name           = "rmtfs",
};

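/*
 * Map the carveout into userspace with write-combine attributes; the
 * requested mapping may not be larger than the region.
 */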
static int qcom_rmtfs_mem_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct qcom_rmtfs_mem *rmtfs_mem = filep->private_data;

	if (vma->vm_end - vma->vm_start > rmtfs_mem->size) {
		dev_dbg(&rmtfs_mem->dev,
			"vm_end[%lu] - vm_start[%lu] [%lu] > mem->size[%pa]\n",
			vma->vm_end, vma->vm_start,
			(vma->vm_end - vma->vm_start), &rmtfs_mem->size);
		return -EINVAL;
	}

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return remap_pfn_range(vma,
			       vma->vm_start,
			       rmtfs_mem->addr >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static const struct file_operations qcom_rmtfs_mem_fops = {
	.owner = THIS_MODULE,
	.open = qcom_rmtfs_mem_open,
	.read = qcom_rmtfs_mem_read,
	.write = qcom_rmtfs_mem_write,
	.release = qcom_rmtfs_mem_release,
	.llseek = default_llseek,
	.mmap = qcom_rmtfs_mem_mmap,
};

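/*
 * Device release callback; frees the qcom_rmtfs_mem that embeds the
 * struct device once the last reference is dropped.
 */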
static void qcom_rmtfs_mem_release_device(struct device *dev)
{
	struct qcom_rmtfs_mem *rmtfs_mem = container_of(dev,
							struct qcom_rmtfs_mem,
							dev);

	kfree(rmtfs_mem);
}

static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct qcom_scm_vmperm perms[NUM_MAX_VMIDS + 1];
	struct reserved_mem *rmem;
	struct qcom_rmtfs_mem *rmtfs_mem;
	u32 client_id;
	u32 vmid[NUM_MAX_VMIDS];
	int num_vmids;
	int ret, i;

	rmem = of_reserved_mem_lookup(node);
	if (!rmem) {
		dev_err(&pdev->dev, "failed to acquire memory region\n");
		return -EINVAL;
	}

	ret = of_property_read_u32(node, "qcom,client-id", &client_id);
	if (ret) {
		dev_err(&pdev->dev, "failed to parse \"qcom,client-id\"\n");
		return ret;
	}

	rmtfs_mem = kzalloc(sizeof(*rmtfs_mem), GFP_KERNEL);
	if (!rmtfs_mem)
		return -ENOMEM;

	rmtfs_mem->addr = rmem->base;
	rmtfs_mem->client_id = client_id;
	rmtfs_mem->size = rmem->size;

	/*
	 * If requested, discard the first and last 4k block in order to ensure
	 * that the rmtfs region isn't adjacent to other protected regions.
	 */
	if (of_property_read_bool(node, "qcom,use-guard-pages")) {
		rmtfs_mem->addr += SZ_4K;
		rmtfs_mem->size -= 2 * SZ_4K;
	}

	device_initialize(&rmtfs_mem->dev);
	rmtfs_mem->dev.parent = &pdev->dev;
	rmtfs_mem->dev.groups = qcom_rmtfs_mem_groups;
	rmtfs_mem->dev.release = qcom_rmtfs_mem_release_device;

	rmtfs_mem->base = devm_memremap(&rmtfs_mem->dev, rmtfs_mem->addr,
					rmtfs_mem->size, MEMREMAP_WC);
	if (IS_ERR(rmtfs_mem->base)) {
		dev_err(&pdev->dev, "failed to remap rmtfs_mem region\n");
		ret = PTR_ERR(rmtfs_mem->base);
		goto put_device;
	}

	cdev_init(&rmtfs_mem->cdev, &qcom_rmtfs_mem_fops);
	rmtfs_mem->cdev.owner = THIS_MODULE;

	dev_set_name(&rmtfs_mem->dev, "qcom_rmtfs_mem%d", client_id);
	rmtfs_mem->dev.id = client_id;
	rmtfs_mem->dev.class = &rmtfs_class;
	rmtfs_mem->dev.devt = MKDEV(MAJOR(qcom_rmtfs_mem_major), client_id);

	ret = cdev_device_add(&rmtfs_mem->cdev, &rmtfs_mem->dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to add cdev: %d\n", ret);
		goto put_device;
	}

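	/*
	 * Optionally share the region with the remote VMs listed in the
	 * "qcom,vmid" property.
	 */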
	num_vmids = of_property_count_u32_elems(node, "qcom,vmid");
	if (num_vmids == -EINVAL) {
		/* qcom,vmid is optional */
		num_vmids = 0;
	} else if (num_vmids < 0) {
		dev_err(&pdev->dev, "failed to count qcom,vmid elements: %d\n", num_vmids);
		ret = num_vmids;
		goto remove_cdev;
	} else if (num_vmids > NUM_MAX_VMIDS) {
		dev_warn(&pdev->dev,
			 "too many VMIDs (%d) specified! Only mapping first %d entries\n",
			 num_vmids, NUM_MAX_VMIDS);
		num_vmids = NUM_MAX_VMIDS;
	}

	ret = of_property_read_u32_array(node, "qcom,vmid", vmid, num_vmids);
	if (ret < 0 && ret != -EINVAL) {
		dev_err(&pdev->dev, "failed to parse qcom,vmid\n");
		goto remove_cdev;
	} else if (!ret) {
		if (!qcom_scm_is_available()) {
			ret = -EPROBE_DEFER;
			goto remove_cdev;
		}

		perms[0].vmid = QCOM_SCM_VMID_HLOS;
		perms[0].perm = QCOM_SCM_PERM_RW;

		for (i = 0; i < num_vmids; i++) {
			perms[i + 1].vmid = vmid[i];
			perms[i + 1].perm = QCOM_SCM_PERM_RW;
		}

		rmtfs_mem->perms = BIT(QCOM_SCM_VMID_HLOS);
		ret = qcom_scm_assign_mem(rmtfs_mem->addr, rmtfs_mem->size,
					  &rmtfs_mem->perms, perms, num_vmids + 1);
		if (ret < 0) {
			dev_err(&pdev->dev, "assign memory failed\n");
			goto remove_cdev;
		}
	}

	dev_set_drvdata(&pdev->dev, rmtfs_mem);

	return 0;

remove_cdev:
	cdev_device_del(&rmtfs_mem->cdev, &rmtfs_mem->dev);
put_device:
	put_device(&rmtfs_mem->dev);

	return ret;
}

static void qcom_rmtfs_mem_remove(struct platform_device *pdev)
{
	struct qcom_rmtfs_mem *rmtfs_mem = dev_get_drvdata(&pdev->dev);
	struct qcom_scm_vmperm perm;

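	/*
	 * If the region was shared with remote VMs, reassign it exclusively
	 * to HLOS before the device goes away.
	 */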
	if (rmtfs_mem->perms) {
		perm.vmid = QCOM_SCM_VMID_HLOS;
		perm.perm = QCOM_SCM_PERM_RW;

		qcom_scm_assign_mem(rmtfs_mem->addr, rmtfs_mem->size,
				    &rmtfs_mem->perms, &perm, 1);
	}

	cdev_device_del(&rmtfs_mem->cdev, &rmtfs_mem->dev);
	put_device(&rmtfs_mem->dev);
}

static const struct of_device_id qcom_rmtfs_mem_of_match[] = {
	{ .compatible = "qcom,rmtfs-mem" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_rmtfs_mem_of_match);

static struct platform_driver qcom_rmtfs_mem_driver = {
	.probe = qcom_rmtfs_mem_probe,
	.remove_new = qcom_rmtfs_mem_remove,
	.driver  = {
		.name  = "qcom_rmtfs_mem",
		.of_match_table = qcom_rmtfs_mem_of_match,
	},
};

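/*
 * Module init: register the "rmtfs" class, reserve a char-dev region and
 * register the platform driver; unwind in reverse order on failure.
 */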
static int __init qcom_rmtfs_mem_init(void)
{
	int ret;

	ret = class_register(&rmtfs_class);
	if (ret)
		return ret;

	ret = alloc_chrdev_region(&qcom_rmtfs_mem_major, 0,
				  QCOM_RMTFS_MEM_DEV_MAX, "qcom_rmtfs_mem");
	if (ret < 0) {
		pr_err("qcom_rmtfs_mem: failed to allocate char dev region\n");
		goto unregister_class;
	}

	ret = platform_driver_register(&qcom_rmtfs_mem_driver);
	if (ret < 0) {
		pr_err("qcom_rmtfs_mem: failed to register rmtfs_mem driver\n");
		goto unregister_chrdev;
	}

	return 0;

unregister_chrdev:
	unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX);
unregister_class:
	class_unregister(&rmtfs_class);
	return ret;
}
module_init(qcom_rmtfs_mem_init);

static void __exit qcom_rmtfs_mem_exit(void)
{
	platform_driver_unregister(&qcom_rmtfs_mem_driver);
	unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX);
	class_unregister(&rmtfs_class);
}
module_exit(qcom_rmtfs_mem_exit);

MODULE_AUTHOR("Linaro Ltd");
MODULE_DESCRIPTION("Qualcomm Remote Filesystem memory driver");
MODULE_LICENSE("GPL v2");