// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(c) 2016-2018 Intel Corporation. All rights reserved.
 */
#include <linux/dma-mapping.h>
#include <linux/log2.h>
#include <linux/mei.h>

#include "mei_dev.h"

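/*
 * Overview (as implemented below): the DMA ring consists of three managed
 * coherent buffers, indexed in dev->dr_dscr[]: a host buffer
 * (DMA_DSCR_HOST) written by the host, a device buffer (DMA_DSCR_DEVICE)
 * read by the host, and a control block (DMA_DSCR_CTRL) holding the
 * free-running read/write indices (struct hbm_dma_ring_ctrl). Offsets and
 * lengths are counted in 4-byte slots, and buffer sizes are powers of 2
 * so ring indices can wrap with a simple mask.
 */
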
/**
 * mei_dmam_dscr_alloc() - allocate a managed coherent buffer
 *     for the dma descriptor
 * @dev: mei_device
 * @dscr: dma descriptor
 *
 * Return:
 * * 0       - on success or if no allocation was requested (zero size)
 * * -EINVAL - if the size is not a power of 2
 * * -ENOMEM - if the allocation failed
 */
static int mei_dmam_dscr_alloc(struct mei_device *dev,
			       struct mei_dma_dscr *dscr)
{
	if (!dscr->size)
		return 0;

	/* ring indices wrap with a mask, so the size must be a power of 2 */
	if (WARN_ON(!is_power_of_2(dscr->size)))
		return -EINVAL;

	if (dscr->vaddr)
		return 0;

	dscr->vaddr = dmam_alloc_coherent(dev->dev, dscr->size, &dscr->daddr,
					  GFP_KERNEL);
	if (!dscr->vaddr)
		return -ENOMEM;

	return 0;
}
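
/*
 * Note: dmam_alloc_coherent() is a device-managed (devres) allocation,
 * so these buffers are also released automatically when the device is
 * torn down; mei_dmam_dscr_free() below is for explicit early release.
 */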

/**
 * mei_dmam_dscr_free() - free a managed coherent buffer
 *     from the dma descriptor
 * @dev: mei_device
 * @dscr: dma descriptor
 */
static void mei_dmam_dscr_free(struct mei_device *dev,
			       struct mei_dma_dscr *dscr)
{
	if (!dscr->vaddr)
		return;

	dmam_free_coherent(dev->dev, dscr->size, dscr->vaddr, dscr->daddr);
	dscr->vaddr = NULL;
}

/**
 * mei_dmam_ring_free() - free dma ring buffers
 * @dev: mei device
 */
void mei_dmam_ring_free(struct mei_device *dev)
{
	int i;

	for (i = 0; i < DMA_DSCR_NUM; i++)
		mei_dmam_dscr_free(dev, &dev->dr_dscr[i]);
}

/**
 * mei_dmam_ring_alloc() - allocate dma ring buffers
 * @dev: mei device
 *
 * Return: -ENOMEM on allocation failure, 0 otherwise
 */
int mei_dmam_ring_alloc(struct mei_device *dev)
{
	int i;

	for (i = 0; i < DMA_DSCR_NUM; i++)
		if (mei_dmam_dscr_alloc(dev, &dev->dr_dscr[i]))
			goto err;

	return 0;

err:
	mei_dmam_ring_free(dev);
	return -ENOMEM;
}

/**
 * mei_dma_ring_is_allocated() - check if dma ring is allocated
 * @dev: mei device
 *
 * Return: true if dma ring is allocated
 */
bool mei_dma_ring_is_allocated(struct mei_device *dev)
{
	return !!dev->dr_dscr[DMA_DSCR_HOST].vaddr;
}

/* returns NULL until the control block descriptor is allocated */
static inline
struct hbm_dma_ring_ctrl *mei_dma_ring_ctrl(struct mei_device *dev)
{
	return (struct hbm_dma_ring_ctrl *)dev->dr_dscr[DMA_DSCR_CTRL].vaddr;
}

/**
 * mei_dma_ring_reset() - reset the dma control block
 * @dev: mei device
 */
void mei_dma_ring_reset(struct mei_device *dev)
{
	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);

	if (!ctrl)
		return;

	memset(ctrl, 0, sizeof(*ctrl));
}

/**
 * mei_dma_copy_from() - copy from the dma ring into a buffer
 * @dev: mei device
 * @buf: data buffer
 * @offset: offset in slots.
 * @n: number of slots to copy.
 *
 * Return: number of bytes copied
 */
static size_t mei_dma_copy_from(struct mei_device *dev, unsigned char *buf,
				u32 offset, u32 n)
{
	unsigned char *dbuf = dev->dr_dscr[DMA_DSCR_DEVICE].vaddr;

	/* a slot is 4 bytes: shift by 2 to convert slots to bytes */
	size_t b_offset = offset << 2;
	size_t b_n = n << 2;

	memcpy(buf, dbuf + b_offset, b_n);

	return b_n;
}

/**
 * mei_dma_copy_to() - copy from a buffer into the dma ring
 * @dev: mei device
 * @buf: data buffer
 * @offset: offset in slots.
 * @n: number of slots to copy.
 *
 * Return: number of bytes copied
 */
static size_t mei_dma_copy_to(struct mei_device *dev, unsigned char *buf,
			      u32 offset, u32 n)
{
	unsigned char *hbuf = dev->dr_dscr[DMA_DSCR_HOST].vaddr;

	size_t b_offset = offset << 2;
	size_t b_n = n << 2;

	memcpy(hbuf + b_offset, buf, b_n);

	return b_n;
}
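
/*
 * Slot arithmetic example: with offset = 3 and n = 2, the copy covers
 * bytes 12..19 of the ring buffer (a slot is one u32, i.e. 4 bytes).
 */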

/**
 * mei_dma_ring_read() - read data from the ring
 * @dev: mei device
 * @buf: buffer to read into; may be NULL to drop the data.
 * @len: length to read.
 */
void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len)
{
	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
	u32 dbuf_depth;
	u32 rd_idx, rem, slots;

	if (WARN_ON(!ctrl))
		return;

	dev_dbg(dev->dev, "reading from dma %u bytes\n", len);

	if (!len)
		return;

	/* depth is in slots; the mask below relies on it being a power of 2 */
	dbuf_depth = dev->dr_dscr[DMA_DSCR_DEVICE].size >> 2;
	rd_idx = READ_ONCE(ctrl->dbuf_rd_idx) & (dbuf_depth - 1);
	slots = mei_data2slots(len);

	/* if buf is NULL we drop the packet by advancing the pointer. */
	if (!buf)
		goto out;

	/* if the read wraps past the end of the ring, copy in two chunks */
	if (rd_idx + slots > dbuf_depth) {
		buf += mei_dma_copy_from(dev, buf, rd_idx, dbuf_depth - rd_idx);
		rem = slots - (dbuf_depth - rd_idx);
		rd_idx = 0;
	} else {
		rem = slots;
	}

	mei_dma_copy_from(dev, buf, rd_idx, rem);
out:
	WRITE_ONCE(ctrl->dbuf_rd_idx, ctrl->dbuf_rd_idx + slots);
}
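
/*
 * Wrap-around example: with a device buffer depth of 256 slots,
 * rd_idx = 250 and slots = 10, the first copy takes 6 slots (250..255),
 * the second takes the remaining 4 slots from index 0, and dbuf_rd_idx
 * advances by 10.
 */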

/* host buffer depth in slots (the descriptor size is in bytes) */
static inline u32 mei_dma_ring_hbuf_depth(struct mei_device *dev)
{
	return dev->dr_dscr[DMA_DSCR_HOST].size >> 2;
}

/**
 * mei_dma_ring_empty_slots() - calculate number of empty slots in dma ring
 * @dev: mei_device
 *
 * Return: number of empty slots
 */
u32 mei_dma_ring_empty_slots(struct mei_device *dev)
{
	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
	u32 wr_idx, rd_idx, hbuf_depth, empty;

	if (!mei_dma_ring_is_allocated(dev))
		return 0;

	if (WARN_ON(!ctrl))
		return 0;

	/* easier to work in slots */
	hbuf_depth = mei_dma_ring_hbuf_depth(dev);
	rd_idx = READ_ONCE(ctrl->hbuf_rd_idx);
	wr_idx = READ_ONCE(ctrl->hbuf_wr_idx);

	/* the indices are free-running: (wr_idx - rd_idx) slots are in use */
	if (rd_idx > wr_idx)
		empty = rd_idx - wr_idx;
	else
		empty = hbuf_depth - (wr_idx - rd_idx);

	return empty;
}
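
/*
 * Example: with hbuf_depth = 1024 and free-running indices
 * rd_idx = wr_idx = 7, nothing is in flight and all 1024 slots are
 * empty; after writing 10 slots (wr_idx = 17), 1024 - 10 = 1014 remain.
 */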

/**
 * mei_dma_ring_write() - write data to the dma ring host buffer
 * @dev: mei_device
 * @buf: data to be written
 * @len: data length
 */
void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len)
{
	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
	u32 hbuf_depth;
	u32 wr_idx, rem, slots;

	if (WARN_ON(!ctrl))
		return;

	dev_dbg(dev->dev, "writing to dma %u bytes\n", len);
	hbuf_depth = mei_dma_ring_hbuf_depth(dev);
	wr_idx = READ_ONCE(ctrl->hbuf_wr_idx) & (hbuf_depth - 1);
	slots = mei_data2slots(len);

	/* if the write wraps past the end of the ring, copy in two chunks */
	if (wr_idx + slots > hbuf_depth) {
		buf += mei_dma_copy_to(dev, buf, wr_idx, hbuf_depth - wr_idx);
		rem = slots - (hbuf_depth - wr_idx);
		wr_idx = 0;
	} else {
		rem = slots;
	}

	mei_dma_copy_to(dev, buf, wr_idx, rem);

	WRITE_ONCE(ctrl->hbuf_wr_idx, ctrl->hbuf_wr_idx + slots);
}
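
/*
 * A hypothetical caller sketch (not from this file): the write path is
 * expected to check capacity before handing data to the ring, e.g.
 *
 *	if (mei_dma_ring_is_allocated(dev) &&
 *	    mei_dma_ring_empty_slots(dev) >= mei_data2slots(len))
 *		mei_dma_ring_write(dev, buf, len);
 */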