// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include "iosm_ipc_protocol.h"
#include "iosm_ipc_protocol_ops.h"

/* Get the next free message element. */
static union ipc_mem_msg_entry *
ipc_protocol_free_msg_get(struct iosm_protocol *ipc_protocol, int *index)
{
	u32 head = le32_to_cpu(ipc_protocol->p_ap_shm->msg_head);
	u32 new_head = (head + 1) % IPC_MEM_MSG_ENTRIES;
	union ipc_mem_msg_entry *msg;

	if (new_head == le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail)) {
		dev_err(ipc_protocol->dev, "message ring is full");
		return NULL;
	}

	/* Get the pointer to the next free message element,
	 * reset the fields and mark it as invalid.
	 */
	msg = &ipc_protocol->p_ap_shm->msg_ring[head];
	memset(msg, 0, sizeof(*msg));

	/* return index in message ring */
	*index = head;

	return msg;
}

/* Updates the message ring Head pointer */
void ipc_protocol_msg_hp_update(struct iosm_imem *ipc_imem)
{
	struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;
	u32 head = le32_to_cpu(ipc_protocol->p_ap_shm->msg_head);
	u32 new_head = (head + 1) % IPC_MEM_MSG_ENTRIES;

	/* Update head pointer and fire doorbell. */
	ipc_protocol->p_ap_shm->msg_head = cpu_to_le32(new_head);
	ipc_protocol->old_msg_tail =
		le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail);

	ipc_pm_signal_hpda_doorbell(&ipc_protocol->pm, IPC_HP_MR, false);
}

/* Allocate and prepare an OPEN_PIPE message.
 * This also allocates the memory for the new TDR structure and
 * updates the pipe structure referenced in the preparation arguments.
 */
static int ipc_protocol_msg_prepipe_open(struct iosm_protocol *ipc_protocol,
					 union ipc_msg_prep_args *args)
{
	int index;
	union ipc_mem_msg_entry *msg =
		ipc_protocol_free_msg_get(ipc_protocol, &index);
	struct ipc_pipe *pipe = args->pipe_open.pipe;
	struct ipc_protocol_td *tdr;
	struct sk_buff **skbr;

	if (!msg) {
		dev_err(ipc_protocol->dev, "failed to get free message");
		return -EIO;
	}

	/* Allocate the skb ring that tracks the skbs currently in flight.
	 * The SKB ring is a driver-internal allocation, so there is no
	 * need to re-calculate its start and end addresses.
	 */
	skbr = kcalloc(pipe->nr_of_entries, sizeof(*skbr), GFP_ATOMIC);
	if (!skbr)
		return -ENOMEM;

	/* Allocate the transfer descriptors for the pipe. */
	tdr = dma_alloc_coherent(&ipc_protocol->pcie->pci->dev,
				 pipe->nr_of_entries * sizeof(*tdr),
				 &pipe->phy_tdr_start, GFP_ATOMIC);
	if (!tdr) {
		kfree(skbr);
		dev_err(ipc_protocol->dev, "tdr alloc error");
		return -ENOMEM;
	}

	pipe->max_nr_of_queued_entries = pipe->nr_of_entries - 1;
	pipe->nr_of_queued_entries = 0;
	pipe->tdr_start = tdr;
	pipe->skbr_start = skbr;
	pipe->old_tail = 0;

	ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] = 0;

	msg->open_pipe.type_of_message = IPC_MEM_MSG_OPEN_PIPE;
	msg->open_pipe.pipe_nr = pipe->pipe_nr;
	msg->open_pipe.tdr_addr = cpu_to_le64(pipe->phy_tdr_start);
	msg->open_pipe.tdr_entries = cpu_to_le16(pipe->nr_of_entries);
	msg->open_pipe.accumulation_backoff =
				cpu_to_le32(pipe->accumulation_backoff);
	msg->open_pipe.irq_vector = cpu_to_le32(pipe->irq);

	return index;
}

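/* Allocate and prepare a CLOSE_PIPE message for the given pipe. */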
static int ipc_protocol_msg_prepipe_close(struct iosm_protocol *ipc_protocol,
					  union ipc_msg_prep_args *args)
{
	int index = -1;
	union ipc_mem_msg_entry *msg =
		ipc_protocol_free_msg_get(ipc_protocol, &index);
	struct ipc_pipe *pipe = args->pipe_close.pipe;

	if (!msg)
		return -EIO;

	msg->close_pipe.type_of_message = IPC_MEM_MSG_CLOSE_PIPE;
	msg->close_pipe.pipe_nr = pipe->pipe_nr;

	dev_dbg(ipc_protocol->dev, "IPC_MEM_MSG_CLOSE_PIPE(pipe_nr=%d)",
		msg->close_pipe.pipe_nr);

	return index;
}

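/* Allocate and prepare a SLEEP message used to enter or exit D3. */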
static int ipc_protocol_msg_prep_sleep(struct iosm_protocol *ipc_protocol,
				       union ipc_msg_prep_args *args)
{
	int index = -1;
	union ipc_mem_msg_entry *msg =
		ipc_protocol_free_msg_get(ipc_protocol, &index);

	if (!msg) {
		dev_err(ipc_protocol->dev, "failed to get free message");
		return -EIO;
	}

	/* Prepare and send the host sleep message to CP to enter or exit D3. */
	msg->host_sleep.type_of_message = IPC_MEM_MSG_SLEEP;
	msg->host_sleep.target = args->sleep.target; /* 0=host, 1=device */

	/* state; 0=enter, 1=exit, 2=enter w/o protocol */
	msg->host_sleep.state = args->sleep.state;

	dev_dbg(ipc_protocol->dev, "IPC_MEM_MSG_SLEEP(target=%d; state=%d)",
		msg->host_sleep.target, msg->host_sleep.state);

	return index;
}

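/* Allocate and prepare a FEATURE_SET message; only the reset_enable
 * feature flag is conveyed to CP here.
 */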
static int ipc_protocol_msg_prep_feature_set(struct iosm_protocol *ipc_protocol,
					     union ipc_msg_prep_args *args)
{
	int index = -1;
	union ipc_mem_msg_entry *msg =
		ipc_protocol_free_msg_get(ipc_protocol, &index);

	if (!msg) {
		dev_err(ipc_protocol->dev, "failed to get free message");
		return -EIO;
	}

	msg->feature_set.type_of_message = IPC_MEM_MSG_FEATURE_SET;
	msg->feature_set.reset_enable = args->feature_set.reset_enable <<
					RESET_BIT;

	dev_dbg(ipc_protocol->dev, "IPC_MEM_MSG_FEATURE_SET(reset_enable=%d)",
		msg->feature_set.reset_enable >> RESET_BIT);

	return index;
}

/* Processes the messages consumed by CP. */
bool ipc_protocol_msg_process(struct iosm_imem *ipc_imem, int irq)
{
	struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;
	struct ipc_rsp **rsp_ring = ipc_protocol->rsp_ring;
	bool msg_processed = false;
	u32 i;

	if (le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail) >=
			IPC_MEM_MSG_ENTRIES) {
		dev_err(ipc_protocol->dev, "msg_tail out of range: %d",
			le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail));
		return msg_processed;
	}

	if (irq != IMEM_IRQ_DONT_CARE &&
	    irq != ipc_protocol->p_ap_shm->ci.msg_irq_vector)
		return msg_processed;

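	/* Walk the ring from the last processed entry up to the tail
	 * most recently published by CP.
	 */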
	for (i = ipc_protocol->old_msg_tail;
	     i != le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail);
	     i = (i + 1) % IPC_MEM_MSG_ENTRIES) {
		union ipc_mem_msg_entry *msg =
			&ipc_protocol->p_ap_shm->msg_ring[i];

		dev_dbg(ipc_protocol->dev, "msg[%d]: type=%u status=%d", i,
			msg->common.type_of_message,
			msg->common.completion_status);

		/* Update response with status and wake up waiting requestor */
		if (rsp_ring[i]) {
			rsp_ring[i]->status =
				le32_to_cpu(msg->common.completion_status);
			complete(&rsp_ring[i]->completion);
			rsp_ring[i] = NULL;
		}
		msg_processed = true;
	}

	ipc_protocol->old_msg_tail = i;
	return msg_processed;
}

/* Sends data from UL list to CP for the provided pipe by updating the Head
 * pointer of the given pipe.
 */
bool ipc_protocol_ul_td_send(struct iosm_protocol *ipc_protocol,
			     struct ipc_pipe *pipe,
			     struct sk_buff_head *p_ul_list)
{
	struct ipc_protocol_td *td;
	bool hpda_pending = false;
	struct sk_buff *skb;
	s32 free_elements;
	u32 head;
	u32 tail;

	if (!ipc_protocol->p_ap_shm) {
		dev_err(ipc_protocol->dev, "driver is not initialized");
		return false;
	}

	/* Get head and tail of the td list and calculate
	 * the number of free elements.
	 */
	head = le32_to_cpu(ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr]);
	tail = pipe->old_tail;

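	/* Fill TDs from the UL list until the ring is full or the
	 * list is empty.
	 */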
	while (!skb_queue_empty(p_ul_list)) {
		if (head < tail)
			free_elements = tail - head - 1;
		else
			free_elements =
				pipe->nr_of_entries - head + ((s32)tail - 1);

		if (free_elements <= 0) {
			dev_dbg(ipc_protocol->dev,
				"no free td elements for UL pipe %d",
				pipe->pipe_nr);
			break;
		}

		/* Get the td address. */
		td = &pipe->tdr_start[head];

		/* Take the first element of the uplink list and add it
		 * to the td list.
		 */
		skb = skb_dequeue(p_ul_list);
		if (WARN_ON(!skb))
			break;

		/* Save the reference to the uplink skbuf. */
		pipe->skbr_start[head] = skb;

		td->buffer.address = IPC_CB(skb)->mapping;
		td->scs = cpu_to_le32(skb->len) & cpu_to_le32(SIZE_MASK);
		td->next = 0;

		pipe->nr_of_queued_entries++;

		/* Calculate the new head and save it. */
		head++;
		if (head >= pipe->nr_of_entries)
			head = 0;

		ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] =
			cpu_to_le32(head);
	}

	if (pipe->old_head != head) {
		dev_dbg(ipc_protocol->dev, "New UL TDs Pipe:%d", pipe->pipe_nr);

		pipe->old_head = head;
		/* Trigger doorbell because of pending UL packets. */
		hpda_pending = true;
	}

	return hpda_pending;
}

/* Checks for Tail pointer update from CP and returns the data as SKB. */
struct sk_buff *ipc_protocol_ul_td_process(struct iosm_protocol *ipc_protocol,
					   struct ipc_pipe *pipe)
{
	struct ipc_protocol_td *p_td = &pipe->tdr_start[pipe->old_tail];
	struct sk_buff *skb = pipe->skbr_start[pipe->old_tail];

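	/* Consume the entry: drop the queued count and advance the
	 * local tail copy.
	 */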
	pipe->nr_of_queued_entries--;
	pipe->old_tail++;
	if (pipe->old_tail >= pipe->nr_of_entries)
		pipe->old_tail = 0;

	if (!p_td->buffer.address) {
		dev_err(ipc_protocol->dev, "Td buffer address is NULL");
		return NULL;
	}

	if (p_td->buffer.address != IPC_CB(skb)->mapping) {
		dev_err(ipc_protocol->dev,
			"pipe %d: invalid buf_addr or skb_data",
			pipe->pipe_nr);
		return NULL;
	}

	return skb;
}

/* Allocates an SKB for CP to send data and updates the Head Pointer
 * of the given Pipe#.
 */
bool ipc_protocol_dl_td_prepare(struct iosm_protocol *ipc_protocol,
				struct ipc_pipe *pipe)
{
	struct ipc_protocol_td *td;
	dma_addr_t mapping = 0;
	u32 head, new_head;
	struct sk_buff *skb;
	u32 tail;

	/* Get head and tail of the td list and calculate
	 * the number of free elements.
	 */
	head = le32_to_cpu(ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr]);
	tail = le32_to_cpu(ipc_protocol->p_ap_shm->tail_array[pipe->pipe_nr]);

	new_head = head + 1;
	if (new_head >= pipe->nr_of_entries)
		new_head = 0;

	if (new_head == tail)
		return false;

	/* Get the td address. */
	td = &pipe->tdr_start[head];

	/* Allocate the skbuf for the descriptor. */
	skb = ipc_pcie_alloc_skb(ipc_protocol->pcie, pipe->buf_size, GFP_ATOMIC,
				 &mapping, DMA_FROM_DEVICE,
				 IPC_MEM_DL_ETH_OFFSET);
	if (!skb)
		return false;

	td->buffer.address = mapping;
	td->scs = cpu_to_le32(pipe->buf_size) & cpu_to_le32(SIZE_MASK);
	td->next = 0;

	/* Store the new head value. */
	ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] =
		cpu_to_le32(new_head);

	/* Save the reference to the skbuf. */
	pipe->skbr_start[head] = skb;

	pipe->nr_of_queued_entries++;

	return true;
}

/* Processes DL TDs */
struct sk_buff *ipc_protocol_dl_td_process(struct iosm_protocol *ipc_protocol,
					   struct ipc_pipe *pipe)
{
	struct ipc_protocol_td *p_td;
	struct sk_buff *skb;

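	/* Nothing to process if the pipe has no TD ring. */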
	if (!pipe->tdr_start)
		return NULL;

	/* Copy the reference to the downlink buffer. */
	p_td = &pipe->tdr_start[pipe->old_tail];
	skb = pipe->skbr_start[pipe->old_tail];

	/* Reset the ring elements. */
	pipe->skbr_start[pipe->old_tail] = NULL;

	pipe->nr_of_queued_entries--;

	pipe->old_tail++;
	if (pipe->old_tail >= pipe->nr_of_entries)
		pipe->old_tail = 0;

	if (!skb) {
		dev_err(ipc_protocol->dev, "skb is null");
		goto ret;
	} else if (!p_td->buffer.address) {
		dev_err(ipc_protocol->dev, "td/buffer address is null");
		ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
		skb = NULL;
		goto ret;
	}

	if (p_td->buffer.address != IPC_CB(skb)->mapping) {
		dev_err(ipc_protocol->dev, "invalid buf=%llx or skb=%p",
			(unsigned long long)p_td->buffer.address, skb->data);
		ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
		skb = NULL;
		goto ret;
	} else if ((le32_to_cpu(p_td->scs) & SIZE_MASK) > pipe->buf_size) {
		dev_err(ipc_protocol->dev, "invalid buffer size %d > %d",
			le32_to_cpu(p_td->scs) & SIZE_MASK,
			pipe->buf_size);
		ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
		skb = NULL;
		goto ret;
	} else if (le32_to_cpu(p_td->scs) >> COMPLETION_STATUS ==
		  IPC_MEM_TD_CS_ABORT) {
		/* Discard aborted buffers. */
		dev_dbg(ipc_protocol->dev, "discard 'aborted' buffers");
		ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
		skb = NULL;
		goto ret;
	}

	/* Set the length field in skbuf. */
	skb_put(skb, le32_to_cpu(p_td->scs) & SIZE_MASK);

ret:
	return skb;
}

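/* Returns the current head and/or tail index of the given pipe from
 * AP shared memory.
 */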
void ipc_protocol_get_head_tail_index(struct iosm_protocol *ipc_protocol,
				      struct ipc_pipe *pipe, u32 *head,
				      u32 *tail)
{
	struct ipc_protocol_ap_shm *ipc_ap_shm = ipc_protocol->p_ap_shm;

	if (head)
		*head = le32_to_cpu(ipc_ap_shm->head_array[pipe->pipe_nr]);

	if (tail)
		*tail = le32_to_cpu(ipc_ap_shm->tail_array[pipe->pipe_nr]);
}

/* Frees the TDs given to CP. */
void ipc_protocol_pipe_cleanup(struct iosm_protocol *ipc_protocol,
			       struct ipc_pipe *pipe)
{
	struct sk_buff *skb;
	u32 head;
	u32 tail;

	/* Get the start and the end of the buffer list. */
	head = le32_to_cpu(ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr]);
	tail = pipe->old_tail;

	/* Reset tail and head to 0. */
	ipc_protocol->p_ap_shm->tail_array[pipe->pipe_nr] = 0;
	ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] = 0;

	/* Free pending uplink and downlink buffers. */
	if (pipe->skbr_start) {
		while (head != tail) {
			/* Get the reference to the skbuf,
			 * which is on the way and free it.
			 */
			skb = pipe->skbr_start[tail];
			if (skb)
				ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);

			tail++;
			if (tail >= pipe->nr_of_entries)
				tail = 0;
		}

		kfree(pipe->skbr_start);
		pipe->skbr_start = NULL;
	}

	pipe->old_tail = 0;

	/* Free and reset the td and skbuf circular buffers. kfree is safe! */
	if (pipe->tdr_start) {
		dma_free_coherent(&ipc_protocol->pcie->pci->dev,
				  sizeof(*pipe->tdr_start) * pipe->nr_of_entries,
				  pipe->tdr_start, pipe->phy_tdr_start);

		pipe->tdr_start = NULL;
	}
}

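/* Returns the device IPC state read from AP shared memory. */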
enum ipc_mem_device_ipc_state ipc_protocol_get_ipc_status(struct iosm_protocol
							  *ipc_protocol)
{
	return (enum ipc_mem_device_ipc_state)
		le32_to_cpu(ipc_protocol->p_ap_shm->device_info.ipc_status);
}

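/* Returns the execution stage from the device info area of AP shared
 * memory.
 */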
enum ipc_mem_exec_stage
ipc_protocol_get_ap_exec_stage(struct iosm_protocol *ipc_protocol)
{
	return le32_to_cpu(ipc_protocol->p_ap_shm->device_info.execution_stage);
}

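/* Dispatches message preparation to the handler for the given message
 * type.
 */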
int ipc_protocol_msg_prep(struct iosm_imem *ipc_imem,
			  enum ipc_msg_prep_type msg_type,
			  union ipc_msg_prep_args *args)
{
	struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;

	switch (msg_type) {
	case IPC_MSG_PREP_SLEEP:
		return ipc_protocol_msg_prep_sleep(ipc_protocol, args);

	case IPC_MSG_PREP_PIPE_OPEN:
		return ipc_protocol_msg_prepipe_open(ipc_protocol, args);

	case IPC_MSG_PREP_PIPE_CLOSE:
		return ipc_protocol_msg_prepipe_close(ipc_protocol, args);

	case IPC_MSG_PREP_FEATURE_SET:
		return ipc_protocol_msg_prep_feature_set(ipc_protocol, args);

		/* Unsupported messages in protocol */
	case IPC_MSG_PREP_MAP:
	case IPC_MSG_PREP_UNMAP:
	default:
		dev_err(ipc_protocol->dev,
			"unsupported message type: %d in protocol", msg_type);
		return -EINVAL;
	}
}

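/* Returns the device sleep notification read from AP shared memory. */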
u32
ipc_protocol_pm_dev_get_sleep_notification(struct iosm_protocol *ipc_protocol)
{
	struct ipc_protocol_ap_shm *ipc_ap_shm = ipc_protocol->p_ap_shm;

	return le32_to_cpu(ipc_ap_shm->device_info.device_sleep_notification);
}