// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include "iosm_ipc_imem.h"
#include "iosm_ipc_protocol.h"
#include "iosm_ipc_protocol_ops.h"
#include "iosm_ipc_pm.h"
#include "iosm_ipc_task_queue.h"

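/* Prepare a message in the message ring, attach the caller's response
 * reference and signal CP. Runs in tasklet (task queue) context.
 */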
int ipc_protocol_tq_msg_send(struct iosm_protocol *ipc_protocol,
			     enum ipc_msg_prep_type msg_type,
			     union ipc_msg_prep_args *prep_args,
			     struct ipc_rsp *response)
{
	int index = ipc_protocol_msg_prep(ipc_protocol->imem, msg_type,
					  prep_args);

	/* Store a reference to the caller-specified response in the
	 * response ring and signal CP.
	 */
	if (index >= 0 && index < IPC_MEM_MSG_ENTRIES) {
		ipc_protocol->rsp_ring[index] = response;
		ipc_protocol_msg_hp_update(ipc_protocol->imem);
	}

	return index;
}

/* Callback for message send */
static int ipc_protocol_tq_msg_send_cb(struct iosm_imem *ipc_imem, int arg,
				       void *msg, size_t size)
{
	struct ipc_call_msg_send_args *send_args = msg;
	struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;

	return ipc_protocol_tq_msg_send(ipc_protocol, send_args->msg_type,
					send_args->prep_args,
					send_args->response);
}

/* Remove reference to a response. This is typically used when a requestor
 * timed out and is no longer interested in the response.
 */
static int ipc_protocol_tq_msg_remove(struct iosm_imem *ipc_imem, int arg,
				      void *msg, size_t size)
{
	struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;

	ipc_protocol->rsp_ring[arg] = NULL;
	return 0;
}

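/* Send a message to CP from process context and block until CP responds or
 * the boot/run stage specific timeout expires.
 */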
int ipc_protocol_msg_send(struct iosm_protocol *ipc_protocol,
			  enum ipc_msg_prep_type prep,
			  union ipc_msg_prep_args *prep_args)
{
	struct ipc_call_msg_send_args send_args;
	unsigned int exec_timeout;
	struct ipc_rsp response;
	int index;

	exec_timeout = (ipc_protocol_get_ap_exec_stage(ipc_protocol) ==
					IPC_MEM_EXEC_STAGE_RUN ?
				IPC_MSG_COMPLETE_RUN_DEFAULT_TIMEOUT :
				IPC_MSG_COMPLETE_BOOT_DEFAULT_TIMEOUT);

	/* Trap if called from non-preemptible context */
	might_sleep();

	response.status = IPC_MEM_MSG_CS_INVALID;
	init_completion(&response.completion);

	send_args.msg_type = prep;
	send_args.prep_args = prep_args;
	send_args.response = &response;

	/* Allocate and prepare the message to be sent in tasklet context.
	 * A non-negative index returned from ipc_task_queue_send_task
	 * references the message in case it needs to be cancelled on
	 * timeout.
	 */
	index = ipc_task_queue_send_task(ipc_protocol->imem,
					 ipc_protocol_tq_msg_send_cb, 0,
					 &send_args, 0, true);

	if (index < 0) {
		dev_err(ipc_protocol->dev, "msg %d failed", prep);
		return index;
	}

	/* Wait for the device to respond to the message */
	switch (wait_for_completion_timeout(&response.completion,
					    msecs_to_jiffies(exec_timeout))) {
	case 0:
		/* Timeout, there was no response from the device.
		 * Remove the reference to the local response completion
		 * object as we are no longer interested in the response.
		 */
		ipc_task_queue_send_task(ipc_protocol->imem,
					 ipc_protocol_tq_msg_remove, index,
					 NULL, 0, true);
		dev_err(ipc_protocol->dev, "msg timeout");
		ipc_uevent_send(ipc_protocol->pcie->dev, UEVENT_MDM_TIMEOUT);
		break;
	default:
		/* We got a response in time; check the completion status: */
		if (response.status != IPC_MEM_MSG_CS_SUCCESS) {
			dev_err(ipc_protocol->dev,
				"msg completion status error %d",
				response.status);
			return -EIO;
		}
	}

	return 0;
}

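/* Send a TARGET_HOST sleep notification (enter/exit sleep) message to CP. */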
static int ipc_protocol_msg_send_host_sleep(struct iosm_protocol *ipc_protocol,
					    u32 state)
{
	union ipc_msg_prep_args prep_args = {
		.sleep.target = 0,
		.sleep.state = state,
	};

	return ipc_protocol_msg_send(ipc_protocol, IPC_MSG_PREP_SLEEP,
				     &prep_args);
}

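/* Signal the HPDA doorbell interrupt to CP with the given identifier. */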
void ipc_protocol_doorbell_trigger(struct iosm_protocol *ipc_protocol,
				   u32 identifier)
{
	ipc_pm_signal_hpda_doorbell(&ipc_protocol->pm, identifier, true);
}

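/* Process a device sleep notification if CP IPC is in the RUNNING state and
 * forward the requested PM state to the PM component.
 */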
bool ipc_protocol_pm_dev_sleep_handle(struct iosm_protocol *ipc_protocol)
{
	u32 ipc_status = ipc_protocol_get_ipc_status(ipc_protocol);
	u32 requested;

	if (ipc_status != IPC_MEM_DEVICE_IPC_RUNNING) {
		dev_err(ipc_protocol->dev,
			"irq ignored, CP IPC state is %d, should be RUNNING",
			ipc_status);

		/* Stop further processing. */
		return false;
	}

	/* Get a copy of the PM state requested by the device. */
	requested = ipc_protocol_pm_dev_get_sleep_notification(ipc_protocol);

	return ipc_pm_dev_slp_notification(&ipc_protocol->pm, requested);
}

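/* Tasklet call that wakes the device from sleep by toggling the
 * IPC_PM_UNIT_HS trigger (activate, then deactivate).
 */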
static int ipc_protocol_tq_wakeup_dev_slp(struct iosm_imem *ipc_imem, int arg,
					  void *msg, size_t size)
{
	struct iosm_pm *ipc_pm = &ipc_imem->ipc_protocol->pm;

	/* Wake up from device sleep if it is not ACTIVE */
	ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_HS, true);

	ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_HS, false);

	return 0;
}

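/* Pass the s2idle sleep/active state to the PM component. */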
void ipc_protocol_s2idle_sleep(struct iosm_protocol *ipc_protocol, bool sleep)
{
	ipc_pm_set_s2idle_sleep(&ipc_protocol->pm, sleep);
}

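/* Enter host sleep: prepare the PM component, wake the device and wait for
 * it to become active, then send the ENTER_SLEEP message to CP.
 */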
bool ipc_protocol_suspend(struct iosm_protocol *ipc_protocol)
{
	if (!ipc_pm_prepare_host_sleep(&ipc_protocol->pm))
		goto err;

	ipc_task_queue_send_task(ipc_protocol->imem,
				 ipc_protocol_tq_wakeup_dev_slp, 0, NULL, 0,
				 true);

	if (!ipc_pm_wait_for_device_active(&ipc_protocol->pm)) {
		ipc_uevent_send(ipc_protocol->pcie->dev, UEVENT_MDM_TIMEOUT);
		goto err;
	}

	/* Send the sleep message for sync sys calls. */
	dev_dbg(ipc_protocol->dev, "send TARGET_HOST, ENTER_SLEEP");
	if (ipc_protocol_msg_send_host_sleep(ipc_protocol,
					     IPC_HOST_SLEEP_ENTER_SLEEP)) {
		/* Sending ENTER_SLEEP message failed, we are still active */
		ipc_protocol->pm.host_pm_state = IPC_MEM_HOST_PM_ACTIVE;
		goto err;
	}

	ipc_protocol->pm.host_pm_state = IPC_MEM_HOST_PM_SLEEP;
	return true;
err:
	return false;
}

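/* Leave host sleep: prepare the PM component for the active state and send
 * the EXIT_SLEEP message to CP.
 */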
bool ipc_protocol_resume(struct iosm_protocol *ipc_protocol)
{
	if (!ipc_pm_prepare_host_active(&ipc_protocol->pm))
		return false;

	dev_dbg(ipc_protocol->dev, "send TARGET_HOST, EXIT_SLEEP");
	if (ipc_protocol_msg_send_host_sleep(ipc_protocol,
					     IPC_HOST_SLEEP_EXIT_SLEEP)) {
		ipc_protocol->pm.host_pm_state = IPC_MEM_HOST_PM_SLEEP;
		return false;
	}

	ipc_protocol->pm.host_pm_state = IPC_MEM_HOST_PM_ACTIVE;

	return true;
}

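/* Allocate the protocol instance and the AP shared memory, fill in the
 * context information for CP, pass its address to CP via MMIO and
 * initialize the PM component.
 */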
struct iosm_protocol *ipc_protocol_init(struct iosm_imem *ipc_imem)
{
	struct iosm_protocol *ipc_protocol =
		kzalloc(sizeof(*ipc_protocol), GFP_KERNEL);
	struct ipc_protocol_context_info *p_ci;
	u64 addr;

	if (!ipc_protocol)
		return NULL;

	ipc_protocol->dev = ipc_imem->dev;
	ipc_protocol->pcie = ipc_imem->pcie;
	ipc_protocol->imem = ipc_imem;
	ipc_protocol->p_ap_shm = NULL;
	ipc_protocol->phy_ap_shm = 0;

	ipc_protocol->old_msg_tail = 0;

	ipc_protocol->p_ap_shm =
		dma_alloc_coherent(&ipc_protocol->pcie->pci->dev,
				   sizeof(*ipc_protocol->p_ap_shm),
				   &ipc_protocol->phy_ap_shm, GFP_KERNEL);

	if (!ipc_protocol->p_ap_shm) {
		dev_err(ipc_protocol->dev, "pci shm alloc error");
		kfree(ipc_protocol);
		return NULL;
	}

	/* Prepare the context info for CP. */
	addr = ipc_protocol->phy_ap_shm;
	p_ci = &ipc_protocol->p_ap_shm->ci;
	p_ci->device_info_addr =
		addr + offsetof(struct ipc_protocol_ap_shm, device_info);
	p_ci->head_array =
		addr + offsetof(struct ipc_protocol_ap_shm, head_array);
	p_ci->tail_array =
		addr + offsetof(struct ipc_protocol_ap_shm, tail_array);
	p_ci->msg_head = addr + offsetof(struct ipc_protocol_ap_shm, msg_head);
	p_ci->msg_tail = addr + offsetof(struct ipc_protocol_ap_shm, msg_tail);
	p_ci->msg_ring_addr =
		addr + offsetof(struct ipc_protocol_ap_shm, msg_ring);
	p_ci->msg_ring_entries = cpu_to_le16(IPC_MEM_MSG_ENTRIES);
	p_ci->msg_irq_vector = IPC_MSG_IRQ_VECTOR;
	p_ci->device_info_irq_vector = IPC_DEVICE_IRQ_VECTOR;

	ipc_mmio_set_contex_info_addr(ipc_imem->mmio, addr);

	ipc_pm_init(ipc_protocol);

	return ipc_protocol;
}

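/* Free the AP shared memory, deinitialize the PM component and release the
 * protocol instance.
 */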
void ipc_protocol_deinit(struct iosm_protocol *proto)
{
	dma_free_coherent(&proto->pcie->pci->dev, sizeof(*proto->p_ap_shm),
			  proto->p_ap_shm, proto->phy_ap_shm);

	ipc_pm_deinit(proto);
	kfree(proto);
}