// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/delay.h>

#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_devlink.h"
#include "iosm_ipc_flash.h"
#include "iosm_ipc_imem.h"
#include "iosm_ipc_port.h"
#include "iosm_ipc_trace.h"
#include "iosm_ipc_debugfs.h"

/* Check if the given channel is the WWAN IP channel. */
static int ipc_imem_check_wwan_ips(struct ipc_mem_channel *chnl)
{
	if (chnl)
		return chnl->ctype == IPC_CTYPE_WWAN &&
		       chnl->if_id == IPC_MEM_MUX_IP_CH_IF_ID;
	return false;
}

static int ipc_imem_msg_send_device_sleep(struct iosm_imem *ipc_imem, u32 state)
{
	union ipc_msg_prep_args prep_args = {
		.sleep.target = 1,
		.sleep.state = state,
	};

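	/* Cache the last requested sleep state so that the runtime-phase
	 * transition in ipc_imem_handle_irq() can re-send it.
	 */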
	ipc_imem->device_sleep = state;

	return ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
					IPC_MSG_PREP_SLEEP, &prep_args, NULL);
}

static bool ipc_imem_dl_skb_alloc(struct iosm_imem *ipc_imem,
				  struct ipc_pipe *pipe)
{
	/* limit max. nr of entries */
	if (pipe->nr_of_queued_entries >= pipe->max_nr_of_queued_entries)
		return false;

	return ipc_protocol_dl_td_prepare(ipc_imem->ipc_protocol, pipe);
}

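/* Callers drive ipc_imem_dl_skb_alloc() in a loop, e.g.
 *
 *	while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
 *		new_buffers_available = true;
 *
 * so TDs keep being queued until the per-pipe limit is reached or the
 * protocol layer runs out of resources.
 */
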
/* This timer handler retries the DL buffer allocation if a pipe has run out
 * of free buffers and rings the doorbell if new TDs could be queued.
 */
static int ipc_imem_tq_td_alloc_timer(struct iosm_imem *ipc_imem, int arg,
				      void *msg, size_t size)
{
	bool new_buffers_available = false;
	bool retry_allocation = false;
	int i;

	for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
		struct ipc_pipe *pipe = &ipc_imem->channels[i].dl_pipe;

		if (!pipe->is_open || pipe->nr_of_queued_entries > 0)
			continue;

		while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
			new_buffers_available = true;

		if (pipe->nr_of_queued_entries == 0)
			retry_allocation = true;
	}

	if (new_buffers_available)
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_DL_PROCESS);

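	/* Re-arm the timer: IPC_TD_ALLOC_TIMER_PERIOD_MS is converted to ns
	 * for ktime_set() (1 ms = 1000 * 1000 ns).
	 */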
	if (retry_allocation) {
		ipc_imem->hrtimer_period =
		ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
		if (!hrtimer_active(&ipc_imem->td_alloc_timer))
			hrtimer_start(&ipc_imem->td_alloc_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
	return 0;
}

static enum hrtimer_restart ipc_imem_td_alloc_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, td_alloc_timer);
	/* Post an async tasklet event to trigger HP update Doorbell */
	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_alloc_timer, 0, NULL,
				 0, false);
	return HRTIMER_NORESTART;
}

/* Fast update timer tasklet handler to trigger HP update */
static int ipc_imem_tq_fast_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
					    void *msg, size_t size)
{
	ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
				      IPC_HP_FAST_TD_UPD_TMR);

	return 0;
}

static enum hrtimer_restart
ipc_imem_fast_update_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, fast_update_timer);
	/* Post an async tasklet event to trigger HP update Doorbell */
	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_fast_update_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

static int ipc_imem_tq_adb_timer_cb(struct iosm_imem *ipc_imem, int arg,
				    void *msg, size_t size)
{
	ipc_mux_ul_adb_finish(ipc_imem->mux);
	return 0;
}

static enum hrtimer_restart
ipc_imem_adb_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, adb_timer);

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_adb_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem,
					  struct ipc_mux_config *cfg)
{
	ipc_mmio_update_cp_capability(ipc_imem->mmio);

	if (ipc_imem->mmio->mux_protocol == MUX_UNKNOWN) {
		dev_err(ipc_imem->dev, "Failed to get Mux capability.");
		return -EINVAL;
	}

	cfg->protocol = ipc_imem->mmio->mux_protocol;

	cfg->ul_flow = (ipc_imem->mmio->has_ul_flow_credit == 1) ?
			       MUX_UL_ON_CREDITS :
			       MUX_UL;

	/* The instance ID is the same as the channel ID because it is
	 * reused by the channel allocation function.
	 */
	cfg->instance_id = IPC_MEM_MUX_IP_CH_IF_ID;

	return 0;
}

void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
				   unsigned int reset_enable, bool atomic_ctx)
{
	union ipc_msg_prep_args prep_args = { .feature_set.reset_enable =
						      reset_enable };

	if (atomic_ctx)
		ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
					 IPC_MSG_PREP_FEATURE_SET, &prep_args,
					 NULL);
	else
		ipc_protocol_msg_send(ipc_imem->ipc_protocol,
				      IPC_MSG_PREP_FEATURE_SET, &prep_args);
}

/**
 * ipc_imem_td_update_timer_start - Starts the TD Update Timer if not started.
 * @ipc_imem:                       Pointer to imem data-struct
 */
void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem)
{
	/* Use the TD update timer only in the runtime phase */
	if (!ipc_imem->enter_runtime || ipc_imem->td_update_timer_suspended) {
		/* trigger the doorbell irq on CP directly. */
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_TD_UPD_TMR_START);
		return;
	}

	if (!hrtimer_active(&ipc_imem->tdupdate_timer)) {
		ipc_imem->hrtimer_period =
		ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		if (!hrtimer_active(&ipc_imem->tdupdate_timer))
			hrtimer_start(&ipc_imem->tdupdate_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
}

void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
{
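	/* Note: hrtimer_cancel() is also safe on an inactive timer; the
	 * hrtimer_active() check merely skips the call in the common case.
	 */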
	if (hrtimer_active(hr_timer))
		hrtimer_cancel(hr_timer);
}

/**
 * ipc_imem_adb_timer_start -	Starts the ADB timer if not already started.
 * @ipc_imem:			Pointer to imem data-struct
 */
void ipc_imem_adb_timer_start(struct iosm_imem *ipc_imem)
{
	if (!hrtimer_active(&ipc_imem->adb_timer)) {
		ipc_imem->hrtimer_period =
			ktime_set(0, IOSM_AGGR_MUX_ADB_FINISH_TIMEOUT_NSEC);
		hrtimer_start(&ipc_imem->adb_timer,
			      ipc_imem->hrtimer_period,
			      HRTIMER_MODE_REL);
	}
}

bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;
	bool hpda_ctrl_pending = false;
	struct sk_buff_head *ul_list;
	bool hpda_pending = false;
	struct ipc_pipe *pipe;
	int i;

	/* Analyze the uplink pipe of all active channels. */
	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		channel = &ipc_imem->channels[i];

		if (channel->state != IMEM_CHANNEL_ACTIVE)
			continue;

		pipe = &channel->ul_pipe;

		/* Get the reference to the skbuf accumulator list. */
		ul_list = &channel->ul_list;

		/* Fill the transfer descriptor with the uplink buffer info. */
		if (!ipc_imem_check_wwan_ips(channel)) {
			hpda_ctrl_pending |=
				ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
							pipe, ul_list);
		} else {
			hpda_pending |=
				ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
							pipe, ul_list);
		}
	}

	/* A forced HP update is needed for the non-data channels. */
	if (hpda_ctrl_pending) {
		hpda_pending = false;
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_UL_WRITE_TD);
	}

	return hpda_pending;
}

void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem)
{
	int timeout = IPC_MODEM_BOOT_TIMEOUT;

	ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;

	/* Trigger the CP interrupt to enter the init state. */
	ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
			  IPC_MEM_DEVICE_IPC_INIT);
	/* Wait for the CP update. */
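	/* Each poll iteration sleeps 20 ms, so the worst-case wait is about
	 * IPC_MODEM_BOOT_TIMEOUT * 20 ms.
	 */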
	do {
		if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
		    ipc_imem->ipc_requested_state) {
			/* Prepare the MMIO space */
			ipc_mmio_config(ipc_imem->mmio);

			/* Trigger the CP irq to enter the running state. */
			ipc_imem->ipc_requested_state =
				IPC_MEM_DEVICE_IPC_RUNNING;
			ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
					  IPC_MEM_DEVICE_IPC_RUNNING);

			return;
		}
		msleep(20);
	} while (--timeout);

	/* timeout */
	dev_err(ipc_imem->dev, "%s: ipc_status(%d) ne. IPC_MEM_DEVICE_IPC_INIT",
		ipc_imem_phase_get_string(ipc_imem->phase),
		ipc_mmio_get_ipc_state(ipc_imem->mmio));

	ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_TIMEOUT);
}

/* Analyze the packet type and distribute it. */
static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem,
				    struct ipc_pipe *pipe, struct sk_buff *skb)
{
	u16 port_id;

	if (!skb)
		return;

	/* An AT/control or IP packet is expected. */
	switch (pipe->channel->ctype) {
	case IPC_CTYPE_CTRL:
		port_id = pipe->channel->channel_id;
		ipc_pcie_addr_unmap(ipc_imem->pcie, IPC_CB(skb)->len,
				    IPC_CB(skb)->mapping,
				    IPC_CB(skb)->direction);
		if (port_id == IPC_MEM_CTRL_CHL_ID_7)
			ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink,
						       skb);
		else if (ipc_is_trace_channel(ipc_imem, port_id))
			ipc_trace_port_rx(ipc_imem, skb);
		else
			wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port,
				     skb);
		break;

	case IPC_CTYPE_WWAN:
		if (pipe->channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
			ipc_mux_dl_decode(ipc_imem->mux, skb);
		break;
	default:
		dev_err(ipc_imem->dev, "Invalid channel type");
		break;
	}
}

/* Process the downlink data and pass them to the char or net layer. */
static void ipc_imem_dl_pipe_process(struct iosm_imem *ipc_imem,
				     struct ipc_pipe *pipe)
{
	s32 cnt = 0, processed_td_cnt = 0;
	struct ipc_mem_channel *channel;
	u32 head = 0, tail = 0;
	bool processed = false;
	struct sk_buff *skb;

	channel = pipe->channel;

	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
					 &tail);
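	/* Count the pending TDs between old_tail and tail; e.g. with
	 * nr_of_entries = 8, old_tail = 6 and tail = 2 (hypothetical values)
	 * the index has wrapped, so cnt = 8 - 6 + 2 = 4.
	 */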
	if (pipe->old_tail != tail) {
		if (pipe->old_tail < tail)
			cnt = tail - pipe->old_tail;
		else
			cnt = pipe->nr_of_entries - pipe->old_tail + tail;
	}

	processed_td_cnt = cnt;

	/* Seek for pipes with pending DL data. */
	while (cnt--) {
		skb = ipc_protocol_dl_td_process(ipc_imem->ipc_protocol, pipe);

		/* Analyze the packet type and distribute it. */
		ipc_imem_dl_skb_process(ipc_imem, pipe, skb);
	}

	/* Try to allocate new empty DL SKBs from head..tail - 1. */
	while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
		processed = true;

	if (processed && !ipc_imem_check_wwan_ips(channel)) {
		/* Force HP update for non IP channels */
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_DL_PROCESS);
		processed = false;

		/* If the Fast Update timer is already running, stop it. */
		ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
	}

	/* Any control channel process will get an immediate HP update.
	 * Start the Fast update timer only for the IP channel if all the TDs
	 * were used in the last process.
	 */
	if (processed && (processed_td_cnt == pipe->nr_of_entries - 1)) {
		ipc_imem->hrtimer_period =
		ktime_set(0, FORCE_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		hrtimer_start(&ipc_imem->fast_update_timer,
			      ipc_imem->hrtimer_period, HRTIMER_MODE_REL);
	}

	if (ipc_imem->app_notify_dl_pend)
		complete(&ipc_imem->dl_pend_sem);
}

/* process open uplink pipe */
static void ipc_imem_ul_pipe_process(struct iosm_imem *ipc_imem,
				     struct ipc_pipe *pipe)
{
	struct ipc_mem_channel *channel;
	u32 tail = 0, head = 0;
	struct sk_buff *skb;
	s32 cnt = 0;

	channel = pipe->channel;

	/* Get the pipe's head and tail index. */
	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
					 &tail);

	if (pipe->old_tail != tail) {
		if (pipe->old_tail < tail)
			cnt = tail - pipe->old_tail;
		else
			cnt = pipe->nr_of_entries - pipe->old_tail + tail;
	}

	/* Free UL buffers. */
	while (cnt--) {
		skb = ipc_protocol_ul_td_process(ipc_imem->ipc_protocol, pipe);

		if (!skb)
			continue;

		/* If the user app was suspended in uplink direction - blocking
		 * write, resume it.
		 */
		if (IPC_CB(skb)->op_type == UL_USR_OP_BLOCKED)
			complete(&channel->ul_sem);

		/* Free the skbuf element. */
		if (IPC_CB(skb)->op_type == UL_MUX_OP_ADB) {
			if (channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
				ipc_mux_ul_encoded_process(ipc_imem->mux, skb);
			else
				dev_err(ipc_imem->dev,
					"OP Type is UL_MUX, unknown if_id %d",
					channel->if_id);
		} else {
			ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
		}
	}

	/* For the IP UL pipe, check flow-control credits and restart TX. */
	if (ipc_imem_check_wwan_ips(pipe->channel))
		ipc_mux_check_n_restart_tx(ipc_imem->mux);

	if (ipc_imem->app_notify_ul_pend)
		complete(&ipc_imem->ul_pend_sem);
}

/* Handle the ROM irq: latch the exit code and wake the flash app. */
static void ipc_imem_rom_irq_exec(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;

	channel = ipc_imem->ipc_devlink->devlink_sio.channel;
	ipc_imem->rom_exit_code = ipc_mmio_get_rom_exit_code(ipc_imem->mmio);
	complete(&channel->ul_sem);
}

/* Execute the UL bundle timer actions, generating the doorbell irq. */
static int ipc_imem_tq_td_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
					  void *msg, size_t size)
{
	ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
				      IPC_HP_TD_UPD_TMR);
	return 0;
}

/* Consider link power management in the runtime phase. */
static void ipc_imem_slp_control_exec(struct iosm_imem *ipc_imem)
{
	/* The link will go down; handle pending UL packets. */
	if (ipc_protocol_pm_dev_sleep_handle(ipc_imem->ipc_protocol) &&
	    hrtimer_active(&ipc_imem->tdupdate_timer)) {
		/* Generate the doorbell irq. */
		ipc_imem_tq_td_update_timer_cb(ipc_imem, 0, NULL, 0);
		/* Stop the TD update timer. */
		ipc_imem_hrtimer_stop(&ipc_imem->tdupdate_timer);
		/* Stop the fast update timer. */
		ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
	}
}

/* Execute startup timer and wait for delayed start (e.g. NAND) */
static int ipc_imem_tq_startup_timer_cb(struct iosm_imem *ipc_imem, int arg,
					void *msg, size_t size)
{
	/* Update & check the current operation phase. */
	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN)
		return -EIO;

	if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
	    IPC_MEM_DEVICE_IPC_UNINIT) {
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;

		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_INIT);

		/* reduce period to 100 ms to check for mmio init state */
		ipc_imem->hrtimer_period = ktime_set(0, 100 * 1000UL * 1000ULL);
		if (!hrtimer_active(&ipc_imem->startup_timer))
			hrtimer_start(&ipc_imem->startup_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	} else if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
		   IPC_MEM_DEVICE_IPC_INIT) {
		/* Startup complete - disable timer */
		ipc_imem_hrtimer_stop(&ipc_imem->startup_timer);

		/* Prepare the MMIO space */
		ipc_mmio_config(ipc_imem->mmio);
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_RUNNING;
		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_RUNNING);
	}

	return 0;
}

static enum hrtimer_restart ipc_imem_startup_timer_cb(struct hrtimer *hr_timer)
{
	enum hrtimer_restart result = HRTIMER_NORESTART;
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, startup_timer);

	if (ktime_to_ns(ipc_imem->hrtimer_period)) {
		hrtimer_forward_now(&ipc_imem->startup_timer,
				    ipc_imem->hrtimer_period);
		result = HRTIMER_RESTART;
	}

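	/* The hrtimer callback runs in hard-irq context, so the real work is
	 * deferred to the tasklet via the task queue.
	 */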
	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_startup_timer_cb, 0,
				 NULL, 0, false);
	return result;
}

/* Get the CP execution stage */
static enum ipc_mem_exec_stage
ipc_imem_get_exec_stage_buffered(struct iosm_imem *ipc_imem)
{
	return (ipc_imem->phase == IPC_P_RUN &&
		ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) ?
		       ipc_protocol_get_ap_exec_stage(ipc_imem->ipc_protocol) :
		       ipc_mmio_get_exec_stage(ipc_imem->mmio);
}

/* Callback to send the modem ready uevent */
static int ipc_imem_send_mdm_rdy_cb(struct iosm_imem *ipc_imem, int arg,
				    void *msg, size_t size)
{
	enum ipc_mem_exec_stage exec_stage =
		ipc_imem_get_exec_stage_buffered(ipc_imem);

	if (exec_stage == IPC_MEM_EXEC_STAGE_RUN)
		ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);

	return 0;
}

/* This function is executed in a task context via an ipc_worker object,
 * as the creation or removal of a device can't be done from tasklet
 * context.
 */
static void ipc_imem_run_state_worker(struct work_struct *instance)
{
	struct ipc_chnl_cfg chnl_cfg_port = { 0 };
	struct ipc_mux_config mux_cfg;
	struct iosm_imem *ipc_imem;
	u8 ctrl_chl_idx = 0;
	int ret;

	ipc_imem = container_of(instance, struct iosm_imem, run_state_worker);

	if (ipc_imem->phase != IPC_P_RUN) {
		dev_err(ipc_imem->dev,
			"Modem link down. Exit run state worker.");
		goto err_out;
	}

	if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
		ipc_devlink_deinit(ipc_imem->ipc_devlink);

	ret = ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg);
	if (ret < 0)
		goto err_out;

	ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);
	if (!ipc_imem->mux)
		goto err_out;

	ret = ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
	if (ret < 0)
		goto err_ipc_mux_deinit;

	ipc_imem->mux->wwan = ipc_imem->wwan;

	while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
		if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
			ipc_imem->ipc_port[ctrl_chl_idx] = NULL;

			if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7560_ID &&
			    chnl_cfg_port.wwan_port_type == WWAN_PORT_XMMRPC) {
				ctrl_chl_idx++;
				continue;
			}

			if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7360_ID &&
			    chnl_cfg_port.wwan_port_type == WWAN_PORT_MBIM) {
				ctrl_chl_idx++;
				continue;
			}
			if (chnl_cfg_port.wwan_port_type != WWAN_PORT_UNKNOWN) {
				ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
						      chnl_cfg_port,
						      IRQ_MOD_OFF);
				ipc_imem->ipc_port[ctrl_chl_idx] =
					ipc_port_init(ipc_imem, chnl_cfg_port);
			}
		}
		ctrl_chl_idx++;
	}

	ipc_debugfs_init(ipc_imem);

	ipc_task_queue_send_task(ipc_imem, ipc_imem_send_mdm_rdy_cb, 0, NULL, 0,
				 false);

	/* Complete all memory stores before setting bit */
	smp_mb__before_atomic();

	set_bit(FULLY_FUNCTIONAL, &ipc_imem->flag);

	/* Complete all memory stores after setting bit */
	smp_mb__after_atomic();

	return;

err_ipc_mux_deinit:
	ipc_mux_deinit(ipc_imem->mux);
err_out:
	ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
}

static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
{
	enum ipc_mem_device_ipc_state curr_ipc_status;
	enum ipc_phase old_phase, phase;
	bool retry_allocation = false;
	bool ul_pending = false;
	int i;

	if (irq != IMEM_IRQ_DONT_CARE)
		ipc_imem->ev_irq_pending[irq] = false;

	/* Get the internal phase. */
	old_phase = ipc_imem->phase;

	if (old_phase == IPC_P_OFF_REQ) {
		dev_dbg(ipc_imem->dev,
			"[%s]: Ignoring MSI. Deinit sequence in progress!",
			ipc_imem_phase_get_string(old_phase));
		return;
	}

	/* Update the phase controlled by CP. */
	phase = ipc_imem_phase_update(ipc_imem);

	switch (phase) {
	case IPC_P_RUN:
		if (!ipc_imem->enter_runtime) {
			/* Execute the transition from flash/boot to runtime. */
			ipc_imem->enter_runtime = 1;

			/* allow device to sleep, default value is
			 * IPC_HOST_SLEEP_ENTER_SLEEP
			 */
			ipc_imem_msg_send_device_sleep(ipc_imem,
						       ipc_imem->device_sleep);

			ipc_imem_msg_send_feature_set(ipc_imem,
						      IPC_MEM_INBAND_CRASH_SIG,
						      true);
		}

		curr_ipc_status =
			ipc_protocol_get_ipc_status(ipc_imem->ipc_protocol);

		/* check ipc_status change */
		if (ipc_imem->ipc_status != curr_ipc_status) {
			ipc_imem->ipc_status = curr_ipc_status;

			if (ipc_imem->ipc_status ==
			    IPC_MEM_DEVICE_IPC_RUNNING) {
				schedule_work(&ipc_imem->run_state_worker);
			}
		}

		/* Consider power management in the runtime phase. */
		ipc_imem_slp_control_exec(ipc_imem);
		break; /* Continue with skbuf processing. */

		/* Unexpected phases. */
	case IPC_P_OFF:
	case IPC_P_OFF_REQ:
		dev_err(ipc_imem->dev, "confused phase %s",
			ipc_imem_phase_get_string(phase));
		return;

	case IPC_P_PSI:
		if (old_phase != IPC_P_ROM)
			break;

		fallthrough;
		/* On CP the PSI phase is already active. */

	case IPC_P_ROM:
		/* Before CP ROM driver starts the PSI image, it sets
		 * the exit_code field on the doorbell scratchpad and
		 * triggers the irq.
		 */
		ipc_imem_rom_irq_exec(ipc_imem);
		return;

	default:
		break;
	}

	/* process message ring */
	ipc_protocol_msg_process(ipc_imem, irq);

	/* process all open pipes */
	for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
		struct ipc_pipe *ul_pipe = &ipc_imem->channels[i].ul_pipe;
		struct ipc_pipe *dl_pipe = &ipc_imem->channels[i].dl_pipe;

		if (dl_pipe->is_open &&
		    (irq == IMEM_IRQ_DONT_CARE || irq == dl_pipe->irq)) {
			ipc_imem_dl_pipe_process(ipc_imem, dl_pipe);

			if (dl_pipe->nr_of_queued_entries == 0)
				retry_allocation = true;
		}

		if (ul_pipe->is_open)
			ipc_imem_ul_pipe_process(ipc_imem, ul_pipe);
	}

	/* Try to generate new ADB or ADGH. */
	if (ipc_mux_ul_data_encode(ipc_imem->mux)) {
		ipc_imem_td_update_timer_start(ipc_imem);
		if (ipc_imem->mux->protocol == MUX_AGGREGATION)
			ipc_imem_adb_timer_start(ipc_imem);
	}

	/* Continue the send procedure with accumulated SIO or NETIF packets.
	 * Reset the debounce flags.
	 */
	ul_pending |= ipc_imem_ul_write_td(ipc_imem);

	/* if UL data is pending restart TD update timer */
	if (ul_pending) {
		ipc_imem->hrtimer_period =
		ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		if (!hrtimer_active(&ipc_imem->tdupdate_timer))
			hrtimer_start(&ipc_imem->tdupdate_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}

	/* If CP has executed the transition
	 * from IPC_INIT to IPC_RUNNING in the PSI
	 * phase, wake up the flash app to open the pipes.
	 */
	if ((phase == IPC_P_PSI || phase == IPC_P_EBL) &&
	    ipc_imem->ipc_requested_state == IPC_MEM_DEVICE_IPC_RUNNING &&
	    ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
						IPC_MEM_DEVICE_IPC_RUNNING) {
		complete(&ipc_imem->ipc_devlink->devlink_sio.channel->ul_sem);
	}

	/* Reset the expected CP state. */
	ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_DONT_CARE;

	if (retry_allocation) {
		ipc_imem->hrtimer_period =
		ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
		if (!hrtimer_active(&ipc_imem->td_alloc_timer))
			hrtimer_start(&ipc_imem->td_alloc_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
}

/* Callback by tasklet for handling interrupt events. */
static int ipc_imem_tq_irq_cb(struct iosm_imem *ipc_imem, int arg, void *msg,
			      size_t size)
{
	ipc_imem_handle_irq(ipc_imem, arg);

	return 0;
}

void ipc_imem_ul_send(struct iosm_imem *ipc_imem)
{
	/* start doorbell irq delay timer if UL is pending */
	if (ipc_imem_ul_write_td(ipc_imem))
		ipc_imem_td_update_timer_start(ipc_imem);
}

/* Check the execution stage and update the AP phase */
static enum ipc_phase ipc_imem_phase_update_check(struct iosm_imem *ipc_imem,
						  enum ipc_mem_exec_stage stage)
{
	switch (stage) {
	case IPC_MEM_EXEC_STAGE_BOOT:
		if (ipc_imem->phase != IPC_P_ROM) {
			/* Send this event only once */
			ipc_uevent_send(ipc_imem->dev, UEVENT_ROM_READY);
		}

		ipc_imem->phase = IPC_P_ROM;
		break;

	case IPC_MEM_EXEC_STAGE_PSI:
		ipc_imem->phase = IPC_P_PSI;
		break;

	case IPC_MEM_EXEC_STAGE_EBL:
		ipc_imem->phase = IPC_P_EBL;
		break;

	case IPC_MEM_EXEC_STAGE_RUN:
		if (ipc_imem->phase != IPC_P_RUN &&
		    ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) {
			ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
		}
		ipc_imem->phase = IPC_P_RUN;
		break;

	case IPC_MEM_EXEC_STAGE_CRASH:
		if (ipc_imem->phase != IPC_P_CRASH)
			ipc_uevent_send(ipc_imem->dev, UEVENT_CRASH);

		ipc_imem->phase = IPC_P_CRASH;
		break;

	case IPC_MEM_EXEC_STAGE_CD_READY:
		if (ipc_imem->phase != IPC_P_CD_READY)
			ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY);
		ipc_imem->phase = IPC_P_CD_READY;
		break;

	default:
		/* unknown exec stage:
		 * assume that link is down and send info to listeners
		 */
		ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
		break;
	}

	return ipc_imem->phase;
}

/* Send msg to device to open pipe */
static bool ipc_imem_pipe_open(struct iosm_imem *ipc_imem,
			       struct ipc_pipe *pipe)
{
	union ipc_msg_prep_args prep_args = {
		.pipe_open.pipe = pipe,
	};

	if (ipc_protocol_msg_send(ipc_imem->ipc_protocol,
				  IPC_MSG_PREP_PIPE_OPEN, &prep_args) == 0)
		pipe->is_open = true;

	return pipe->is_open;
}

/* Allocate TDs for the given DL pipe and fire the HP update doorbell. */
static int ipc_imem_tq_pipe_td_alloc(struct iosm_imem *ipc_imem, int arg,
				     void *msg, size_t size)
{
	struct ipc_pipe *dl_pipe = msg;
	bool processed = false;
	int i;

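	/* Queue at most nr_of_entries - 1 TDs; one ring entry is left
	 * unused, presumably so that a completely full ring can be told
	 * apart from an empty one (head == tail).
	 */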
	for (i = 0; i < dl_pipe->nr_of_entries - 1; i++)
		processed |= ipc_imem_dl_skb_alloc(ipc_imem, dl_pipe);

	/* Trigger the doorbell irq to inform CP that new downlink buffers are
	 * available.
	 */
	if (processed)
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol, arg);

	return 0;
}

static enum hrtimer_restart
ipc_imem_td_update_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, tdupdate_timer);

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_update_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

/* Get the CP execution state and map it to the AP phase. */
enum ipc_phase ipc_imem_phase_update(struct iosm_imem *ipc_imem)
{
	enum ipc_mem_exec_stage exec_stage =
				ipc_imem_get_exec_stage_buffered(ipc_imem);
	/* If the deinit sequence is running, return the cached phase;
	 * otherwise derive the phase from the CP execution stage.
	 */
	return ipc_imem->phase == IPC_P_OFF_REQ ?
		       ipc_imem->phase :
		       ipc_imem_phase_update_check(ipc_imem, exec_stage);
}

const char *ipc_imem_phase_get_string(enum ipc_phase phase)
{
	switch (phase) {
	case IPC_P_RUN:
		return "A-RUN";

	case IPC_P_OFF:
		return "A-OFF";

	case IPC_P_ROM:
		return "A-ROM";

	case IPC_P_PSI:
		return "A-PSI";

	case IPC_P_EBL:
		return "A-EBL";

	case IPC_P_CRASH:
		return "A-CRASH";

	case IPC_P_CD_READY:
		return "A-CD_READY";

	case IPC_P_OFF_REQ:
		return "A-OFF_REQ";

	default:
		return "A-???";
	}
}

void ipc_imem_pipe_close(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
	union ipc_msg_prep_args prep_args = { .pipe_close.pipe = pipe };

	pipe->is_open = false;
	ipc_protocol_msg_send(ipc_imem->ipc_protocol, IPC_MSG_PREP_PIPE_CLOSE,
			      &prep_args);

	ipc_imem_pipe_cleanup(ipc_imem, pipe);
}

void ipc_imem_channel_close(struct iosm_imem *ipc_imem, int channel_id)
{
	struct ipc_mem_channel *channel;

	if (channel_id < 0 || channel_id >= ipc_imem->nr_of_channels) {
		dev_err(ipc_imem->dev, "invalid channel id %d", channel_id);
		return;
	}

	channel = &ipc_imem->channels[channel_id];

	if (channel->state == IMEM_CHANNEL_FREE) {
		dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
			channel_id, channel->state);
		return;
	}

	/* Free only the channel id in the CP power off mode. */
	if (channel->state == IMEM_CHANNEL_RESERVED)
		/* Release only the channel id. */
		goto channel_free;

	if (ipc_imem->phase == IPC_P_RUN) {
		ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
		ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
	}

	ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);

channel_free:
	ipc_imem_channel_free(channel);
}

struct ipc_mem_channel *ipc_imem_channel_open(struct iosm_imem *ipc_imem,
					      int channel_id, u32 db_id)
{
	struct ipc_mem_channel *channel;

	if (channel_id < 0 || channel_id >= IPC_MEM_MAX_CHANNELS) {
		dev_err(ipc_imem->dev, "invalid channel ID: %d", channel_id);
		return NULL;
	}

	channel = &ipc_imem->channels[channel_id];

	channel->state = IMEM_CHANNEL_ACTIVE;

	if (!ipc_imem_pipe_open(ipc_imem, &channel->ul_pipe))
		goto ul_pipe_err;

	if (!ipc_imem_pipe_open(ipc_imem, &channel->dl_pipe))
		goto dl_pipe_err;

	/* Allocate the downlink buffers in tasklet context. */
	if (ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_pipe_td_alloc, db_id,
				     &channel->dl_pipe, 0, false)) {
		dev_err(ipc_imem->dev, "td allocation failed : %d", channel_id);
		goto task_failed;
	}

	/* Active channel. */
	return channel;
task_failed:
	ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
dl_pipe_err:
	ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
ul_pipe_err:
	ipc_imem_channel_free(channel);
	return NULL;
}

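/* Typical usage (a sketch; the real call sites live in the port/mux
 * layers, and the db_id value shown here is illustrative):
 *
 *	id = ipc_imem_channel_alloc(ipc_imem, if_id, IPC_CTYPE_WWAN);
 *	channel = ipc_imem_channel_open(ipc_imem, id, IPC_HP_NET_CHANNEL_INIT);
 *
 * The db_id selects the doorbell identifier used when the freshly
 * allocated DL TDs are announced to CP.
 */
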
void ipc_imem_pm_suspend(struct iosm_imem *ipc_imem)
{
	ipc_protocol_suspend(ipc_imem->ipc_protocol);
}

void ipc_imem_pm_s2idle_sleep(struct iosm_imem *ipc_imem, bool sleep)
{
	ipc_protocol_s2idle_sleep(ipc_imem->ipc_protocol, sleep);
}

void ipc_imem_pm_resume(struct iosm_imem *ipc_imem)
{
	enum ipc_mem_exec_stage stage;

	if (ipc_protocol_resume(ipc_imem->ipc_protocol)) {
		stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
		ipc_imem_phase_update_check(ipc_imem, stage);
	}
}

void ipc_imem_channel_free(struct ipc_mem_channel *channel)
{
	/* Reset dynamic channel elements. */
	channel->state = IMEM_CHANNEL_FREE;
}

int ipc_imem_channel_alloc(struct iosm_imem *ipc_imem, int index,
			   enum ipc_ctype ctype)
{
	struct ipc_mem_channel *channel;
	int i;

	/* Find channel of given type/index */
	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		channel = &ipc_imem->channels[i];
		if (channel->ctype == ctype && channel->index == index)
			break;
	}

	if (i >= ipc_imem->nr_of_channels) {
		dev_dbg(ipc_imem->dev,
			"no channel definition for index=%d ctype=%d", index,
			ctype);
		return -ECHRNG;
	}

	if (ipc_imem->channels[i].state != IMEM_CHANNEL_FREE) {
		dev_dbg(ipc_imem->dev, "channel is in use");
		return -EBUSY;
	}

	if (channel->ctype == IPC_CTYPE_WWAN &&
	    index == IPC_MEM_MUX_IP_CH_IF_ID)
		channel->if_id = index;

	channel->channel_id = index;
	channel->state = IMEM_CHANNEL_RESERVED;

	return i;
}

void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
			   struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
{
	struct ipc_mem_channel *channel;

	if (chnl_cfg.ul_pipe >= IPC_MEM_MAX_PIPES ||
	    chnl_cfg.dl_pipe >= IPC_MEM_MAX_PIPES) {
		dev_err(ipc_imem->dev, "invalid pipe: ul_pipe=%d, dl_pipe=%d",
			chnl_cfg.ul_pipe, chnl_cfg.dl_pipe);
		return;
	}

	if (ipc_imem->nr_of_channels >= IPC_MEM_MAX_CHANNELS) {
		dev_err(ipc_imem->dev, "too many channels");
		return;
	}

	channel = &ipc_imem->channels[ipc_imem->nr_of_channels];
	channel->channel_id = ipc_imem->nr_of_channels;
	channel->ctype = ctype;
	channel->index = chnl_cfg.id;
	channel->net_err_count = 0;
	channel->state = IMEM_CHANNEL_FREE;
	ipc_imem->nr_of_channels++;

	ipc_imem_channel_update(ipc_imem, channel->channel_id, chnl_cfg,
				IRQ_MOD_OFF);

	skb_queue_head_init(&channel->ul_list);

	init_completion(&channel->ul_sem);
}

void ipc_imem_channel_update(struct iosm_imem *ipc_imem, int id,
			     struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
{
	struct ipc_mem_channel *channel;

	if (id < 0 || id >= ipc_imem->nr_of_channels) {
		dev_err(ipc_imem->dev, "invalid channel id %d", id);
		return;
	}

	channel = &ipc_imem->channels[id];

	if (channel->state != IMEM_CHANNEL_FREE &&
	    channel->state != IMEM_CHANNEL_RESERVED) {
		dev_err(ipc_imem->dev, "invalid channel state %d",
			channel->state);
		return;
	}

	channel->ul_pipe.nr_of_entries = chnl_cfg.ul_nr_of_entries;
	channel->ul_pipe.pipe_nr = chnl_cfg.ul_pipe;
	channel->ul_pipe.is_open = false;
	channel->ul_pipe.irq = IPC_UL_PIPE_IRQ_VECTOR;
	channel->ul_pipe.channel = channel;
	channel->ul_pipe.dir = IPC_MEM_DIR_UL;
	channel->ul_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
	channel->ul_pipe.irq_moderation = irq_moderation;
	channel->ul_pipe.buf_size = 0;

	channel->dl_pipe.nr_of_entries = chnl_cfg.dl_nr_of_entries;
	channel->dl_pipe.pipe_nr = chnl_cfg.dl_pipe;
	channel->dl_pipe.is_open = false;
	channel->dl_pipe.irq = IPC_DL_PIPE_IRQ_VECTOR;
	channel->dl_pipe.channel = channel;
	channel->dl_pipe.dir = IPC_MEM_DIR_DL;
	channel->dl_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
	channel->dl_pipe.irq_moderation = irq_moderation;
	channel->dl_pipe.buf_size = chnl_cfg.dl_buf_size;
}

static void ipc_imem_channel_reset(struct iosm_imem *ipc_imem)
{
	int i;

	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		struct ipc_mem_channel *channel;

		channel = &ipc_imem->channels[i];

		ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
		ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);

		ipc_imem_channel_free(channel);
	}
}

void ipc_imem_pipe_cleanup(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
	struct sk_buff *skb;

	/* Force pipe to closed state also when not explicitly closed through
	 * ipc_imem_pipe_close()
	 */
	pipe->is_open = false;

	/* Empty the uplink skb accumulator. */
	while ((skb = skb_dequeue(&pipe->channel->ul_list)))
		ipc_pcie_kfree_skb(ipc_imem->pcie, skb);

	ipc_protocol_pipe_cleanup(ipc_imem->ipc_protocol, pipe);
}

/* Send IPC protocol uninit to the modem when Link is active. */
static void ipc_imem_device_ipc_uninit(struct iosm_imem *ipc_imem)
{
	int timeout = IPC_MODEM_UNINIT_TIMEOUT_MS;
	enum ipc_mem_device_ipc_state ipc_state;

	/* If the PCIe link is up, set the modem to IPC_UNINIT; otherwise
	 * skip this step because the PCIe link is down.
	 */
	if (ipc_pcie_check_data_link_active(ipc_imem->pcie)) {
		/* set modem to UNINIT
		 * (in case we want to reload the AP driver without resetting
		 * the modem)
		 */
		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_UNINIT);
		ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);

		/* Wait for a maximum of 30ms to allow the modem to
		 * uninitialize the protocol.
		 */
		while ((ipc_state <= IPC_MEM_DEVICE_IPC_DONT_CARE) &&
		       (ipc_state != IPC_MEM_DEVICE_IPC_UNINIT) &&
		       (timeout > 0)) {
			usleep_range(1000, 1250);
			timeout--;
			ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
		}
	}
}

void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
{
	ipc_imem->phase = IPC_P_OFF_REQ;

	/* forward MDM_NOT_READY to listeners */
	ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_NOT_READY);

	hrtimer_cancel(&ipc_imem->td_alloc_timer);
	hrtimer_cancel(&ipc_imem->tdupdate_timer);
	hrtimer_cancel(&ipc_imem->fast_update_timer);
	hrtimer_cancel(&ipc_imem->startup_timer);

	/* cancel the workqueue */
	cancel_work_sync(&ipc_imem->run_state_worker);

	if (test_and_clear_bit(FULLY_FUNCTIONAL, &ipc_imem->flag)) {
		ipc_mux_deinit(ipc_imem->mux);
		ipc_debugfs_deinit(ipc_imem);
		ipc_wwan_deinit(ipc_imem->wwan);
		ipc_port_deinit(ipc_imem->ipc_port);
	}

	if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
		ipc_devlink_deinit(ipc_imem->ipc_devlink);

	ipc_imem_device_ipc_uninit(ipc_imem);
	ipc_imem_channel_reset(ipc_imem);

	ipc_protocol_deinit(ipc_imem->ipc_protocol);
	ipc_task_deinit(ipc_imem->ipc_task);

	kfree(ipc_imem->ipc_task);
	kfree(ipc_imem->mmio);

	ipc_imem->phase = IPC_P_OFF;
}

/* After CP has unblocked the PCIe link, save the start address of the doorbell
 * scratchpad and prepare the shared memory region. If the flashing to RAM
 * procedure shall be executed, copy the chip information from the doorbell
 * scratchpad to the application buffer and wake up the flash app.
 */
static int ipc_imem_config(struct iosm_imem *ipc_imem)
{
	enum ipc_phase phase;

	/* Initialize the semaphore for the blocking read UL/DL transfer. */
	init_completion(&ipc_imem->ul_pend_sem);

	init_completion(&ipc_imem->dl_pend_sem);

	/* clear internal flags */
	ipc_imem->ipc_status = IPC_MEM_DEVICE_IPC_UNINIT;
	ipc_imem->enter_runtime = 0;

	phase = ipc_imem_phase_update(ipc_imem);

	/* Either CP shall be in the power off or power on phase. */
	switch (phase) {
	case IPC_P_ROM:
		ipc_imem->hrtimer_period = ktime_set(0, 1000 * 1000 * 1000ULL);
		/* poll execution stage (for delayed start, e.g. NAND) */
		if (!hrtimer_active(&ipc_imem->startup_timer))
			hrtimer_start(&ipc_imem->startup_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
		return 0;

	case IPC_P_PSI:
	case IPC_P_EBL:
	case IPC_P_RUN:
		/* The initial IPC state is IPC_MEM_DEVICE_IPC_UNINIT. */
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_UNINIT;

		/* Verify the expected initial state. */
		if (ipc_imem->ipc_requested_state ==
		    ipc_mmio_get_ipc_state(ipc_imem->mmio)) {
			ipc_imem_ipc_init_check(ipc_imem);

			return 0;
		}
		dev_err(ipc_imem->dev,
			"ipc_status(%d) != IPC_MEM_DEVICE_IPC_UNINIT",
			ipc_mmio_get_ipc_state(ipc_imem->mmio));
		break;
	case IPC_P_CRASH:
	case IPC_P_CD_READY:
		dev_dbg(ipc_imem->dev,
			"Modem is in phase %d, reset Modem to collect CD",
			phase);
		return 0;
	default:
		dev_err(ipc_imem->dev, "unexpected operation phase %d", phase);
		break;
	}

	complete(&ipc_imem->dl_pend_sem);
	complete(&ipc_imem->ul_pend_sem);
	ipc_imem->phase = IPC_P_OFF;
	return -EIO;
}

/* Pass the dev ptr to the shared memory driver and request the entry points */
struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
				void __iomem *mmio, struct device *dev)
{
	struct iosm_imem *ipc_imem = kzalloc(sizeof(*pcie->imem), GFP_KERNEL);
	enum ipc_mem_exec_stage stage;

	if (!ipc_imem)
		return NULL;

	/* Save the device address. */
	ipc_imem->pcie = pcie;
	ipc_imem->dev = dev;

	ipc_imem->pci_device_id = device_id;

	ipc_imem->cp_version = 0;
	ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;

	/* Reset the max number of configured channels */
	ipc_imem->nr_of_channels = 0;

	/* allocate IPC MMIO */
	ipc_imem->mmio = ipc_mmio_init(mmio, ipc_imem->dev);
	if (!ipc_imem->mmio) {
		dev_err(ipc_imem->dev, "failed to initialize mmio region");
		goto mmio_init_fail;
	}

	ipc_imem->ipc_task = kzalloc(sizeof(*ipc_imem->ipc_task),
				     GFP_KERNEL);

	/* Create tasklet for event handling. */
	if (!ipc_imem->ipc_task)
		goto ipc_task_fail;

	if (ipc_task_init(ipc_imem->ipc_task))
		goto ipc_task_init_fail;

	ipc_imem->ipc_task->dev = ipc_imem->dev;

	INIT_WORK(&ipc_imem->run_state_worker, ipc_imem_run_state_worker);

	ipc_imem->ipc_protocol = ipc_protocol_init(ipc_imem);

	if (!ipc_imem->ipc_protocol)
		goto protocol_init_fail;

	/* The phase is set to power off. */
	ipc_imem->phase = IPC_P_OFF;

	hrtimer_init(&ipc_imem->startup_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->startup_timer.function = ipc_imem_startup_timer_cb;

	hrtimer_init(&ipc_imem->tdupdate_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->tdupdate_timer.function = ipc_imem_td_update_timer_cb;

	hrtimer_init(&ipc_imem->fast_update_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->fast_update_timer.function = ipc_imem_fast_update_timer_cb;

	hrtimer_init(&ipc_imem->td_alloc_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;

	hrtimer_init(&ipc_imem->adb_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	ipc_imem->adb_timer.function = ipc_imem_adb_timer_cb;

	if (ipc_imem_config(ipc_imem)) {
		dev_err(ipc_imem->dev, "failed to initialize the imem");
		goto imem_config_fail;
	}

	stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
	if (stage == IPC_MEM_EXEC_STAGE_BOOT) {
		/* Alloc and Register devlink */
		ipc_imem->ipc_devlink = ipc_devlink_init(ipc_imem);
		if (!ipc_imem->ipc_devlink) {
			dev_err(ipc_imem->dev, "Devlink register failed");
			goto imem_config_fail;
		}

		if (ipc_flash_link_establish(ipc_imem))
			goto devlink_channel_fail;

		set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag);
	}
	return ipc_imem;
devlink_channel_fail:
	ipc_devlink_deinit(ipc_imem->ipc_devlink);
imem_config_fail:
	hrtimer_cancel(&ipc_imem->td_alloc_timer);
	hrtimer_cancel(&ipc_imem->fast_update_timer);
	hrtimer_cancel(&ipc_imem->tdupdate_timer);
	hrtimer_cancel(&ipc_imem->startup_timer);
protocol_init_fail:
	cancel_work_sync(&ipc_imem->run_state_worker);
	ipc_task_deinit(ipc_imem->ipc_task);
ipc_task_init_fail:
	kfree(ipc_imem->ipc_task);
ipc_task_fail:
	kfree(ipc_imem->mmio);
mmio_init_fail:
	kfree(ipc_imem);
	return NULL;
}

void ipc_imem_irq_process(struct iosm_imem *ipc_imem, int irq)
{
	/* Debounce IPC_EV_IRQ. */
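	/* ev_irq_pending[irq] stays set until ipc_imem_handle_irq() clears
	 * it, so back-to-back MSIs on the same vector collapse into a single
	 * tasklet pass.
	 */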
	if (ipc_imem && !ipc_imem->ev_irq_pending[irq]) {
		ipc_imem->ev_irq_pending[irq] = true;
		ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_irq_cb, irq,
					 NULL, 0, false);
	}
}

void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend)
{
	ipc_imem->td_update_timer_suspended = suspend;
}

/* Verify the CP execution state, copy the chip info,
 * change the execution phase to ROM
 */
static int ipc_imem_devlink_trigger_chip_info_cb(struct iosm_imem *ipc_imem,
						 int arg, void *msg,
						 size_t msgsize)
{
	enum ipc_mem_exec_stage stage;
	struct sk_buff *skb;
	int rc = -EINVAL;
	size_t size;

	/* Test the CP execution state. */
	stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
	if (stage != IPC_MEM_EXEC_STAGE_BOOT) {
		dev_err(ipc_imem->dev,
			"Execution_stage: expected BOOT, received = %X", stage);
		goto trigger_chip_info_fail;
	}
	/* Allocate a new sk_buff for the chip info. */
	size = ipc_imem->mmio->chip_info_size;
	if (size > IOSM_CHIP_INFO_SIZE_MAX)
		goto trigger_chip_info_fail;

	skb = ipc_pcie_alloc_local_skb(ipc_imem->pcie, GFP_ATOMIC, size);
	if (!skb) {
		dev_err(ipc_imem->dev, "exhausted skbuf kernel DL memory");
		rc = -ENOMEM;
		goto trigger_chip_info_fail;
	}
	/* Copy the chip info characters into the ipc_skb. */
	ipc_mmio_copy_chip_info(ipc_imem->mmio, skb_put(skb, size), size);
	/* First change to the ROM boot phase. */
	dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. BOOT", stage);
	ipc_imem->phase = ipc_imem_phase_update(ipc_imem);
	ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink, skb);
	rc = 0;
trigger_chip_info_fail:
	return rc;
}

int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem)
{
	return ipc_task_queue_send_task(ipc_imem,
					ipc_imem_devlink_trigger_chip_info_cb,
					0, NULL, 0, true);
}