// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/acpi.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/kthread.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "t7xx_cldma.h"
#include "t7xx_hif_cldma.h"
#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_netdev.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_port.h"
#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"

#define RT_ID_MD_PORT_ENUM	0
#define RT_ID_AP_PORT_ENUM	1
/* Modem feature query identification code - "ICCC" */
#define MD_FEATURE_QUERY_ID	0x49434343

#define FEATURE_VER		GENMASK(7, 4)
#define FEATURE_MSK		GENMASK(3, 0)

#define RGU_RESET_DELAY_MS	10
#define PORT_RESET_DELAY_MS	2000
#define EX_HS_TIMEOUT_MS	5000
#define EX_HS_POLL_DELAY_MS	10

enum mtk_feature_support_type {
	MTK_FEATURE_DOES_NOT_EXIST,
	MTK_FEATURE_NOT_SUPPORTED,
	MTK_FEATURE_MUST_BE_SUPPORTED,
};
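
/*
 * Each byte in a feature_set[] array packs a feature version in bits 7-4
 * (FEATURE_VER) and a support type in bits 3-0 (FEATURE_MSK). For example,
 * a hypothetical feature byte of 0x12 would decode as version 1 with
 * support type MTK_FEATURE_MUST_BE_SUPPORTED.
 */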

static unsigned int t7xx_get_interrupt_status(struct t7xx_pci_dev *t7xx_dev)
{
	return t7xx_mhccif_read_sw_int_sts(t7xx_dev) & D2H_SW_INT_MASK;
}

/**
 * t7xx_pci_mhccif_isr() - Process MHCCIF interrupts.
 * @t7xx_dev: MTK device.
 *
 * Check the interrupt status and queue commands accordingly.
 *
 * Returns:
 ** 0		- Success.
 ** -EINVAL	- Failure to get FSM control.
 */
int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_modem *md = t7xx_dev->md;
	struct t7xx_fsm_ctl *ctl;
	unsigned int int_sta;
	int ret = 0;
	u32 mask;

	ctl = md->fsm_ctl;
	if (!ctl) {
		dev_err_ratelimited(&t7xx_dev->pdev->dev,
				    "MHCCIF interrupt received before initializing MD monitor\n");
		return -EINVAL;
	}

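	/*
	 * Latch the status bits under exp_lock and dispatch in priority order:
	 * exception init first, then port enumeration, and finally the async
	 * handshake doorbell (only while waiting for HS1).
	 */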
	spin_lock_bh(&md->exp_lock);
	int_sta = t7xx_get_interrupt_status(t7xx_dev);
	md->exp_id |= int_sta;
	if (md->exp_id & D2H_INT_EXCEPTION_INIT) {
		if (ctl->md_state == MD_STATE_INVALID ||
		    ctl->md_state == MD_STATE_WAITING_FOR_HS1 ||
		    ctl->md_state == MD_STATE_WAITING_FOR_HS2 ||
		    ctl->md_state == MD_STATE_READY) {
			md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
			ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_CCIF_EX);
		}
	} else if (md->exp_id & D2H_INT_PORT_ENUM) {
		md->exp_id &= ~D2H_INT_PORT_ENUM;

		if (ctl->curr_state == FSM_STATE_INIT || ctl->curr_state == FSM_STATE_PRE_START ||
		    ctl->curr_state == FSM_STATE_STOPPED)
			ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_PORT_ENUM);
	} else if (ctl->md_state == MD_STATE_WAITING_FOR_HS1) {
		mask = t7xx_mhccif_mask_get(t7xx_dev);
		if ((md->exp_id & D2H_INT_ASYNC_MD_HK) && !(mask & D2H_INT_ASYNC_MD_HK)) {
			md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
			queue_work(md->handshake_wq, &md->handshake_work);
		}
	}
	spin_unlock_bh(&md->exp_lock);

	return ret;
}

static void t7xx_clr_device_irq_via_pcie(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_addr_base *pbase_addr = &t7xx_dev->base_addr;
	void __iomem *reset_pcie_reg;
	u32 val;

	reset_pcie_reg = pbase_addr->pcie_ext_reg_base + TOPRGU_CH_PCIE_IRQ_STA -
			  pbase_addr->pcie_dev_reg_trsl_addr;
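	/*
	 * Read the pending RGU status and write the same value back to clear
	 * it; this assumes write-1-to-clear semantics for the status register.
	 */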
	val = ioread32(reset_pcie_reg);
	iowrite32(val, reset_pcie_reg);
}

void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev)
{
	/* Clear L2 */
	t7xx_clr_device_irq_via_pcie(t7xx_dev);
	/* Clear L1 */
	t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
}

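/*
 * Invoke a named ACPI method (e.g. "_RST") on the device's ACPI handle.
 * When CONFIG_ACPI is disabled this is a no-op that reports success.
 */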
static int t7xx_acpi_reset(struct t7xx_pci_dev *t7xx_dev, char *fn_name)
{
#ifdef CONFIG_ACPI
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct device *dev = &t7xx_dev->pdev->dev;
	acpi_status acpi_ret;
	acpi_handle handle;

	handle = ACPI_HANDLE(dev);
	if (!handle) {
		dev_err(dev, "ACPI handle not found\n");
		return -EFAULT;
	}

	if (!acpi_has_method(handle, fn_name)) {
		dev_err(dev, "%s method not found\n", fn_name);
		return -EFAULT;
	}

	acpi_ret = acpi_evaluate_object(handle, fn_name, NULL, &buffer);
	if (ACPI_FAILURE(acpi_ret)) {
		dev_err(dev, "%s method fail: %s\n", fn_name, acpi_format_exception(acpi_ret));
		return -EFAULT;
	}

	kfree(buffer.pointer);

#endif
	return 0;
}

int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev)
{
	return t7xx_acpi_reset(t7xx_dev, "_RST");
}

int t7xx_acpi_pldr_func(struct t7xx_pci_dev *t7xx_dev)
{
	return t7xx_acpi_reset(t7xx_dev, "MRST._RST");
}

static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev)
{
	u32 val;

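	/*
	 * The device status register indicates which reset flow it expects:
	 * the MRST._RST method for the PLDR type, or the _RST (FLDR) path.
	 */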
	val = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
	if (val & MISC_RESET_TYPE_PLDR)
		t7xx_acpi_reset(t7xx_dev, "MRST._RST");
	else if (val & MISC_RESET_TYPE_FLDR)
		t7xx_acpi_fldr_func(t7xx_dev);
}

static irqreturn_t t7xx_rgu_isr_thread(int irq, void *data)
{
	struct t7xx_pci_dev *t7xx_dev = data;

	t7xx_mode_update(t7xx_dev, T7XX_RESET);
	msleep(RGU_RESET_DELAY_MS);
	t7xx_reset_device_via_pmic(t7xx_dev);
	return IRQ_HANDLED;
}

static irqreturn_t t7xx_rgu_isr_handler(int irq, void *data)
{
	struct t7xx_pci_dev *t7xx_dev = data;
	struct t7xx_modem *modem;

	t7xx_clear_rgu_irq(t7xx_dev);
	if (!t7xx_dev->rgu_pci_irq_en)
		return IRQ_HANDLED;

	modem = t7xx_dev->md;
	modem->rgu_irq_asserted = true;
	t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
	return IRQ_WAKE_THREAD;
}

static void t7xx_pcie_register_rgu_isr(struct t7xx_pci_dev *t7xx_dev)
{
	/* Register the RGU callback ISR with the PCIe driver */
	t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
	t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);

	t7xx_dev->intr_handler[SAP_RGU_INT] = t7xx_rgu_isr_handler;
	t7xx_dev->intr_thread[SAP_RGU_INT] = t7xx_rgu_isr_thread;
	t7xx_dev->callback_param[SAP_RGU_INT] = t7xx_dev;
	t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
}

/**
 * t7xx_cldma_exception() - CLDMA exception handler.
 * @md_ctrl: modem control struct.
 * @stage: exception stage.
 *
 * Part of the modem exception recovery.
 * Stages are one after the other as described below:
 * HIF_EX_INIT:		Disable and clear TXQ.
 * HIF_EX_CLEARQ_DONE:	Disable RX, flush TX/RX workqueues and clear RX.
 * HIF_EX_ALLQ_RESET:	HW is back in safe mode for re-initialization and restart.
 */

/* Modem Exception Handshake Flow
 *
 * Modem HW Exception interrupt received
 *           (MD_IRQ_CCIF_EX)
 *                   |
 *         +---------v--------+
 *         |   HIF_EX_INIT    | : Disable and clear TXQ
 *         +------------------+
 *                   |
 *         +---------v--------+
 *         | HIF_EX_INIT_DONE | : Wait for the init to be done
 *         +------------------+
 *                   |
 *         +---------v--------+
 *         |HIF_EX_CLEARQ_DONE| : Disable and clear RXQ
 *         +------------------+ : Flush TX/RX workqueues
 *                   |
 *         +---------v--------+
 *         |HIF_EX_ALLQ_RESET | : Restart HW and CLDMA
 *         +------------------+
 */
static void t7xx_cldma_exception(struct cldma_ctrl *md_ctrl, enum hif_ex_stage stage)
{
	switch (stage) {
	case HIF_EX_INIT:
		t7xx_cldma_stop_all_qs(md_ctrl, MTK_TX);
		t7xx_cldma_clear_all_qs(md_ctrl, MTK_TX);
		break;

	case HIF_EX_CLEARQ_DONE:
		/* We do not want to get CLDMA IRQ when MD is
		 * resetting CLDMA after it got clearq_ack.
		 */
		t7xx_cldma_stop_all_qs(md_ctrl, MTK_RX);
		t7xx_cldma_stop(md_ctrl);

		if (md_ctrl->hif_id == CLDMA_ID_MD)
			t7xx_cldma_hw_reset(md_ctrl->t7xx_dev->base_addr.infracfg_ao_base);

		t7xx_cldma_clear_all_qs(md_ctrl, MTK_RX);
		break;

	case HIF_EX_ALLQ_RESET:
		t7xx_cldma_hw_init(&md_ctrl->hw_info);
		t7xx_cldma_start(md_ctrl);
		break;

	default:
		break;
	}
}

static void t7xx_md_exception(struct t7xx_modem *md, enum hif_ex_stage stage)
{
	struct t7xx_pci_dev *t7xx_dev = md->t7xx_dev;

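	/*
	 * Run the CLDMA exception step for both the MD and AP paths and ack
	 * the stage back to the device over MHCCIF where the flow requires it.
	 */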
	if (stage == HIF_EX_CLEARQ_DONE) {
		/* Give DHL time to flush data */
		msleep(PORT_RESET_DELAY_MS);
		t7xx_port_proxy_reset(md->port_prox);
	}

	t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage);
	t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_AP], stage);

	if (stage == HIF_EX_INIT)
		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_ACK);
	else if (stage == HIF_EX_CLEARQ_DONE)
		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_CLEARQ_ACK);
}

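/*
 * Poll for the given exception-handshake event bit in md->exp_id, sleeping
 * EX_HS_POLL_DELAY_MS between checks, for at most EX_HS_TIMEOUT_MS.
 */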
static int t7xx_wait_hif_ex_hk_event(struct t7xx_modem *md, int event_id)
{
	unsigned int waited_time_ms = 0;

	do {
		if (md->exp_id & event_id)
			return 0;

		waited_time_ms += EX_HS_POLL_DELAY_MS;
		msleep(EX_HS_POLL_DELAY_MS);
	} while (waited_time_ms < EX_HS_TIMEOUT_MS);

	return -EFAULT;
}

static void t7xx_md_sys_sw_init(struct t7xx_pci_dev *t7xx_dev)
{
	/* Register the MHCCIF ISR for MD exception, port enum and
	 * async handshake notifications.
	 */
	t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK);
	t7xx_mhccif_mask_clr(t7xx_dev, D2H_INT_PORT_ENUM);

	/* Register RGU IRQ handler for sAP exception notification */
	t7xx_dev->rgu_pci_irq_en = true;
	t7xx_pcie_register_rgu_isr(t7xx_dev);
}

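/*
 * Layout of the feature query exchanged during the handshake: the per-feature
 * bytes are framed by the "ICCC" pattern on both ends.
 */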
struct feature_query {
	__le32 head_pattern;
	u8 feature_set[FEATURE_COUNT];
	__le32 tail_pattern;
};

static void t7xx_prepare_host_rt_data_query(struct t7xx_sys_info *core)
{
	struct feature_query *ft_query;
	struct sk_buff *skb;

	skb = t7xx_ctrl_alloc_skb(sizeof(*ft_query));
	if (!skb)
		return;

	ft_query = skb_put(skb, sizeof(*ft_query));
	ft_query->head_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);
	memcpy(ft_query->feature_set, core->feature_set, FEATURE_COUNT);
	ft_query->tail_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);

	/* Send HS1 message to device */
	t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS1_MSG, 0);
}

static int t7xx_prepare_device_rt_data(struct t7xx_sys_info *core, struct device *dev,
				       void *data)
{
	struct feature_query *md_feature = data;
	struct mtk_runtime_feature *rt_feature;
	unsigned int i, rt_data_len = 0;
	struct sk_buff *skb;

	/* Parse MD runtime data query */
	if (le32_to_cpu(md_feature->head_pattern) != MD_FEATURE_QUERY_ID ||
	    le32_to_cpu(md_feature->tail_pattern) != MD_FEATURE_QUERY_ID) {
		dev_err(dev, "Invalid feature pattern: head 0x%x, tail 0x%x\n",
			le32_to_cpu(md_feature->head_pattern),
			le32_to_cpu(md_feature->tail_pattern));
		return -EINVAL;
	}

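	/*
	 * Only features the host does not mark MUST_BE_SUPPORTED get an entry
	 * in the HS3 runtime payload, so size the skb accordingly.
	 */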
	for (i = 0; i < FEATURE_COUNT; i++) {
		if (FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]) !=
		    MTK_FEATURE_MUST_BE_SUPPORTED)
			rt_data_len += sizeof(*rt_feature);
	}

	skb = t7xx_ctrl_alloc_skb(rt_data_len);
	if (!skb)
		return -ENOMEM;

	rt_feature = skb_put(skb, rt_data_len);
	memset(rt_feature, 0, rt_data_len);

	/* Fill runtime feature */
	for (i = 0; i < FEATURE_COUNT; i++) {
		u8 md_feature_mask = FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]);

		if (md_feature_mask == MTK_FEATURE_MUST_BE_SUPPORTED)
			continue;

		rt_feature->feature_id = i;
		if (md_feature_mask == MTK_FEATURE_DOES_NOT_EXIST)
			rt_feature->support_info = md_feature->feature_set[i];

		rt_feature++;
	}

	/* Send HS3 message to device */
	t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS3_MSG, 0);
	return 0;
}

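/*
 * Walk the variable-length runtime features reported by the device and check
 * that every feature the host requires is marked as supported; the port
 * enumeration entries carry a payload handed to the port enum handler.
 */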
static int t7xx_parse_host_rt_data(struct t7xx_fsm_ctl *ctl, struct t7xx_sys_info *core,
				   struct device *dev, void *data, int data_length)
{
	enum mtk_feature_support_type ft_spt_st, ft_spt_cfg;
	struct mtk_runtime_feature *rt_feature;
	int i, offset;

	offset = sizeof(struct feature_query);
	for (i = 0; i < FEATURE_COUNT && offset < data_length; i++) {
		rt_feature = data + offset;
		offset += sizeof(*rt_feature) + le32_to_cpu(rt_feature->data_len);

		ft_spt_cfg = FIELD_GET(FEATURE_MSK, core->feature_set[i]);
		if (ft_spt_cfg != MTK_FEATURE_MUST_BE_SUPPORTED)
			continue;

		ft_spt_st = FIELD_GET(FEATURE_MSK, rt_feature->support_info);
		if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED)
			return -EINVAL;

		if (i == RT_ID_MD_PORT_ENUM || i == RT_ID_AP_PORT_ENUM)
			t7xx_port_enum_msg_handler(ctl->md, rt_feature->data);
	}

	return 0;
}

static int t7xx_core_reset(struct t7xx_modem *md)
{
	struct device *dev = &md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	md->core_md.ready = false;

	if (!ctl) {
		dev_err(dev, "FSM is not initialized\n");
		return -EINVAL;
	}

	if (md->core_md.handshake_ongoing) {
		int ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);

		if (ret)
			return ret;
	}

	md->core_md.handshake_ongoing = false;
	return 0;
}

static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_sys_info *core_info,
				 struct t7xx_fsm_ctl *ctl,
				 enum t7xx_fsm_event_state event_id,
				 enum t7xx_fsm_event_state err_detect)
{
	struct t7xx_fsm_event *event = NULL, *event_next;
	struct device *dev = &md->t7xx_dev->pdev->dev;
	unsigned long flags;
	int ret;

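	/*
	 * Send the HS1 query, then wait for either the expected HS2 response
	 * event or the error/exit event; bail out if an exception is flagged
	 * or the thread is asked to stop.
	 */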
	t7xx_prepare_host_rt_data_query(core_info);

	while (!kthread_should_stop()) {
		bool event_received = false;

		spin_lock_irqsave(&ctl->event_lock, flags);
		list_for_each_entry_safe(event, event_next, &ctl->event_queue, entry) {
			if (event->event_id == err_detect) {
				list_del(&event->entry);
				spin_unlock_irqrestore(&ctl->event_lock, flags);
				dev_err(dev, "Core handshake error event received\n");
				goto err_free_event;
			} else if (event->event_id == event_id) {
				list_del(&event->entry);
				event_received = true;
				break;
			}
		}
		spin_unlock_irqrestore(&ctl->event_lock, flags);

		if (event_received)
			break;

		wait_event_interruptible(ctl->event_wq, !list_empty(&ctl->event_queue) ||
					 kthread_should_stop());
		if (kthread_should_stop())
			goto err_free_event;
	}

	if (!event || ctl->exp_flg)
		goto err_free_event;

	ret = t7xx_parse_host_rt_data(ctl, core_info, dev, event->data, event->length);
	if (ret) {
		dev_err(dev, "Host failure parsing runtime data: %d\n", ret);
		goto err_free_event;
	}

	if (ctl->exp_flg)
		goto err_free_event;

	ret = t7xx_prepare_device_rt_data(core_info, dev, event->data);
	if (ret) {
		dev_err(dev, "Device failure parsing runtime data: %d\n", ret);
		goto err_free_event;
	}

	core_info->ready = true;
	core_info->handshake_ongoing = false;
	wake_up(&ctl->async_hk_wq);
err_free_event:
	kfree(event);
}

static void t7xx_md_hk_wq(struct work_struct *work)
{
	struct t7xx_modem *md = container_of(work, struct t7xx_modem, handshake_work);
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	/* Clear the HS2 EXIT event appended in t7xx_core_reset() */
	t7xx_fsm_clr_event(ctl, FSM_EVENT_MD_HS2_EXIT);
	t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD], CLDMA_SHARED_Q_CFG);
	t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]);
	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2);
	md->core_md.handshake_ongoing = true;
	t7xx_core_hk_handler(md, &md->core_md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT);
}

static void t7xx_ap_hk_wq(struct work_struct *work)
{
	struct t7xx_modem *md = container_of(work, struct t7xx_modem, ap_handshake_work);
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	/* Clear the HS2 EXIT event appended in t7xx_core_reset() */
	t7xx_fsm_clr_event(ctl, FSM_EVENT_AP_HS2_EXIT);
	t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]);
	t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP], CLDMA_SHARED_Q_CFG);
	t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]);
	md->core_ap.handshake_ongoing = true;
	t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT);
}

void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	unsigned int int_sta;
	unsigned long flags;

	switch (evt_id) {
	case FSM_PRE_START:
		t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM | D2H_INT_ASYNC_MD_HK |
						   D2H_INT_ASYNC_AP_HK);
		break;

	case FSM_START:
		t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_PORT_ENUM);

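		/*
		 * If an exception is already flagged, drop any pending async
		 * handshake requests; otherwise ack and schedule whichever
		 * MD/AP handshake doorbells are pending and mask them until
		 * FSM_READY.
		 */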
		spin_lock_irqsave(&md->exp_lock, flags);
		int_sta = t7xx_get_interrupt_status(md->t7xx_dev);
		md->exp_id |= int_sta;
		if (md->exp_id & D2H_INT_EXCEPTION_INIT) {
			ctl->exp_flg = true;
			md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
			md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
			md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
		} else if (ctl->exp_flg) {
			md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
			md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
		} else {
			void __iomem *mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base;

			if (md->exp_id & D2H_INT_ASYNC_MD_HK) {
				queue_work(md->handshake_wq, &md->handshake_work);
				md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
				iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
				t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
			}

			if (md->exp_id & D2H_INT_ASYNC_AP_HK) {
				queue_work(md->handshake_wq, &md->ap_handshake_work);
				md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
				iowrite32(D2H_INT_ASYNC_AP_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
				t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
			}
		}
		spin_unlock_irqrestore(&md->exp_lock, flags);

		t7xx_mhccif_mask_clr(md->t7xx_dev,
				     D2H_INT_EXCEPTION_INIT |
				     D2H_INT_EXCEPTION_INIT_DONE |
				     D2H_INT_EXCEPTION_CLEARQ_DONE |
				     D2H_INT_EXCEPTION_ALLQ_RESET);
		break;

	case FSM_READY:
		t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
		t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
		break;

	default:
		break;
	}
}

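/*
 * Drive the four-stage exception handshake shown in the flow diagram above,
 * waiting (with a timeout) for the device to confirm each stage over MHCCIF
 * before moving on.
 */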
void t7xx_md_exception_handshake(struct t7xx_modem *md)
{
	struct device *dev = &md->t7xx_dev->pdev->dev;
	int ret;

	t7xx_md_exception(md, HIF_EX_INIT);
	ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_INIT_DONE);
	if (ret)
		dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_INIT_DONE);

	t7xx_md_exception(md, HIF_EX_INIT_DONE);
	ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_CLEARQ_DONE);
	if (ret)
		dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_CLEARQ_DONE);

	t7xx_md_exception(md, HIF_EX_CLEARQ_DONE);
	ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_ALLQ_RESET);
	if (ret)
		dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_ALLQ_RESET);

	t7xx_md_exception(md, HIF_EX_ALLQ_RESET);
}

static struct t7xx_modem *t7xx_md_alloc(struct t7xx_pci_dev *t7xx_dev)
{
	struct device *dev = &t7xx_dev->pdev->dev;
	struct t7xx_modem *md;

	md = devm_kzalloc(dev, sizeof(*md), GFP_KERNEL);
	if (!md)
		return NULL;

	md->t7xx_dev = t7xx_dev;
	t7xx_dev->md = md;
	spin_lock_init(&md->exp_lock);
	md->handshake_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI,
					   0, "md_hk_wq");
	if (!md->handshake_wq)
		return NULL;

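	/*
	 * Advertise the port-enumeration feature as mandatory in the host's
	 * HS1 feature set for both the MD and AP cores.
	 */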
	INIT_WORK(&md->handshake_work, t7xx_md_hk_wq);
	md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK;
	md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |=
		FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);

	INIT_WORK(&md->ap_handshake_work, t7xx_ap_hk_wq);
	md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] &= ~FEATURE_MSK;
	md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] |=
		FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);

	return md;
}

int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_modem *md = t7xx_dev->md;

	md->md_init_finish = false;
	md->exp_id = 0;
	t7xx_fsm_reset(md);
	t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]);
	t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_AP]);
	t7xx_port_proxy_reset(md->port_prox);
	md->md_init_finish = true;
	return t7xx_core_reset(md);
}

/**
 * t7xx_md_init() - Initialize modem.
 * @t7xx_dev: MTK device.
 *
 * Allocate and initialize MD control block, and initialize data path.
 * Register MHCCIF ISR and RGU ISR, and start the state machine.
 *
 * Return:
 ** 0		- Success.
 ** -ENOMEM	- Allocation failure.
 */
int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_modem *md;
	int ret;

	md = t7xx_md_alloc(t7xx_dev);
	if (!md)
		return -ENOMEM;

	ret = t7xx_cldma_alloc(CLDMA_ID_MD, t7xx_dev);
	if (ret)
		goto err_destroy_hswq;

	ret = t7xx_cldma_alloc(CLDMA_ID_AP, t7xx_dev);
	if (ret)
		goto err_destroy_hswq;

	ret = t7xx_fsm_init(md);
	if (ret)
		goto err_destroy_hswq;

	ret = t7xx_ccmni_init(t7xx_dev);
	if (ret)
		goto err_uninit_fsm;

	ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_MD]);
	if (ret)
		goto err_uninit_ccmni;

	ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_AP]);
	if (ret)
		goto err_uninit_md_cldma;

	ret = t7xx_port_proxy_init(md);
	if (ret)
		goto err_uninit_ap_cldma;

	ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0);
	if (ret) /* t7xx_fsm_uninit() flushes cmd queue */
		goto err_uninit_proxy;

	t7xx_md_sys_sw_init(t7xx_dev);
	md->md_init_finish = true;
	return 0;

err_uninit_proxy:
	t7xx_port_proxy_uninit(md->port_prox);

err_uninit_ap_cldma:
	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);

err_uninit_md_cldma:
	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);

err_uninit_ccmni:
	t7xx_ccmni_exit(t7xx_dev);

err_uninit_fsm:
	t7xx_fsm_uninit(md);

err_destroy_hswq:
	destroy_workqueue(md->handshake_wq);
	dev_err(&t7xx_dev->pdev->dev, "Modem init failed\n");
	return ret;
}

void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)
{
	enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
	struct t7xx_modem *md = t7xx_dev->md;

	t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);

	if (!md->md_init_finish)
		return;

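	/* Skip the PRE_STOP command when the device is already resetting or in an unknown state */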
	if (mode != T7XX_RESET && mode != T7XX_UNKNOWN)
		t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
	t7xx_port_proxy_uninit(md->port_prox);
	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
	t7xx_ccmni_exit(t7xx_dev);
	t7xx_fsm_uninit(md);
	destroy_workqueue(md->handshake_wq);
}