/* SPDX-License-Identifier: ISC */
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */
#ifndef __MT76_DMA_H
#define __MT76_DMA_H

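/*
 * Sentinel pointer (all bits set) used to tag queue entries that carry no
 * real buffer; it is never meant to be dereferenced.
 */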
#define DMA_DUMMY_DATA			((void *)~0)

#define MT_RING_SIZE			0x10

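/*
 * Bit fields of the descriptor "ctrl" word. Each descriptor addresses up
 * to two buffer segments: SD_LEN0/SD_LEN1 hold the segment lengths,
 * LAST_SEC0/LAST_SEC1 mark a frame's final segment, and DMA_DONE is set
 * by hardware once the descriptor has been processed. The remaining bits
 * (TO_HOST*, DROP, TOKEN, SDP*_H, WO_DROP) overlay the same word on chips
 * with WED/offload support, which is why some bit positions are reused.
 */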
#define MT_DMA_CTL_SD_LEN1		GENMASK(13, 0)
#define MT_DMA_CTL_LAST_SEC1		BIT(14)
#define MT_DMA_CTL_BURST		BIT(15)
#define MT_DMA_CTL_SD_LEN0		GENMASK(29, 16)
#define MT_DMA_CTL_LAST_SEC0		BIT(30)
#define MT_DMA_CTL_DMA_DONE		BIT(31)
#define MT_DMA_CTL_TO_HOST		BIT(8)
#define MT_DMA_CTL_TO_HOST_A		BIT(12)
#define MT_DMA_CTL_DROP			BIT(14)
#define MT_DMA_CTL_TOKEN		GENMASK(31, 16)
#define MT_DMA_CTL_SDP1_H		GENMASK(19, 16)
#define MT_DMA_CTL_SDP0_H		GENMASK(3, 0)
#define MT_DMA_CTL_WO_DROP		BIT(8)

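/*
 * RX descriptor "info" word fields: DMA_FRAG flags a DMA-level fragment,
 * and the PPE fields (presumably filled by the packet processing engine
 * offload path) carry a CPU reason code, a flow table entry and a
 * validity bit.
 */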
#define MT_DMA_PPE_CPU_REASON		GENMASK(15, 11)
#define MT_DMA_PPE_ENTRY		GENMASK(30, 16)
#define MT_DMA_INFO_DMA_FRAG		BIT(9)
#define MT_DMA_INFO_PPE_VLD		BIT(31)

#define MT_DMA_CTL_PN_CHK_FAIL		BIT(13)
#define MT_DMA_CTL_VER_MASK		BIT(7)

#define MT_DMA_RRO_EN		BIT(13)

#define MT_DMA_WED_IND_CMD_CNT		8
#define MT_DMA_WED_IND_REASON		GENMASK(15, 12)

#define MT_DMA_HDR_LEN			4
#define MT_RX_INFO_LEN			4
#define MT_FCE_INFO_LEN			4
#define MT_RX_RXWI_LEN			32

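/*
 * Hardware DMA descriptor: two little-endian buffer address words plus
 * the control and info words decoded via the MT_DMA_CTL_* and
 * MT_DMA_INFO_* fields above.
 */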
struct mt76_desc {
	__le32 buf0;
	__le32 ctrl;
	__le32 buf1;
	__le32 info;
} __packed __aligned(4);

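/*
 * Reduced descriptor used on WED RRO (receive reorder offload) rings,
 * which carry only the two buffer address words.
 */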
struct mt76_wed_rro_desc {
	__le32 buf0;
	__le32 buf1;
} __packed __aligned(4);

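/*
 * TX queue selection values: management, HCCA and the two EDCA queues.
 * Judging by the naming, these are encoded into the TX descriptor's
 * queue-select field.
 */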
enum mt76_qsel {
	MT_QSEL_MGMT,
	MT_QSEL_HCCA,
	MT_QSEL_EDCA,
	MT_QSEL_EDCA_2,
};

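/*
 * Event types reported by the MCU (on-chip microcontroller): command
 * completion/error/retry status and unsolicited event responses such as
 * power, wake-on-WLAN, carrier-detect and DFS-detect.
 */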
enum mt76_mcu_evt_type {
	EVT_CMD_DONE,
	EVT_CMD_ERROR,
	EVT_CMD_RETRY,
	EVT_EVENT_PWR_RSP,
	EVT_EVENT_WOW_RSP,
	EVT_EVENT_CARRIER_DETECT_RSP,
	EVT_EVENT_DFS_DETECT_RSP,
};

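/*
 * Reason codes carried in the MT_DMA_WED_IND_REASON field of buf1:
 * NORMAL for in-order frames, REPEAT for duplicates, OLDPKT for frames
 * that arrive after their reorder window has already moved on.
 */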
enum mt76_dma_wed_ind_reason {
	MT_DMA_WED_IND_REASON_NORMAL,
	MT_DMA_WED_IND_REASON_REPEAT,
	MT_DMA_WED_IND_REASON_OLDPKT,
};

int mt76_dma_rx_poll(struct napi_struct *napi, int budget);
void mt76_dma_attach(struct mt76_dev *dev);
void mt76_dma_cleanup(struct mt76_dev *dev);
int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
		     bool allow_direct);
void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
			    bool reset_idx);
void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q);

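/*
 * Reset a TX queue and, if a WED device is active, re-run the WED DMA
 * setup for that queue afterwards.
 */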
static inline void
mt76_dma_reset_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	dev->queue_ops->reset_q(dev, q);
	if (mtk_wed_device_active(&dev->mmio.wed))
		mt76_wed_dma_setup(dev, q, true);
}

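/*
 * Decide from the descriptor words whether an RX buffer should be
 * dropped. The baseline check tests TO_HOST_A/DROP in ctrl; when the
 * descriptor uses the newer layout (VER_MASK set), the WED indication
 * reason in buf1 takes precedence: repeated frames are always dropped,
 * old packets are kept only when flagged as DMA fragments, and everything
 * else is dropped on a PN (packet number) check failure.
 */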
static inline void
mt76_dma_should_drop_buf(bool *drop, u32 ctrl, u32 buf1, u32 info)
{
	if (!drop)
		return;

	*drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A | MT_DMA_CTL_DROP));
	if (!(ctrl & MT_DMA_CTL_VER_MASK))
		return;

	switch (FIELD_GET(MT_DMA_WED_IND_REASON, buf1)) {
	case MT_DMA_WED_IND_REASON_REPEAT:
		*drop = true;
		break;
	case MT_DMA_WED_IND_REASON_OLDPKT:
		*drop = !(info & MT_DMA_INFO_DMA_FRAG);
		break;
	default:
		*drop = !!(ctrl & MT_DMA_CTL_PN_CHK_FAIL);
		break;
	}
}
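
/*
 * Typical use in an RX completion path (illustrative sketch; the
 * surrounding names are not part of this header):
 *
 *	bool drop;
 *
 *	mt76_dma_should_drop_buf(&drop, le32_to_cpu(desc->ctrl),
 *				 le32_to_cpu(desc->buf1),
 *				 le32_to_cpu(desc->info));
 *	if (drop)
 *		goto free_frag;
 */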

#endif