// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name>
 */
#include "mt76.h"

static unsigned long mt76_aggr_tid_to_timeo(u8 tidno)
{
	/* Voice traffic (AC_VO) currently always runs without aggregation,
	 * so it needs no special handling. AC_BE/AC_BK use TIDs 0-3; just
	 * check for non-AC_BK/AC_BE and use a smaller timeout for those. */
	return HZ / (tidno >= 4 ? 25 : 10);
}

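/* Advance the reorder window head by one slot and, if a frame is buffered at
 * the given index, move it from the reorder buffer to the output queue.
 */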
static void
mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
{
	struct sk_buff *skb;

	tid->head = ieee80211_sn_inc(tid->head);

	skb = tid->reorder_buf[idx];
	if (!skb)
		return;

	tid->reorder_buf[idx] = NULL;
	tid->nframes--;
	__skb_queue_tail(frames, skb);
}

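/* Release all buffered frames with sequence numbers before @head, moving the
 * reorder window forward one slot at a time.
 */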
static void
mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid,
			    struct sk_buff_head *frames,
			    u16 head)
{
	int idx;

	while (ieee80211_sn_less(tid->head, head)) {
		idx = tid->head % tid->size;
		mt76_aggr_release(tid, frames, idx);
	}
}

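/* Flush the run of consecutively buffered frames starting at the current
 * window head.
 */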
static void
mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	int idx = tid->head % tid->size;

	while (tid->reorder_buf[idx]) {
		mt76_aggr_release(tid, frames, idx);
		idx = tid->head % tid->size;
	}
}

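/* Timeout-driven release: scan the reorder buffer for frames that have been
 * held longer than the per-TID timeout and move the window up to each of
 * them, releasing everything in between.
 */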
static void
mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status;
	struct sk_buff *skb;
	int start, idx, nframes;

	if (!tid->nframes)
		return;

	mt76_rx_aggr_release_head(tid, frames);

	start = tid->head % tid->size;
	nframes = tid->nframes;

	for (idx = (tid->head + 1) % tid->size;
	     idx != start && nframes;
	     idx = (idx + 1) % tid->size) {
		skb = tid->reorder_buf[idx];
		if (!skb)
			continue;

		nframes--;
		status = (struct mt76_rx_status *)skb->cb;
		if (!time_after32(jiffies,
				  status->reorder_time +
				  mt76_aggr_tid_to_timeo(tid->num)))
			continue;

		mt76_rx_aggr_release_frames(tid, frames, status->seqno);
	}

	mt76_rx_aggr_release_head(tid, frames);
}

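/* Delayed work: flush timed-out frames from the reorder buffer, hand them to
 * mt76_rx_complete() and reschedule as long as frames remain buffered.
 */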
static void
mt76_rx_aggr_reorder_work(struct work_struct *work)
{
	struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid,
					       reorder_work.work);
	struct mt76_dev *dev = tid->dev;
	struct sk_buff_head frames;
	int nframes;

	__skb_queue_head_init(&frames);

	local_bh_disable();
	rcu_read_lock();

	spin_lock(&tid->lock);
	mt76_rx_aggr_check_release(tid, &frames);
	nframes = tid->nframes;
	spin_unlock(&tid->lock);

	if (nframes)
		ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
					     mt76_aggr_tid_to_timeo(tid->num));
	mt76_rx_complete(dev, &frames, NULL);

	rcu_read_unlock();
	local_bh_enable();
}

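/* Handle BlockAck request (BAR) control frames: move the reorder window
 * forward to the BAR starting sequence number and release buffered frames.
 */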
static void
mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_bar *bar = mt76_skb_get_hdr(skb);
	struct mt76_wcid *wcid = status->wcid;
	struct mt76_rx_tid *tid;
	u8 tidno;
	u16 seqno;

	if (!ieee80211_is_ctl(bar->frame_control))
		return;

	if (!ieee80211_is_back_req(bar->frame_control))
		return;

	status->qos_ctl = tidno = le16_to_cpu(bar->control) >> 12;
	seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
	tid = rcu_dereference(wcid->aggr[tidno]);
	if (!tid)
		return;

	spin_lock_bh(&tid->lock);
	if (!tid->stopped) {
		mt76_rx_aggr_release_frames(tid, frames, seqno);
		mt76_rx_aggr_release_head(tid, frames);
	}
	spin_unlock_bh(&tid->lock);
}

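/* Per-frame RX reordering entry point. The frame is queued on @frames first;
 * it stays there if it can be delivered in order, and is unlinked again if it
 * has to be buffered in the reorder window or dropped as a duplicate.
 */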
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_sta *sta;
	struct mt76_rx_tid *tid;
	bool sn_less;
	u16 seqno, head, size, idx;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	u8 ackp;

	__skb_queue_tail(frames, skb);

	sta = wcid_to_sta(wcid);
	if (!sta)
		return;

	if (!status->aggr) {
		if (!(status->flag & RX_FLAG_8023))
			mt76_rx_aggr_check_ctl(skb, frames);
		return;
	}

	/* not part of a BA session */
	ackp = status->qos_ctl & IEEE80211_QOS_CTL_ACK_POLICY_MASK;
	if (ackp == IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
		return;

	tid = rcu_dereference(wcid->aggr[tidno]);
	if (!tid)
		return;

	status->flag |= RX_FLAG_DUP_VALIDATED;
	spin_lock_bh(&tid->lock);

	if (tid->stopped)
		goto out;

	head = tid->head;
	seqno = status->seqno;
	size = tid->size;
	sn_less = ieee80211_sn_less(seqno, head);

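	/* Frames behind the window start are passed through unchanged until
	 * the first in-window frame starts the session; after that they are
	 * treated as duplicates and dropped.
	 */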
	if (!tid->started) {
		if (sn_less)
			goto out;

		tid->started = true;
	}

	if (sn_less) {
		__skb_unlink(skb, frames);
		dev_kfree_skb(skb);
		goto out;
	}

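	/* In-order frame: advance the head and flush any buffered frames that
	 * are now consecutive.
	 */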
	if (seqno == head) {
		tid->head = ieee80211_sn_inc(head);
		if (tid->nframes)
			mt76_rx_aggr_release_head(tid, frames);
		goto out;
	}

	__skb_unlink(skb, frames);

	/*
	 * Frame sequence number exceeds buffering window, free up some space
	 * by releasing previous frames
	 */
	if (!ieee80211_sn_less(seqno, head + size)) {
		head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size));
		mt76_rx_aggr_release_frames(tid, frames, head);
	}

	idx = seqno % size;

	/* Discard if the current slot is already in use */
	if (tid->reorder_buf[idx]) {
		dev_kfree_skb(skb);
		goto out;
	}

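	/* Park the out-of-order frame in the reorder buffer, flush anything
	 * that is now in order and (re)arm the reorder timeout.
	 */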
	status->reorder_time = jiffies;
	tid->reorder_buf[idx] = skb;
	tid->nframes++;
	mt76_rx_aggr_release_head(tid, frames);

	ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
				     mt76_aggr_tid_to_timeo(tid->num));

out:
	spin_unlock_bh(&tid->lock);
}

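/* Set up RX reordering for a new BA session: allocate a reorder context with
 * a @size-entry buffer, start the window at @ssn and publish it for the given
 * station/TID. Any previous context for this TID is torn down first.
 */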
int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
		       u16 ssn, u16 size)
{
	struct mt76_rx_tid *tid;

	mt76_rx_aggr_stop(dev, wcid, tidno);

	tid = kzalloc(struct_size(tid, reorder_buf, size), GFP_KERNEL);
	if (!tid)
		return -ENOMEM;

	tid->dev = dev;
	tid->head = ssn;
	tid->size = size;
	tid->num = tidno;
	INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
	spin_lock_init(&tid->lock);

	rcu_assign_pointer(wcid->aggr[tidno], tid);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);

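/* Mark the session as stopped, free any frames still held in the reorder
 * buffer and make sure the reorder work is no longer running.
 */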
static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
{
	u16 size = tid->size;
	int i;

	spin_lock_bh(&tid->lock);

	tid->stopped = true;
	for (i = 0; tid->nframes && i < size; i++) {
		struct sk_buff *skb = tid->reorder_buf[i];

		if (!skb)
			continue;

		tid->reorder_buf[i] = NULL;
		tid->nframes--;
		dev_kfree_skb(skb);
	}

	spin_unlock_bh(&tid->lock);

	cancel_delayed_work_sync(&tid->reorder_work);
}

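/* Tear down RX reordering for a BA session: unpublish the reorder context,
 * drop its buffered frames and free it after an RCU grace period.
 */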
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
{
	struct mt76_rx_tid *tid = NULL;

	tid = rcu_replace_pointer(wcid->aggr[tidno], tid,
				  lockdep_is_held(&dev->mutex));
	if (tid) {
		mt76_rx_aggr_shutdown(dev, tid);
		kfree_rcu(tid, rcu_head);
	}
}
EXPORT_SYMBOL_GPL(mt76_rx_aggr_stop);