Lines matching refs: tid
File: drivers/net/wireless/mediatek/mt76/agg-rx.c (mt76 RX A-MPDU reorder logic). Each entry is the source line number followed by the matching line; lines that do not reference tid are omitted by the search.

16 mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
20 tid->head = ieee80211_sn_inc(tid->head);
22 skb = tid->reorder_buf[idx];
26 tid->reorder_buf[idx] = NULL;
27 tid->nframes--;
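Lines 16..27 come from mt76_aggr_release(), the helper that releases one slot of the reorder ring and advances the window head. A minimal sketch of the whole function follows; the lines without a tid reference are absent from the listing and are filled in from the evident control flow, so read it as an approximation rather than the verbatim source.

static void
mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
{
	struct sk_buff *skb;

	/* Advance the window head whether or not the slot is filled */
	tid->head = ieee80211_sn_inc(tid->head);

	skb = tid->reorder_buf[idx];
	if (!skb)
		return;

	/* Hand the buffered frame to the caller's release queue */
	tid->reorder_buf[idx] = NULL;
	tid->nframes--;
	__skb_queue_tail(frames, skb);
}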
32 mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid,
38 while (ieee80211_sn_less(tid->head, head)) {
39 idx = tid->head % tid->size;
40 mt76_aggr_release(tid, frames, idx);
45 mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
47 int idx = tid->head % tid->size;
49 while (tid->reorder_buf[idx]) {
50 mt76_aggr_release(tid, frames, idx);
51 idx = tid->head % tid->size;
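Lines 32..40 and 45..51 are the two window-advancing helpers: mt76_rx_aggr_release_frames() flushes every slot whose sequence number precedes a new head, and mt76_rx_aggr_release_head() then drains any run of consecutively filled slots. A sketch with the non-matching lines filled in from context:

static void
mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid,
			    struct sk_buff_head *frames, u16 head)
{
	int idx;

	/* Release (or skip over) every slot up to the new head */
	while (ieee80211_sn_less(tid->head, head)) {
		idx = tid->head % tid->size;
		mt76_aggr_release(tid, frames, idx);
	}
}

static void
mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	int idx = tid->head % tid->size;

	/* Stop at the first hole; frames past it stay buffered */
	while (tid->reorder_buf[idx]) {
		mt76_aggr_release(tid, frames, idx);
		idx = tid->head % tid->size;
	}
}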
56 mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
62 if (!tid->nframes)
65 mt76_rx_aggr_release_head(tid, frames);
67 start = tid->head % tid->size;
68 nframes = tid->nframes;
70 for (idx = (tid->head + 1) % tid->size;
72 idx = (idx + 1) % tid->size) {
73 skb = tid->reorder_buf[idx];
81 mt76_aggr_tid_to_timeo(tid->num)))
84 mt76_rx_aggr_release_frames(tid, frames, status->seqno);
87 mt76_rx_aggr_release_head(tid, frames);
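Lines 56..87 belong to mt76_rx_aggr_check_release(), the timeout scan: after draining the in-order head it walks the rest of the ring and force-releases up to any frame that has sat buffered longer than the per-TID timeout. The loop's termination condition (source line 71) and the timeout comparison do not reference tid and are reconstructed here, so both are assumptions:

static void
mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status;
	struct sk_buff *skb;
	int start, idx, nframes;

	if (!tid->nframes)
		return;

	mt76_rx_aggr_release_head(tid, frames);

	start = tid->head % tid->size;
	nframes = tid->nframes;

	for (idx = (tid->head + 1) % tid->size;
	     idx != start && nframes;	/* condition assumed; not in listing */
	     idx = (idx + 1) % tid->size) {
		skb = tid->reorder_buf[idx];
		if (!skb)
			continue;

		nframes--;
		status = (struct mt76_rx_status *)skb->cb;
		/* timeout check is an assumption based on the matched call */
		if (!time_after32(jiffies, status->reorder_time +
					   mt76_aggr_tid_to_timeo(tid->num)))
			continue;

		/* Frame timed out: flush everything before it */
		mt76_rx_aggr_release_frames(tid, frames, status->seqno);
	}

	mt76_rx_aggr_release_head(tid, frames);
}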
93 struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid,
95 struct mt76_dev *dev = tid->dev;
104 spin_lock(&tid->lock);
105 mt76_rx_aggr_check_release(tid, &frames);
106 nframes = tid->nframes;
107 spin_unlock(&tid->lock);
110 ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
111 mt76_aggr_tid_to_timeo(tid->num));
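Lines 93..111 are the delayed-work handler mt76_rx_aggr_reorder_work(): it takes the tid lock, runs the timeout scan above, and re-arms itself while frames remain buffered. The locking context and the final completion call are not in the listing; mt76_rx_complete() is an assumption based on the driver's RX path:

static void
mt76_rx_aggr_reorder_work(struct work_struct *work)
{
	struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid,
					       reorder_work.work);
	struct mt76_dev *dev = tid->dev;
	struct sk_buff_head frames;
	int nframes;

	__skb_queue_head_init(&frames);

	/* Context reconstructed: match the softirq RX path's locking */
	local_bh_disable();
	rcu_read_lock();

	spin_lock(&tid->lock);
	mt76_rx_aggr_check_release(tid, &frames);
	nframes = tid->nframes;
	spin_unlock(&tid->lock);

	/* Re-arm while frames are still pending in the ring */
	if (nframes)
		ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
					     mt76_aggr_tid_to_timeo(tid->num));

	/* Assumed hand-off of the released frames to mac80211 */
	mt76_rx_complete(dev, &frames, NULL);

	rcu_read_unlock();
	local_bh_enable();
}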
124 struct mt76_rx_tid *tid;
136 tid = rcu_dereference(wcid->aggr[tidno]);
137 if (!tid)
140 spin_lock_bh(&tid->lock);
141 if (!tid->stopped) {
142 mt76_rx_aggr_release_frames(tid, frames, seqno);
143 mt76_rx_aggr_release_head(tid, frames);
145 spin_unlock_bh(&tid->lock);
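Lines 124..145 are the control-frame path, by all appearances a block-ack request (BAR) handler: the per-TID context is looked up under RCU, and unless the session is being torn down the window is moved forward to the BAR's starting sequence number. The function name and the BAR field parsing below are inferred; only the tid-referencing lines are confirmed by the listing:

static void
mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
	struct mt76_rx_tid *tid;
	u8 tidno;
	u16 seqno;

	if (!ieee80211_is_back_req(bar->frame_control))
		return;

	/* TID and starting sequence number come from the BAR frame */
	tidno = le16_to_cpu(bar->control) >> 12;
	seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));

	tid = rcu_dereference(status->wcid->aggr[tidno]);
	if (!tid)
		return;

	spin_lock_bh(&tid->lock);
	if (!tid->stopped) {
		/* BAR moves the window: flush up to seqno, then drain */
		mt76_rx_aggr_release_frames(tid, frames, seqno);
		mt76_rx_aggr_release_head(tid, frames);
	}
	spin_unlock_bh(&tid->lock);
}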
153 struct mt76_rx_tid *tid;
176 tid = rcu_dereference(wcid->aggr[tidno]);
177 if (!tid)
181 spin_lock_bh(&tid->lock);
183 if (tid->stopped)
186 head = tid->head;
188 size = tid->size;
191 if (!tid->started) {
195 tid->started = true;
205 tid->head = ieee80211_sn_inc(head);
206 if (tid->nframes)
207 mt76_rx_aggr_release_head(tid, frames);
219 mt76_rx_aggr_release_frames(tid, frames, head);
225 if (tid->reorder_buf[idx]) {
231 tid->reorder_buf[idx] = skb;
232 tid->nframes++;
233 mt76_rx_aggr_release_head(tid, frames);
235 ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
236 mt76_aggr_tid_to_timeo(tid->num));
239 spin_unlock_bh(&tid->lock);
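Lines 153..239 are the main per-frame entry point, mt76_rx_aggr_reorder(). The matched lines trace the classic BA-reorder state machine: drop frames behind the window, pass the in-order head straight through, slide the window when a frame lands beyond it, and otherwise buffer into the ring and arm the timeout. A condensed sketch; the early checks for non-aggregated and no-ack frames do not reference tid and are summarized in a comment:

void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	struct mt76_rx_tid *tid;
	u16 seqno, head, size, idx;

	__skb_queue_tail(frames, skb);

	/* Checks for non-QoS, non-aggregated and no-ack frames elided here;
	 * such frames pass through without reordering. */

	tid = rcu_dereference(status->wcid->aggr[tidno]);
	if (!tid)
		return;

	spin_lock_bh(&tid->lock);

	if (tid->stopped)
		goto out;

	head = tid->head;
	seqno = status->seqno;
	size = tid->size;

	/* Until the first in-window frame, pass stale sequence numbers
	 * through instead of dropping them */
	if (!tid->started) {
		if (ieee80211_sn_less(seqno, head))
			goto out;
		tid->started = true;
	}

	if (ieee80211_sn_less(seqno, head)) {
		/* Old or duplicate frame: drop it */
		__skb_unlink(skb, frames);
		dev_kfree_skb(skb);
		goto out;
	}

	if (seqno == head) {
		/* In-order frame: deliver and drain the head */
		tid->head = ieee80211_sn_inc(head);
		if (tid->nframes)
			mt76_rx_aggr_release_head(tid, frames);
		goto out;
	}

	__skb_unlink(skb, frames);

	/* Sequence number beyond the window: slide the window forward */
	if (!ieee80211_sn_less(seqno, head + size)) {
		head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size));
		mt76_rx_aggr_release_frames(tid, frames, head);
	}

	idx = seqno % size;

	/* Slot already taken: treat as a duplicate */
	if (tid->reorder_buf[idx]) {
		dev_kfree_skb(skb);
		goto out;
	}

	status->reorder_time = jiffies;
	tid->reorder_buf[idx] = skb;
	tid->nframes++;
	mt76_rx_aggr_release_head(tid, frames);

	ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
				     mt76_aggr_tid_to_timeo(tid->num));

out:
	spin_unlock_bh(&tid->lock);
}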
245 struct mt76_rx_tid *tid;
249 tid = kzalloc(struct_size(tid, reorder_buf, size), GFP_KERNEL);
250 if (!tid)
253 tid->dev = dev;
254 tid->head = ssn;
255 tid->size = size;
256 tid->num = tidno;
257 INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
258 spin_lock_init(&tid->lock);
260 rcu_assign_pointer(wcid->aggr[tidno], tid);
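Lines 245..260 are the session setup, presumably mt76_rx_aggr_start(): the tid context and its reorder ring come from a single struct_size() allocation, the window starts at the BA session's SSN, and the pointer is published under RCU. The signature and the initial stop call are assumptions not confirmed by the listing:

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       u8 tidno, u16 ssn, u16 size)
{
	struct mt76_rx_tid *tid;

	/* Tear down any existing session first (assumed; not in listing) */
	mt76_rx_aggr_stop(dev, wcid, tidno);

	/* One allocation covers the struct plus the reorder_buf[] ring */
	tid = kzalloc(struct_size(tid, reorder_buf, size), GFP_KERNEL);
	if (!tid)
		return -ENOMEM;

	tid->dev = dev;
	tid->head = ssn;
	tid->size = size;
	tid->num = tidno;
	INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
	spin_lock_init(&tid->lock);

	rcu_assign_pointer(wcid->aggr[tidno], tid);

	return 0;
}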
266 static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
268 u16 size = tid->size;
271 spin_lock_bh(&tid->lock);
273 tid->stopped = true;
274 for (i = 0; tid->nframes && i < size; i++) {
275 struct sk_buff *skb = tid->reorder_buf[i];
280 tid->reorder_buf[i] = NULL;
281 tid->nframes--;
285 spin_unlock_bh(&tid->lock);
287 cancel_delayed_work_sync(&tid->reorder_work);
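Lines 268..287 show mt76_rx_aggr_shutdown(): mark the session stopped under the lock, free whatever is still buffered, then cancel the reorder work outside the lock. A sketch with the non-matching lines filled in:

static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
{
	u16 size = tid->size;
	int i;

	spin_lock_bh(&tid->lock);

	/* Block further insertions, then free anything still buffered */
	tid->stopped = true;
	for (i = 0; tid->nframes && i < size; i++) {
		struct sk_buff *skb = tid->reorder_buf[i];

		if (!skb)
			continue;

		tid->reorder_buf[i] = NULL;
		tid->nframes--;
		dev_kfree_skb(skb);
	}

	spin_unlock_bh(&tid->lock);

	/* Must run after dropping tid->lock: the work handler takes it too */
	cancel_delayed_work_sync(&tid->reorder_work);
}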
292 struct mt76_rx_tid *tid = NULL;
294 tid = rcu_replace_pointer(wcid->aggr[tidno], tid,
296 if (tid) {
297 mt76_rx_aggr_shutdown(dev, tid);
298 kfree_rcu(tid, rcu_head);
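Lines 292..298 are the teardown entry point, presumably mt76_rx_aggr_stop(): the published pointer is swapped out with rcu_replace_pointer(), the session is shut down, and the struct is freed after a grace period. The listing truncates the rcu_replace_pointer() call after its second argument, so the lockdep condition below is an assumption:

void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
{
	struct mt76_rx_tid *tid = NULL;

	/* Atomically unpublish the tid context; readers under RCU either
	 * see the old pointer or NULL. The update-side lock is assumed. */
	tid = rcu_replace_pointer(wcid->aggr[tidno], tid,
				  lockdep_is_held(&dev->mutex));
	if (tid) {
		mt76_rx_aggr_shutdown(dev, tid);
		kfree_rcu(tid, rcu_head);
	}
}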