Lines matching refs: wl

30 static int wl1271_set_default_wep_key(struct wl1271 *wl,
37 ret = wl12xx_cmd_set_default_wep_key(wl, id,
40 ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);
49 static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
53 id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc);
54 if (id >= wl->num_tx_desc)
57 __set_bit(id, wl->tx_frames_map);
58 wl->tx_frames[id] = skb;
59 wl->tx_frames_cnt++;
63 void wl1271_free_tx_id(struct wl1271 *wl, int id)
65 if (__test_and_clear_bit(id, wl->tx_frames_map)) {
66 if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc))
67 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
69 wl->tx_frames[id] = NULL;
70 wl->tx_frames_cnt--;
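
The wl1271_alloc_tx_id/wl1271_free_tx_id pair above is a small bitmap allocator for TX descriptor IDs: find the first clear bit below num_tx_desc, set it, remember the skb, bump a counter; freeing test-and-clears the bit and drops the counter. A minimal user-space sketch of the same pattern, with a plain unsigned long standing in for the kernel's find_first_zero_bit()/__set_bit() helpers (the table size and names here are illustrative, not the driver's):

    #include <stddef.h>

    #define NUM_TX_DESC 32                 /* illustrative; the driver reads wl->num_tx_desc */

    struct tx_table {
        unsigned long map;                 /* one bit per in-flight descriptor */
        void *frames[NUM_TX_DESC];         /* skb pointers in the driver */
        int cnt;
    };

    /* Return a free id and mark it used, or -1 if the table is full
     * (the driver returns -EBUSY in that case). */
    static int alloc_tx_id(struct tx_table *t, void *frame)
    {
        for (int id = 0; id < NUM_TX_DESC; id++) {
            if (!(t->map & (1UL << id))) {
                t->map |= 1UL << id;
                t->frames[id] = frame;
                t->cnt++;
                return id;
            }
        }
        return -1;
    }

    /* Release an id; freeing an id that is not set is silently ignored,
     * mirroring the __test_and_clear_bit() guard above. */
    static void free_tx_id(struct tx_table *t, int id)
    {
        if (t->map & (1UL << id)) {
            t->map &= ~(1UL << id);
            t->frames[id] = NULL;
            t->cnt--;
        }
    }
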
75 static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
91 wl1271_acx_set_inconnection_sta(wl, wlvif, hdr->addr1);
98 wlcore_update_inconn_sta(wl, wlvif, NULL, true);
101 ieee80211_queue_delayed_work(wl->hw,
106 static void wl1271_tx_regulate_link(struct wl1271 *wl,
116 fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
117 tx_pkts = wl->links[hlid].allocated_pkts;
129 if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
131 wl12xx_ps_link_start(wl, wlvif, hlid, true);
134 bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
136 return wl->dummy_packet == skb;
140 static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
152 return wl->system_hlid;
162 u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
168 return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);
179 unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
182 if ((wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) ||
183 !(wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN))
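The WLCORE_QUIRK_TX_PAD_LAST_FRAME / WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN checks above only select the padding granularity; the rounding itself is ordinary power-of-two alignment. A hedged sketch of that arithmetic (the 4-byte and 512-byte values are illustrative examples, not taken from the listing):

    /* Round len up to the next multiple of align (align must be a power of two). */
    static unsigned int align_up(unsigned int len, unsigned int align)
    {
        return (len + align - 1) & ~(align - 1);
    }

    /* e.g. align_up(1461, 4) == 1464; with block-size alignment, align_up(1461, 512) == 1536 */
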
190 static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
200 if (buf_offset + total_len > wl->aggr_buf_size)
203 spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);
206 id = wl1271_alloc_tx_id(wl, skb);
210 total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);
212 if (total_blocks <= wl->tx_blocks_available) {
215 wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks,
220 wl->tx_blocks_available -= total_blocks;
221 wl->tx_allocated_blocks += total_blocks;
228 if (wl->tx_allocated_blocks == total_blocks ||
229 test_and_clear_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags))
230 wl12xx_rearm_tx_watchdog_locked(wl);
233 wl->tx_allocated_pkts[ac]++;
235 if (test_bit(hlid, wl->links_map))
236 wl->links[hlid].allocated_pkts++;
244 wl1271_free_tx_id(wl, id);
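
wl1271_tx_allocate above reserves two resources in order: a descriptor id, then a share of the firmware's block budget; if the blocks are not available the id is released again. A hedged sketch of that reserve-or-roll-back shape, reusing the tx_table sketch above and simplified fields in place of the driver's wl members:

    struct tx_state {
        int blocks_available;              /* wl->tx_blocks_available in the driver */
        int allocated_blocks;              /* wl->tx_allocated_blocks */
    };

    /* Returns 0 on success, -1 (the driver uses -EBUSY) when out of blocks. */
    static int tx_allocate(struct tx_state *s, struct tx_table *t,
                           void *frame, int total_blocks)
    {
        int id = alloc_tx_id(t, frame);

        if (id < 0)
            return -1;

        if (total_blocks <= s->blocks_available) {
            s->blocks_available -= total_blocks;
            s->allocated_blocks += total_blocks;
            return 0;
        }

        /* not enough firmware blocks: undo the id reservation */
        free_tx_id(t, id);
        return -1;
    }
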
250 static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
277 desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
279 is_dummy = wl12xx_is_dummy_packet(wl, skb);
300 u8 session_id = wl->session_ids[hlid];
302 if ((wl->quirks & WLCORE_QUIRK_AP_ZERO_SESSION_ID) &&
355 wlcore_hw_set_tx_desc_csum(wl, desc, skb);
356 wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
359 /* caller must hold wl->mutex */
360 static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
382 is_dummy = wl12xx_is_dummy_packet(wl, skb);
384 if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
398 ret = wl1271_set_default_wep_key(wl, wlvif, idx);
407 ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
412 wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);
415 wl1271_tx_ap_update_inconnection_sta(wl, wlvif, skb);
416 wl1271_tx_regulate_link(wl, wlvif, hlid);
427 total_len = wlcore_calc_packet_alignment(wl, skb->len);
429 memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
430 memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
439 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
446 band = wl->hw->wiphy->bands[rate_band];
465 void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
470 wl12xx_for_each_wlvif(wl, wlvif) {
472 if (wlcore_is_queue_stopped_by_reason(wl, wlvif, i,
477 wlcore_wake_queue(wl, wlvif, i,
483 static int wlcore_select_ac(struct wl1271 *wl)
497 if (wl->tx_queue_count[ac] &&
498 wl->tx_allocated_pkts[ac] < min_pkts) {
500 min_pkts = wl->tx_allocated_pkts[q];
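
wlcore_select_ac above is a min-search: among the access categories that still have queued packets, pick the one with the fewest packets already handed to the firmware. A simplified stand-alone version of that selection (array size and names are illustrative; the driver also walks the ACs in priority order via wl1271_tx_get_queue()):

    #define NUM_TX_QUEUES 4

    /* Return the non-empty queue with the fewest allocated packets, or -1 if all are empty. */
    static int select_ac(const unsigned int queue_count[NUM_TX_QUEUES],
                         const unsigned int allocated_pkts[NUM_TX_QUEUES])
    {
        int q = -1;
        unsigned int min_pkts = ~0u;

        for (int ac = 0; ac < NUM_TX_QUEUES; ac++) {
            if (queue_count[ac] && allocated_pkts[ac] < min_pkts) {
                q = ac;
                min_pkts = allocated_pkts[ac];
            }
        }
        return q;
    }
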
507 static struct sk_buff *wlcore_lnk_dequeue(struct wl1271 *wl,
515 spin_lock_irqsave(&wl->wl_lock, flags);
516 WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
517 wl->tx_queue_count[q]--;
522 spin_unlock_irqrestore(&wl->wl_lock, flags);
528 static struct sk_buff *wlcore_lnk_dequeue_high_prio(struct wl1271 *wl,
532 struct wl1271_link *lnk = &wl->links[hlid];
534 if (!wlcore_hw_lnk_high_prio(wl, hlid, lnk)) {
537 wlcore_hw_lnk_low_prio(wl, hlid, lnk))
544 return wlcore_lnk_dequeue(wl, lnk, ac);
547 static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl,
556 start_hlid = (wlvif->last_tx_hlid + 1) % wl->num_links;
559 for (i = 0; i < wl->num_links; i++) {
560 h = (start_hlid + i) % wl->num_links;
566 skb = wlcore_lnk_dequeue_high_prio(wl, h, ac,
582 static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
585 struct wl12xx_vif *wlvif = wl->last_wlvif;
590 ac = wlcore_select_ac(wl);
596 wl12xx_for_each_wlvif_continue(wl, wlvif) {
600 skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
605 wl->last_wlvif = wlvif;
612 skb = wlcore_lnk_dequeue_high_prio(wl, wl->system_hlid,
615 *hlid = wl->system_hlid;
616 wl->last_wlvif = NULL;
623 wl12xx_for_each_wlvif(wl, wlvif) {
627 skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
630 wl->last_wlvif = wlvif;
635 if (wlvif == wl->last_wlvif)
642 struct wl1271_link *lnk = &wl->links[low_prio_hlid];
643 skb = wlcore_lnk_dequeue(wl, lnk, ac);
649 wl->last_wlvif = lnk->wlvif;
657 test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
660 skb = wl->dummy_packet;
661 *hlid = wl->system_hlid;
663 spin_lock_irqsave(&wl->wl_lock, flags);
664 WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
665 wl->tx_queue_count[q]--;
666 spin_unlock_irqrestore(&wl->wl_lock, flags);
672 static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
678 if (wl12xx_is_dummy_packet(wl, skb)) {
679 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
681 skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
684 wlvif->last_tx_hlid = (hlid + wl->num_links - 1) %
685 wl->num_links;
688 spin_lock_irqsave(&wl->wl_lock, flags);
689 wl->tx_queue_count[q]++;
692 spin_unlock_irqrestore(&wl->wl_lock, flags);
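
Requeueing at the head in wl1271_skb_queue_head also rewinds the round-robin pointer, so the same link is tried first on the next dequeue pass; the modular decrement avoids hlid - 1 going negative. A tiny illustration of that arithmetic (the num_links value is just an example):

    /* Wrapping decrement: with num_links == 16, hlid 0 rewinds to 15, hlid 5 to 4. */
    static unsigned int prev_link(unsigned int hlid, unsigned int num_links)
    {
        return (hlid + num_links - 1) % num_links;
    }
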
702 void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
708 if (!wl->conf.rx_streaming.interval)
711 if (!wl->conf.rx_streaming.always &&
712 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
715 timeout = wl->conf.rx_streaming.duration;
716 wl12xx_for_each_wlvif_sta(wl, wlvif) {
718 for_each_set_bit(hlid, active_hlids, wl->num_links) {
730 ieee80211_queue_work(wl->hw,
748 int wlcore_tx_work_locked(struct wl1271 *wl)
760 if (unlikely(wl->state != WLCORE_STATE_ON))
763 while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
768 if (!wl12xx_is_dummy_packet(wl, skb))
771 hlid = wl->system_hlid;
774 ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset,
781 wl1271_skb_queue_head(wl, wlvif, skb, hlid);
783 buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
785 bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA,
786 wl->aggr_buf, buf_offset, true);
798 wl1271_skb_queue_head(wl, wlvif, skb, hlid);
800 set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
803 if (wl12xx_is_dummy_packet(wl, skb))
808 wl1271_skb_queue_head(wl, wlvif, skb, hlid);
810 ieee80211_free_txskb(wl->hw, skb);
815 wl->tx_packets_count++;
824 buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, last_len);
825 bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
837 if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
838 bus_ret = wlcore_write32(wl, WL12XX_HOST_WR_ACCESS,
839 wl->tx_packets_count);
844 wl1271_handle_tx_low_watermark(wl);
846 wl12xx_rearm_rx_streaming(wl, active_hlids);
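
The loop in wlcore_tx_work_locked batches frames into wl->aggr_buf and pushes the buffer to the device in a single bus write whenever the next frame no longer fits (the -EAGAIN path, which requeues the frame at the head) or the queues run dry. A self-contained user-space illustration of that batching pattern, with printf() standing in for the aggregated wlcore_write_data() call and a toy buffer size:

    #include <stdio.h>
    #include <string.h>

    #define AGGR_BUF_SIZE 32               /* tiny, for illustration; the driver uses wl->aggr_buf_size */

    /* Append one item to the aggregation buffer; return the new offset,
     * or -1 when the item no longer fits (the driver's -EAGAIN case). */
    static int prepare(char *buf, size_t offset, const char *item, size_t len)
    {
        if (offset + len > AGGR_BUF_SIZE)
            return -1;
        memcpy(buf + offset, item, len);
        return (int)(offset + len);
    }

    int main(void)
    {
        const char *frames[] = { "frame-one", "frame-two", "frame-three", "frame-four" };
        char aggr[AGGR_BUF_SIZE];
        size_t offset = 0;

        for (size_t i = 0; i < 4; ) {
            size_t len = strlen(frames[i]);
            int next = prepare(aggr, offset, frames[i], len);

            if (next < 0) {                /* buffer full: flush what we have, retry the frame */
                printf("flush %zu bytes\n", offset);
                offset = 0;
                continue;
            }
            offset = (size_t)next;
            i++;
        }
        if (offset)                        /* flush the final partial buffer */
            printf("flush %zu bytes\n", offset);
        return 0;
    }
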
854 struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
857 mutex_lock(&wl->mutex);
858 ret = pm_runtime_resume_and_get(wl->dev);
862 ret = wlcore_tx_work_locked(wl);
864 pm_runtime_put_noidle(wl->dev);
865 wl12xx_queue_recovery_work(wl);
869 pm_runtime_mark_last_busy(wl->dev);
870 pm_runtime_put_autosuspend(wl->dev);
872 mutex_unlock(&wl->mutex);
896 static void wl1271_tx_complete_packet(struct wl1271 *wl,
909 if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
914 skb = wl->tx_frames[id];
917 if (wl12xx_is_dummy_packet(wl, skb)) {
918 wl1271_free_tx_id(wl, id);
930 rate = wlcore_rate_to_idx(wl, result->rate_class_index,
935 wl->stats.excessive_retries++;
944 wl->stats.retry_count += result->ack_failures;
950 if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
965 skb_queue_tail(&wl->deferred_tx_queue, skb);
966 queue_work(wl->freezable_wq, &wl->netstack_work);
967 wl1271_free_tx_id(wl, result->id);
971 int wlcore_tx_complete(struct wl1271 *wl)
973 struct wl1271_acx_mem_map *memmap = wl->target_mem_map;
979 ret = wlcore_read(wl, le32_to_cpu(memmap->tx_result),
980 wl->tx_res_if, sizeof(*wl->tx_res_if), false);
984 fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);
987 ret = wlcore_write32(wl, le32_to_cpu(memmap->tx_result) +
993 count = fw_counter - wl->tx_results_count;
1003 u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;
1006 result = &(wl->tx_res_if->tx_results_queue[offset]);
1007 wl1271_tx_complete_packet(wl, result);
1009 wl->tx_results_count++;
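
Result consumption in wlcore_tx_complete uses two free-running counters: the firmware's write counter and the host's tx_results_count. Their difference is the number of pending results, and masking the host counter with TX_HW_RESULT_QUEUE_LEN_MASK maps it into the power-of-two result ring. A small self-contained illustration of the counter/mask arithmetic (the ring length here is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define RESULT_QUEUE_LEN  16                     /* must be a power of two */
    #define RESULT_QUEUE_MASK (RESULT_QUEUE_LEN - 1)

    int main(void)
    {
        uint32_t fw_counter = 21;    /* results written by the firmware so far */
        uint32_t host_counter = 18;  /* results already processed by the host */

        uint32_t count = fw_counter - host_counter;   /* 3 pending; unsigned wrap is safe */

        for (uint32_t i = 0; i < count; i++) {
            uint32_t offset = host_counter & RESULT_QUEUE_MASK;
            printf("process result at ring slot %u\n", offset);   /* slots 2, 3, 4 */
            host_counter++;
        }
        return 0;
    }
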
1017 void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
1024 struct wl1271_link *lnk = &wl->links[hlid];
1031 if (!wl12xx_is_dummy_packet(wl, skb)) {
1035 ieee80211_tx_status_ni(wl->hw, skb);
1042 spin_lock_irqsave(&wl->wl_lock, flags);
1044 wl->tx_queue_count[i] -= total[i];
1048 spin_unlock_irqrestore(&wl->wl_lock, flags);
1050 wl1271_handle_tx_low_watermark(wl);
1053 /* caller must hold wl->mutex and TX must be stopped */
1054 void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1059 for_each_set_bit(i, wlvif->links_map, wl->num_links) {
1063 wl1271_free_sta(wl, wlvif, i);
1066 wl12xx_free_link(wl, wlvif, &hlid);
1074 /* caller must hold wl->mutex and TX must be stopped */
1075 void wl12xx_tx_reset(struct wl1271 *wl)
1082 if (wl1271_tx_total_queue_count(wl) != 0) {
1083 for (i = 0; i < wl->num_links; i++)
1084 wl1271_tx_reset_link_queues(wl, i);
1087 wl->tx_queue_count[i] = 0;
1095 wl1271_handle_tx_low_watermark(wl);
1097 for (i = 0; i < wl->num_tx_desc; i++) {
1098 if (wl->tx_frames[i] == NULL)
1101 skb = wl->tx_frames[i];
1102 wl1271_free_tx_id(wl, i);
1105 if (!wl12xx_is_dummy_packet(wl, skb)) {
1112 if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
1125 ieee80211_tx_status_ni(wl->hw, skb);
1132 /* caller must *NOT* hold wl->mutex */
1133 void wl1271_tx_flush(struct wl1271 *wl)
1141 mutex_lock(&wl->flush_mutex);
1143 mutex_lock(&wl->mutex);
1144 if (wl->tx_frames_cnt == 0 && wl1271_tx_total_queue_count(wl) == 0) {
1145 mutex_unlock(&wl->mutex);
1149 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
1153 wl->tx_frames_cnt,
1154 wl1271_tx_total_queue_count(wl));
1157 mutex_unlock(&wl->mutex);
1158 if (wl1271_tx_total_queue_count(wl))
1159 wl1271_tx_work(&wl->tx_work);
1161 mutex_lock(&wl->mutex);
1163 if ((wl->tx_frames_cnt == 0) &&
1164 (wl1271_tx_total_queue_count(wl) == 0)) {
1176 for (i = 0; i < wl->num_links; i++)
1177 wl1271_tx_reset_link_queues(wl, i);
1180 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
1181 mutex_unlock(&wl->mutex);
1183 mutex_unlock(&wl->flush_mutex);
1187 u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
1196 void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1200 bool stopped = !!wl->queue_stop_reasons[hwq];
1203 WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq]));
1208 ieee80211_stop_queue(wl->hw, hwq);
1211 void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
1216 spin_lock_irqsave(&wl->wl_lock, flags);
1217 wlcore_stop_queue_locked(wl, wlvif, queue, reason);
1218 spin_unlock_irqrestore(&wl->wl_lock, flags);
1221 void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
1227 spin_lock_irqsave(&wl->wl_lock, flags);
1230 WARN_ON_ONCE(!test_and_clear_bit(reason, &wl->queue_stop_reasons[hwq]));
1232 if (wl->queue_stop_reasons[hwq])
1235 ieee80211_wake_queue(wl->hw, hwq);
1238 spin_unlock_irqrestore(&wl->wl_lock, flags);
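
The stop/wake helpers above keep a bitmask of stop reasons per hardware queue: mac80211's queue is stopped only when the first reason bit is set and woken only when the last one is cleared, so independent reasons (flush, watermark, and so on) can overlap safely. A user-space sketch of that bitmask pattern, with printf() as a hypothetical stand-in for ieee80211_stop_queue()/ieee80211_wake_queue() and without the driver's locking:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long queue_stop_reasons;   /* one bitmask per hw queue in the driver */

    static void stop_queue(int reason)
    {
        bool was_stopped = queue_stop_reasons != 0;

        queue_stop_reasons |= 1UL << reason;
        if (!was_stopped)                      /* first reason: actually stop the queue */
            printf("stop hw queue\n");
    }

    static void wake_queue(int reason)
    {
        queue_stop_reasons &= ~(1UL << reason);
        if (queue_stop_reasons)                /* another reason still holds it stopped */
            return;
        printf("wake hw queue\n");
    }

    int main(void)
    {
        stop_queue(0);   /* prints "stop hw queue" */
        stop_queue(1);   /* already stopped: no output */
        wake_queue(0);   /* still stopped by reason 1: no output */
        wake_queue(1);   /* last reason cleared: prints "wake hw queue" */
        return 0;
    }
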
1241 void wlcore_stop_queues(struct wl1271 *wl,
1247 spin_lock_irqsave(&wl->wl_lock, flags);
1252 &wl->queue_stop_reasons[i]));
1257 ieee80211_stop_queues(wl->hw);
1259 spin_unlock_irqrestore(&wl->wl_lock, flags);
1262 void wlcore_wake_queues(struct wl1271 *wl,
1268 spin_lock_irqsave(&wl->wl_lock, flags);
1273 &wl->queue_stop_reasons[i]));
1278 ieee80211_wake_queues(wl->hw);
1280 spin_unlock_irqrestore(&wl->wl_lock, flags);
1283 bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl,
1290 spin_lock_irqsave(&wl->wl_lock, flags);
1291 stopped = wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, queue,
1293 spin_unlock_irqrestore(&wl->wl_lock, flags);
1298 bool wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl,
1304 assert_spin_locked(&wl->wl_lock);
1305 return test_bit(reason, &wl->queue_stop_reasons[hwq]);
1308 bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1313 assert_spin_locked(&wl->wl_lock);
1314 return !!wl->queue_stop_reasons[hwq];