Lines Matching refs:wl
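The fragments below cover the wl1251 driver's TX path (tx.c; drivers/net/wireless/ti/wl1251 in recent trees), grouped by function; each numbered line is a reference to the per-device struct wl1251 context. After each group, a short sketch reconstructs the surrounding function: the numbered lines are verbatim, everything else is inferred from the driver's known structure and should be read as an approximation, not the exact source.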

19 static bool wl1251_tx_double_buffer_busy(struct wl1251 *wl, u32 data_out_count)
23 data_in_count = wl->data_in_count;
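The two fragments above belong to the helper that decides whether both halves of the firmware's double buffer are still in flight. A minimal sketch of the full function, assuming the driver's TX_STATUS_DATA_OUT_COUNT_MASK and DP_TX_PACKET_RING_CHUNK_NUM constants; the unnumbered lines are inferred:

    static bool wl1251_tx_double_buffer_busy(struct wl1251 *wl, u32 data_out_count)
    {
            int used, data_in_count;

            data_in_count = wl->data_in_count;

            /* data_in_count is a free-running submit counter; if it has
             * wrapped below the firmware's out counter, unwrap it first */
            if (data_in_count < data_out_count)
                    data_in_count += TX_STATUS_DATA_OUT_COUNT_MASK + 1;

            used = data_in_count - data_out_count;

            /* busy when every ring chunk is still owned by the firmware */
            return used >= DP_TX_PACKET_RING_CHUNK_NUM;
    }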
40 static int wl1251_tx_path_status(struct wl1251 *wl)
45 addr = wl->data_path->tx_control_addr;
46 status = wl1251_mem_read32(wl, addr);
48 busy = wl1251_tx_double_buffer_busy(wl, data_out_count);
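wl1251_tx_path_status() reads the TX control word out of device memory and folds the double-buffer check into an errno. A sketch consistent with the fragments above; the mask name is an assumption carried over from the helper:

    static int wl1251_tx_path_status(struct wl1251 *wl)
    {
            u32 status, addr, data_out_count;
            bool busy;

            addr = wl->data_path->tx_control_addr;
            status = wl1251_mem_read32(wl, addr);
            data_out_count = status & TX_STATUS_DATA_OUT_COUNT_MASK;
            busy = wl1251_tx_double_buffer_busy(wl, data_out_count);

            if (busy)
                    return -EBUSY;  /* caller requeues the frame and retries */

            return 0;
    }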
56 static int wl1251_tx_id(struct wl1251 *wl, struct sk_buff *skb)
61 if (wl->tx_frames[i] == NULL) {
62 wl->tx_frames[i] = skb;
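wl1251_tx_id() hands out per-frame ids by claiming the first free slot in wl->tx_frames[]; the index doubles as the id the firmware later echoes back in its TX result. A sketch, assuming the table holds FW_TX_CMPLT_BLOCK_SIZE entries:

    static int wl1251_tx_id(struct wl1251 *wl, struct sk_buff *skb)
    {
            int i;

            for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
                    if (wl->tx_frames[i] == NULL) {
                            wl->tx_frames[i] = skb;
                            return i;
                    }

            return -EBUSY;  /* every slot is pending completion */
    }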
134 static int wl1251_tx_fill_hdr(struct wl1251 *wl, struct sk_buff *skb,
145 id = wl1251_tx_id(wl, skb);
153 rate = ieee80211_get_tx_rate(wl->hw, control);
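wl1251_tx_fill_hdr() reserves an id and prepends the driver's private tx_double_buffer_desc in front of the 802.11 frame. A simplified sketch; the real function also handles fragmentation thresholds and security header spacing, none of which appears in this listing:

    static int wl1251_tx_fill_hdr(struct wl1251 *wl, struct sk_buff *skb,
                                  struct ieee80211_tx_info *control)
    {
            struct tx_double_buffer_desc *tx_hdr;
            struct ieee80211_rate *rate;
            int id;

            if (!skb)
                    return -EINVAL;

            id = wl1251_tx_id(wl, skb);
            if (id < 0)
                    return id;      /* no free slot: back-pressure */

            tx_hdr = skb_push(skb, sizeof(*tx_hdr));
            tx_hdr->length = cpu_to_le16(skb->len - sizeof(*tx_hdr));

            /* translate mac80211's rate selection into a hw rate code */
            rate = ieee80211_get_tx_rate(wl->hw, control);
            tx_hdr->rate = cpu_to_le16(rate->hw_value);
            tx_hdr->id = id;

            return 0;
    }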
167 static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
216 wl->tx_frames[tx_hdr->id] = skb = newskb;
234 if (wl->data_in_count & 0x1)
235 addr = wl->data_path->tx_packet_ring_addr +
236 wl->data_path->tx_packet_ring_chunk_size;
238 addr = wl->data_path->tx_packet_ring_addr;
240 wl1251_mem_write(wl, addr, skb->data, len);
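wl1251_tx_send_packet() copies the descriptor-plus-frame into whichever half of the packet ring is free. Two details are visible above: an skb may be replaced by a re-aligned copy (the triple assignment keeps wl->tx_frames[] pointing at the skb that was actually sent), and bit 0 of data_in_count selects the ring chunk. A condensed sketch; the alignment test, WL1251_TX_ALIGN, and the error handling are assumptions:

    static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
                                     struct ieee80211_tx_info *info)
    {
            struct tx_double_buffer_desc *tx_hdr;
            u32 addr;
            int len;

            tx_hdr = (struct tx_double_buffer_desc *)skb->data;

            /* the block write wants 4-byte alignment; if the payload is
             * unaligned, send an aligned copy instead and track the copy
             * under the same id */
            if (unlikely((long)skb->data & 0x03)) {
                    struct sk_buff *newskb = skb_copy_expand(skb, 0, 3,
                                                             GFP_KERNEL);
                    if (unlikely(newskb == NULL))
                            return -EINVAL;

                    tx_hdr = (struct tx_double_buffer_desc *)newskb->data;
                    dev_kfree_skb_any(skb);
                    wl->tx_frames[tx_hdr->id] = skb = newskb;
            }

            len = WL1251_TX_ALIGN(skb->len);

            /* double buffer: odd submissions land in the second chunk */
            if (wl->data_in_count & 0x1)
                    addr = wl->data_path->tx_packet_ring_addr +
                           wl->data_path->tx_packet_ring_chunk_size;
            else
                    addr = wl->data_path->tx_packet_ring_addr;

            wl1251_mem_write(wl, addr, skb->data, len);

            return 0;
    }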
249 static void wl1251_tx_trigger(struct wl1251 *wl)
253 if (wl->data_in_count & 0x1) {
261 wl1251_reg_write32(wl, addr, data);
264 wl->data_in_count = (wl->data_in_count + 1) &
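wl1251_tx_trigger() tells the firmware which buffer half was just filled by writing the matching interrupt-trigger register, then advances the in-counter; the masked increment on the truncated last fragment is what keeps data_in_count comparable with the out counter read in wl1251_tx_path_status(). A sketch, assuming the wl1251 register names:

    static void wl1251_tx_trigger(struct wl1251 *wl)
    {
            u32 data, addr;

            if (wl->data_in_count & 0x1) {
                    addr = ACX_REG_INTERRUPT_TRIG_H;        /* second half */
                    data = INTR_TRIG_TX_PROC1;
            } else {
                    addr = ACX_REG_INTERRUPT_TRIG;          /* first half */
                    data = INTR_TRIG_TX_PROC0;
            }

            wl1251_reg_write32(wl, addr, data);

            /* advance the submit counter modulo the counter field width */
            wl->data_in_count = (wl->data_in_count + 1) &
                                TX_STATUS_DATA_OUT_COUNT_MASK;
    }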
268 static void enable_tx_for_packet_injection(struct wl1251 *wl)
272 ret = wl1251_cmd_join(wl, BSS_TYPE_STA_BSS, wl->channel,
273 wl->beacon_int, wl->dtim_period);
279 ret = wl1251_event_wait(wl, JOIN_EVENT_COMPLETE_ID, 100);
285 wl->joined = true;
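In pure monitor-mode operation no interface has joined a BSS, so the firmware keeps the TX path closed; enable_tx_for_packet_injection() issues a dummy JOIN to open it for injected frames. A sketch; the warning strings are placeholders:

    static void enable_tx_for_packet_injection(struct wl1251 *wl)
    {
            int ret;

            ret = wl1251_cmd_join(wl, BSS_TYPE_STA_BSS, wl->channel,
                                  wl->beacon_int, wl->dtim_period);
            if (ret < 0) {
                    wl1251_warning("join failed");
                    return;
            }

            /* wait up to 100 ms for the firmware to confirm the join */
            ret = wl1251_event_wait(wl, JOIN_EVENT_COMPLETE_ID, 100);
            if (ret < 0) {
                    wl1251_warning("join timeout");
                    return;
            }

            wl->joined = true;
    }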
288 /* caller must hold wl->mutex */
289 static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
298 if (unlikely(wl->monitor_present))
302 if (unlikely(wl->default_key != idx)) {
303 ret = wl1251_acx_default_key(wl, idx);
310 if ((wl->vif == NULL) && !wl->joined)
311 enable_tx_for_packet_injection(wl);
313 ret = wl1251_tx_path_status(wl);
317 ret = wl1251_tx_fill_hdr(wl, skb, info);
321 ret = wl1251_tx_send_packet(wl, skb, info);
325 wl1251_tx_trigger(wl);
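wl1251_tx_frame() strings the above together for one frame, under wl->mutex: sync the firmware's default key if hardware crypto picked a different one, fake a join when injecting without an interface, bail out early if the double buffer is full, then fill the header, copy the frame out, and trigger the firmware. A sketch with the inferred glue:

    /* caller must hold wl->mutex */
    static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
    {
            struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
            int ret = 0;
            u8 idx;

            if (info->control.hw_key) {
                    /* hw crypto is not available alongside a monitor */
                    if (unlikely(wl->monitor_present))
                            return -EINVAL;

                    idx = info->control.hw_key->hw_key_idx;
                    if (unlikely(wl->default_key != idx)) {
                            ret = wl1251_acx_default_key(wl, idx);
                            if (ret < 0)
                                    return ret;
                    }
            }

            /* injection without an interface: open the TX path first */
            if ((wl->vif == NULL) && !wl->joined)
                    enable_tx_for_packet_injection(wl);

            ret = wl1251_tx_path_status(wl);        /* -EBUSY if full */
            if (ret < 0)
                    return ret;

            ret = wl1251_tx_fill_hdr(wl, skb, info);
            if (ret < 0)
                    return ret;

            ret = wl1251_tx_send_packet(wl, skb, info);
            if (ret < 0)
                    return ret;

            wl1251_tx_trigger(wl);

            return ret;
    }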
332 struct wl1251 *wl = container_of(work, struct wl1251, tx_work);
337 mutex_lock(&wl->mutex);
339 if (unlikely(wl->state == WL1251_STATE_OFF))
342 while ((skb = skb_dequeue(&wl->tx_queue))) {
344 ret = wl1251_ps_elp_wakeup(wl);
350 ret = wl1251_tx_frame(wl, skb);
352 skb_queue_head(&wl->tx_queue, skb);
362 wl1251_ps_elp_sleep(wl);
364 mutex_unlock(&wl->mutex);
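The work item drains wl->tx_queue under the mutex. It wakes the chip out of ELP power save once for the whole batch and puts it back to sleep at the end; a frame rejected with -EBUSY goes back to the head of the queue so ordering is preserved. A sketch (the woken_up bookkeeping is inferred):

    void wl1251_tx_work(struct work_struct *work)
    {
            struct wl1251 *wl = container_of(work, struct wl1251, tx_work);
            struct sk_buff *skb;
            bool woken_up = false;
            int ret;

            mutex_lock(&wl->mutex);

            if (unlikely(wl->state == WL1251_STATE_OFF))
                    goto out;

            while ((skb = skb_dequeue(&wl->tx_queue))) {
                    if (!woken_up) {
                            ret = wl1251_ps_elp_wakeup(wl);
                            if (ret < 0)
                                    goto out;
                            woken_up = true;
                    }

                    ret = wl1251_tx_frame(wl, skb);
                    if (ret == -EBUSY) {
                            /* fw ring full: requeue and retry later */
                            skb_queue_head(&wl->tx_queue, skb);
                            goto out;
                    } else if (ret < 0) {
                            dev_kfree_skb(skb);
                            goto out;
                    }
            }

    out:
            if (woken_up)
                    wl1251_ps_elp_sleep(wl);

            mutex_unlock(&wl->mutex);
    }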
395 static void wl1251_tx_packet_cb(struct wl1251 *wl,
403 skb = wl->tx_frames[result->id];
417 wl->stats.retry_count += result->ack_failures;
437 ieee80211_tx_status_skb(wl->hw, skb);
439 wl->tx_frames[result->id] = NULL;
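wl1251_tx_packet_cb() retires one firmware result: look the skb up by the echoed id, translate the status into mac80211 flags, account retries, strip the private descriptor again, and hand the skb back via ieee80211_tx_status_skb(). A simplified sketch (the TKIP header fixup in the real function is omitted):

    static void wl1251_tx_packet_cb(struct wl1251 *wl,
                                    struct tx_result *result)
    {
            struct ieee80211_tx_info *info;
            struct sk_buff *skb;

            skb = wl->tx_frames[result->id];
            if (skb == NULL)
                    return;         /* no frame pending under this id */

            info = IEEE80211_SKB_CB(skb);

            if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
                result->status == TX_SUCCESS)
                    info->flags |= IEEE80211_TX_STAT_ACK;

            info->status.rates[0].count = result->ack_failures + 1;
            wl->stats.retry_count += result->ack_failures;

            /* undo the descriptor prepended in wl1251_tx_fill_hdr() */
            skb_pull(skb, sizeof(struct tx_double_buffer_desc));

            ieee80211_tx_status_skb(wl->hw, skb);

            wl->tx_frames[result->id] = NULL;       /* slot reusable */
    }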
443 void wl1251_tx_complete(struct wl1251 *wl)
449 if (unlikely(wl->state != WL1251_STATE_ON))
459 wl1251_mem_read(wl, wl->data_path->tx_complete_addr, result,
462 result_index = wl->next_tx_complete;
469 wl1251_tx_packet_cb(wl, result_ptr);
482 queue_len = skb_queue_len(&wl->tx_queue);
487 ieee80211_queue_work(wl->hw, &wl->tx_work);
490 if (wl->tx_queue_stopped &&
494 spin_lock_irqsave(&wl->wl_lock, flags);
495 ieee80211_wake_queues(wl->hw);
496 wl->tx_queue_stopped = false;
497 spin_unlock_irqrestore(&wl->wl_lock, flags);
506 if (result_index > wl->next_tx_complete) {
508 wl1251_mem_write(wl,
509 wl->data_path->tx_complete_addr +
510 (wl->next_tx_complete *
512 &result[wl->next_tx_complete],
517 } else if (result_index < wl->next_tx_complete) {
519 wl1251_mem_write(wl,
520 wl->data_path->tx_complete_addr +
521 (wl->next_tx_complete *
523 &result[wl->next_tx_complete],
525 wl->next_tx_complete) *
528 wl1251_mem_write(wl,
529 wl->data_path->tx_complete_addr,
533 wl->next_tx_complete) *
538 wl1251_mem_write(wl,
539 wl->data_path->tx_complete_addr,
548 wl->next_tx_complete = result_index;
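wl1251_tx_complete() is the bulk half of completion: it snapshots the firmware's whole result block, walks it from next_tx_complete while entries are flagged done, retires each one, then zeroes the consumed flags back into device memory. The three wl1251_mem_write() branches above are that write-back: one contiguous region when the index moved forward without wrapping, tail-then-head when it wrapped, and the full block when every entry was consumed. A sketch of the whole flow, assuming FW_TX_CMPLT_BLOCK_SIZE results with a done_1/done_2 flag pair:

    void wl1251_tx_complete(struct wl1251 *wl)
    {
            struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr;
            int i, result_index, num_complete = 0, queue_len;
            unsigned long flags;

            if (unlikely(wl->state != WL1251_STATE_ON))
                    return;

            /* snapshot the firmware's completion block in one read */
            wl1251_mem_read(wl, wl->data_path->tx_complete_addr, result,
                            sizeof(result));

            result_index = wl->next_tx_complete;

            for (i = 0; i < ARRAY_SIZE(result); i++) {
                    result_ptr = &result[result_index];

                    if (result_ptr->done_1 == 1 && result_ptr->done_2 == 1) {
                            wl1251_tx_packet_cb(wl, result_ptr);

                            result_ptr->done_1 = 0;
                            result_ptr->done_2 = 0;

                            result_index = (result_index + 1) %
                                           FW_TX_CMPLT_BLOCK_SIZE;
                            num_complete++;
                    } else {
                            break;
                    }
            }

            /* completions freed ring space: reschedule tx_work if frames
             * wait, and un-throttle mac80211 below the low watermark */
            queue_len = skb_queue_len(&wl->tx_queue);

            if (num_complete > 0 && queue_len > 0)
                    ieee80211_queue_work(wl->hw, &wl->tx_work);

            if (wl->tx_queue_stopped &&
                queue_len <= WL1251_TX_QUEUE_LOW_WATERMARK) {
                    spin_lock_irqsave(&wl->wl_lock, flags);
                    ieee80211_wake_queues(wl->hw);
                    wl->tx_queue_stopped = false;
                    spin_unlock_irqrestore(&wl->wl_lock, flags);
            }

            /* acknowledge consumed entries by writing the cleared flags
             * back; the span may be contiguous, wrapped, or the whole block */
            if (num_complete) {
                    if (result_index > wl->next_tx_complete) {
                            wl1251_mem_write(wl,
                                    wl->data_path->tx_complete_addr +
                                    (wl->next_tx_complete *
                                     sizeof(struct tx_result)),
                                    &result[wl->next_tx_complete],
                                    num_complete * sizeof(struct tx_result));
                    } else if (result_index < wl->next_tx_complete) {
                            /* wrap: tail of the block, then its head */
                            wl1251_mem_write(wl,
                                    wl->data_path->tx_complete_addr +
                                    (wl->next_tx_complete *
                                     sizeof(struct tx_result)),
                                    &result[wl->next_tx_complete],
                                    (FW_TX_CMPLT_BLOCK_SIZE -
                                     wl->next_tx_complete) *
                                    sizeof(struct tx_result));
                            wl1251_mem_write(wl,
                                    wl->data_path->tx_complete_addr,
                                    result,
                                    (num_complete - FW_TX_CMPLT_BLOCK_SIZE +
                                     wl->next_tx_complete) *
                                    sizeof(struct tx_result));
                    } else {
                            /* full lap: rewrite the entire block */
                            wl1251_mem_write(wl,
                                    wl->data_path->tx_complete_addr,
                                    result, sizeof(result));
                    }
            }

            wl->next_tx_complete = result_index;
    }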
551 /* caller must hold wl->mutex */
552 void wl1251_tx_flush(struct wl1251 *wl)
561 while ((skb = skb_dequeue(&wl->tx_queue))) {
569 ieee80211_tx_status_skb(wl->hw, skb);
573 if (wl->tx_frames[i] != NULL) {
574 skb = wl->tx_frames[i];
580 ieee80211_tx_status_skb(wl->hw, skb);
581 wl->tx_frames[i] = NULL;
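wl1251_tx_flush() throws away everything still in flight when the interface goes down: frames never handed to the firmware are popped off tx_queue, and frames still parked in tx_frames[] are released; in both passes only frames that asked for TX status are reported back to mac80211. A sketch mirroring the fragments:

    /* caller must hold wl->mutex */
    void wl1251_tx_flush(struct wl1251 *wl)
    {
            struct ieee80211_tx_info *info;
            struct sk_buff *skb;
            int i;

            /* frames queued but never handed to the firmware */
            while ((skb = skb_dequeue(&wl->tx_queue))) {
                    info = IEEE80211_SKB_CB(skb);

                    if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
                            continue;

                    ieee80211_tx_status_skb(wl->hw, skb);
            }

            /* frames handed over whose completion never arrived */
            for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
                    if (wl->tx_frames[i] != NULL) {
                            skb = wl->tx_frames[i];
                            info = IEEE80211_SKB_CB(skb);

                            if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
                                    continue;

                            ieee80211_tx_status_skb(wl->hw, skb);
                            wl->tx_frames[i] = NULL;
                    }
    }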