Lines matching references to urb in the mt76 USB code

320 mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
333 sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size,
340 for (j = nsgs; j < urb->num_sgs; j++)
341 mt76_put_page_pool_buf(sg_virt(&urb->sg[j]), false);
342 urb->num_sgs = i;
345 urb->num_sgs = max_t(int, i, urb->num_sgs);
346 urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
347 sg_init_marker(urb->sg, urb->num_sgs);
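
The block above (lines 320-347) builds a scatter-gather RX URB out of page-pool buffers. A minimal sketch of that pattern follows, assuming a generic driver context: my_alloc_buf() is a hypothetical stand-in for the page-pool allocator (mt76_get_page_pool_buf in the listing), and the offset handling is simplified.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/usb.h>

extern void *my_alloc_buf(int size);    /* hypothetical page-pool allocator */

static int fill_rx_sg_sketch(struct urb *urb, int nsgs, int buf_size)
{
        int i;

        for (i = 0; i < nsgs; i++) {
                void *data = my_alloc_buf(buf_size);

                if (!data)
                        break;
                /* one buffer per scatterlist slot */
                sg_set_page(&urb->sg[i], virt_to_head_page(data),
                            buf_size, offset_in_page(data));
        }

        if (i < nsgs) {
                /* partial fill; the listing (lines 340-342) also releases
                 * stale buffers left over from a previous fill here */
                urb->num_sgs = i;
        } else {
                urb->num_sgs = max_t(int, i, urb->num_sgs);
        }

        urb->transfer_buffer_length = urb->num_sgs * buf_size;
        sg_init_marker(urb->sg, urb->num_sgs);

        return i ? 0 : -ENOMEM;
}
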
354 struct urb *urb, int nsgs)
360 return mt76u_fill_rx_sg(dev, q, urb, nsgs);
362 urb->transfer_buffer_length = q->buf_size;
363 urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size);
365 return urb->transfer_buffer ? 0 : -ENOMEM;
372 unsigned int size = sizeof(struct urb);
377 e->urb = kzalloc(size, GFP_KERNEL);
378 if (!e->urb)
381 usb_init_urb(e->urb);
384 e->urb->sg = (struct scatterlist *)(e->urb + 1);
401 return mt76u_refill_rx(dev, q, e->urb, sg_size);
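
Lines 372-401 allocate each RX URB together with its scatterlist array in a single allocation and then initialize it by hand. A minimal sketch of that layout, with a hypothetical MY_SG_MAX in place of the driver's sg-size constant:

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/usb.h>

#define MY_SG_MAX 8     /* hypothetical number of sg slots per URB */

static struct urb *alloc_sg_urb_sketch(void)
{
        size_t size = sizeof(struct urb) +
                      MY_SG_MAX * sizeof(struct scatterlist);
        struct urb *urb = kzalloc(size, GFP_KERNEL);

        if (!urb)
                return NULL;

        usb_init_urb(urb);      /* set up refcount/anchor like usb_alloc_urb() */
        urb->sg = (struct scatterlist *)(urb + 1);      /* sg array follows the urb */

        return urb;
}

Because usb_init_urb() makes the URB reference-counted, it is released later with usb_free_urb() (line 414), not with a bare kfree().
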
404 static void mt76u_urb_free(struct urb *urb)
408 for (i = 0; i < urb->num_sgs; i++)
409 mt76_put_page_pool_buf(sg_virt(&urb->sg[i]), false);
411 if (urb->transfer_buffer)
412 mt76_put_page_pool_buf(urb->transfer_buffer, false);
414 usb_free_urb(urb);
419 struct urb *urb, usb_complete_t complete_fn,
431 urb->dev = udev;
432 urb->pipe = pipe;
433 urb->complete = complete_fn;
434 urb->context = context;
437 static struct urb *
440 struct urb *urb = NULL;
445 urb = q->entry[q->tail].urb;
451 return urb;
512 mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
515 u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
516 int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
523 len = mt76u_get_rx_entry_len(dev, data, urb->actual_length);
539 while (len > 0 && nsgs < urb->num_sgs) {
540 data_len = min_t(int, len, urb->sg[nsgs].length);
542 sg_page(&urb->sg[nsgs]),
543 urb->sg[nsgs].offset, data_len,
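
Lines 512-543 turn a completed RX URB into an skb: the first sg buffer becomes the skb head and the remaining entries are attached as page fragments. A sketch of the fragment loop, assuming the generic skb_add_rx_frag() API and a simplified truesize equal to buf_size:

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/usb.h>

static void chain_rx_frags_sketch(struct sk_buff *skb, struct urb *urb,
                                  int len, int buf_size)
{
        int nsgs = 1;   /* sg[0] already consumed for the skb head */

        while (len > 0 && nsgs < urb->num_sgs) {
                int data_len = min_t(int, len, urb->sg[nsgs].length);

                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                sg_page(&urb->sg[nsgs]),
                                urb->sg[nsgs].offset, data_len, buf_size);
                len -= data_len;
                nsgs++;
        }
}
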
555 static void mt76u_complete_rx(struct urb *urb)
557 struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
558 struct mt76_queue *q = urb->context;
561 trace_rx_urb(dev, urb);
563 switch (urb->status) {
570 dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
571 urb->status);
578 if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch"))
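
The completion handler at lines 555-578 only treats some URB statuses as real errors; unlink and shutdown statuses are expected while a queue is being stopped. A sketch of that status check (the listing's mt76u_urb_error() at line 826 plays a similar role):

#include <linux/errno.h>
#include <linux/usb.h>

static bool urb_failed_sketch(struct urb *urb)
{
        switch (urb->status) {
        case 0:
                return false;           /* transfer completed */
        case -ECONNRESET:               /* usb_unlink_urb()/usb_poison_urb() */
        case -ESHUTDOWN:                /* host controller or device going away */
        case -ENOENT:                   /* usb_kill_urb() */
                return false;           /* expected during stop, stay quiet */
        default:
                return true;            /* real transfer error, worth logging */
        }
}
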
590 struct urb *urb)
594 mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb,
596 trace_submit_urb(dev, urb);
598 return usb_submit_urb(urb, GFP_ATOMIC);
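
Lines 419-434 populate the bulk URB fields individually and lines 590-598 submit it from atomic context. Below is a sketch of the equivalent fill-and-submit for a plain linear buffer using the stock usb_fill_bulk_urb() helper; the driver assigns the fields by hand because the buffer/length may already describe a scatterlist. rx_complete_sketch() is a hypothetical completion callback.

#include <linux/usb.h>

static void rx_complete_sketch(struct urb *urb)
{
        /* urb->status and urb->actual_length are valid here; hand the
         * buffer to the rx path and/or resubmit the urb */
}

static int submit_rx_urb_sketch(struct usb_device *udev, struct urb *urb,
                                void *buf, int len, unsigned int ep,
                                void *context)
{
        usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, ep),
                          buf, len, rx_complete_sketch, context);

        /* GFP_ATOMIC: resubmission happens from the rx tasklet */
        return usb_submit_urb(urb, GFP_ATOMIC);
}
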
605 struct urb *urb;
609 urb = mt76u_get_next_rx_entry(q);
610 if (!urb)
613 count = mt76u_process_rx_entry(dev, urb, q->buf_size);
615 err = mt76u_refill_rx(dev, q, urb, count);
619 mt76u_submit_rx_buf(dev, qid, urb);
649 err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);
701 if (!q->entry[i].urb)
704 mt76u_urb_free(q->entry[i].urb);
705 q->entry[i].urb = NULL;
732 usb_poison_urb(q->entry[j].urb);
746 usb_unpoison_urb(q->entry[j].urb);
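
Lines 732 and 746 pair usb_poison_urb() with usb_unpoison_urb(): poisoning both cancels an in-flight URB and makes further submissions fail until the URB is unpoisoned, which suits a temporary stop/restart of the RX queues. A minimal sketch over a hypothetical array of URBs:

#include <linux/usb.h>

static void stop_queue_sketch(struct urb **urbs, int n)
{
        int i;

        for (i = 0; i < n; i++)
                usb_poison_urb(urbs[i]);        /* cancel and refuse new submits */
}

static void restart_queue_sketch(struct urb **urbs, int n)
{
        int i;

        for (i = 0; i < n; i++)
                usb_unpoison_urb(urbs[i]);      /* allow submissions again */
}
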
821 static void mt76u_complete_tx(struct urb *urb)
823 struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
824 struct mt76_queue_entry *e = urb->context;
826 if (mt76u_urb_error(urb))
827 dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
835 struct urb *urb)
837 urb->transfer_buffer_length = skb->len;
840 urb->transfer_buffer = skb->data;
844 sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
845 urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
846 if (!urb->num_sgs)
849 return urb->num_sgs;
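
Lines 835-849 prepare a TX URB from an skb: with scatter-gather support the skb is mapped straight into the URB scatterlist via skb_to_sgvec(), otherwise skb->data is used as a linear buffer. A sketch under those assumptions, with MY_SG_MAX standing in for MT_TX_SG_MAX_SIZE and the SG capability check reduced to the host controller's sg_tablesize:

#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/usb.h>

#define MY_SG_MAX 8     /* hypothetical, mirrors MT_TX_SG_MAX_SIZE */

static int tx_setup_buffers_sketch(struct usb_device *udev,
                                   struct sk_buff *skb, struct urb *urb)
{
        urb->transfer_buffer_length = skb->len;

        if (!udev->bus->sg_tablesize) {
                /* no SG support on this host controller: linear buffer */
                urb->transfer_buffer = skb->data;
                return 0;
        }

        sg_init_table(urb->sg, MY_SG_MAX);
        urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
        if (urb->num_sgs <= 0)
                return -ENOMEM;

        return urb->num_sgs;
}
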
872 err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
876 mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q->ep, q->entry[idx].urb,
889 struct urb *urb;
893 urb = q->entry[q->first].urb;
895 trace_submit_urb(dev, urb);
896 err = usb_submit_urb(urb, GFP_ATOMIC);
901 dev_err(dev->dev, "tx urb submit failed:%d\n",
988 usb_free_urb(q->entry[j].urb);
989 q->entry[j].urb = NULL;
1015 usb_kill_urb(q->entry[j].urb);
1021 * will fail to submit urb, cleanup those skb's manually.
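
The teardown path at lines 988-1021 kills any in-flight TX URBs and then frees them; usb_kill_urb() does not return until the completion handler has finished, so nothing touches the URB afterwards. A minimal sketch of that order of operations:

#include <linux/usb.h>

static void free_tx_urbs_sketch(struct urb **urbs, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                if (!urbs[i])
                        continue;
                usb_kill_urb(urbs[i]);  /* synchronous cancel, waits for completion */
                usb_free_urb(urbs[i]);  /* drop the reference taken at allocation */
                urbs[i] = NULL;
        }
}
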