Lines Matching refs:queue

235  * @queue: Linked list of outstanding requests for this EP.
243 struct list_head queue;
249 * @queue: Links back to the EP's request list.
251 * @offset: Current byte offset into the data buffer (next byte to queue).
256 struct list_head queue; /* ep's requests */
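
Read together, the two @queue kernel-doc entries above describe the standard
list_head pairing: the endpoint owns the list head, and each request embeds
the node that links it into that list. A minimal sketch of that relationship,
showing only the documented fields (everything else in each struct is elided):

    struct bcm63xx_ep {
            struct list_head queue;    /* head: outstanding requests for this EP */
            /* ... */
    };

    struct bcm63xx_req {
            struct list_head queue;    /* node: links back into the EP's list */
            unsigned int offset;       /* next byte of the data buffer to queue */
            /* ... */
    };
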
585 * For RX IUDMA, this will queue a single buffer descriptor, as RX IUDMA
590 * For TX IUDMA, this can queue multiple buffer descriptors if needed.
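
A hedged sketch of the rule those two comments state: TX carves a request into
as many BDs as needed, while RX always posts exactly one. The fragment limit
IUDMA_MAX_FRAGMENT and the queue_one_bd() helper are assumptions for
illustration, not the driver's exact code:

    static void queue_bds_sketch(struct iudma_ch *iudma, struct bcm63xx_req *breq)
    {
            unsigned int left = breq->req.length - breq->offset;

            do {
                    unsigned int n = min_t(unsigned int, left, IUDMA_MAX_FRAGMENT);

                    /* queue_one_bd() is a hypothetical one-descriptor helper */
                    queue_one_bd(iudma, breq->req.dma + breq->offset, n);
                    breq->offset += n;
                    left -= n;
            } while (left && iudma->is_tx);    /* RX: stop after a single BD */
    }
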
771 INIT_LIST_HEAD(&bep->queue);
964 INIT_LIST_HEAD(&bep->queue);
1051 BUG_ON(!list_empty(&bep->queue));
1090 if (!list_empty(&bep->queue)) {
1091 list_for_each_entry_safe(breq, n, &bep->queue, queue) {
1094 list_del(&breq->queue);
1142 * If the queue is empty, start this request immediately. Otherwise, add
1184 list_add_tail(&breq->queue, &bep->queue);
1185 if (list_is_singular(&bep->queue))
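
These two lines are the classic enqueue-and-kick idiom: the hardware is only
started when the queue goes from empty to non-empty, because a non-empty queue
means a transfer is already in flight and the completion path will start the
successor. In context (a sketch; the locking and helper names are assumptions
consistent with the fragments above, not the driver's exact code):

    spin_lock_irqsave(&udc->lock, flags);
    list_add_tail(&breq->queue, &bep->queue);
    if (list_is_singular(&bep->queue))             /* was empty: DMA is idle */
            iudma_write(udc, bep->iudma, breq);    /* start it immediately */
    spin_unlock_irqrestore(&udc->lock, flags);
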
1195 * bcm63xx_udc_dequeue - Remove a pending request from the queue.
1199 * If the request is not at the head of the queue, this is easy - just nuke
1200 * it. If the request is at the head of the queue, we'll need to stop the
1201 * DMA transaction and then queue up the successor.
1212 if (list_empty(&bep->queue)) {
1217 cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
1222 list_del(&breq->queue);
1224 if (!list_empty(&bep->queue)) {
1227 next = list_first_entry(&bep->queue,
1228 struct bcm63xx_req, queue);
1232 list_del(&breq->queue);
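
The fragments at 1212-1232 implement exactly the plan in the comment above: a
request at the head of the queue has live DMA, so the channel is stopped
before unlinking and the successor is started; a request deeper in the list is
simply unlinked. A condensed sketch (locking and error paths elided; the
reset/restart helper names are assumptions in the driver's naming style):

    cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
    if (breq == cur) {
            iudma_reset_channel(udc, bep->iudma);    /* abort in-flight DMA */
            list_del(&breq->queue);
            if (!list_empty(&bep->queue)) {          /* queue up the successor */
                    next = list_first_entry(&bep->queue,
                                            struct bcm63xx_req, queue);
                    iudma_write(udc, bep->iudma, next);
            }
    } else {
            list_del(&breq->queue);                  /* not running: just unlink */
    }
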
1292 .queue = bcm63xx_udc_queue,
1600 * queue anything else now.
2034 * ep0 worker thread. For normal bulk/intr channels, either queue up
2073 /* queue up the next BD (same request) */
2077 } else if (!list_empty(&bep->queue)) {
2078 breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
2086 list_del(&breq->queue);
2090 if (!list_empty(&bep->queue)) {
2093 next = list_first_entry(&bep->queue,
2094 struct bcm63xx_req, queue);
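
Fragments 2073-2094 show the two completion outcomes: a request with bytes
remaining is re-queued for its next BD, while a finished request is unlinked,
given back, and the next queued request (if any) is started. A sketch of that
flow (locking and status handling elided; usb_gadget_giveback_request() is the
standard gadget-core giveback call, the other names are assumptions):

    if (breq->offset < breq->req.length) {
            /* same request: queue up the next BD */
            iudma_write(udc, iudma, breq);
    } else {
            list_del(&breq->queue);                  /* request complete */
            usb_gadget_giveback_request(&bep->ep, &breq->req);
            if (!list_empty(&bep->queue)) {
                    next = list_first_entry(&bep->queue,
                                            struct bcm63xx_req, queue);
                    iudma_write(udc, bep->iudma, next);
            }
    }
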
2207 seq_printf(s, "; %zu queued\n", list_count_nodes(&iudma->bep->queue));