Lines Matching defs:pq (occurrences of the user-SDMA packet-queue pointer, struct qib_user_sdma_queue *pq)

81 	struct qib_user_sdma_queue *pq;	/* which pq this pkt belongs to */
185 struct qib_user_sdma_queue *pq =
189 if (!pq)
192 pq->counter = 0;
193 pq->sent_counter = 0;
194 pq->num_pending = 0;
195 pq->num_sending = 0;
196 pq->added = 0;
197 pq->sdma_rb_node = NULL;
199 INIT_LIST_HEAD(&pq->sent);
200 spin_lock_init(&pq->sent_lock);
201 mutex_init(&pq->lock);
203 snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
205 pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
209 if (!pq->pkt_slab)
212 snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
214 pq->header_cache = dma_pool_create(pq->header_cache_name,
218 if (!pq->header_cache)
221 pq->dma_pages_root = RB_ROOT;
238 pq->sdma_rb_node = sdma_rb_node;
243 dma_pool_destroy(pq->header_cache);
245 kmem_cache_destroy(pq->pkt_slab);
247 kfree(pq);
248 pq = NULL;
251 return pq;
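
The matches from the queue-create path (lines 185 to 251) touch most of the queue's fields, so the shape of struct qib_user_sdma_queue can be reconstructed from them. The sketch below is only that reconstruction (the source is presumably drivers/infiniband/hw/qib/qib_user_sdma.c); field order, the exact integer types, the name-buffer sizes, and the comments are assumptions rather than the verbatim kernel definition.

#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/dmapool.h>
#include <linux/slab.h>

struct qib_user_sdma_rb_node;	/* refcounted node shared by the queues of one process */

/* Reconstructed from the field accesses above, not the verbatim definition. */
struct qib_user_sdma_queue {
	struct list_head sent;		/* packets handed to hardware, awaiting completion */
	spinlock_t sent_lock;		/* protects ->sent against the completion path */

	char pkt_slab_name[64];		/* buffer size assumed */
	struct kmem_cache *pkt_slab;	/* per-queue slab for packet descriptors */

	char header_cache_name[64];	/* buffer size assumed */
	struct dma_pool *header_cache;	/* coherent pool for PBC/header buffers */

	u32 counter;			/* packets submitted by the user */
	u32 sent_counter;		/* packets known to have completed */
	u32 num_pending;		/* queued to hardware, not yet on ->sent */
	u32 num_sending;		/* on ->sent, waiting for completion */
	u32 added;			/* hardware descriptor count at last submission */

	struct rb_root dma_pages_root;	/* cache of pinned user pages */
	struct qib_user_sdma_rb_node *sdma_rb_node;

	struct mutex lock;		/* serializes submit, clean, and drain */
};

The create path zeroes the counters (lines 192 to 196), initializes the ->sent list and both locks (199 to 201), builds names with snprintf() and creates the per-queue slab and DMA pool from them (203 to 218), and on failure unwinds in reverse order before returning NULL (243 to 248).
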
273 static void *qib_user_sdma_alloc_header(struct qib_user_sdma_queue *pq,
279 hdr = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
296 struct qib_user_sdma_queue *pq,
432 pbcvaddr = qib_user_sdma_alloc_header(pq, pbclen, &pbcdaddr);
557 struct qib_user_sdma_queue *pq,
590 ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
614 struct qib_user_sdma_queue *pq,
643 dma_pool_free(pq->header_cache,
654 struct qib_user_sdma_queue *pq,
682 ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
711 struct qib_user_sdma_queue *pq,
723 ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
734 qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
753 struct qib_user_sdma_queue *pq,
762 ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov);
764 ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
771 struct qib_user_sdma_queue *pq,
780 qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);
785 kmem_cache_free(pq->pkt_slab, pkt);
791 * copy headers, coalesce etc -- pq->lock must be held
799 struct qib_user_sdma_queue *pq,
813 u32 counter = pq->counter;
836 pbc = qib_user_sdma_alloc_header(pq, len, &dma_addr);
967 pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
992 ret = qib_user_sdma_init_payload(dd, pq, pkt,
1021 pkt->pq = pq;
1036 kmem_cache_free(pq->pkt_slab, pkt);
1039 dma_pool_free(pq->header_cache, pbc, dma_addr);
1043 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
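
Lines 836 to 1043 show the per-packet construction and its error unwinding: a header buffer from pq->header_cache, a descriptor from pq->pkt_slab, payload pinning, then teardown in reverse order if anything fails. The condensed sketch below only illustrates that ordering and assumes the driver's surrounding types and static helpers; the function name qib_user_sdma_build_one, the len and list parameters, and the exact argument list of qib_user_sdma_init_payload() are assumptions, and the real header parsing between these steps is omitted.

/* Hypothetical condensed view of the build/unwind ordering (lines 836-1043). */
static int qib_user_sdma_build_one(struct qib_devdata *dd,
				   struct qib_user_sdma_queue *pq,
				   const struct iovec *iov,
				   unsigned long niov, size_t len,
				   struct list_head *list)
{
	struct qib_user_sdma_pkt *pkt;
	dma_addr_t dma_addr;
	void *pbc;
	int ret;

	/* PBC/header buffer comes from the queue's coherent DMA pool */
	pbc = qib_user_sdma_alloc_header(pq, len, &dma_addr);
	if (!pbc)
		return -ENOMEM;

	/* packet descriptor comes from the queue's private slab */
	pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
	if (!pkt) {
		ret = -ENOMEM;
		goto free_pbc;
	}

	/* pin user pages and set up payload fragments (signature assumed) */
	ret = qib_user_sdma_init_payload(dd, pq, pkt, iov, niov, len);
	if (ret < 0)
		goto free_pkt;

	/* the driver also attaches pbc/dma_addr to the packet (omitted here) */
	pkt->pq = pq;			/* each packet remembers its owning queue */
	list_add_tail(&pkt->list, list);
	return 0;

free_pkt:
	kmem_cache_free(pq->pkt_slab, pkt);
free_pbc:
	dma_pool_free(pq->header_cache, pbc, dma_addr);
	return ret;
}
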
1048 static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
1051 pq->sent_counter = c;
1054 /* try to clean out queue -- needs pq->lock */
1056 struct qib_user_sdma_queue *pq)
1065 if (!pq->num_sending)
1075 spin_lock_irqsave(&pq->sent_lock, flags);
1076 list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
1086 pq->num_sending--;
1088 spin_unlock_irqrestore(&pq->sent_lock, flags);
1097 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
1098 qib_user_sdma_set_complete_counter(pq, counter);
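
Lines 1054 to 1098 are the completion reaper: with pq->lock held it walks pq->sent under sent_lock, moves retired packets onto a local list, then frees them and advances sent_counter. A sketch follows; the completion test against the port's removed-descriptor count and the per-packet counter field are inferred from context (lines 1401 and 1307 hint at them) and are assumptions, not verbatim code.

/* Sketch of the clean path (lines 1054-1098); pq->lock must already be held. */
static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
				     struct qib_user_sdma_queue *pq)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_user_sdma_pkt *pkt, *pkt_prev;
	struct list_head free_list;
	unsigned long flags;
	int ret = 0;

	if (!pq->num_sending)
		return 0;

	INIT_LIST_HEAD(&free_list);

	/* sent_lock keeps the completion path from racing with this walk */
	spin_lock_irqsave(&pq->sent_lock, flags);
	list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
		/* assumed test: stop at the first packet hardware has not retired */
		if ((s64)(ppd->sdma_descq_removed - pkt->added) < 0)
			break;
		list_move_tail(&pkt->list, &free_list);
		pq->num_sending--;
		ret++;
	}
	spin_unlock_irqrestore(&pq->sent_lock, flags);

	if (!list_empty(&free_list)) {
		u32 counter;

		pkt = list_last_entry(&free_list, struct qib_user_sdma_pkt, list);
		counter = pkt->counter;		/* per-packet counter field assumed */

		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
		qib_user_sdma_set_complete_counter(pq, counter);
	}
	return ret;
}
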
1104 void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
1106 if (!pq)
1109 pq->sdma_rb_node->refcount--;
1110 if (pq->sdma_rb_node->refcount == 0) {
1111 rb_erase(&pq->sdma_rb_node->node, &qib_user_sdma_rb_root);
1112 kfree(pq->sdma_rb_node);
1114 dma_pool_destroy(pq->header_cache);
1115 kmem_cache_destroy(pq->pkt_slab);
1116 kfree(pq);
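
The destroy path (lines 1104 to 1116) can be stitched back together almost verbatim from the matches: drop the reference on the shared rb-node, erase and free it on the last reference, then destroy the DMA pool and the slab before freeing the queue itself. Only the early return and the closing braces are filled in below.

void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
{
	if (!pq)
		return;

	/* the rb-node is shared by the queues of a single process */
	pq->sdma_rb_node->refcount--;
	if (pq->sdma_rb_node->refcount == 0) {
		rb_erase(&pq->sdma_rb_node->node, &qib_user_sdma_rb_root);
		kfree(pq->sdma_rb_node);
	}
	dma_pool_destroy(pq->header_cache);
	kmem_cache_destroy(pq->pkt_slab);
	kfree(pq);
}

Note how this mirrors the create-path error unwinding at lines 243 to 248.
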
1134 struct qib_user_sdma_queue *pq)
1140 if (!pq)
1144 mutex_lock(&pq->lock);
1145 if (!pq->num_pending && !pq->num_sending) {
1146 mutex_unlock(&pq->lock);
1150 qib_user_sdma_queue_clean(ppd, pq);
1151 mutex_unlock(&pq->lock);
1155 if (pq->num_pending || pq->num_sending) {
1160 mutex_lock(&pq->lock);
1165 if (pq->num_pending) {
1168 if (pkt->pq == pq) {
1169 list_move_tail(&pkt->list, &pq->sent);
1170 pq->num_pending--;
1171 pq->num_sending++;
1179 list_splice_init(&pq->sent, &free_list);
1180 pq->num_sending = 0;
1181 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
1182 mutex_unlock(&pq->lock);
1307 pkt->pq->added = pkt->added;
1308 pkt->pq->num_pending--;
1309 spin_lock(&pkt->pq->sent_lock);
1310 pkt->pq->num_sending++;
1311 list_move_tail(&pkt->list, &pkt->pq->sent);
1312 spin_unlock(&pkt->pq->sent_lock);
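
Lines 1307 to 1312 are the hand-off performed once a packet's descriptors have been written to the hardware ring: the owning queue records the new descriptor head, its pending count drops, and the packet moves onto ->sent under sent_lock, where qib_user_sdma_queue_clean() will later reap it. A near-verbatim sketch, wrapped in a hypothetical helper for readability; the plain spin_lock() is as it appears in the matches, the caller presumably already runs with interrupts disabled.

/* Hypothetical wrapper around the hand-off at lines 1307-1312. */
static void qib_user_sdma_mark_sent(struct qib_user_sdma_pkt *pkt)
{
	pkt->pq->added = pkt->added;	/* remember the descriptor head for this queue */
	pkt->pq->num_pending--;

	spin_lock(&pkt->pq->sent_lock);
	pkt->pq->num_sending++;
	list_move_tail(&pkt->list, &pkt->pq->sent);
	spin_unlock(&pkt->pq->sent_lock);
}
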
1328 /* pq->lock must be held, get packets on the wire... */
1330 struct qib_user_sdma_queue *pq,
1339 if (pq->sdma_rb_node->refcount > 1) {
1345 pq->num_pending += count;
1359 pq->num_pending += count;
1382 struct qib_user_sdma_queue *pq,
1394 mutex_lock(&pq->lock);
1401 if (pq->added > ppd->sdma_descq_removed)
1404 if (pq->num_sending)
1405 qib_user_sdma_queue_clean(ppd, pq);
1411 ret = qib_user_sdma_queue_pkts(dd, ppd, pq,
1427 if (pq->num_sending)
1428 qib_user_sdma_queue_clean(ppd, pq);
1431 ret = qib_user_sdma_push_pkts(ppd, pq, &list, mxp);
1436 pq->counter += mxp;
1443 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
1444 mutex_unlock(&pq->lock);
1450 struct qib_user_sdma_queue *pq)
1454 mutex_lock(&pq->lock);
1456 ret = qib_user_sdma_queue_clean(ppd, pq);
1457 mutex_unlock(&pq->lock);
1462 u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
1464 return pq ? pq->sent_counter : 0;
1467 u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
1469 return pq ? pq->counter : 0;
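
The two exported counter accessors (lines 1462 to 1469) are fully visible in the matches. A caller can presumably treat the difference between them, modulo u32 wrap-around, as the number of packets submitted but not yet completed; the helper at the end is a sketch under that assumption, not part of the driver.

u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
{
	return pq ? pq->sent_counter : 0;
}

u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
{
	return pq ? pq->counter : 0;
}

/* Hypothetical convenience: packets still in flight, tolerant of u32 wrap. */
static inline u32 qib_user_sdma_outstanding(struct qib_user_sdma_queue *pq)
{
	return qib_user_sdma_inflight_counter(pq) -
	       qib_user_sdma_complete_counter(pq);
}
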