Lines Matching defs:desc
in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/dma/

205 	struct intel_mid_dma_desc *desc, *_desc;
209 list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
210 if (async_tx_test_ack(&desc->txd)) {
211 list_del(&desc->desc_node);
212 ret = desc;
223 * @desc: descriptor to put
228 struct intel_mid_dma_desc *desc)
230 if (desc) {
232 list_add_tail(&desc->desc_node, &midc->free_list);
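Lines 205-232 are the channel's descriptor recycler: midc_desc_get() scans the free list for a descriptor the client has already acknowledged (async_tx_test_ack()), unlinks it for reuse, and midc_desc_put() links one back onto the tail. A minimal sketch of that pattern, assuming a kernel build environment and the field names visible above; the locking the real function presumably takes around the scan is elided:

    static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc)
    {
            struct intel_mid_dma_desc *desc, *_desc, *ret = NULL;

            /* _safe iterator: the node is unlinked mid-walk */
            list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
                    if (async_tx_test_ack(&desc->txd)) { /* client done with it? */
                            list_del(&desc->desc_node);  /* unlink for reuse */
                            ret = desc;
                            break;
                    }
            }
            return ret;
    }

    static void midc_desc_put(struct intel_mid_dma_chan *midc,
                              struct intel_mid_dma_desc *desc)
    {
            if (desc)
                    list_add_tail(&desc->desc_node, &midc->free_list);
    }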
275 * @desc: the descriptor itself
282 struct intel_mid_dma_desc *desc)
284 struct dma_async_tx_descriptor *txd = &desc->txd;
292 list_move(&desc->desc_node, &midc->free_list);
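midc_descriptor_complete() (lines 275-292) retires a finished descriptor with a single list_move() back onto the free list; in the generic dmaengine flow the client's completion callback stored in the txd is then invoked. A hedged sketch: the callback handling and any cookie bookkeeping are the standard dmaengine convention, not shown in the excerpt:

    static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
                                         struct intel_mid_dma_desc *desc)
    {
            struct dma_async_tx_descriptor *txd = &desc->txd;
            dma_async_tx_callback callback = txd->callback; /* dmaengine convention */
            void *param = txd->callback_param;

            /* one list_move() recycles the node onto the free list */
            list_move(&desc->desc_node, &midc->free_list);

            if (callback)
                    callback(param); /* notify the client; assumed, per dmaengine */
    }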
316 struct intel_mid_dma_desc *desc = NULL, *_desc = NULL;
319 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
320 if (desc->status == DMA_IN_PROGRESS) {
321 desc->status = DMA_SUCCESS;
322 midc_descriptor_complete(midc, desc);
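midc_scan_descriptors() (lines 316-322) walks the active list with the _safe iterator, because completing a descriptor moves it off the list mid-walk, and retires everything still marked DMA_IN_PROGRESS. The loop reconstructed with the braces the excerpt elides:

    /* _safe variant: midc_descriptor_complete() unlinks the node */
    list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
            if (desc->status == DMA_IN_PROGRESS) {
                    desc->status = DMA_SUCCESS; /* renamed DMA_COMPLETE in later kernels */
                    midc_descriptor_complete(midc, desc);
            }
    }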
338 struct intel_mid_dma_desc *desc = to_intel_mid_dma_desc(tx);
349 desc->txd.cookie = cookie;
353 midc_dostart(midc, desc);
354 list_add_tail(&desc->desc_node, &midc->active_list);
356 list_add_tail(&desc->desc_node, &midc->queue);
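intel_mid_dma_tx_submit() (lines 338-356) is the dmaengine tx_submit hook: assign the next cookie, then either start the hardware immediately (active list empty) or park the descriptor on the software queue. A sketch with the cookie arithmetic written the way 2.6.36-era drivers typically did it; the wrap handling, lock name, and to_intel_mid_dma_chan() helper are assumptions, and only the lines shown above come from the source:

    static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
    {
            struct intel_mid_dma_desc *desc = to_intel_mid_dma_desc(tx);
            struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(tx->chan); /* assumed helper */
            dma_cookie_t cookie;

            spin_lock_bh(&midc->lock);      /* assumed lock name */
            cookie = tx->chan->cookie;
            if (++cookie < 0)
                    cookie = 1;             /* skip the error range on wrap */
            tx->chan->cookie = cookie;
            desc->txd.cookie = cookie;

            if (list_empty(&midc->active_list)) {
                    midc_dostart(midc, desc); /* hardware idle: kick it now */
                    list_add_tail(&desc->desc_node, &midc->active_list);
            } else {
                    list_add_tail(&desc->desc_node, &midc->queue);
            }
            spin_unlock_bh(&midc->lock);

            return cookie;
    }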
430 struct intel_mid_dma_desc *desc, *_desc;
449 list_for_each_entry_safe(desc, _desc, &list, desc_node) {
450 pr_debug("MDMA: freeing descriptor %p\n", desc);
451 pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
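The teardown path at lines 430-451 frees descriptors from a stack-local list head (&list), which suggests the usual idiom: splice the channel lists onto a local list under the lock, then call pci_pool_free() outside it, returning each descriptor's DMA-coherent memory via the bus address stashed in txd.phys. A fragment sketching that; the splice step is an assumption, since only the final loop appears in the excerpt:

    LIST_HEAD(list);                             /* stack-local holding list */

    spin_lock_bh(&midc->lock);                   /* assumed lock scope */
    list_splice_init(&midc->active_list, &list);
    list_splice_init(&midc->free_list, &list);
    spin_unlock_bh(&midc->lock);

    list_for_each_entry_safe(desc, _desc, &list, desc_node) {
            pr_debug("MDMA: freeing descriptor %p\n", desc);
            /* txd.phys is the bus address pci_pool_alloc() handed back */
            pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
    }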
492 struct intel_mid_dma_desc *desc = NULL;
589 desc = midc_desc_get(midc);
590 if (desc == NULL)
592 desc->sar = src;
593 desc->dar = dest;
594 desc->len = len;
595 desc->cfg_hi = cfg_hi.cfg_hi;
596 desc->cfg_lo = cfg_lo.cfg_lo;
597 desc->ctl_lo = ctl_lo.ctl_lo;
598 desc->ctl_hi = ctl_hi.ctl_hi;
599 desc->width = width;
600 desc->dirn = mids->dirn;
601 return &desc->txd;
604 pr_err("ERR_MDMA: Failed to get desc\n");
605 midc_desc_put(midc, desc);
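The prep path (lines 492-605) claims a descriptor from the free list, fills in the source/destination bus addresses, the length, and the CFG/CTL register images computed earlier in the function, and returns the embedded dma_async_tx_descriptor; on failure it logs and hands the descriptor back via midc_desc_put(). The tail of that function reassembled from the excerpt; the err_desc_get label is assumed, as the excerpt elides it:

    desc = midc_desc_get(midc);
    if (desc == NULL)
            goto err_desc_get;

    desc->sar = src;                 /* source bus address      */
    desc->dar = dest;                /* destination bus address */
    desc->len = len;
    desc->cfg_hi = cfg_hi.cfg_hi;    /* register images built   */
    desc->cfg_lo = cfg_lo.cfg_lo;    /* earlier in the function */
    desc->ctl_lo = ctl_lo.ctl_lo;
    desc->ctl_hi = ctl_hi.ctl_hi;
    desc->width = width;
    desc->dirn = mids->dirn;
    return &desc->txd;

    err_desc_get:
    pr_err("ERR_MDMA: Failed to get desc\n");
    midc_desc_put(midc, desc);
    return NULL;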
619 struct intel_mid_dma_desc *desc, *_desc;
628 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
629 list_del(&desc->desc_node);
630 pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
632 list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
633 list_del(&desc->desc_node);
634 pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
636 list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) {
637 list_del(&desc->desc_node);
638 pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
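intel_mid_dma_free_chan_resources() (lines 619-638) drains all three per-channel lists (active, free, queued) back into the PCI pool with the same unlink-then-free idiom; nothing may stay linked once the memory is returned. The triplicated loop could be factored into a helper like this (hypothetical helper, not in the driver; type names taken from the excerpt's context):

    /* Hypothetical helper: drain one descriptor list back into the pool */
    static void midc_drain_list(struct middma_device *mid, struct list_head *head)
    {
            struct intel_mid_dma_desc *desc, *_desc;

            list_for_each_entry_safe(desc, _desc, head, desc_node) {
                    list_del(&desc->desc_node);  /* unlink first... */
                    pci_pool_free(mid->dma_pool, desc, desc->txd.phys); /* ...then release */
            }
    }

    /* usage: midc_drain_list(mid, &midc->active_list); and likewise
     * for &midc->free_list and &midc->queue */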
658 struct intel_mid_dma_desc *desc;
674 desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
675 if (!desc) {
676 pr_err("ERR_MDMA: desc failed\n");
680 dma_async_tx_descriptor_init(&desc->txd, chan);
681 desc->txd.tx_submit = intel_mid_dma_tx_submit;
682 desc->txd.flags = DMA_CTRL_ACK;
683 desc->txd.phys = phys;
686 list_add_tail(&desc->desc_node, &midc->free_list);
690 pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
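Allocation (lines 658-690) is the mirror image: pci_pool_alloc() returns both the CPU pointer and the bus address, dma_async_tx_descriptor_init() ties the descriptor to the channel, the tx_submit hook is installed, and the descriptor is born pre-acked (DMA_CTRL_ACK) so midc_desc_get() can claim it straight off the free list. One iteration of that loop sketched; the error return is an assumption:

    dma_addr_t phys;
    struct intel_mid_dma_desc *desc;

    desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
    if (!desc) {
            pr_err("ERR_MDMA: desc failed\n");
            return -ENOMEM;                  /* assumed error path */
    }
    dma_async_tx_descriptor_init(&desc->txd, chan);
    desc->txd.tx_submit = intel_mid_dma_tx_submit;
    desc->txd.flags = DMA_CTRL_ACK;   /* pre-acked: reusable immediately */
    desc->txd.phys = phys;            /* keep bus addr for pci_pool_free() */
    list_add_tail(&desc->desc_node, &midc->free_list);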
750 pr_debug("MDMA:Scan of desc... complete, unmasking\n");