Search scope: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/dma/

Lines Matching refs:midc

117  * @midc: dma channel for which masking is required
123 static void dmac1_mask_periphral_intr(struct intel_mid_dma_chan *midc)
126 struct middma_device *mid = to_middma_device(midc->chan.device);
138 * @midc: dma channel for which masking is required
144 static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc)
147 struct middma_device *mid = to_middma_device(midc->chan.device);
159 * @midc: dma channel for which enable interrupt is required
165 static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
167 dmac1_unmask_periphral_intr(midc);
170 iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
171 iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
177 * @midc: dma channel for which disable interrupt is required
183 static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
186 dmac1_mask_periphral_intr(midc);
187 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
188 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
189 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
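
Lines 165-189 together show the per-channel interrupt gating: enable_dma_interrupt() first lifts the peripheral-level mask and then clears this channel's bit in the controller's MASK_TFR and MASK_ERR registers, while disable_dma_interrupt() sets the bit again in MASK_BLOCK, MASK_TFR and MASK_ERR. A commented sketch of the pair, reconstructed only from the fragments above (the register offsets, the MASK_INTR_REG()/UNMASK_INTR_REG() helpers and the struct intel_mid_dma_chan layout are assumed to come from drivers/dma/intel_mid_dma_regs.h):

/* Sketch reconstructed from the matched lines; not the verbatim driver code. */
static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
	/* lift the peripheral-level mask first (lines 123-147) */
	dmac1_unmask_periphral_intr(midc);

	/* then unmask transfer-complete and error interrupts for this channel */
	iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
	iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
}

static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
	/* re-apply the peripheral-level mask */
	dmac1_mask_periphral_intr(midc);

	/* and mask block, transfer-complete and error interrupts again */
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
}
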
197 * @midc: dma channel for which descriptor is required
203 static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc)
208 spin_lock_bh(&midc->lock);
209 list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
216 spin_unlock_bh(&midc->lock);
222 * @midc: dma channel for which descriptor is required
227 static void midc_desc_put(struct intel_mid_dma_chan *midc,
231 spin_lock_bh(&midc->lock);
232 list_add_tail(&desc->desc_node, &midc->free_list);
233 spin_unlock_bh(&midc->lock);
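
midc_desc_get() and midc_desc_put() (lines 203-233) implement a small per-channel descriptor pool: free descriptors live on midc->free_list, and the list is only touched under midc->lock with bottom halves disabled. A simplified sketch of the pair, assuming the struct definitions from intel_mid_dma_regs.h; the real get path iterates with list_for_each_entry_safe() and may apply extra per-descriptor checks that are elided here:

/* Simplified sketch: take one descriptor off the channel's free pool,
 * or return NULL when the pool is exhausted. */
static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc)
{
	struct intel_mid_dma_desc *desc = NULL;

	spin_lock_bh(&midc->lock);
	if (!list_empty(&midc->free_list)) {
		desc = list_first_entry(&midc->free_list,
					struct intel_mid_dma_desc, desc_node);
		list_del(&desc->desc_node);
	}
	spin_unlock_bh(&midc->lock);
	return desc;
}

/* Return a descriptor to the pool once the transaction is done with it. */
static void midc_desc_put(struct intel_mid_dma_chan *midc,
			  struct intel_mid_dma_desc *desc)
{
	spin_lock_bh(&midc->lock);
	list_add_tail(&desc->desc_node, &midc->free_list);
	spin_unlock_bh(&midc->lock);
}
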
238 * @midc: channel for which txn is to be started
241 * Load a transaction into the engine. This must be called with midc->lock
244 static void midc_dostart(struct intel_mid_dma_chan *midc,
247 struct middma_device *mid = to_middma_device(midc->chan.device);
250 if (midc->in_use && test_ch_en(midc->dma_base, midc->ch_id)) {
258 iowrite32(first->sar, midc->ch_regs + SAR);
259 iowrite32(first->dar, midc->ch_regs + DAR);
260 iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
261 iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
262 iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
263 iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH);
268 iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
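
midc_dostart() (lines 244-268) programs a single descriptor straight into the channel's register block and then sets the channel-enable bit in the shared DMA_CHAN_EN register. A hedged sketch of the sequence; per the comment at line 241 the caller must hold midc->lock, and the busy/error handling is simplified here:

/* Sketch: load 'first' into the hardware and kick the channel.
 * Must be called with midc->lock held. */
static void midc_dostart(struct intel_mid_dma_chan *midc,
			 struct intel_mid_dma_desc *first)
{
	struct middma_device *mid = to_middma_device(midc->chan.device);

	/* channel already enabled: nothing more to do in this sketch */
	if (midc->in_use && test_ch_en(midc->dma_base, midc->ch_id))
		return;

	/* program source, destination, config and control registers */
	iowrite32(first->sar, midc->ch_regs + SAR);
	iowrite32(first->dar, midc->ch_regs + DAR);
	iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
	iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
	iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
	iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH);

	/* finally set this channel's enable bit in the shared register */
	iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
}
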
274 * @midc: channel owning the descriptor
281 static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
288 midc->completed = txd->cookie;
292 list_move(&desc->desc_node, &midc->free_list);
294 spin_unlock_bh(&midc->lock);
298 spin_lock_bh(&midc->lock);
301 spin_lock_bh(&midc->lock);
308 * @midc: channel to scan
314 struct intel_mid_dma_chan *midc)
319 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
322 midc_descriptor_complete(midc, desc);
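
The completion path (lines 281-322) is a scan of midc->active_list: each finished descriptor has its cookie published in midc->completed, is moved back onto free_list, and has its client callback run with the channel lock temporarily dropped. A rough sketch, with the per-descriptor "is it done?" test elided and the embedded 'txd' member name assumed:

/* Sketch: reap one finished descriptor.  'desc->txd' is an assumed name
 * for the embedded struct dma_async_tx_descriptor. */
static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
				     struct intel_mid_dma_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	/* publish the cookie so tx_status() reports this txn as complete */
	midc->completed = txd->cookie;

	/* recycle the descriptor onto the free pool */
	list_move(&desc->desc_node, &midc->free_list);

	/* run the client callback without holding the channel lock */
	spin_unlock_bh(&midc->lock);
	if (callback)
		callback(param);
	spin_lock_bh(&midc->lock);
}

/* Sketch: walk the active list and complete every descriptor the hardware
 * has finished (the done test is elided; 'mid' is unused in this cut-down
 * version but kept to match the call sites in the listing). */
static void midc_scan_descriptors(struct middma_device *mid,
				  struct intel_mid_dma_chan *midc)
{
	struct intel_mid_dma_desc *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node)
		midc_descriptor_complete(midc, desc);
}
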
339 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(tx->chan);
342 spin_lock_bh(&midc->lock);
343 cookie = midc->chan.cookie;
348 midc->chan.cookie = cookie;
352 if (list_empty(&midc->active_list)) {
353 midc_dostart(midc, desc);
354 list_add_tail(&desc->desc_node, &midc->active_list);
356 list_add_tail(&desc->desc_node, &midc->queue);
358 spin_unlock_bh(&midc->lock);
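
The submit hook (lines 339-358) allocates the next DMA cookie under the channel lock and either starts the descriptor immediately, when active_list is empty, or parks it on midc->queue to be picked up later. A hedged sketch of that logic; the cookie wrap handling is simplified and to_intel_mid_dma_desc() is assumed to be the usual container_of() helper from the driver's header:

static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct intel_mid_dma_desc *desc = to_intel_mid_dma_desc(tx);
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&midc->lock);

	/* hand out the next cookie, skipping the reserved values <= 0 */
	cookie = midc->chan.cookie;
	if (++cookie < 0)
		cookie = 1;
	midc->chan.cookie = cookie;
	tx->cookie = cookie;

	/* idle channel: start now; otherwise queue behind the running txn */
	if (list_empty(&midc->active_list)) {
		midc_dostart(midc, desc);
		list_add_tail(&desc->desc_node, &midc->active_list);
	} else {
		list_add_tail(&desc->desc_node, &midc->queue);
	}

	spin_unlock_bh(&midc->lock);
	return cookie;
}
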
371 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
373 spin_lock_bh(&midc->lock);
374 if (!list_empty(&midc->queue))
375 midc_scan_descriptors(to_middma_device(chan->device), midc);
376 spin_unlock_bh(&midc->lock);
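
The issue_pending hook is correspondingly small: if anything is parked on midc->queue, the channel is rescanned under the lock (lines 373-376). A sketch, with the function name assumed:

static void intel_mid_dma_issue_pending(struct dma_chan *chan)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);

	spin_lock_bh(&midc->lock);
	/* rescan only when work was queued behind the active transaction */
	if (!list_empty(&midc->queue))
		midc_scan_descriptors(to_middma_device(chan->device), midc);
	spin_unlock_bh(&midc->lock);
}
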
391 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
396 last_complete = midc->completed;
401 midc_scan_descriptors(to_middma_device(chan->device), midc);
403 last_complete = midc->completed;
428 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
436 spin_lock_bh(&midc->lock);
437 if (midc->in_use == false) {
438 spin_unlock_bh(&midc->lock);
441 list_splice_init(&midc->free_list, &list);
442 midc->descs_allocated = 0;
443 midc->slave = NULL;
446 disable_dma_interrupt(midc);
448 spin_unlock_bh(&midc->lock);
491 struct intel_mid_dma_chan *midc;
508 midc = to_intel_mid_dma_chan(chan);
509 WARN_ON(!midc);
512 midc->dma->pci_id, midc->ch_id, len);
530 if (midc->dma->pimr_mask) {
549 midc->ch_id - midc->dma->chan_base;
557 ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
559 ctl_hi.ctlx.block_ts, midc->dma->block_size);
587 enable_dma_interrupt(midc);
589 desc = midc_desc_get(midc);
605 midc_desc_put(midc, desc);
617 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
621 if (true == midc->in_use) {
626 spin_lock_bh(&midc->lock);
627 midc->descs_allocated = 0;
628 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
632 list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
636 list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) {
640 spin_unlock_bh(&midc->lock);
641 midc->in_use = false;
643 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
644 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
656 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
664 if (test_ch_en(mid->dma_base, midc->ch_id)) {
669 midc->completed = chan->cookie = 1;
671 spin_lock_bh(&midc->lock);
672 while (midc->descs_allocated < DESCS_PER_CHANNEL) {
673 spin_unlock_bh(&midc->lock);
684 spin_lock_bh(&midc->lock);
685 i = ++midc->descs_allocated;
686 list_add_tail(&desc->desc_node, &midc->free_list);
688 spin_unlock_bh(&midc->lock);
689 midc->in_use = false;
697 * @midc: chan where error occured
702 struct intel_mid_dma_chan *midc)
704 midc_scan_descriptors(mid, midc);
717 struct intel_mid_dma_chan *midc = NULL;
737 midc = &mid->ch[i];
738 if (midc == NULL) {
739 pr_err("ERR_MDMA:Null param midc\n");
743 status, midc->ch_id, i);
745 iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
746 iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK);
748 spin_lock_bh(&midc->lock);
749 midc_scan_descriptors(mid, midc);
751 iowrite32(UNMASK_INTR_REG(midc->ch_id),
753 spin_unlock_bh(&midc->lock);
765 midc = &mid->ch[i];
766 if (midc == NULL) {
767 pr_err("ERR_MDMA:Null param midc\n");
771 status, midc->ch_id, i);
773 iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR);
774 spin_lock_bh(&midc->lock);
775 midc_handle_error(mid, midc);
776 iowrite32(UNMASK_INTR_REG(midc->ch_id),
778 spin_unlock_bh(&midc->lock);
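
The bottom half (lines 717-778) makes two passes over the channels: a transfer-complete pass that acknowledges the interrupt in CLEAR_TFR/CLEAR_BLOCK, rescans the channel's descriptors and unmasks again, and an error pass that acknowledges CLEAR_ERR and calls midc_handle_error() before unmasking. A condensed sketch of the per-channel transfer-complete handling; the helper name handle_tfr_for_chan() is hypothetical, and the destination register of the final unmask (MASK_TFR) is an assumption, since that operand is not part of the matched lines:

/* Hypothetical helper illustrating one iteration of the TFR pass.
 * 'i' indexes the channel whose status bit is set. */
static void handle_tfr_for_chan(struct middma_device *mid, int i)
{
	struct intel_mid_dma_chan *midc = &mid->ch[i];

	/* acknowledge transfer-complete and block interrupts for this channel */
	iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
	iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK);

	/* reap finished descriptors, then re-enable the channel's interrupt
	 * (unmask target register assumed to be MASK_TFR) */
	spin_lock_bh(&midc->lock);
	midc_scan_descriptors(mid, midc);
	iowrite32(UNMASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_TFR);
	spin_unlock_bh(&midc->lock);
}

The error pass at lines 765-778 follows the same shape, with CLEAR_ERR and midc_handle_error() (which, per line 704, ends up calling midc_scan_descriptors()) in place of the TFR handling.
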