Lines Matching defs:lldev (struct hidma_lldev usage in the Qualcomm HIDMA low-level DMA driver, hidma_ll.c)

116 void hidma_ll_free(struct hidma_lldev *lldev, u32 tre_ch)
120 if (tre_ch >= lldev->nr_tres) {
121 dev_err(lldev->dev, "invalid TRE number in free:%d", tre_ch);
125 tre = &lldev->trepool[tre_ch];
127 dev_err(lldev->dev, "trying to free an unused TRE:%d", tre_ch);
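
Read together, the hidma_ll_free() matches show a bounds check on tre_ch, a pool lookup, and (judging by the "unused TRE" warning) a test of a per-TRE allocation flag. A minimal sketch of the whole body, assuming the atomic "allocated" flag that hidma_ll_request() below manipulates with atomic_add_unless(); the upstream body may differ in detail:

    void hidma_ll_free(struct hidma_lldev *lldev, u32 tre_ch)
    {
            struct hidma_tre *tre;

            if (tre_ch >= lldev->nr_tres) {
                    dev_err(lldev->dev, "invalid TRE number in free:%d", tre_ch);
                    return;
            }

            tre = &lldev->trepool[tre_ch];
            if (!atomic_read(&tre->allocated)) {
                    dev_err(lldev->dev, "trying to free an unused TRE:%d", tre_ch);
                    return;
            }

            /* assumption: releasing a TRE just clears its allocation flag */
            atomic_set(&tre->allocated, 0);
    }
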
134 int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name,
141 if (!tre_ch || !lldev)
145 for (i = 0; i < lldev->nr_tres - 1; i++) {
146 if (atomic_add_unless(&lldev->trepool[i].allocated, 1, 1))
150 if (i == (lldev->nr_tres - 1))
153 tre = &lldev->trepool[i];
163 tre->lldev = lldev;
165 tre_local[HIDMA_TRE_CFG_IDX] = (lldev->chidx & 0xFF) << 8;
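
The hidma_ll_request() matches outline a lock-free slot allocator: atomic_add_unless() bumps a slot's "allocated" counter from 0 to 1 and fails if it is already 1, and walking off the end of the pool (i == nr_tres - 1) signals exhaustion. A hedged reconstruction; the bookkeeping field names between the matched lines are assumptions:

    int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name,
                         void (*callback)(void *data), void *data, u32 *tre_ch)
    {
            unsigned int i;
            struct hidma_tre *tre;

            if (!tre_ch || !lldev)
                    return -EINVAL;

            /* claim the first free slot: 0 -> 1, refused if already 1 */
            for (i = 0; i < lldev->nr_tres - 1; i++) {
                    if (atomic_add_unless(&lldev->trepool[i].allocated, 1, 1))
                            break;
            }
            if (i == (lldev->nr_tres - 1))
                    return -ENOMEM;

            tre = &lldev->trepool[i];
            tre->dma_sig = sig;             /* assumed field names */
            tre->dev_name = dev_name;
            tre->callback = callback;
            tre->data = data;
            tre->lldev = lldev;

            /* the event channel index is encoded in bits 15:8 of the
               TRE config word */
            tre->tre_local[HIDMA_TRE_CFG_IDX] = (lldev->chidx & 0xFF) << 8;

            *tre_ch = i;
            return 0;
    }
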
178 struct hidma_lldev *lldev = from_tasklet(lldev, t, task);
181 while (kfifo_out(&lldev->handoff_fifo, &tre, 1)) {
188 static int hidma_post_completed(struct hidma_lldev *lldev, u8 err_info,
195 spin_lock_irqsave(&lldev->lock, flags);
197 tre_iterator = lldev->tre_processed_off;
198 tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE];
200 spin_unlock_irqrestore(&lldev->lock, flags);
201 dev_warn(lldev->dev, "tre_index [%d] and tre out of sync\n",
205 lldev->pending_tre_list[tre->tre_index] = NULL;
211 if (atomic_dec_return(&lldev->pending_tre_count) < 0) {
212 dev_warn(lldev->dev, "tre count mismatch on completion");
213 atomic_set(&lldev->pending_tre_count, 0);
217 lldev->tre_ring_size);
218 lldev->tre_processed_off = tre_iterator;
219 spin_unlock_irqrestore(&lldev->lock, flags);
225 kfifo_put(&lldev->handoff_fifo, tre);
226 tasklet_schedule(&lldev->task);
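
hidma_post_completed() and the hidma_ll_tre_complete() tasklet (lines 178-181) form a producer/consumer pair around handoff_fifo: the completion path retires one TRE under lldev->lock and schedules the tasklet, so user callbacks run in softirq context rather than under the spinlock. A sketch of both halves; the wrap-around macro name is inferred from the continuation fragment at line 217, and the callback fields are assumptions:

    /* completion side: retire the oldest pending TRE under the lock */
    static int hidma_post_completed(struct hidma_lldev *lldev, u8 err_info,
                                    u8 err_code)
    {
            struct hidma_tre *tre;
            unsigned long flags;
            u32 tre_iterator;

            spin_lock_irqsave(&lldev->lock, flags);

            tre_iterator = lldev->tre_processed_off;
            tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE];
            if (!tre) {
                    spin_unlock_irqrestore(&lldev->lock, flags);
                    dev_warn(lldev->dev, "tre_index [%d] and tre out of sync\n",
                             tre_iterator / HIDMA_TRE_SIZE);
                    return -EINVAL;
            }
            lldev->pending_tre_list[tre->tre_index] = NULL;

            /* clamp at zero if HW reports more completions than were queued */
            if (atomic_dec_return(&lldev->pending_tre_count) < 0) {
                    dev_warn(lldev->dev, "tre count mismatch on completion");
                    atomic_set(&lldev->pending_tre_count, 0);
            }

            /* advance the processed offset with wrap-around */
            HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
                                     lldev->tre_ring_size);
            lldev->tre_processed_off = tre_iterator;
            spin_unlock_irqrestore(&lldev->lock, flags);

            tre->err_info = err_info;
            tre->err_code = err_code;

            /* hand off; the user callback must not run under the lock */
            kfifo_put(&lldev->handoff_fifo, tre);
            tasklet_schedule(&lldev->task);
            return 0;
    }

    /* tasklet side: drain the fifo and invoke the callbacks */
    static void hidma_ll_tre_complete(struct tasklet_struct *t)
    {
            struct hidma_lldev *lldev = from_tasklet(lldev, t, task);
            struct hidma_tre *tre;

            while (kfifo_out(&lldev->handoff_fifo, &tre, 1)) {
                    if (tre->callback)      /* assumed callback fields */
                            tre->callback(tre->data);
            }
    }
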
237 static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
239 u32 evre_ring_size = lldev->evre_ring_size;
244 evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
245 evre_iterator = lldev->evre_processed_off;
249 dev_err(lldev->dev, "HW reports invalid EVRE write offset\n");
258 u32 *current_evre = lldev->evre_ring + evre_iterator;
267 if (hidma_post_completed(lldev, err_info, err_code))
279 readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
286 if (!hidma_ll_isenabled(lldev))
291 u32 evre_read_off = (lldev->evre_processed_off +
294 writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG);
297 lldev->evre_processed_off = evre_read_off;
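
hidma_handle_tre_completion() walks the event ring from the last processed offset toward the hardware write pointer, re-reading the pointer after every entry so events that arrive mid-loop are picked up, then acknowledges the consumed EVREs through the doorbell. A condensed sketch; evre_err_info()/evre_err_code() are hypothetical placeholders for the EVRE field decoding, which the matches do not show:

    /* hypothetical decoders: the real bit layout of an EVRE is not
       visible in the matches above */
    static u8 evre_err_info(const u32 *evre) { return evre[3] >> 24; }
    static u8 evre_err_code(const u32 *evre) { return (evre[3] >> 16) & 0xFF; }

    static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
    {
            u32 evre_ring_size = lldev->evre_ring_size;
            u32 num_completed = 0;
            u32 evre_iterator, evre_write_off;

            evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
            evre_iterator = lldev->evre_processed_off;

            /* never trust a HW pointer that is out of range or misaligned */
            if ((evre_write_off > evre_ring_size) ||
                (evre_write_off % HIDMA_EVRE_SIZE)) {
                    dev_err(lldev->dev, "HW reports invalid EVRE write offset\n");
                    return 0;
            }

            while (atomic_read(&lldev->pending_tre_count) &&
                   evre_iterator != evre_write_off) {
                    u32 *current_evre = lldev->evre_ring + evre_iterator;

                    if (hidma_post_completed(lldev, evre_err_info(current_evre),
                                             evre_err_code(current_evre)))
                            break;

                    HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE,
                                             evre_ring_size);
                    num_completed++;

                    /* events may have been queued while we were processing */
                    evre_write_off =
                            readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
            }

            if (num_completed && hidma_ll_isenabled(lldev)) {
                    u32 evre_read_off = (lldev->evre_processed_off +
                                         HIDMA_EVRE_SIZE * num_completed) %
                                        evre_ring_size;

                    /* acknowledge the consumed EVREs back to the hardware */
                    writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG);
                    lldev->evre_processed_off = evre_read_off;
            }
            return num_completed;
    }

The cleanup helper just below (lines 303-307) reuses hidma_post_completed() in a loop with a forced error code to drain every outstanding TRE at teardown.
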
303 void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
306 while (atomic_read(&lldev->pending_tre_count)) {
307 if (hidma_post_completed(lldev, err_info, err_code))
312 static int hidma_ll_reset(struct hidma_lldev *lldev)
317 val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
320 writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
326 ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
330 dev_err(lldev->dev, "transfer channel did not reset\n");
334 val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
337 writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
343 ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
349 lldev->trch_state = HIDMA_CH_DISABLED;
350 lldev->evch_state = HIDMA_CH_DISABLED;
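
hidma_ll_reset() writes a reset command into each channel's control/status register and polls readl_poll_timeout() until the state field decodes as HIDMA_CH_DISABLED. A sketch of the transfer-channel half; HIDMA_CH_RESET, HIDMA_CH_CONTROL_MASK, and the poll interval/timeout are assumptions, while the register names and the failure message come from the matches:

    val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
    val &= ~(HIDMA_CH_CONTROL_MASK << 16);  /* assumed command-field layout */
    val |= HIDMA_CH_RESET << 16;
    writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

    ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
                             HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
                             1000, 10000);  /* assumed interval/timeout in us */
    if (ret) {
            dev_err(lldev->dev, "transfer channel did not reset\n");
            return ret;
    }

The event channel (lines 334-343) repeats the pattern on the EVCA block, and hidma_ll_enable() (458-493) and hidma_ll_disable() (551-594) reuse the same write-then-poll idiom with HIDMA_CH_ENABLED and HIDMA_CH_SUSPENDED as the target states.
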
386 static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
391 dev_err(lldev->dev, "error 0x%x, disabling...\n",
395 writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
398 hidma_ll_disable(lldev);
401 hidma_cleanup_pending_tre(lldev, 0xFF,
407 spin_lock_irqsave(&lldev->lock, irqflags);
408 writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
409 spin_unlock_irqrestore(&lldev->lock, irqflags);
421 hidma_handle_tre_completion(lldev);
426 struct hidma_lldev *lldev = arg;
431 status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
432 enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
436 hidma_ll_int_handler_internal(lldev, cause);
442 status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
443 enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
452 struct hidma_lldev *lldev = arg;
454 hidma_ll_int_handler_internal(lldev, cause);
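
The wired-interrupt entry (line 426) loops over read-status, mask-with-enable, handle, re-read, so a cause raised while another is being handled is not lost; the MSI variant (line 452) skips the register reads and passes the per-vector cause straight to hidma_ll_int_handler_internal(). A sketch of the wired path:

    irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
    {
            struct hidma_lldev *lldev = arg;
            u32 status, enable, cause;

            status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
            enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
            cause = status & enable;

            while (cause) {
                    hidma_ll_int_handler_internal(lldev, cause);

                    /* another interrupt may have fired while handling */
                    status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
                    enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
                    cause = status & enable;
            }

            return IRQ_HANDLED;
    }
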
458 int hidma_ll_enable(struct hidma_lldev *lldev)
463 val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
466 writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
468 ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
472 dev_err(lldev->dev, "event channel did not get enabled\n");
476 val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
479 writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
481 ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
485 dev_err(lldev->dev, "transfer channel did not get enabled\n");
489 lldev->trch_state = HIDMA_CH_ENABLED;
490 lldev->evch_state = HIDMA_CH_ENABLED;
493 writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
498 void hidma_ll_start(struct hidma_lldev *lldev)
502 spin_lock_irqsave(&lldev->lock, irqflags);
503 writel(lldev->tre_write_offset, lldev->trca + HIDMA_TRCA_DOORBELL_REG);
504 spin_unlock_irqrestore(&lldev->lock, irqflags);
507 bool hidma_ll_isenabled(struct hidma_lldev *lldev)
511 val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
512 lldev->trch_state = HIDMA_CH_STATE(val);
513 val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
514 lldev->evch_state = HIDMA_CH_STATE(val);
517 if (hidma_is_chan_enabled(lldev->trch_state) &&
518 hidma_is_chan_enabled(lldev->evch_state))
524 void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
529 tre = &lldev->trepool[tre_ch];
532 spin_lock_irqsave(&lldev->lock, flags);
533 tre->tre_index = lldev->tre_write_offset / HIDMA_TRE_SIZE;
534 lldev->pending_tre_list[tre->tre_index] = tre;
535 memcpy(lldev->tre_ring + lldev->tre_write_offset,
540 atomic_inc(&lldev->pending_tre_count);
541 lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE)
542 % lldev->tre_ring_size;
543 spin_unlock_irqrestore(&lldev->lock, flags);
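
hidma_ll_queue_request() touches no hardware: under lldev->lock it copies the prebuilt TRE into the ring memory, records the slot in pending_tre_list, bumps the pending count, and advances the write offset modulo the ring size. A reconstruction; the err/queued resets are assumed field names:

    void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
    {
            struct hidma_tre *tre;
            unsigned long flags;

            tre = &lldev->trepool[tre_ch];

            spin_lock_irqsave(&lldev->lock, flags);
            tre->tre_index = lldev->tre_write_offset / HIDMA_TRE_SIZE;
            lldev->pending_tre_list[tre->tre_index] = tre;
            memcpy(lldev->tre_ring + lldev->tre_write_offset,
                   &tre->tre_local[0], HIDMA_TRE_SIZE);
            tre->err_code = 0;              /* assumed reset of per-TRE state */
            tre->err_info = 0;
            tre->queued = 1;
            atomic_inc(&lldev->pending_tre_count);
            lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE)
                                      % lldev->tre_ring_size;
            spin_unlock_irqrestore(&lldev->lock, flags);
    }

The doorbell is rung separately: hidma_ll_start() (line 503) writes tre_write_offset to HIDMA_TRCA_DOORBELL_REG, telling the engine how far into the ring valid TREs extend.
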
551 int hidma_ll_disable(struct hidma_lldev *lldev)
557 if (!hidma_ll_isenabled(lldev))
560 val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
563 writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
569 ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
575 val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
578 writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
584 ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
590 lldev->trch_state = HIDMA_CH_SUSPENDED;
591 lldev->evch_state = HIDMA_CH_SUSPENDED;
594 writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
598 void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
605 if (tre_ch >= lldev->nr_tres) {
606 dev_err(lldev->dev, "invalid TRE number in transfer params:%d",
611 tre = &lldev->trepool[tre_ch];
613 dev_err(lldev->dev, "trying to set params on an unused TRE:%d",
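
hidma_ll_set_transfer_params() mirrors hidma_ll_free()'s validation (bounds check, then allocation-flag check) before filling in the staged TRE words. Only the two error paths are confirmed by the matches; the trailing parameters and the HIDMA_TRE_*_IDX index names below are assumptions:

    void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
                                      dma_addr_t src, dma_addr_t dest,
                                      u32 len, u32 flags)
    {
            struct hidma_tre *tre;
            u32 *tre_local;

            if (tre_ch >= lldev->nr_tres) {
                    dev_err(lldev->dev, "invalid TRE number in transfer params:%d",
                            tre_ch);
                    return;
            }

            tre = &lldev->trepool[tre_ch];
            if (!atomic_read(&tre->allocated)) {
                    dev_err(lldev->dev, "trying to set params on an unused TRE:%d",
                            tre_ch);
                    return;
            }

            /* 64-bit addresses are split across pairs of 32-bit TRE words */
            tre_local = &tre->tre_local[0];
            tre_local[HIDMA_TRE_LEN_IDX] = len;
            tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src);
            tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src);
            tre_local[HIDMA_TRE_DEST_LOW_IDX] = lower_32_bits(dest);
            tre_local[HIDMA_TRE_DEST_HI_IDX] = upper_32_bits(dest);
            tre->int_flags = flags;         /* assumed flags storage */
    }
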
633 int hidma_ll_setup(struct hidma_lldev *lldev)
638 u32 nr_tres = lldev->nr_tres;
640 atomic_set(&lldev->pending_tre_count, 0);
641 lldev->tre_processed_off = 0;
642 lldev->evre_processed_off = 0;
643 lldev->tre_write_offset = 0;
646 writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
649 val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
650 writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
652 rc = hidma_ll_reset(lldev);
660 val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
661 writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
664 writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
666 addr = lldev->tre_dma;
667 writel(lower_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_LOW_REG);
668 writel(upper_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_HIGH_REG);
669 writel(lldev->tre_ring_size, lldev->trca + HIDMA_TRCA_RING_LEN_REG);
671 addr = lldev->evre_dma;
672 writel(lower_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_LOW_REG);
673 writel(upper_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_HIGH_REG);
675 lldev->evca + HIDMA_EVCA_RING_LEN_REG);
678 hidma_ll_setup_irq(lldev, lldev->msi_support);
680 rc = hidma_ll_enable(lldev);
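
hidma_ll_setup() proceeds in a fixed order: mask and clear interrupts, reset both channels, clear anything latched during reset, program the ring registers, pick the interrupt mode, then enable. The ring step splits each 64-bit DMA base across LOW/HIGH registers; the EVRE ring-length operand (line 674) is not in the matches, so HIDMA_EVRE_SIZE * nr_tres below is inferred as the natural counterpart of the TRE line:

    /* TRE ring: 64-bit base split across two 32-bit registers, plus length */
    addr = lldev->tre_dma;
    writel(lower_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_LOW_REG);
    writel(upper_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_HIGH_REG);
    writel(lldev->tre_ring_size, lldev->trca + HIDMA_TRCA_RING_LEN_REG);

    /* EVRE ring: same layout on the event channel block */
    addr = lldev->evre_dma;
    writel(lower_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_LOW_REG);
    writel(upper_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_HIGH_REG);
    writel(HIDMA_EVRE_SIZE * lldev->nr_tres,
           lldev->evca + HIDMA_EVCA_RING_LEN_REG);
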
687 void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi)
691 lldev->msi_support = msi;
694 writel(0, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
695 writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
698 val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG);
700 if (!lldev->msi_support)
702 writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG);
705 writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
706 writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
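
hidma_ll_setup_irq() quiesces interrupts, steers the event channel between MSI and the wired line via HIDMA_EVCA_INTCTRL_REG, then clears and re-arms everything. The INTCTRL mode bits (lines 699-701) are not all in the matches, so the mask values below are illustrative:

    void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi)
    {
            u32 val;

            lldev->msi_support = msi;

            /* quiesce while switching interrupt modes */
            writel(0, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
            writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

            /* select MSI or the wired line (illustrative mode bits) */
            val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG);
            val &= ~0xF;
            if (!lldev->msi_support)
                    val |= 0x1;
            writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG);

            /* clear stale state and re-arm */
            writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
            writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
    }
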
714 struct hidma_lldev *lldev;
728 lldev = devm_kzalloc(dev, sizeof(struct hidma_lldev), GFP_KERNEL);
729 if (!lldev)
732 lldev->evca = evca;
733 lldev->trca = trca;
734 lldev->dev = dev;
736 lldev->trepool = devm_kcalloc(lldev->dev, nr_tres, sz, GFP_KERNEL);
737 if (!lldev->trepool)
740 required_bytes = sizeof(lldev->pending_tre_list[0]);
741 lldev->pending_tre_list = devm_kcalloc(dev, nr_tres, required_bytes,
743 if (!lldev->pending_tre_list)
747 lldev->tre_ring = dmam_alloc_coherent(dev, sz, &lldev->tre_dma,
749 if (!lldev->tre_ring)
752 lldev->tre_ring_size = HIDMA_TRE_SIZE * nr_tres;
753 lldev->nr_tres = nr_tres;
756 if (!IS_ALIGNED(lldev->tre_dma, HIDMA_TRE_SIZE)) {
759 tre_ring_shift = lldev->tre_dma % HIDMA_TRE_SIZE;
761 lldev->tre_dma += tre_ring_shift;
762 lldev->tre_ring += tre_ring_shift;
766 lldev->evre_ring = dmam_alloc_coherent(dev, sz, &lldev->evre_dma,
768 if (!lldev->evre_ring)
771 lldev->evre_ring_size = HIDMA_EVRE_SIZE * nr_tres;
774 if (!IS_ALIGNED(lldev->evre_dma, HIDMA_EVRE_SIZE)) {
777 evre_ring_shift = lldev->evre_dma % HIDMA_EVRE_SIZE;
779 lldev->evre_dma += evre_ring_shift;
780 lldev->evre_ring += evre_ring_shift;
782 lldev->nr_tres = nr_tres;
783 lldev->chidx = chidx;
786 rc = kfifo_alloc(&lldev->handoff_fifo, sz, GFP_KERNEL);
790 rc = hidma_ll_setup(lldev);
794 spin_lock_init(&lldev->lock);
795 tasklet_setup(&lldev->task, hidma_ll_tre_complete);
796 lldev->initialized = 1;
797 writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
798 return lldev;
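
hidma_ll_init() cannot assume dmam_alloc_coherent() returns a HIDMA_TRE_SIZE-aligned buffer, so it slides both the DMA handle and the CPU pointer up to the next boundary. The complement step below sits on line 760, which contains no "lldev" and therefore is not matched, but it is required for the two += lines to land on a boundary:

    if (!IS_ALIGNED(lldev->tre_dma, HIDMA_TRE_SIZE)) {
            u8 tre_ring_shift;

            tre_ring_shift = lldev->tre_dma % HIDMA_TRE_SIZE;
            tre_ring_shift = HIDMA_TRE_SIZE - tre_ring_shift;   /* line 760 */
            lldev->tre_dma += tre_ring_shift;
            lldev->tre_ring += tre_ring_shift;
    }

The EVRE ring (lines 774-780) gets the identical treatment with HIDMA_EVRE_SIZE; the allocations are made larger than the ring itself so the shift cannot run past the buffer (the exact padding is not visible in the matches).
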
801 int hidma_ll_uninit(struct hidma_lldev *lldev)
807 if (!lldev)
810 if (!lldev->initialized)
813 lldev->initialized = 0;
815 required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
816 tasklet_kill(&lldev->task);
817 memset(lldev->trepool, 0, required_bytes);
818 lldev->trepool = NULL;
819 atomic_set(&lldev->pending_tre_count, 0);
820 lldev->tre_write_offset = 0;
822 rc = hidma_ll_reset(lldev);
828 val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
829 writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
830 writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
834 enum dma_status hidma_ll_status(struct hidma_lldev *lldev, u32 tre_ch)
841 spin_lock_irqsave(&lldev->lock, flags);
843 tre = &lldev->trepool[tre_ch];
852 spin_unlock_irqrestore(&lldev->lock, flags);
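
hidma_ll_status() samples the per-TRE error code under the lock and maps it onto the dmaengine status codes. A reconstruction; the HIDMA_EVRE_STATUS_* flag names are assumptions:

    enum dma_status hidma_ll_status(struct hidma_lldev *lldev, u32 tre_ch)
    {
            enum dma_status ret = DMA_ERROR;
            struct hidma_tre *tre;
            unsigned long flags;
            u8 err_code;

            spin_lock_irqsave(&lldev->lock, flags);

            tre = &lldev->trepool[tre_ch];
            err_code = tre->err_code;

            if (err_code & HIDMA_EVRE_STATUS_COMPLETE)
                    ret = DMA_COMPLETE;
            else if (err_code & HIDMA_EVRE_STATUS_ERROR)
                    ret = DMA_ERROR;
            else
                    ret = DMA_IN_PROGRESS;

            spin_unlock_irqrestore(&lldev->lock, flags);
            return ret;
    }
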