Lines matching refs: bchan
(cross-reference listing from the Qualcomm BAM DMA engine driver; each entry shows the source line number, the matching line, and the enclosing function)
423 static void bam_reset_channel(struct bam_chan *bchan) in bam_reset_channel() argument
425 struct bam_device *bdev = bchan->bdev; in bam_reset_channel()
427 lockdep_assert_held(&bchan->vc.lock); in bam_reset_channel()
430 writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_RST)); in bam_reset_channel()
431 writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_RST)); in bam_reset_channel()
437 bchan->initialized = 0; in bam_reset_channel()
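The pair of writes at 430-431 pulses the per-pipe reset register (1 asserts, 0 releases), and clearing bchan->initialized at 437 forces bam_chan_init_hw() to run again before the next transfer; the lockdep assertion at 427 documents that callers hold the channel's vc.lock. A minimal userspace sketch of the pulse pattern, with fake_p_rst and reg_write() as stand-ins for the ioremapped BAM_P_RST register and writel_relaxed():

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the ioremapped per-pipe reset register; in the driver the
 * address comes from bam_addr(bdev, bchan->id, BAM_P_RST). */
static volatile uint32_t fake_p_rst;

static void reg_write(volatile uint32_t *reg, uint32_t val)
{
	*reg = val;				/* models writel_relaxed() */
	printf("BAM_P_RST <- %u\n", (unsigned)val);
}

int main(void)
{
	int initialized = 1;			/* channel was live */

	reg_write(&fake_p_rst, 1);		/* assert pipe reset */
	reg_write(&fake_p_rst, 0);		/* release it again */
	initialized = 0;			/* next transfer must re-init the hw */

	printf("initialized = %d\n", initialized);
	return 0;
}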
446 static void bam_chan_init_hw(struct bam_chan *bchan, in bam_chan_init_hw() argument
449 struct bam_device *bdev = bchan->bdev; in bam_chan_init_hw()
453 bam_reset_channel(bchan); in bam_chan_init_hw()
459 writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)), in bam_chan_init_hw()
460 bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR)); in bam_chan_init_hw()
462 bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES)); in bam_chan_init_hw()
466 bam_addr(bdev, bchan->id, BAM_P_IRQ_EN)); in bam_chan_init_hw()
470 val |= BIT(bchan->id); in bam_chan_init_hw()
481 writel_relaxed(val, bam_addr(bdev, bchan->id, BAM_P_CTRL)); in bam_chan_init_hw()
483 bchan->initialized = 1; in bam_chan_init_hw()
486 bchan->head = 0; in bam_chan_init_hw()
487 bchan->tail = 0; in bam_chan_init_hw()
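bam_chan_init_hw() resets the pipe, programs the descriptor FIFO base (aligned to the hardware descriptor size) and FIFO size, unmasks the pipe's interrupts, enables it via BAM_P_CTRL, and finally zeroes the software ring indices. A small sketch of the alignment step at 459, assuming ALIGN_UP() and struct bam_desc_hw_demo as userspace stand-ins for the kernel's ALIGN() and struct bam_desc_hw:

#include <stdint.h>
#include <stdio.h>

/* Round x up to a power-of-two boundary a, like the kernel's ALIGN(). */
#define ALIGN_UP(x, a)	(((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

struct bam_desc_hw_demo {	/* stand-in layout; 8 bytes like the driver's */
	uint32_t addr;
	uint16_t size;
	uint16_t flags;
};

int main(void)
{
	/* The base written to BAM_P_DESC_FIFO_ADDR must be a multiple of the
	 * descriptor size, so the DMA address is rounded up first. */
	uintptr_t fifo_phys = 0x1000c;	/* hypothetical unaligned DMA address */
	uintptr_t aligned = ALIGN_UP(fifo_phys, sizeof(struct bam_desc_hw_demo));

	printf("fifo_phys=%#lx aligned=%#lx (desc size %zu)\n",
	       (unsigned long)fifo_phys, (unsigned long)aligned,
	       sizeof(struct bam_desc_hw_demo));

	/* After programming the FIFO the driver marks the channel live and
	 * resets its software ring indices, as at 483-487. */
	unsigned head = 0, tail = 0, initialized = 1;
	printf("head=%u tail=%u initialized=%u\n", head, tail, initialized);
	return 0;
}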
498 struct bam_chan *bchan = to_bam_chan(chan); in bam_alloc_chan() local
499 struct bam_device *bdev = bchan->bdev; in bam_alloc_chan()
501 if (bchan->fifo_virt) in bam_alloc_chan()
505 bchan->fifo_virt = dma_alloc_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, in bam_alloc_chan()
506 &bchan->fifo_phys, GFP_KERNEL); in bam_alloc_chan()
508 if (!bchan->fifo_virt) { in bam_alloc_chan()
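bam_alloc_chan() allocates the descriptor FIFO exactly once: an existing fifo_virt short-circuits the call, and allocation failure is reported to the caller. A sketch of that idempotent-alloc shape, with calloc() standing in for dma_alloc_writecombine() (which additionally returns the DMA address through fifo_phys):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define DESC_FIFO_SIZE_DEMO 4096	/* stand-in for BAM_DESC_FIFO_SIZE */

struct chan_demo {
	void *fifo_virt;	/* CPU mapping of the descriptor FIFO */
};

/* Analog of bam_alloc_chan(): allocate the FIFO once; a second call is a no-op. */
static int alloc_chan_demo(struct chan_demo *c)
{
	if (c->fifo_virt)	/* already allocated: nothing to do */
		return 0;

	c->fifo_virt = calloc(1, DESC_FIFO_SIZE_DEMO);
	if (!c->fifo_virt)
		return -ENOMEM;
	return 0;
}

int main(void)
{
	struct chan_demo c = { 0 };

	printf("first alloc: %d\n", alloc_chan_demo(&c));
	printf("second alloc (no-op): %d\n", alloc_chan_demo(&c));
	free(c.fifo_virt);
	return 0;
}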
525 struct bam_chan *bchan = to_bam_chan(chan); in bam_free_chan() local
526 struct bam_device *bdev = bchan->bdev; in bam_free_chan()
532 if (bchan->curr_txd) { in bam_free_chan()
533 dev_err(bchan->bdev->dev, "Cannot free busy channel\n"); in bam_free_chan()
537 spin_lock_irqsave(&bchan->vc.lock, flags); in bam_free_chan()
538 bam_reset_channel(bchan); in bam_free_chan()
539 spin_unlock_irqrestore(&bchan->vc.lock, flags); in bam_free_chan()
541 dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt, in bam_free_chan()
542 bchan->fifo_phys); in bam_free_chan()
543 bchan->fifo_virt = NULL; in bam_free_chan()
547 val &= ~BIT(bchan->id); in bam_free_chan()
551 writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN)); in bam_free_chan()
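The free path is the mirror image: refuse while a transaction is in flight (532-533), quiesce the pipe under vc.lock, release the FIFO, clear the pointer so a later alloc starts fresh, and mask the pipe's interrupt sources. A sketch of that ordering, with free() standing in for dma_free_writecombine() and the register writes elided:

#include <stdio.h>
#include <stdlib.h>

struct chan_demo {
	void *fifo_virt;
	void *curr_txd;		/* non-NULL while a transaction is in flight */
};

static int free_chan_demo(struct chan_demo *c)
{
	if (c->curr_txd) {
		fprintf(stderr, "Cannot free busy channel\n");
		return -1;	/* the driver logs and bails out the same way */
	}

	/* In the driver: reset the pipe under vc.lock, free the FIFO,
	 * clear fifo_virt, then mask BAM_P_IRQ_EN. */
	free(c->fifo_virt);
	c->fifo_virt = NULL;
	return 0;
}

int main(void)
{
	struct chan_demo busy = { malloc(16), (void *)1 };
	struct chan_demo idle = { malloc(16), NULL };

	printf("busy: %d\n", free_chan_demo(&busy));	/* refused */
	printf("idle: %d\n", free_chan_demo(&idle));	/* freed */
	free(busy.fifo_virt);
	return 0;
}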
565 struct bam_chan *bchan = to_bam_chan(chan); in bam_slave_config() local
568 spin_lock_irqsave(&bchan->vc.lock, flag); in bam_slave_config()
569 memcpy(&bchan->slave, cfg, sizeof(*cfg)); in bam_slave_config()
570 bchan->reconfigure = 1; in bam_slave_config()
571 spin_unlock_irqrestore(&bchan->vc.lock, flag); in bam_slave_config()
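bam_slave_config() does not touch the hardware; it copies the configuration under the channel lock and raises reconfigure, which bam_start_dma() consumes later (see 906-907 below). A userspace analog of that deferred-configuration pattern, using a pthread mutex in place of the vc.lock spinlock; struct slave_cfg_demo stands in for struct dma_slave_config (link with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct slave_cfg_demo {		/* stand-in for struct dma_slave_config */
	unsigned src_maxburst;
	unsigned dst_maxburst;
};

struct chan_demo {
	pthread_mutex_t lock;	/* analog of bchan->vc.lock */
	struct slave_cfg_demo slave;
	int reconfigure;
};

/* Analog of bam_slave_config(): stash the new config and raise a flag;
 * the hardware is only reprogrammed when the next transfer starts. */
static void slave_config_demo(struct chan_demo *c,
			      const struct slave_cfg_demo *cfg)
{
	pthread_mutex_lock(&c->lock);
	memcpy(&c->slave, cfg, sizeof(*cfg));
	c->reconfigure = 1;
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct chan_demo c = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct slave_cfg_demo cfg = { .src_maxburst = 4, .dst_maxburst = 8 };

	slave_config_demo(&c, &cfg);
	printf("reconfigure=%d dst_maxburst=%u\n",
	       c.reconfigure, c.slave.dst_maxburst);
	return 0;
}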
591 struct bam_chan *bchan = to_bam_chan(chan); in bam_prep_slave_sg() local
592 struct bam_device *bdev = bchan->bdev; in bam_prep_slave_sg()
651 return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags); in bam_prep_slave_sg()
668 struct bam_chan *bchan = to_bam_chan(chan); in bam_dma_terminate_all() local
673 spin_lock_irqsave(&bchan->vc.lock, flag); in bam_dma_terminate_all()
674 if (bchan->curr_txd) { in bam_dma_terminate_all()
675 list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued); in bam_dma_terminate_all()
676 bchan->curr_txd = NULL; in bam_dma_terminate_all()
679 vchan_get_all_descriptors(&bchan->vc, &head); in bam_dma_terminate_all()
680 spin_unlock_irqrestore(&bchan->vc.lock, flag); in bam_dma_terminate_all()
682 vchan_dma_desc_free_list(&bchan->vc, &head); in bam_dma_terminate_all()
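Terminate-all moves any in-flight descriptor back onto the issued list so it is collected along with everything else, detaches the whole lot while holding vc.lock, and only frees it after the lock is dropped (682 runs after the unlock at 680). A sketch of that collect-then-free shape, with plain arrays standing in for the vchan lists:

#include <stdio.h>

int main(void)
{
	const char *issued[4] = { "txd1", "txd2", NULL, NULL };
	const char *curr_txd = "txd0";	/* in-flight transaction */
	const char *to_free[4];
	int n = 0;

	/* --- under the channel lock --- */
	if (curr_txd) {			/* requeue the active descriptor... */
		to_free[n++] = curr_txd;
		curr_txd = NULL;	/* ...and forget it as "current" */
	}
	for (int i = 0; issued[i]; i++)	/* collect everything still queued */
		to_free[n++] = issued[i];
	/* --- lock dropped: free without holding it --- */

	for (int i = 0; i < n; i++)
		printf("freeing %s\n", to_free[i]);
	return 0;
}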
694 struct bam_chan *bchan = to_bam_chan(chan); in bam_pause() local
695 struct bam_device *bdev = bchan->bdev; in bam_pause()
698 spin_lock_irqsave(&bchan->vc.lock, flag); in bam_pause()
699 writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT)); in bam_pause()
700 bchan->paused = 1; in bam_pause()
701 spin_unlock_irqrestore(&bchan->vc.lock, flag); in bam_pause()
713 struct bam_chan *bchan = to_bam_chan(chan); in bam_resume() local
714 struct bam_device *bdev = bchan->bdev; in bam_resume()
717 spin_lock_irqsave(&bchan->vc.lock, flag); in bam_resume()
718 writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT)); in bam_resume()
719 bchan->paused = 0; in bam_resume()
720 spin_unlock_irqrestore(&bchan->vc.lock, flag); in bam_resume()
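Pause and resume are a single register toggle: writing 1 to BAM_P_HALT stalls the pipe, writing 0 restarts it, and bchan->paused mirrors the state so bam_tx_status() can report DMA_PAUSED. A sketch with fake_p_halt standing in for the ioremapped register:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t fake_p_halt;	/* stand-in for BAM_P_HALT */

struct chan_demo { int paused; };

static void pause_demo(struct chan_demo *c)
{
	fake_p_halt = 1;	/* writel_relaxed(1, ... BAM_P_HALT) */
	c->paused = 1;		/* remembered for status reporting */
}

static void resume_demo(struct chan_demo *c)
{
	fake_p_halt = 0;	/* writel_relaxed(0, ... BAM_P_HALT) */
	c->paused = 0;
}

int main(void)
{
	struct chan_demo c = { 0 };

	pause_demo(&c);
	printf("halt=%u paused=%d\n", (unsigned)fake_p_halt, c.paused);
	resume_demo(&c);
	printf("halt=%u paused=%d\n", (unsigned)fake_p_halt, c.paused);
	return 0;
}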
745 struct bam_chan *bchan = &bdev->channels[i]; in process_channel_irqs() local
755 spin_lock_irqsave(&bchan->vc.lock, flags); in process_channel_irqs()
756 async_desc = bchan->curr_txd; in process_channel_irqs()
761 bchan->curr_txd = NULL; in process_channel_irqs()
764 bchan->head += async_desc->xfer_len; in process_channel_irqs()
765 bchan->head %= MAX_DESCRIPTORS; in process_channel_irqs()
776 &bchan->vc.desc_issued); in process_channel_irqs()
779 spin_unlock_irqrestore(&bchan->vc.lock, flags); in process_channel_irqs()
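On a pipe interrupt the handler retires the completed transfer: it detaches curr_txd and advances the ring's head index by the transfer's descriptor count, wrapping modulo MAX_DESCRIPTORS (764-765). The index arithmetic in isolation (MAX_DESC_DEMO is a stand-in; the driver's MAX_DESCRIPTORS differs):

#include <stdio.h>

#define MAX_DESC_DEMO 32	/* stand-in for MAX_DESCRIPTORS */

int main(void)
{
	unsigned head = 30;	/* software consumer index into the ring */
	unsigned xfer_len = 5;	/* descriptors retired by the completed transfer */

	/* Same arithmetic as the IRQ path: advance, then wrap. */
	head += xfer_len;
	head %= MAX_DESC_DEMO;

	printf("head=%u (wrapped past %u)\n", head, MAX_DESC_DEMO);
	return 0;
}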
825 struct bam_chan *bchan = to_bam_chan(chan); in bam_tx_status() local
837 return bchan->paused ? DMA_PAUSED : ret; in bam_tx_status()
839 spin_lock_irqsave(&bchan->vc.lock, flags); in bam_tx_status()
840 vd = vchan_find_desc(&bchan->vc, cookie); in bam_tx_status()
843 else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie) in bam_tx_status()
844 for (i = 0; i < bchan->curr_txd->num_desc; i++) in bam_tx_status()
845 residue += bchan->curr_txd->curr_desc[i].size; in bam_tx_status()
847 spin_unlock_irqrestore(&bchan->vc.lock, flags); in bam_tx_status()
851 if (ret == DMA_IN_PROGRESS && bchan->paused) in bam_tx_status()
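bam_tx_status() reports DMA_PAUSED for a halted channel and otherwise computes the residue: when the cookie belongs to the in-flight transaction, it sums the sizes of the descriptors that have not completed yet (844-845). The accumulation in isolation, with struct desc_demo standing in for the driver's descriptor entries:

#include <stdio.h>

struct desc_demo { unsigned size; };

int main(void)
{
	/* Descriptors of the in-flight transaction that are still pending;
	 * curr_desc in the driver points at the first unfinished one. */
	struct desc_demo remaining[] = { { 256 }, { 256 }, { 128 } };
	unsigned num_desc = 3, residue = 0;

	for (unsigned i = 0; i < num_desc; i++)
		residue += remaining[i].size;

	printf("residue=%u bytes still to transfer\n", residue);
	return 0;
}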
862 static void bam_apply_new_config(struct bam_chan *bchan, in bam_apply_new_config() argument
865 struct bam_device *bdev = bchan->bdev; in bam_apply_new_config()
869 maxburst = bchan->slave.src_maxburst; in bam_apply_new_config()
871 maxburst = bchan->slave.dst_maxburst; in bam_apply_new_config()
875 bchan->reconfigure = 0; in bam_apply_new_config()
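bam_apply_new_config() picks the burst size from the direction-appropriate half of the stashed config and clears the reconfigure flag once it has been applied. A sketch of that selection (the enum and struct names are stand-ins for dma_transfer_direction and dma_slave_config):

#include <stdio.h>

enum dir_demo { DEV_TO_MEM_DEMO, MEM_TO_DEV_DEMO };

struct cfg_demo { unsigned src_maxburst, dst_maxburst; };

int main(void)
{
	struct cfg_demo slave = { .src_maxburst = 4, .dst_maxburst = 16 };
	int reconfigure = 1;
	enum dir_demo dir = MEM_TO_DEV_DEMO;

	/* Reads use the source burst size, writes the destination one. */
	unsigned maxburst = (dir == DEV_TO_MEM_DEMO) ? slave.src_maxburst
						     : slave.dst_maxburst;
	reconfigure = 0;	/* pending config has now been applied */

	printf("maxburst=%u reconfigure=%d\n", maxburst, reconfigure);
	return 0;
}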
882 static void bam_start_dma(struct bam_chan *bchan) in bam_start_dma() argument
884 struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc); in bam_start_dma()
885 struct bam_device *bdev = bchan->bdev; in bam_start_dma()
888 struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt, in bam_start_dma()
891 lockdep_assert_held(&bchan->vc.lock); in bam_start_dma()
899 bchan->curr_txd = async_desc; in bam_start_dma()
902 if (!bchan->initialized) in bam_start_dma()
903 bam_chan_init_hw(bchan, async_desc->dir); in bam_start_dma()
906 if (bchan->reconfigure) in bam_start_dma()
907 bam_apply_new_config(bchan, async_desc->dir); in bam_start_dma()
909 desc = bchan->curr_txd->curr_desc; in bam_start_dma()
922 if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) { in bam_start_dma()
923 u32 partial = MAX_DESCRIPTORS - bchan->tail; in bam_start_dma()
925 memcpy(&fifo[bchan->tail], desc, in bam_start_dma()
930 memcpy(&fifo[bchan->tail], desc, in bam_start_dma()
934 bchan->tail += async_desc->xfer_len; in bam_start_dma()
935 bchan->tail %= MAX_DESCRIPTORS; in bam_start_dma()
939 writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw), in bam_start_dma()
940 bam_addr(bdev, bchan->id, BAM_P_EVNT_REG)); in bam_start_dma()
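The descriptor FIFO is a ring, so bam_start_dma() has to handle a write that runs past the end: it copies the part that fits, wraps to slot 0 for the remainder, advances tail modulo MAX_DESCRIPTORS, and finally writes the new tail as a byte offset to BAM_P_EVNT_REG to kick the hardware. A runnable sketch of the split copy at 922-935 (MAX_DESC_DEMO and struct desc_demo are stand-ins; the driver's ring is larger):

#include <stdio.h>
#include <string.h>

#define MAX_DESC_DEMO 8			/* stand-in for MAX_DESCRIPTORS */

struct desc_demo { unsigned id; };	/* stand-in for struct bam_desc_hw */

int main(void)
{
	struct desc_demo fifo[MAX_DESC_DEMO] = { { 0 } };
	struct desc_demo src[5] = { {1}, {2}, {3}, {4}, {5} };
	unsigned tail = 6, xfer_len = 5;

	/* If the write would run past the end of the ring, copy the part
	 * that fits, then wrap to slot 0 for the rest. */
	if (tail + xfer_len > MAX_DESC_DEMO) {
		unsigned partial = MAX_DESC_DEMO - tail;

		memcpy(&fifo[tail], src, partial * sizeof(*src));
		memcpy(fifo, src + partial,
		       (xfer_len - partial) * sizeof(*src));
	} else {
		memcpy(&fifo[tail], src, xfer_len * sizeof(*src));
	}

	tail = (tail + xfer_len) % MAX_DESC_DEMO;

	for (unsigned i = 0; i < MAX_DESC_DEMO; i++)
		printf("fifo[%u]=%u\n", i, fifo[i].id);
	printf("tail=%u; tail * sizeof(desc) is what goes to BAM_P_EVNT_REG\n",
	       tail);
	return 0;
}

With tail=6 and xfer_len=5 the copy splits 2/3: slots 6-7 take the first two descriptors, slots 0-2 take the rest, and tail wraps to 3.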
952 struct bam_chan *bchan; in dma_tasklet() local
958 bchan = &bdev->channels[i]; in dma_tasklet()
959 spin_lock_irqsave(&bchan->vc.lock, flags); in dma_tasklet()
961 if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd) in dma_tasklet()
962 bam_start_dma(bchan); in dma_tasklet()
963 spin_unlock_irqrestore(&bchan->vc.lock, flags); in dma_tasklet()
975 struct bam_chan *bchan = to_bam_chan(chan); in bam_issue_pending() local
978 spin_lock_irqsave(&bchan->vc.lock, flags); in bam_issue_pending()
981 if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd) in bam_issue_pending()
982 bam_start_dma(bchan); in bam_issue_pending()
984 spin_unlock_irqrestore(&bchan->vc.lock, flags); in bam_issue_pending()
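dma_tasklet() and bam_issue_pending() gate on the same condition: start the hardware only when descriptors have been issued and nothing is currently in flight (the tests at 961 and 981 are identical in shape). The predicate in isolation:

#include <stdbool.h>
#include <stdio.h>

/* The gate used by both the tasklet and issue_pending: kick the hardware
 * only when work was issued and no transaction is in flight. */
static bool should_start(bool issued_nonempty, bool have_curr_txd)
{
	return issued_nonempty && !have_curr_txd;
}

int main(void)
{
	printf("issued+idle -> start=%d\n", should_start(true, false));
	printf("issued+busy -> start=%d\n", should_start(true, true));
	printf("empty+idle  -> start=%d\n", should_start(false, false));
	return 0;
}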
1070 static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan, in bam_channel_init() argument
1073 bchan->id = index; in bam_channel_init()
1074 bchan->bdev = bdev; in bam_channel_init()
1076 vchan_init(&bchan->vc, &bdev->common); in bam_channel_init()
1077 bchan->vc.desc_free = bam_dma_free_desc; in bam_channel_init()
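bam_channel_init() wires a channel into the framework: it records the pipe index and owning device, registers the channel with the common dmaengine device via vchan_init(), and installs the callback the virt-dma layer uses to free completed descriptors. A loose sketch of that shape (the function pointer is a stand-in for the vchan desc_free hook):

#include <stdio.h>

struct chan_demo {
	unsigned id;
	void (*desc_free)(void);	/* analog of bchan->vc.desc_free */
};

static void free_desc_demo(void) { puts("descriptor freed"); }

/* Analog of bam_channel_init(): tie the channel to its index and give the
 * framework the callback for freeing completed descriptors. */
static void channel_init_demo(struct chan_demo *c, unsigned index)
{
	c->id = index;
	c->desc_free = free_desc_demo;
}

int main(void)
{
	struct chan_demo c;

	channel_init_demo(&c, 3);
	printf("channel %u ready\n", c.id);
	c.desc_free();
	return 0;
}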