Lines Matching refs:schan
52 static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan) in shdma_chan_xfer_ld_queue() argument
54 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); in shdma_chan_xfer_ld_queue()
59 if (ops->channel_busy(schan)) in shdma_chan_xfer_ld_queue()
63 list_for_each_entry(sdesc, &schan->ld_queue, node) in shdma_chan_xfer_ld_queue()
65 ops->start_xfer(schan, sdesc); in shdma_chan_xfer_ld_queue()
74 struct shdma_chan *schan = to_shdma_chan(tx->chan); in shdma_tx_submit() local
79 spin_lock_irq(&schan->chan_lock); in shdma_tx_submit()
81 power_up = list_empty(&schan->ld_queue); in shdma_tx_submit()
94 &chunk->node == &schan->ld_free)) in shdma_tx_submit()
105 list_move_tail(&chunk->node, &schan->ld_queue); in shdma_tx_submit()
107 dev_dbg(schan->dev, "submit #%d@%p on %d\n", in shdma_tx_submit()
108 tx->cookie, &chunk->async_tx, schan->id); in shdma_tx_submit()
113 schan->pm_state = SHDMA_PM_BUSY; in shdma_tx_submit()
115 ret = pm_runtime_get(schan->dev); in shdma_tx_submit()
117 spin_unlock_irq(&schan->chan_lock); in shdma_tx_submit()
119 dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret); in shdma_tx_submit()
121 pm_runtime_barrier(schan->dev); in shdma_tx_submit()
123 spin_lock_irq(&schan->chan_lock); in shdma_tx_submit()
126 if (schan->pm_state != SHDMA_PM_ESTABLISHED) { in shdma_tx_submit()
128 to_shdma_dev(schan->dma_chan.device); in shdma_tx_submit()
130 dev_dbg(schan->dev, "Bring up channel %d\n", in shdma_tx_submit()
131 schan->id); in shdma_tx_submit()
137 ops->setup_xfer(schan, schan->slave_id); in shdma_tx_submit()
139 if (schan->pm_state == SHDMA_PM_PENDING) in shdma_tx_submit()
140 shdma_chan_xfer_ld_queue(schan); in shdma_tx_submit()
141 schan->pm_state = SHDMA_PM_ESTABLISHED; in shdma_tx_submit()
148 schan->pm_state = SHDMA_PM_PENDING; in shdma_tx_submit()
151 spin_unlock_irq(&schan->chan_lock); in shdma_tx_submit()
157 static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan) in shdma_get_desc() argument
161 list_for_each_entry(sdesc, &schan->ld_free, node) in shdma_get_desc()
171 static int shdma_setup_slave(struct shdma_chan *schan, dma_addr_t slave_addr) in shdma_setup_slave() argument
173 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); in shdma_setup_slave()
177 if (schan->dev->of_node) { in shdma_setup_slave()
178 match = schan->hw_req; in shdma_setup_slave()
179 ret = ops->set_slave(schan, match, slave_addr, true); in shdma_setup_slave()
183 match = schan->real_slave_id; in shdma_setup_slave()
186 if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num) in shdma_setup_slave()
189 if (test_and_set_bit(schan->real_slave_id, shdma_slave_used)) in shdma_setup_slave()
192 ret = ops->set_slave(schan, match, slave_addr, false); in shdma_setup_slave()
194 clear_bit(schan->real_slave_id, shdma_slave_used); in shdma_setup_slave()
198 schan->slave_id = schan->real_slave_id; in shdma_setup_slave()
205 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_alloc_chan_resources() local
206 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); in shdma_alloc_chan_resources()
218 schan->real_slave_id = slave->slave_id; in shdma_alloc_chan_resources()
219 ret = shdma_setup_slave(schan, 0); in shdma_alloc_chan_resources()
224 schan->slave_id = -EINVAL; in shdma_alloc_chan_resources()
227 schan->desc = kcalloc(NR_DESCS_PER_CHANNEL, in shdma_alloc_chan_resources()
229 if (!schan->desc) { in shdma_alloc_chan_resources()
233 schan->desc_num = NR_DESCS_PER_CHANNEL; in shdma_alloc_chan_resources()
236 desc = ops->embedded_desc(schan->desc, i); in shdma_alloc_chan_resources()
238 &schan->dma_chan); in shdma_alloc_chan_resources()
242 list_add(&desc->node, &schan->ld_free); in shdma_alloc_chan_resources()
276 struct shdma_chan *schan; in shdma_chan_filter() local
286 schan = to_shdma_chan(chan); in shdma_chan_filter()
295 if (schan->dev->of_node) { in shdma_chan_filter()
296 ret = sdev->ops->set_slave(schan, slave_id, 0, true); in shdma_chan_filter()
300 schan->real_slave_id = schan->slave_id; in shdma_chan_filter()
313 ret = sdev->ops->set_slave(schan, slave_id, 0, true); in shdma_chan_filter()
317 schan->real_slave_id = slave_id; in shdma_chan_filter()
323 static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) in __ld_cleanup() argument
335 spin_lock_irqsave(&schan->chan_lock, flags); in __ld_cleanup()
336 list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) { in __ld_cleanup()
357 if (schan->dma_chan.completed_cookie != desc->cookie - 1) in __ld_cleanup()
358 dev_dbg(schan->dev, in __ld_cleanup()
361 schan->dma_chan.completed_cookie + 1); in __ld_cleanup()
362 schan->dma_chan.completed_cookie = desc->cookie; in __ld_cleanup()
370 dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n", in __ld_cleanup()
371 tx->cookie, tx, schan->id); in __ld_cleanup()
393 dev_dbg(schan->dev, "descriptor %p #%d completed.\n", in __ld_cleanup()
403 list_move(&desc->node, &schan->ld_free); in __ld_cleanup()
410 if (list_empty(&schan->ld_queue)) { in __ld_cleanup()
411 dev_dbg(schan->dev, "Bring down channel %d\n", schan->id); in __ld_cleanup()
412 pm_runtime_put(schan->dev); in __ld_cleanup()
413 schan->pm_state = SHDMA_PM_ESTABLISHED; in __ld_cleanup()
414 } else if (schan->pm_state == SHDMA_PM_PENDING) { in __ld_cleanup()
415 shdma_chan_xfer_ld_queue(schan); in __ld_cleanup()
425 schan->dma_chan.completed_cookie = schan->dma_chan.cookie; in __ld_cleanup()
427 list_splice_tail(&cyclic_list, &schan->ld_queue); in __ld_cleanup()
429 spin_unlock_irqrestore(&schan->chan_lock, flags); in __ld_cleanup()
441 static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all) in shdma_chan_ld_cleanup() argument
443 while (__ld_cleanup(schan, all)) in shdma_chan_ld_cleanup()
452 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_free_chan_resources() local
458 spin_lock_irq(&schan->chan_lock); in shdma_free_chan_resources()
459 ops->halt_channel(schan); in shdma_free_chan_resources()
460 spin_unlock_irq(&schan->chan_lock); in shdma_free_chan_resources()
465 if (!list_empty(&schan->ld_queue)) in shdma_free_chan_resources()
466 shdma_chan_ld_cleanup(schan, true); in shdma_free_chan_resources()
468 if (schan->slave_id >= 0) { in shdma_free_chan_resources()
470 clear_bit(schan->slave_id, shdma_slave_used); in shdma_free_chan_resources()
474 schan->real_slave_id = 0; in shdma_free_chan_resources()
476 spin_lock_irq(&schan->chan_lock); in shdma_free_chan_resources()
478 list_splice_init(&schan->ld_free, &list); in shdma_free_chan_resources()
479 schan->desc_num = 0; in shdma_free_chan_resources()
481 spin_unlock_irq(&schan->chan_lock); in shdma_free_chan_resources()
483 kfree(schan->desc); in shdma_free_chan_resources()
501 static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan, in shdma_add_desc() argument
505 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); in shdma_add_desc()
514 new = shdma_get_desc(schan); in shdma_add_desc()
516 dev_err(schan->dev, "No free link descriptor available\n"); in shdma_add_desc()
520 		ops->desc_setup(schan, new, *src, *dst, &copy_size); in shdma_add_desc()
531 dev_dbg(schan->dev, in shdma_add_desc()
560 static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan, in shdma_prep_sg() argument
572 chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len); in shdma_prep_sg()
575 spin_lock_irqsave(&schan->chan_lock, irq_flags); in shdma_prep_sg()
596 dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n", in shdma_prep_sg()
600 new = shdma_add_desc(schan, flags, in shdma_prep_sg()
604 new = shdma_add_desc(schan, flags, in shdma_prep_sg()
623 list_splice_tail(&tx_list, &schan->ld_free); in shdma_prep_sg()
625 spin_unlock_irqrestore(&schan->chan_lock, irq_flags); in shdma_prep_sg()
632 list_splice(&tx_list, &schan->ld_free); in shdma_prep_sg()
634 spin_unlock_irqrestore(&schan->chan_lock, irq_flags); in shdma_prep_sg()
643 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_prep_memcpy() local
649 BUG_ON(!schan->desc_num); in shdma_prep_memcpy()
657 return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, in shdma_prep_memcpy()
665 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_prep_slave_sg() local
666 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); in shdma_prep_slave_sg()
668 int slave_id = schan->slave_id; in shdma_prep_slave_sg()
674 BUG_ON(!schan->desc_num); in shdma_prep_slave_sg()
678 dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n", in shdma_prep_slave_sg()
683 slave_addr = ops->slave_addr(schan); in shdma_prep_slave_sg()
685 return shdma_prep_sg(schan, sgl, sg_len, &slave_addr, in shdma_prep_slave_sg()
696 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_prep_dma_cyclic() local
697 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); in shdma_prep_dma_cyclic()
701 int slave_id = schan->slave_id; in shdma_prep_dma_cyclic()
709 BUG_ON(!schan->desc_num); in shdma_prep_dma_cyclic()
712 dev_err(schan->dev, "sg length %d exceeds limit %d", in shdma_prep_dma_cyclic()
719 dev_warn(schan->dev, in shdma_prep_dma_cyclic()
725 slave_addr = ops->slave_addr(schan); in shdma_prep_dma_cyclic()
746 desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr, in shdma_prep_dma_cyclic()
755 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_terminate_all() local
760 spin_lock_irqsave(&schan->chan_lock, flags); in shdma_terminate_all()
761 ops->halt_channel(schan); in shdma_terminate_all()
763 if (ops->get_partial && !list_empty(&schan->ld_queue)) { in shdma_terminate_all()
765 struct shdma_desc *desc = list_first_entry(&schan->ld_queue, in shdma_terminate_all()
767 desc->partial = ops->get_partial(schan, desc); in shdma_terminate_all()
770 spin_unlock_irqrestore(&schan->chan_lock, flags); in shdma_terminate_all()
772 shdma_chan_ld_cleanup(schan, true); in shdma_terminate_all()
780 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_config() local
794 config->slave_id != schan->real_slave_id)) in shdma_config()
795 schan->real_slave_id = config->slave_id; in shdma_config()
801 return shdma_setup_slave(schan, in shdma_config()
808 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_issue_pending() local
810 spin_lock_irq(&schan->chan_lock); in shdma_issue_pending()
811 if (schan->pm_state == SHDMA_PM_ESTABLISHED) in shdma_issue_pending()
812 shdma_chan_xfer_ld_queue(schan); in shdma_issue_pending()
814 schan->pm_state = SHDMA_PM_PENDING; in shdma_issue_pending()
815 spin_unlock_irq(&schan->chan_lock); in shdma_issue_pending()
822 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_tx_status() local
826 shdma_chan_ld_cleanup(schan, false); in shdma_tx_status()
828 spin_lock_irqsave(&schan->chan_lock, flags); in shdma_tx_status()
839 list_for_each_entry(sdesc, &schan->ld_queue, node) in shdma_tx_status()
846 spin_unlock_irqrestore(&schan->chan_lock, flags); in shdma_tx_status()
855 struct shdma_chan *schan; in shdma_reset() local
860 shdma_for_each_chan(schan, sdev, i) { in shdma_reset()
864 if (!schan) in shdma_reset()
867 spin_lock(&schan->chan_lock); in shdma_reset()
870 ops->halt_channel(schan); in shdma_reset()
872 list_splice_init(&schan->ld_queue, &dl); in shdma_reset()
875 dev_dbg(schan->dev, "Bring down channel %d\n", schan->id); in shdma_reset()
876 pm_runtime_put(schan->dev); in shdma_reset()
878 schan->pm_state = SHDMA_PM_ESTABLISHED; in shdma_reset()
880 spin_unlock(&schan->chan_lock); in shdma_reset()
890 spin_lock(&schan->chan_lock); in shdma_reset()
891 list_splice(&dl, &schan->ld_free); in shdma_reset()
892 spin_unlock(&schan->chan_lock); in shdma_reset()
903 struct shdma_chan *schan = dev; in chan_irq() local
905 to_shdma_dev(schan->dma_chan.device)->ops; in chan_irq()
908 spin_lock(&schan->chan_lock); in chan_irq()
910 ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE; in chan_irq()
912 spin_unlock(&schan->chan_lock); in chan_irq()
919 struct shdma_chan *schan = dev; in chan_irqt() local
921 to_shdma_dev(schan->dma_chan.device)->ops; in chan_irqt()
924 spin_lock_irq(&schan->chan_lock); in chan_irqt()
925 list_for_each_entry(sdesc, &schan->ld_queue, node) { in chan_irqt()
927 ops->desc_completed(schan, sdesc)) { in chan_irqt()
928 dev_dbg(schan->dev, "done #%d@%p\n", in chan_irqt()
935 shdma_chan_xfer_ld_queue(schan); in chan_irqt()
936 spin_unlock_irq(&schan->chan_lock); in chan_irqt()
938 shdma_chan_ld_cleanup(schan, false); in chan_irqt()
943 int shdma_request_irq(struct shdma_chan *schan, int irq, in shdma_request_irq() argument
946 int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq, in shdma_request_irq()
947 chan_irqt, flags, name, schan); in shdma_request_irq()
949 schan->irq = ret < 0 ? ret : irq; in shdma_request_irq()
956 struct shdma_chan *schan, int id) in shdma_chan_probe() argument
958 schan->pm_state = SHDMA_PM_ESTABLISHED; in shdma_chan_probe()
961 schan->dma_chan.device = &sdev->dma_dev; in shdma_chan_probe()
962 dma_cookie_init(&schan->dma_chan); in shdma_chan_probe()
964 schan->dev = sdev->dma_dev.dev; in shdma_chan_probe()
965 schan->id = id; in shdma_chan_probe()
967 if (!schan->max_xfer_len) in shdma_chan_probe()
968 schan->max_xfer_len = PAGE_SIZE; in shdma_chan_probe()
970 spin_lock_init(&schan->chan_lock); in shdma_chan_probe()
973 INIT_LIST_HEAD(&schan->ld_queue); in shdma_chan_probe()
974 INIT_LIST_HEAD(&schan->ld_free); in shdma_chan_probe()
977 list_add_tail(&schan->dma_chan.device_node, in shdma_chan_probe()
979 sdev->schan[id] = schan; in shdma_chan_probe()
983 void shdma_chan_remove(struct shdma_chan *schan) in shdma_chan_remove() argument
985 list_del(&schan->dma_chan.device_node); in shdma_chan_remove()
1011 sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL); in shdma_init()
1012 if (!sdev->schan) in shdma_init()
1039 kfree(sdev->schan); in shdma_cleanup()
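
The listing above covers the channel-side entry points of the shdma base library: shdma_chan_probe(), shdma_request_irq(), shdma_chan_remove(), shdma_init() and shdma_cleanup(). As a rough illustration of how they fit together, here is a minimal sketch of how a glue driver might bring up one channel. The wrapper struct, the my_chan_setup() helper and the IRQF_SHARED flag are hypothetical; only the shdma_* calls are taken from the listing.

/* Minimal sketch, assuming a hypothetical glue driver.  Only the shdma_*
 * calls below appear in the listing; everything else is illustrative. */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/shdma-base.h>

struct my_dma_chan {
	struct shdma_chan shdma_chan;	/* base-library channel, embedded */
	/* glue-specific register base, clocks, etc. would follow here */
};

/* Hypothetical helper: attach one channel to a struct shdma_dev that has
 * already been set up (ops/desc_size filled in, shdma_init() called). */
static int my_chan_setup(struct device *dev, struct shdma_dev *sdev,
			 int id, int irq)
{
	struct my_dma_chan *mchan;
	int ret;

	mchan = devm_kzalloc(dev, sizeof(*mchan), GFP_KERNEL);
	if (!mchan)
		return -ENOMEM;

	/* Initialises chan_lock and the ld_queue/ld_free lists and records
	 * the channel in sdev->schan[id] (see shdma_chan_probe() above). */
	shdma_chan_probe(sdev, &mchan->shdma_chan, id);

	/* Hooks up the hard-IRQ/threaded-handler pair (chan_irq() and
	 * chan_irqt() in the listing).  IRQF_SHARED is an assumption. */
	ret = shdma_request_irq(&mchan->shdma_chan, irq, IRQF_SHARED,
				dev_name(dev));
	if (ret < 0)
		shdma_chan_remove(&mchan->shdma_chan);

	return ret;
}

struct shdma_chan is embedded rather than allocated separately because shdma_chan_probe() stores a pointer to it in sdev->schan[id] and the base library then operates on that member; teardown would mirror the setup with shdma_chan_remove() per channel followed by shdma_cleanup() on the device.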