Lines matching "psi-l" in drivers/dma/ti/k3-udma-glue.c
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
11 #include <linux/dma-mapping.h>
16 #include <linux/soc/ti/k3-ringacc.h>
17 #include <linux/dma/ti-cppi5.h>
18 #include <linux/dma/k3-udma-glue.h>
20 #include "k3-udma.h"
21 #include "k3-psil-priv.h"
89 common->ringacc = of_k3_ringacc_get_by_phandle(udmax_np, in of_k3_udma_glue_parse()
91 if (IS_ERR(common->ringacc)) in of_k3_udma_glue_parse()
92 return PTR_ERR(common->ringacc); in of_k3_udma_glue_parse()
94 common->udmax = of_xudma_dev_get(udmax_np, NULL); in of_k3_udma_glue_parse()
95 if (IS_ERR(common->udmax)) in of_k3_udma_glue_parse()
96 return PTR_ERR(common->udmax); in of_k3_udma_glue_parse()
98 common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax); in of_k3_udma_glue_parse()
114 return -EINVAL; in of_k3_udma_glue_parse_chn()
116 index = of_property_match_string(chn_np, "dma-names", name); in of_k3_udma_glue_parse_chn()
120 if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index, in of_k3_udma_glue_parse_chn()
122 return -ENOENT; in of_k3_udma_glue_parse_chn()
127 dev_err(common->dev, "Invalid channel atype: %u\n", in of_k3_udma_glue_parse_chn()
129 ret = -EINVAL; in of_k3_udma_glue_parse_chn()
132 common->atype = dma_spec.args[1]; in of_k3_udma_glue_parse_chn()
136 ret = -EINVAL; in of_k3_udma_glue_parse_chn()
141 ret = -EINVAL; in of_k3_udma_glue_parse_chn()
148 dev_err(common->dev, in of_k3_udma_glue_parse_chn()
149 "No configuration for psi-l thread 0x%04x\n", in of_k3_udma_glue_parse_chn()
155 common->epib = ep_config->needs_epib; in of_k3_udma_glue_parse_chn()
156 common->psdata_size = ep_config->psd_size; in of_k3_udma_glue_parse_chn()
159 common->dst_thread = thread_id; in of_k3_udma_glue_parse_chn()
161 common->src_thread = thread_id; in of_k3_udma_glue_parse_chn()
172 struct device *dev = tx_chn->common.dev; in k3_udma_glue_dump_tx_chn()
178 tx_chn->udma_tchan_id, in k3_udma_glue_dump_tx_chn()
179 tx_chn->common.src_thread, in k3_udma_glue_dump_tx_chn()
180 tx_chn->common.dst_thread); in k3_udma_glue_dump_tx_chn()
186 struct device *dev = chn->common.dev; in k3_udma_glue_dump_tx_rt_chn()
190 xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG)); in k3_udma_glue_dump_tx_rt_chn()
192 xudma_tchanrt_read(chn->udma_tchanx, in k3_udma_glue_dump_tx_rt_chn()
195 xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_PCNT_REG)); in k3_udma_glue_dump_tx_rt_chn()
197 xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_BCNT_REG)); in k3_udma_glue_dump_tx_rt_chn()
199 xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_SBCNT_REG)); in k3_udma_glue_dump_tx_rt_chn()
204 const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm; in k3_udma_glue_cfg_tx_chn()
217 req.nav_id = tisci_rm->tisci_dev_id; in k3_udma_glue_cfg_tx_chn()
218 req.index = tx_chn->udma_tchan_id; in k3_udma_glue_cfg_tx_chn()
219 if (tx_chn->tx_pause_on_err) in k3_udma_glue_cfg_tx_chn()
221 if (tx_chn->tx_filt_einfo) in k3_udma_glue_cfg_tx_chn()
223 if (tx_chn->tx_filt_pswords) in k3_udma_glue_cfg_tx_chn()
226 if (tx_chn->tx_supr_tdpkt) in k3_udma_glue_cfg_tx_chn()
228 req.tx_fetch_size = tx_chn->common.hdesc_size >> 2; in k3_udma_glue_cfg_tx_chn()
229 req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq); in k3_udma_glue_cfg_tx_chn()
230 req.tx_atype = tx_chn->common.atype; in k3_udma_glue_cfg_tx_chn()
232 return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req); in k3_udma_glue_cfg_tx_chn()
243 return ERR_PTR(-ENOMEM); in k3_udma_glue_request_tx_chn()
245 tx_chn->common.dev = dev; in k3_udma_glue_request_tx_chn()
246 tx_chn->common.swdata_size = cfg->swdata_size; in k3_udma_glue_request_tx_chn()
247 tx_chn->tx_pause_on_err = cfg->tx_pause_on_err; in k3_udma_glue_request_tx_chn()
248 tx_chn->tx_filt_einfo = cfg->tx_filt_einfo; in k3_udma_glue_request_tx_chn()
249 tx_chn->tx_filt_pswords = cfg->tx_filt_pswords; in k3_udma_glue_request_tx_chn()
250 tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt; in k3_udma_glue_request_tx_chn()
253 ret = of_k3_udma_glue_parse_chn(dev->of_node, name, in k3_udma_glue_request_tx_chn()
254 &tx_chn->common, true); in k3_udma_glue_request_tx_chn()
258 tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib, in k3_udma_glue_request_tx_chn()
259 tx_chn->common.psdata_size, in k3_udma_glue_request_tx_chn()
260 tx_chn->common.swdata_size); in k3_udma_glue_request_tx_chn()
263 tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, -1); in k3_udma_glue_request_tx_chn()
264 if (IS_ERR(tx_chn->udma_tchanx)) { in k3_udma_glue_request_tx_chn()
265 ret = PTR_ERR(tx_chn->udma_tchanx); in k3_udma_glue_request_tx_chn()
269 tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx); in k3_udma_glue_request_tx_chn()
271 atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size); in k3_udma_glue_request_tx_chn()
274 ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc, in k3_udma_glue_request_tx_chn()
275 tx_chn->udma_tchan_id, -1, in k3_udma_glue_request_tx_chn()
276 &tx_chn->ringtx, in k3_udma_glue_request_tx_chn()
277 &tx_chn->ringtxcq); in k3_udma_glue_request_tx_chn()
283 ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg); in k3_udma_glue_request_tx_chn()
289 ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg); in k3_udma_glue_request_tx_chn()
295 /* request and cfg psi-l */ in k3_udma_glue_request_tx_chn()
296 tx_chn->common.src_thread = in k3_udma_glue_request_tx_chn()
297 xudma_dev_get_psil_base(tx_chn->common.udmax) + in k3_udma_glue_request_tx_chn()
298 tx_chn->udma_tchan_id; in k3_udma_glue_request_tx_chn()
306 ret = xudma_navss_psil_pair(tx_chn->common.udmax, in k3_udma_glue_request_tx_chn()
307 tx_chn->common.src_thread, in k3_udma_glue_request_tx_chn()
308 tx_chn->common.dst_thread); in k3_udma_glue_request_tx_chn()
310 dev_err(dev, "PSI-L request err %d\n", ret); in k3_udma_glue_request_tx_chn()
314 tx_chn->psil_paired = true; in k3_udma_glue_request_tx_chn()
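A minimal sketch of the client side of this TX request path (an Ethernet driver, for example), assuming the public API declared in linux/dma/k3-udma-glue.h. Only the cfg fields visible above (swdata_size, tx_cfg, txcq_cfg and its .size) are taken from this file; the ring depths, the "tx0" dma-names entry and the error handling are illustrative assumptions.

/* Hypothetical TX channel bring-up against the glue calls shown above */
static struct k3_udma_glue_tx_channel *example_tx_setup(struct device *dev)
{
	struct k3_udma_glue_tx_channel_cfg cfg = { 0 };
	struct k3_udma_glue_tx_channel *tx_chn;
	int ret;

	cfg.swdata_size = 16;		/* per-descriptor software data, bytes */
	cfg.tx_cfg.size = 128;		/* TX submit ring depth (assumed value) */
	cfg.txcq_cfg.size = 128;	/* TX completion ring depth, also sizes free_pkts */

	/* "tx0" must name an entry in the node's dma-names property, which
	 * of_k3_udma_glue_parse_chn() resolves to a PSI-L thread ID.
	 */
	tx_chn = k3_udma_glue_request_tx_chn(dev, "tx0", &cfg);
	if (IS_ERR(tx_chn))
		return tx_chn;

	ret = k3_udma_glue_enable_tx_chn(tx_chn);
	if (ret) {
		k3_udma_glue_release_tx_chn(tx_chn);
		return ERR_PTR(ret);
	}

	return tx_chn;
}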
331 if (tx_chn->psil_paired) { in k3_udma_glue_release_tx_chn()
332 xudma_navss_psil_unpair(tx_chn->common.udmax, in k3_udma_glue_release_tx_chn()
333 tx_chn->common.src_thread, in k3_udma_glue_release_tx_chn()
334 tx_chn->common.dst_thread); in k3_udma_glue_release_tx_chn()
335 tx_chn->psil_paired = false; in k3_udma_glue_release_tx_chn()
338 if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx)) in k3_udma_glue_release_tx_chn()
339 xudma_tchan_put(tx_chn->common.udmax, in k3_udma_glue_release_tx_chn()
340 tx_chn->udma_tchanx); in k3_udma_glue_release_tx_chn()
342 if (tx_chn->ringtxcq) in k3_udma_glue_release_tx_chn()
343 k3_ringacc_ring_free(tx_chn->ringtxcq); in k3_udma_glue_release_tx_chn()
345 if (tx_chn->ringtx) in k3_udma_glue_release_tx_chn()
346 k3_ringacc_ring_free(tx_chn->ringtx); in k3_udma_glue_release_tx_chn()
356 if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0)) in k3_udma_glue_push_tx_chn()
357 return -ENOMEM; in k3_udma_glue_push_tx_chn()
359 ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq); in k3_udma_glue_push_tx_chn()
360 cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id); in k3_udma_glue_push_tx_chn()
362 return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma); in k3_udma_glue_push_tx_chn()
371 ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma); in k3_udma_glue_pop_tx_chn()
373 atomic_inc(&tx_chn->free_pkts); in k3_udma_glue_pop_tx_chn()
381 xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG, in k3_udma_glue_enable_tx_chn()
384 xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, in k3_udma_glue_enable_tx_chn()
396 xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, 0); in k3_udma_glue_disable_tx_chn()
398 xudma_tchanrt_write(tx_chn->udma_tchanx, in k3_udma_glue_disable_tx_chn()
412 xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, in k3_udma_glue_tdown_tx_chn()
415 val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG); in k3_udma_glue_tdown_tx_chn()
418 val = xudma_tchanrt_read(tx_chn->udma_tchanx, in k3_udma_glue_tdown_tx_chn()
422 dev_err(tx_chn->common.dev, "TX tdown timeout\n"); in k3_udma_glue_tdown_tx_chn()
428 val = xudma_tchanrt_read(tx_chn->udma_tchanx, in k3_udma_glue_tdown_tx_chn()
431 dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n"); in k3_udma_glue_tdown_tx_chn()
443 /* reset TXCQ as it is not input for udma - expected to be empty */ in k3_udma_glue_reset_tx_chn()
444 if (tx_chn->ringtxcq) in k3_udma_glue_reset_tx_chn()
445 k3_ringacc_ring_reset(tx_chn->ringtxcq); in k3_udma_glue_reset_tx_chn()
454 occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx); in k3_udma_glue_reset_tx_chn()
455 dev_dbg(tx_chn->common.dev, "TX reset occ_tx %u\n", occ_tx); in k3_udma_glue_reset_tx_chn()
458 ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma); in k3_udma_glue_reset_tx_chn()
460 dev_err(tx_chn->common.dev, "TX reset pop %d\n", ret); in k3_udma_glue_reset_tx_chn()
466 k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx); in k3_udma_glue_reset_tx_chn()
472 return tx_chn->common.hdesc_size; in k3_udma_glue_tx_get_hdesc_size()
478 return k3_ringacc_get_ring_id(tx_chn->ringtxcq); in k3_udma_glue_tx_get_txcq_id()
484 tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq); in k3_udma_glue_tx_get_irq()
486 return tx_chn->virq; in k3_udma_glue_tx_get_irq()
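The push/pop pair and the ring IRQ above form the whole TX datapath: submit a prepared CPPI5 host descriptor, then reap completions from the TX completion ring, typically in the handler of the interrupt returned by k3_udma_glue_tx_get_irq(). A rough sketch follows; descriptor preparation and unmapping are driver-specific and not taken from this file.

/* Illustrative submit/complete flow; only the glue calls appear in this file */
static int example_tx_submit(struct k3_udma_glue_tx_channel *tx_chn,
			     struct cppi5_host_desc_t *desc_tx, dma_addr_t desc_dma)
{
	/* Fails with -ENOMEM once the free_pkts budget (TXCQ size) is used up;
	 * the descriptor's return ring is pointed at the TXCQ inside the call.
	 */
	return k3_udma_glue_push_tx_chn(tx_chn, desc_tx, desc_dma);
}

static void example_tx_complete(struct k3_udma_glue_tx_channel *tx_chn)
{
	dma_addr_t desc_dma;

	/* Each successful pop releases one free_pkts credit */
	while (!k3_udma_glue_pop_tx_chn(tx_chn, &desc_dma)) {
		/* look up the CPU descriptor from desc_dma, dma_unmap and
		 * free the packet here (driver-specific, assumed)
		 */
	}
}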
492 const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; in k3_udma_glue_cfg_rx_chn()
505 req.nav_id = tisci_rm->tisci_dev_id; in k3_udma_glue_cfg_rx_chn()
506 req.index = rx_chn->udma_rchan_id; in k3_udma_glue_cfg_rx_chn()
507 req.rx_fetch_size = rx_chn->common.hdesc_size >> 2; in k3_udma_glue_cfg_rx_chn()
511 * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx); in k3_udma_glue_cfg_rx_chn()
514 if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) { in k3_udma_glue_cfg_rx_chn()
516 req.flowid_start = rx_chn->flow_id_base; in k3_udma_glue_cfg_rx_chn()
517 req.flowid_cnt = rx_chn->flow_num; in k3_udma_glue_cfg_rx_chn()
520 req.rx_atype = rx_chn->common.atype; in k3_udma_glue_cfg_rx_chn()
522 ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req); in k3_udma_glue_cfg_rx_chn()
524 dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n", in k3_udma_glue_cfg_rx_chn()
525 rx_chn->udma_rchan_id, ret); in k3_udma_glue_cfg_rx_chn()
533 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; in k3_udma_glue_release_rx_flow()
535 if (IS_ERR_OR_NULL(flow->udma_rflow)) in k3_udma_glue_release_rx_flow()
538 if (flow->ringrxfdq) in k3_udma_glue_release_rx_flow()
539 k3_ringacc_ring_free(flow->ringrxfdq); in k3_udma_glue_release_rx_flow()
541 if (flow->ringrx) in k3_udma_glue_release_rx_flow()
542 k3_ringacc_ring_free(flow->ringrx); in k3_udma_glue_release_rx_flow()
544 xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow); in k3_udma_glue_release_rx_flow()
545 flow->udma_rflow = NULL; in k3_udma_glue_release_rx_flow()
546 rx_chn->flows_ready--; in k3_udma_glue_release_rx_flow()
553 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; in k3_udma_glue_cfg_rx_flow()
554 const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; in k3_udma_glue_cfg_rx_flow()
555 struct device *dev = rx_chn->common.dev; in k3_udma_glue_cfg_rx_flow()
561 flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax, in k3_udma_glue_cfg_rx_flow()
562 flow->udma_rflow_id); in k3_udma_glue_cfg_rx_flow()
563 if (IS_ERR(flow->udma_rflow)) { in k3_udma_glue_cfg_rx_flow()
564 ret = PTR_ERR(flow->udma_rflow); in k3_udma_glue_cfg_rx_flow()
569 if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) { in k3_udma_glue_cfg_rx_flow()
570 ret = -ENODEV; in k3_udma_glue_cfg_rx_flow()
575 ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc, in k3_udma_glue_cfg_rx_flow()
576 flow_cfg->ring_rxfdq0_id, in k3_udma_glue_cfg_rx_flow()
577 flow_cfg->ring_rxq_id, in k3_udma_glue_cfg_rx_flow()
578 &flow->ringrxfdq, in k3_udma_glue_cfg_rx_flow()
579 &flow->ringrx); in k3_udma_glue_cfg_rx_flow()
585 ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg); in k3_udma_glue_cfg_rx_flow()
591 ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg); in k3_udma_glue_cfg_rx_flow()
597 if (rx_chn->remote) { in k3_udma_glue_cfg_rx_flow()
601 rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx); in k3_udma_glue_cfg_rx_flow()
602 rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq); in k3_udma_glue_cfg_rx_flow()
621 req.nav_id = tisci_rm->tisci_dev_id; in k3_udma_glue_cfg_rx_flow()
622 req.flow_index = flow->udma_rflow_id; in k3_udma_glue_cfg_rx_flow()
623 if (rx_chn->common.epib) in k3_udma_glue_cfg_rx_flow()
625 if (rx_chn->common.psdata_size) in k3_udma_glue_cfg_rx_flow()
627 if (flow_cfg->rx_error_handling) in k3_udma_glue_cfg_rx_flow()
632 req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel; in k3_udma_glue_cfg_rx_flow()
640 ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req); in k3_udma_glue_cfg_rx_flow()
642 dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id, in k3_udma_glue_cfg_rx_flow()
647 rx_chn->flows_ready++; in k3_udma_glue_cfg_rx_flow()
649 flow->udma_rflow_id, rx_chn->flows_ready); in k3_udma_glue_cfg_rx_flow()
654 k3_ringacc_ring_free(flow->ringrxfdq); in k3_udma_glue_cfg_rx_flow()
655 k3_ringacc_ring_free(flow->ringrx); in k3_udma_glue_cfg_rx_flow()
658 xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow); in k3_udma_glue_cfg_rx_flow()
659 flow->udma_rflow = NULL; in k3_udma_glue_cfg_rx_flow()
666 struct device *dev = chn->common.dev; in k3_udma_glue_dump_rx_chn()
678 chn->udma_rchan_id, in k3_udma_glue_dump_rx_chn()
679 chn->common.src_thread, in k3_udma_glue_dump_rx_chn()
680 chn->common.dst_thread, in k3_udma_glue_dump_rx_chn()
681 chn->common.epib, in k3_udma_glue_dump_rx_chn()
682 chn->common.hdesc_size, in k3_udma_glue_dump_rx_chn()
683 chn->common.psdata_size, in k3_udma_glue_dump_rx_chn()
684 chn->common.swdata_size, in k3_udma_glue_dump_rx_chn()
685 chn->flow_id_base, in k3_udma_glue_dump_rx_chn()
686 chn->flow_num); in k3_udma_glue_dump_rx_chn()
692 struct device *dev = chn->common.dev; in k3_udma_glue_dump_rx_rt_chn()
697 xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG)); in k3_udma_glue_dump_rx_rt_chn()
699 xudma_rchanrt_read(chn->udma_rchanx, in k3_udma_glue_dump_rx_rt_chn()
702 xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_PCNT_REG)); in k3_udma_glue_dump_rx_rt_chn()
704 xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_BCNT_REG)); in k3_udma_glue_dump_rx_rt_chn()
706 xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_SBCNT_REG)); in k3_udma_glue_dump_rx_rt_chn()
716 if (cfg->flow_id_use_rxchan_id) in k3_udma_glue_allocate_rx_flows()
720 if (rx_chn->flow_id_base != -1 && in k3_udma_glue_allocate_rx_flows()
721 !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base)) in k3_udma_glue_allocate_rx_flows()
725 ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax, in k3_udma_glue_allocate_rx_flows()
726 rx_chn->flow_id_base, in k3_udma_glue_allocate_rx_flows()
727 rx_chn->flow_num); in k3_udma_glue_allocate_rx_flows()
729 dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n", in k3_udma_glue_allocate_rx_flows()
730 rx_chn->flow_id_base, rx_chn->flow_num, ret); in k3_udma_glue_allocate_rx_flows()
733 rx_chn->flow_id_base = ret; in k3_udma_glue_allocate_rx_flows()
745 if (cfg->flow_id_num <= 0) in k3_udma_glue_request_rx_chn_priv()
746 return ERR_PTR(-EINVAL); in k3_udma_glue_request_rx_chn_priv()
748 if (cfg->flow_id_num != 1 && in k3_udma_glue_request_rx_chn_priv()
749 (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id)) in k3_udma_glue_request_rx_chn_priv()
750 return ERR_PTR(-EINVAL); in k3_udma_glue_request_rx_chn_priv()
754 return ERR_PTR(-ENOMEM); in k3_udma_glue_request_rx_chn_priv()
756 rx_chn->common.dev = dev; in k3_udma_glue_request_rx_chn_priv()
757 rx_chn->common.swdata_size = cfg->swdata_size; in k3_udma_glue_request_rx_chn_priv()
758 rx_chn->remote = false; in k3_udma_glue_request_rx_chn_priv()
761 ret = of_k3_udma_glue_parse_chn(dev->of_node, name, in k3_udma_glue_request_rx_chn_priv()
762 &rx_chn->common, false); in k3_udma_glue_request_rx_chn_priv()
766 rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib, in k3_udma_glue_request_rx_chn_priv()
767 rx_chn->common.psdata_size, in k3_udma_glue_request_rx_chn_priv()
768 rx_chn->common.swdata_size); in k3_udma_glue_request_rx_chn_priv()
771 rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, -1); in k3_udma_glue_request_rx_chn_priv()
772 if (IS_ERR(rx_chn->udma_rchanx)) { in k3_udma_glue_request_rx_chn_priv()
773 ret = PTR_ERR(rx_chn->udma_rchanx); in k3_udma_glue_request_rx_chn_priv()
777 rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx); in k3_udma_glue_request_rx_chn_priv()
779 rx_chn->flow_num = cfg->flow_id_num; in k3_udma_glue_request_rx_chn_priv()
780 rx_chn->flow_id_base = cfg->flow_id_base; in k3_udma_glue_request_rx_chn_priv()
783 if (cfg->flow_id_use_rxchan_id) in k3_udma_glue_request_rx_chn_priv()
784 rx_chn->flow_id_base = rx_chn->udma_rchan_id; in k3_udma_glue_request_rx_chn_priv()
786 rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, in k3_udma_glue_request_rx_chn_priv()
787 sizeof(*rx_chn->flows), GFP_KERNEL); in k3_udma_glue_request_rx_chn_priv()
788 if (!rx_chn->flows) { in k3_udma_glue_request_rx_chn_priv()
789 ret = -ENOMEM; in k3_udma_glue_request_rx_chn_priv()
797 for (i = 0; i < rx_chn->flow_num; i++) in k3_udma_glue_request_rx_chn_priv()
798 rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; in k3_udma_glue_request_rx_chn_priv()
800 /* request and cfg psi-l */ in k3_udma_glue_request_rx_chn_priv()
801 rx_chn->common.dst_thread = in k3_udma_glue_request_rx_chn_priv()
802 xudma_dev_get_psil_base(rx_chn->common.udmax) + in k3_udma_glue_request_rx_chn_priv()
803 rx_chn->udma_rchan_id; in k3_udma_glue_request_rx_chn_priv()
812 if (cfg->def_flow_cfg) { in k3_udma_glue_request_rx_chn_priv()
813 ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg); in k3_udma_glue_request_rx_chn_priv()
818 ret = xudma_navss_psil_pair(rx_chn->common.udmax, in k3_udma_glue_request_rx_chn_priv()
819 rx_chn->common.src_thread, in k3_udma_glue_request_rx_chn_priv()
820 rx_chn->common.dst_thread); in k3_udma_glue_request_rx_chn_priv()
822 dev_err(dev, "PSI-L request err %d\n", ret); in k3_udma_glue_request_rx_chn_priv()
826 rx_chn->psil_paired = true; in k3_udma_glue_request_rx_chn_priv()
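As with TX, a sketch of how a client might exercise this RX request path. The cfg and flow_cfg field names are the ones consumed above (flow_id_num, flow_id_base, def_flow_cfg, flow_id_use_rxchan_id, remote, swdata_size, rx_cfg, rxfdq_cfg, ring_rxq_id, ring_rxfdq0_id, rx_error_handling, src_tag_lo_sel); the ring depths, the "rx" name and the K3_RINGACC_RING_ID_ANY placeholders are assumptions.

/* Hypothetical single-flow RX bring-up; flow 0 is configured via def_flow_cfg
 * during the request, so the channel can be enabled immediately afterwards.
 */
static struct k3_udma_glue_rx_channel *example_rx_setup(struct device *dev)
{
	struct k3_udma_glue_rx_flow_cfg flow_cfg = { 0 };
	struct k3_udma_glue_rx_channel_cfg cfg = { 0 };
	struct k3_udma_glue_rx_channel *rx_chn;
	int ret;

	flow_cfg.rx_cfg.size = 128;			/* RX ring depth (assumed) */
	flow_cfg.rxfdq_cfg.size = 128;			/* free-descriptor queue depth (assumed) */
	flow_cfg.ring_rxq_id = K3_RINGACC_RING_ID_ANY;	/* let ringacc pick the rings */
	flow_cfg.ring_rxfdq0_id = K3_RINGACC_RING_ID_ANY;
	flow_cfg.rx_error_handling = false;
	flow_cfg.src_tag_lo_sel = 0;

	cfg.swdata_size = 16;
	cfg.flow_id_num = 1;
	cfg.flow_id_base = -1;		/* allocate from the GP rflow range */
	cfg.flow_id_use_rxchan_id = false;
	cfg.def_flow_cfg = &flow_cfg;
	cfg.remote = false;

	rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &cfg);
	if (IS_ERR(rx_chn))
		return rx_chn;

	ret = k3_udma_glue_enable_rx_chn(rx_chn);	/* requires all flows ready */
	if (ret) {
		k3_udma_glue_release_rx_chn(rx_chn);
		return ERR_PTR(ret);
	}

	return rx_chn;
}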
847 if (cfg->flow_id_num <= 0 || in k3_udma_glue_request_remote_rx_chn()
848 cfg->flow_id_use_rxchan_id || in k3_udma_glue_request_remote_rx_chn()
849 cfg->def_flow_cfg || in k3_udma_glue_request_remote_rx_chn()
850 cfg->flow_id_base < 0) in k3_udma_glue_request_remote_rx_chn()
851 return ERR_PTR(-EINVAL); in k3_udma_glue_request_remote_rx_chn()
860 return ERR_PTR(-ENOMEM); in k3_udma_glue_request_remote_rx_chn()
862 rx_chn->common.dev = dev; in k3_udma_glue_request_remote_rx_chn()
863 rx_chn->common.swdata_size = cfg->swdata_size; in k3_udma_glue_request_remote_rx_chn()
864 rx_chn->remote = true; in k3_udma_glue_request_remote_rx_chn()
865 rx_chn->udma_rchan_id = -1; in k3_udma_glue_request_remote_rx_chn()
866 rx_chn->flow_num = cfg->flow_id_num; in k3_udma_glue_request_remote_rx_chn()
867 rx_chn->flow_id_base = cfg->flow_id_base; in k3_udma_glue_request_remote_rx_chn()
868 rx_chn->psil_paired = false; in k3_udma_glue_request_remote_rx_chn()
871 ret = of_k3_udma_glue_parse_chn(dev->of_node, name, in k3_udma_glue_request_remote_rx_chn()
872 &rx_chn->common, false); in k3_udma_glue_request_remote_rx_chn()
876 rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib, in k3_udma_glue_request_remote_rx_chn()
877 rx_chn->common.psdata_size, in k3_udma_glue_request_remote_rx_chn()
878 rx_chn->common.swdata_size); in k3_udma_glue_request_remote_rx_chn()
880 rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, in k3_udma_glue_request_remote_rx_chn()
881 sizeof(*rx_chn->flows), GFP_KERNEL); in k3_udma_glue_request_remote_rx_chn()
882 if (!rx_chn->flows) { in k3_udma_glue_request_remote_rx_chn()
883 ret = -ENOMEM; in k3_udma_glue_request_remote_rx_chn()
891 for (i = 0; i < rx_chn->flow_num; i++) in k3_udma_glue_request_remote_rx_chn()
892 rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; in k3_udma_glue_request_remote_rx_chn()
907 if (cfg->remote) in k3_udma_glue_request_rx_chn()
918 if (IS_ERR_OR_NULL(rx_chn->common.udmax)) in k3_udma_glue_release_rx_chn()
921 if (rx_chn->psil_paired) { in k3_udma_glue_release_rx_chn()
922 xudma_navss_psil_unpair(rx_chn->common.udmax, in k3_udma_glue_release_rx_chn()
923 rx_chn->common.src_thread, in k3_udma_glue_release_rx_chn()
924 rx_chn->common.dst_thread); in k3_udma_glue_release_rx_chn()
925 rx_chn->psil_paired = false; in k3_udma_glue_release_rx_chn()
928 for (i = 0; i < rx_chn->flow_num; i++) in k3_udma_glue_release_rx_chn()
931 if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base)) in k3_udma_glue_release_rx_chn()
932 xudma_free_gp_rflow_range(rx_chn->common.udmax, in k3_udma_glue_release_rx_chn()
933 rx_chn->flow_id_base, in k3_udma_glue_release_rx_chn()
934 rx_chn->flow_num); in k3_udma_glue_release_rx_chn()
936 if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx)) in k3_udma_glue_release_rx_chn()
937 xudma_rchan_put(rx_chn->common.udmax, in k3_udma_glue_release_rx_chn()
938 rx_chn->udma_rchanx); in k3_udma_glue_release_rx_chn()
946 if (flow_idx >= rx_chn->flow_num) in k3_udma_glue_rx_flow_init()
947 return -EINVAL; in k3_udma_glue_rx_flow_init()
958 if (flow_idx >= rx_chn->flow_num) in k3_udma_glue_rx_flow_get_fdq_id()
959 return -EINVAL; in k3_udma_glue_rx_flow_get_fdq_id()
961 flow = &rx_chn->flows[flow_idx]; in k3_udma_glue_rx_flow_get_fdq_id()
963 return k3_ringacc_get_ring_id(flow->ringrxfdq); in k3_udma_glue_rx_flow_get_fdq_id()
969 return rx_chn->flow_id_base; in k3_udma_glue_rx_get_flow_id_base()
976 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; in k3_udma_glue_rx_flow_enable()
977 const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; in k3_udma_glue_rx_flow_enable()
978 struct device *dev = rx_chn->common.dev; in k3_udma_glue_rx_flow_enable()
984 if (!rx_chn->remote) in k3_udma_glue_rx_flow_enable()
985 return -EINVAL; in k3_udma_glue_rx_flow_enable()
987 rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx); in k3_udma_glue_rx_flow_enable()
988 rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq); in k3_udma_glue_rx_flow_enable()
998 req.nav_id = tisci_rm->tisci_dev_id; in k3_udma_glue_rx_flow_enable()
999 req.flow_index = flow->udma_rflow_id; in k3_udma_glue_rx_flow_enable()
1006 ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req); in k3_udma_glue_rx_flow_enable()
1008 dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id, in k3_udma_glue_rx_flow_enable()
1019 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; in k3_udma_glue_rx_flow_disable()
1020 const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; in k3_udma_glue_rx_flow_disable()
1021 struct device *dev = rx_chn->common.dev; in k3_udma_glue_rx_flow_disable()
1025 if (!rx_chn->remote) in k3_udma_glue_rx_flow_disable()
1026 return -EINVAL; in k3_udma_glue_rx_flow_disable()
1035 req.nav_id = tisci_rm->tisci_dev_id; in k3_udma_glue_rx_flow_disable()
1036 req.flow_index = flow->udma_rflow_id; in k3_udma_glue_rx_flow_disable()
1043 ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req); in k3_udma_glue_rx_flow_disable()
1045 dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id, in k3_udma_glue_rx_flow_disable()
1055 if (rx_chn->remote) in k3_udma_glue_enable_rx_chn()
1056 return -EINVAL; in k3_udma_glue_enable_rx_chn()
1058 if (rx_chn->flows_ready < rx_chn->flow_num) in k3_udma_glue_enable_rx_chn()
1059 return -EINVAL; in k3_udma_glue_enable_rx_chn()
1061 xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, in k3_udma_glue_enable_rx_chn()
1064 xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG, in k3_udma_glue_enable_rx_chn()
1076 xudma_rchanrt_write(rx_chn->udma_rchanx, in k3_udma_glue_disable_rx_chn()
1078 xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0); in k3_udma_glue_disable_rx_chn()
1090 if (rx_chn->remote) in k3_udma_glue_tdown_rx_chn()
1095 xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG, in k3_udma_glue_tdown_rx_chn()
1098 val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG); in k3_udma_glue_tdown_rx_chn()
1101 val = xudma_rchanrt_read(rx_chn->udma_rchanx, in k3_udma_glue_tdown_rx_chn()
1105 dev_err(rx_chn->common.dev, "RX tdown timeout\n"); in k3_udma_glue_tdown_rx_chn()
1111 val = xudma_rchanrt_read(rx_chn->udma_rchanx, in k3_udma_glue_tdown_rx_chn()
1114 dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n"); in k3_udma_glue_tdown_rx_chn()
1123 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; in k3_udma_glue_reset_rx_chn()
1124 struct device *dev = rx_chn->common.dev; in k3_udma_glue_reset_rx_chn()
1128 /* reset RXCQ as it is not input for udma - expected to be empty */ in k3_udma_glue_reset_rx_chn()
1129 occ_rx = k3_ringacc_ring_get_occ(flow->ringrx); in k3_udma_glue_reset_rx_chn()
1131 if (flow->ringrx) in k3_udma_glue_reset_rx_chn()
1132 k3_ringacc_ring_reset(flow->ringrx); in k3_udma_glue_reset_rx_chn()
1145 occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq); in k3_udma_glue_reset_rx_chn()
1149 ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma); in k3_udma_glue_reset_rx_chn()
1157 k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx); in k3_udma_glue_reset_rx_chn()
1165 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; in k3_udma_glue_push_rx_chn()
1167 return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma); in k3_udma_glue_push_rx_chn()
1174 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; in k3_udma_glue_pop_rx_chn()
1176 return k3_ringacc_ring_pop(flow->ringrx, desc_dma); in k3_udma_glue_pop_rx_chn()
1185 flow = &rx_chn->flows[flow_num]; in k3_udma_glue_rx_get_irq()
1187 flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx); in k3_udma_glue_rx_get_irq()
1189 return flow->virq; in k3_udma_glue_rx_get_irq()
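Finally, the free-descriptor-queue push and RX-ring pop above make up the per-flow receive datapath; the interrupt comes from the flow's RX ring via k3_udma_glue_rx_get_irq(). A sketch of the refill/receive loop, with the descriptor bookkeeping (allocation, buffer mapping, dma-to-CPU translation) assumed rather than taken from this file.

/* Give an empty, buffer-attached host descriptor back to the flow's FDQ */
static int example_rx_refill(struct k3_udma_glue_rx_channel *rx_chn, u32 flow,
			     struct cppi5_host_desc_t *desc_rx, dma_addr_t desc_dma)
{
	return k3_udma_glue_push_rx_chn(rx_chn, flow, desc_rx, desc_dma);
}

/* Reap received packets, e.g. from the handler of
 * k3_udma_glue_rx_get_irq(rx_chn, flow).
 */
static void example_rx_poll(struct k3_udma_glue_rx_channel *rx_chn, u32 flow)
{
	dma_addr_t desc_dma;

	while (!k3_udma_glue_pop_rx_chn(rx_chn, flow, &desc_dma)) {
		/* translate desc_dma back to the CPU descriptor, consume the
		 * packet, then refill the FDQ with a fresh descriptor
		 */
	}
}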