Lines matching refs: rx_chn (identifier cross-reference over the RX-channel glue code in drivers/dma/ti/k3-udma-glue.c)

490 static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)  in k3_udma_glue_cfg_rx_chn()  argument
492 const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; in k3_udma_glue_cfg_rx_chn()
506 req.index = rx_chn->udma_rchan_id; in k3_udma_glue_cfg_rx_chn()
507 req.rx_fetch_size = rx_chn->common.hdesc_size >> 2; in k3_udma_glue_cfg_rx_chn()
514 if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) { in k3_udma_glue_cfg_rx_chn()
516 req.flowid_start = rx_chn->flow_id_base; in k3_udma_glue_cfg_rx_chn()
517 req.flowid_cnt = rx_chn->flow_num; in k3_udma_glue_cfg_rx_chn()
520 req.rx_atype = rx_chn->common.atype; in k3_udma_glue_cfg_rx_chn()
524 dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n", in k3_udma_glue_cfg_rx_chn()
525 rx_chn->udma_rchan_id, ret); in k3_udma_glue_cfg_rx_chn()
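
The matches above only show the rx_chn dereferences inside k3_udma_glue_cfg_rx_chn(). For context, a minimal hedged sketch of how the TISCI RX-channel configuration request is assembled around those lines; struct and field names follow the ti_sci UDMAP resource-management API, while the exact valid_params flags and the channel type are assumptions and the function name is hypothetical:

static int k3_udma_glue_cfg_rx_chn_sketch(struct k3_udma_glue_rx_channel *rx_chn)
{
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
	int ret;

	/* Tell System Firmware which fields of the request to honour. */
	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;

	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = rx_chn->udma_rchan_id;
	/* Descriptor fetch size is expressed in 32-bit words. */
	req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;

	/* Advertise the flow range only when it differs from the default
	 * rchan-id flow (cf. lines 514-517 above). */
	if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
		req.valid_params |=
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
		req.flowid_start = rx_chn->flow_id_base;
		req.flowid_cnt = rx_chn->flow_num;
	}

	req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	req.rx_atype = rx_chn->common.atype;

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
			rx_chn->udma_rchan_id, ret);
	return ret;
}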
530 static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_release_rx_flow() argument
533 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; in k3_udma_glue_release_rx_flow()
544 xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow); in k3_udma_glue_release_rx_flow()
546 rx_chn->flows_ready--; in k3_udma_glue_release_rx_flow()
549 static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_cfg_rx_flow() argument
553 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; in k3_udma_glue_cfg_rx_flow()
554 const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; in k3_udma_glue_cfg_rx_flow()
555 struct device *dev = rx_chn->common.dev; in k3_udma_glue_cfg_rx_flow()
561 flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax, in k3_udma_glue_cfg_rx_flow()
575 ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc, in k3_udma_glue_cfg_rx_flow()
597 if (rx_chn->remote) { in k3_udma_glue_cfg_rx_flow()
623 if (rx_chn->common.epib) in k3_udma_glue_cfg_rx_flow()
625 if (rx_chn->common.psdata_size) in k3_udma_glue_cfg_rx_flow()
647 rx_chn->flows_ready++; in k3_udma_glue_cfg_rx_flow()
649 flow->udma_rflow_id, rx_chn->flows_ready); in k3_udma_glue_cfg_rx_flow()
658 xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow); in k3_udma_glue_cfg_rx_flow()
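
k3_udma_glue_cfg_rx_flow() first grabs an rflow and a ring pair (line 575), then programs the flow through TISCI. A hedged excerpt of that flow-programming step, assuming rx_ring_id and rx_ringfdq_id are locals holding the ring ids from the requested pair and that rx_flow_cfg() is the UDMAP RM op used; valid_params handling is omitted:

	/* Hypothetical excerpt from the middle of k3_udma_glue_cfg_rx_flow(). */
	struct ti_sci_msg_rm_udmap_flow_cfg req = { 0 };

	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	/* Mirror the host-descriptor layout: extended packet info words and
	 * protocol-specific data presence (cf. lines 623-625 above). */
	if (rx_chn->common.epib)
		req.rx_einfo_present = 1;
	if (rx_chn->common.psdata_size)
		req.rx_psinfo_present = 1;
	/* Completion ring and free-descriptor queue 0 for this flow. */
	req.rx_dest_qnum = rx_ring_id;
	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);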
710 k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_allocate_rx_flows() argument
720 if (rx_chn->flow_id_base != -1 && in k3_udma_glue_allocate_rx_flows()
721 !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base)) in k3_udma_glue_allocate_rx_flows()
725 ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax, in k3_udma_glue_allocate_rx_flows()
726 rx_chn->flow_id_base, in k3_udma_glue_allocate_rx_flows()
727 rx_chn->flow_num); in k3_udma_glue_allocate_rx_flows()
729 dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n", in k3_udma_glue_allocate_rx_flows()
730 rx_chn->flow_id_base, rx_chn->flow_num, ret); in k3_udma_glue_allocate_rx_flows()
733 rx_chn->flow_id_base = ret; in k3_udma_glue_allocate_rx_flows()
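
k3_udma_glue_allocate_rx_flows() only touches the resource manager when a general-purpose flow range is actually needed. A compact sketch of that decision, reconstructed from the matches above (error handling trimmed; the flow_id_use_rxchan_id early return is an assumption based on the channel cfg structure):

	/* Default flow reuses the rchan id: nothing to reserve. */
	if (cfg->flow_id_use_rxchan_id)
		return 0;

	/* Caller supplied a fixed, non-GP flow id base: use it as-is. */
	if (rx_chn->flow_id_base != -1 &&
	    !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		return 0;

	/* Otherwise reserve a range of general-purpose rflows; the returned
	 * value becomes the flow id base for this channel. */
	ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
					 rx_chn->flow_id_base, rx_chn->flow_num);
	if (ret < 0)
		return ret;
	rx_chn->flow_id_base = ret;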
742 struct k3_udma_glue_rx_channel *rx_chn; in k3_udma_glue_request_rx_chn_priv() local
752 rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL); in k3_udma_glue_request_rx_chn_priv()
753 if (!rx_chn) in k3_udma_glue_request_rx_chn_priv()
756 rx_chn->common.dev = dev; in k3_udma_glue_request_rx_chn_priv()
757 rx_chn->common.swdata_size = cfg->swdata_size; in k3_udma_glue_request_rx_chn_priv()
758 rx_chn->remote = false; in k3_udma_glue_request_rx_chn_priv()
762 &rx_chn->common, false); in k3_udma_glue_request_rx_chn_priv()
766 rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib, in k3_udma_glue_request_rx_chn_priv()
767 rx_chn->common.psdata_size, in k3_udma_glue_request_rx_chn_priv()
768 rx_chn->common.swdata_size); in k3_udma_glue_request_rx_chn_priv()
771 rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, -1); in k3_udma_glue_request_rx_chn_priv()
772 if (IS_ERR(rx_chn->udma_rchanx)) { in k3_udma_glue_request_rx_chn_priv()
773 ret = PTR_ERR(rx_chn->udma_rchanx); in k3_udma_glue_request_rx_chn_priv()
777 rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx); in k3_udma_glue_request_rx_chn_priv()
779 rx_chn->flow_num = cfg->flow_id_num; in k3_udma_glue_request_rx_chn_priv()
780 rx_chn->flow_id_base = cfg->flow_id_base; in k3_udma_glue_request_rx_chn_priv()
784 rx_chn->flow_id_base = rx_chn->udma_rchan_id; in k3_udma_glue_request_rx_chn_priv()
786 rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, in k3_udma_glue_request_rx_chn_priv()
787 sizeof(*rx_chn->flows), GFP_KERNEL); in k3_udma_glue_request_rx_chn_priv()
788 if (!rx_chn->flows) { in k3_udma_glue_request_rx_chn_priv()
793 ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg); in k3_udma_glue_request_rx_chn_priv()
797 for (i = 0; i < rx_chn->flow_num; i++) in k3_udma_glue_request_rx_chn_priv()
798 rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; in k3_udma_glue_request_rx_chn_priv()
801 rx_chn->common.dst_thread = in k3_udma_glue_request_rx_chn_priv()
802 xudma_dev_get_psil_base(rx_chn->common.udmax) + in k3_udma_glue_request_rx_chn_priv()
803 rx_chn->udma_rchan_id; in k3_udma_glue_request_rx_chn_priv()
805 ret = k3_udma_glue_cfg_rx_chn(rx_chn); in k3_udma_glue_request_rx_chn_priv()
813 ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg); in k3_udma_glue_request_rx_chn_priv()
818 ret = xudma_navss_psil_pair(rx_chn->common.udmax, in k3_udma_glue_request_rx_chn_priv()
819 rx_chn->common.src_thread, in k3_udma_glue_request_rx_chn_priv()
820 rx_chn->common.dst_thread); in k3_udma_glue_request_rx_chn_priv()
826 rx_chn->psil_paired = true; in k3_udma_glue_request_rx_chn_priv()
829 k3_udma_glue_disable_rx_chn(rx_chn); in k3_udma_glue_request_rx_chn_priv()
831 k3_udma_glue_dump_rx_chn(rx_chn); in k3_udma_glue_request_rx_chn_priv()
833 return rx_chn; in k3_udma_glue_request_rx_chn_priv()
836 k3_udma_glue_release_rx_chn(rx_chn); in k3_udma_glue_request_rx_chn_priv()
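
From a client driver's point of view the whole k3_udma_glue_request_rx_chn_priv() sequence above sits behind the public k3_udma_glue_request_rx_chn() wrapper. A minimal, hedged usage sketch; the "rx" channel name, ring sizes and my_request_rx() are illustrative, and the cfg/flow-cfg field names follow the in-tree users such as the AM65 CPSW driver:

#include <linux/dma/k3-udma-glue.h>
#include <linux/soc/ti/k3-ringacc.h>

static struct k3_udma_glue_rx_channel *my_request_rx(struct device *dev)
{
	struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 };
	struct k3_udma_glue_rx_flow_cfg flow_cfg = { 0 };
	struct k3_udma_glue_rx_channel *rx_chn;
	int ret;

	/* One default flow; swdata sized for the driver's per-packet cookie. */
	rx_cfg.swdata_size = sizeof(void *);
	rx_cfg.flow_id_base = -1;	/* let the glue reserve a GP flow range */
	rx_cfg.flow_id_num = 1;

	/* Rings for the default flow: completion ring + free-descriptor queue. */
	flow_cfg.rx_cfg.size = 128;
	flow_cfg.rx_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
	flow_cfg.rx_cfg.mode = K3_RINGACC_RING_MODE_RING;
	flow_cfg.rxfdq_cfg = flow_cfg.rx_cfg;
	flow_cfg.ring_rxq_id = K3_RINGACC_RING_ID_ANY;
	flow_cfg.ring_rxfdq0_id = K3_RINGACC_RING_ID_ANY;
	flow_cfg.rx_error_handling = false;
	rx_cfg.def_flow_cfg = &flow_cfg;	/* configured as flow 0, cf. line 813 */

	rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
	if (IS_ERR(rx_chn))
		return rx_chn;

	ret = k3_udma_glue_enable_rx_chn(rx_chn);
	if (ret) {
		k3_udma_glue_release_rx_chn(rx_chn);
		return ERR_PTR(ret);
	}
	return rx_chn;
}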
844 struct k3_udma_glue_rx_channel *rx_chn; in k3_udma_glue_request_remote_rx_chn() local
858 rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL); in k3_udma_glue_request_remote_rx_chn()
859 if (!rx_chn) in k3_udma_glue_request_remote_rx_chn()
862 rx_chn->common.dev = dev; in k3_udma_glue_request_remote_rx_chn()
863 rx_chn->common.swdata_size = cfg->swdata_size; in k3_udma_glue_request_remote_rx_chn()
864 rx_chn->remote = true; in k3_udma_glue_request_remote_rx_chn()
865 rx_chn->udma_rchan_id = -1; in k3_udma_glue_request_remote_rx_chn()
866 rx_chn->flow_num = cfg->flow_id_num; in k3_udma_glue_request_remote_rx_chn()
867 rx_chn->flow_id_base = cfg->flow_id_base; in k3_udma_glue_request_remote_rx_chn()
868 rx_chn->psil_paired = false; in k3_udma_glue_request_remote_rx_chn()
872 &rx_chn->common, false); in k3_udma_glue_request_remote_rx_chn()
876 rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib, in k3_udma_glue_request_remote_rx_chn()
877 rx_chn->common.psdata_size, in k3_udma_glue_request_remote_rx_chn()
878 rx_chn->common.swdata_size); in k3_udma_glue_request_remote_rx_chn()
880 rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, in k3_udma_glue_request_remote_rx_chn()
881 sizeof(*rx_chn->flows), GFP_KERNEL); in k3_udma_glue_request_remote_rx_chn()
882 if (!rx_chn->flows) { in k3_udma_glue_request_remote_rx_chn()
887 ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg); in k3_udma_glue_request_remote_rx_chn()
891 for (i = 0; i < rx_chn->flow_num; i++) in k3_udma_glue_request_remote_rx_chn()
892 rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; in k3_udma_glue_request_remote_rx_chn()
894 k3_udma_glue_dump_rx_chn(rx_chn); in k3_udma_glue_request_remote_rx_chn()
896 return rx_chn; in k3_udma_glue_request_remote_rx_chn()
899 k3_udma_glue_release_rx_chn(rx_chn); in k3_udma_glue_request_remote_rx_chn()
914 void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) in k3_udma_glue_release_rx_chn() argument
918 if (IS_ERR_OR_NULL(rx_chn->common.udmax)) in k3_udma_glue_release_rx_chn()
921 if (rx_chn->psil_paired) { in k3_udma_glue_release_rx_chn()
922 xudma_navss_psil_unpair(rx_chn->common.udmax, in k3_udma_glue_release_rx_chn()
923 rx_chn->common.src_thread, in k3_udma_glue_release_rx_chn()
924 rx_chn->common.dst_thread); in k3_udma_glue_release_rx_chn()
925 rx_chn->psil_paired = false; in k3_udma_glue_release_rx_chn()
928 for (i = 0; i < rx_chn->flow_num; i++) in k3_udma_glue_release_rx_chn()
929 k3_udma_glue_release_rx_flow(rx_chn, i); in k3_udma_glue_release_rx_chn()
931 if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base)) in k3_udma_glue_release_rx_chn()
932 xudma_free_gp_rflow_range(rx_chn->common.udmax, in k3_udma_glue_release_rx_chn()
933 rx_chn->flow_id_base, in k3_udma_glue_release_rx_chn()
934 rx_chn->flow_num); in k3_udma_glue_release_rx_chn()
936 if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx)) in k3_udma_glue_release_rx_chn()
937 xudma_rchan_put(rx_chn->common.udmax, in k3_udma_glue_release_rx_chn()
938 rx_chn->udma_rchanx); in k3_udma_glue_release_rx_chn()
942 int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_rx_flow_init() argument
946 if (flow_idx >= rx_chn->flow_num) in k3_udma_glue_rx_flow_init()
949 return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg); in k3_udma_glue_rx_flow_init()
953 u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_rx_flow_get_fdq_id() argument
958 if (flow_idx >= rx_chn->flow_num) in k3_udma_glue_rx_flow_get_fdq_id()
961 flow = &rx_chn->flows[flow_idx]; in k3_udma_glue_rx_flow_get_fdq_id()
967 u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn) in k3_udma_glue_rx_get_flow_id_base() argument
969 return rx_chn->flow_id_base; in k3_udma_glue_rx_get_flow_id_base()
973 int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_rx_flow_enable() argument
976 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; in k3_udma_glue_rx_flow_enable()
977 const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; in k3_udma_glue_rx_flow_enable()
978 struct device *dev = rx_chn->common.dev; in k3_udma_glue_rx_flow_enable()
984 if (!rx_chn->remote) in k3_udma_glue_rx_flow_enable()
1016 int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_rx_flow_disable() argument
1019 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; in k3_udma_glue_rx_flow_disable()
1020 const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; in k3_udma_glue_rx_flow_disable()
1021 struct device *dev = rx_chn->common.dev; in k3_udma_glue_rx_flow_disable()
1025 if (!rx_chn->remote) in k3_udma_glue_rx_flow_disable()
1053 int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) in k3_udma_glue_enable_rx_chn() argument
1055 if (rx_chn->remote) in k3_udma_glue_enable_rx_chn()
1058 if (rx_chn->flows_ready < rx_chn->flow_num) in k3_udma_glue_enable_rx_chn()
1061 xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, in k3_udma_glue_enable_rx_chn()
1064 xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG, in k3_udma_glue_enable_rx_chn()
1067 k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en"); in k3_udma_glue_enable_rx_chn()
1072 void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) in k3_udma_glue_disable_rx_chn() argument
1074 k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1"); in k3_udma_glue_disable_rx_chn()
1076 xudma_rchanrt_write(rx_chn->udma_rchanx, in k3_udma_glue_disable_rx_chn()
1078 xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0); in k3_udma_glue_disable_rx_chn()
1080 k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2"); in k3_udma_glue_disable_rx_chn()
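
As line 1058 shows, k3_udma_glue_enable_rx_chn() refuses to start the channel until every flow has been configured (flows_ready must reach flow_num), so multi-flow users initialize the remaining flows before enabling. A short hedged sketch of that ordering; my_rx_start(), flow_cfgs[] and num_flows are hypothetical, and flow 0 is assumed to have been configured via cfg->def_flow_cfg at request time:

static int my_rx_start(struct k3_udma_glue_rx_channel *rx_chn,
		       struct k3_udma_glue_rx_flow_cfg *flow_cfgs, u32 num_flows)
{
	u32 i;
	int ret;

	/* Flows 1..N-1 must be initialized explicitly. */
	for (i = 1; i < num_flows; i++) {
		ret = k3_udma_glue_rx_flow_init(rx_chn, i, &flow_cfgs[i]);
		if (ret)
			return ret;
	}

	/* Fails while flows_ready < flow_num. */
	return k3_udma_glue_enable_rx_chn(rx_chn);
}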
1084 void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_tdown_rx_chn() argument
1090 if (rx_chn->remote) in k3_udma_glue_tdown_rx_chn()
1093 k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1"); in k3_udma_glue_tdown_rx_chn()
1095 xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG, in k3_udma_glue_tdown_rx_chn()
1098 val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG); in k3_udma_glue_tdown_rx_chn()
1101 val = xudma_rchanrt_read(rx_chn->udma_rchanx, in k3_udma_glue_tdown_rx_chn()
1105 dev_err(rx_chn->common.dev, "RX tdown timeout\n"); in k3_udma_glue_tdown_rx_chn()
1111 val = xudma_rchanrt_read(rx_chn->udma_rchanx, in k3_udma_glue_tdown_rx_chn()
1114 dev_err(rx_chn->common.dev, "TX tdown peer not stopped\n"); in k3_udma_glue_tdown_rx_chn()
1115 k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2"); in k3_udma_glue_tdown_rx_chn()
1119 void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_reset_rx_chn() argument
1123 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; in k3_udma_glue_reset_rx_chn()
1124 struct device *dev = rx_chn->common.dev; in k3_udma_glue_reset_rx_chn()
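
Teardown combines several of the helpers listed above. A hedged sketch of the shutdown order used by in-tree clients (e.g. the AM65 CPSW driver): channel teardown, per-flow reset with a driver cleanup callback, disable, then release. my_rx_stop() and my_rx_cleanup() are hypothetical, and the exact k3_udma_glue_reset_rx_chn() signature is taken from those users:

static void my_rx_stop(struct k3_udma_glue_rx_channel *rx_chn,
		       void *cleanup_data, u32 num_flows)
{
	u32 i;

	/* Synchronous teardown: waits for the peer/channel to stop. */
	k3_udma_glue_tdown_rx_chn(rx_chn, true);

	/* Return every outstanding descriptor to the driver through
	 * my_rx_cleanup(data, desc_dma); skipping the FDQ for secondary
	 * flows follows the in-tree pattern. */
	for (i = 0; i < num_flows; i++)
		k3_udma_glue_reset_rx_chn(rx_chn, i, cleanup_data,
					  my_rx_cleanup, !!i);

	k3_udma_glue_disable_rx_chn(rx_chn);
	k3_udma_glue_release_rx_chn(rx_chn);
}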
1161 int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_push_rx_chn() argument
1165 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; in k3_udma_glue_push_rx_chn()
1171 int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_pop_rx_chn() argument
1174 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; in k3_udma_glue_pop_rx_chn()
1180 int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_rx_get_irq() argument
1185 flow = &rx_chn->flows[flow_num]; in k3_udma_glue_rx_get_irq()
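
The push/pop/get_irq helpers at the bottom of the list form the per-flow datapath: k3_udma_glue_push_rx_chn() queues a CPPI5 host descriptor on the flow's free-descriptor ring, k3_udma_glue_pop_rx_chn() takes completed descriptors off the completion ring, and k3_udma_glue_rx_get_irq() returns the completion-ring interrupt whose handler (or NAPI poll) drains it. A hedged sketch of that loop; my_rx_poll_flow(), my_rx_desc_from_dma() and my_rx_refill() are hypothetical driver helpers:

#include <linux/dma/ti-cppi5.h>

static void my_rx_poll_flow(struct k3_udma_glue_rx_channel *rx_chn, u32 flow)
{
	dma_addr_t desc_dma;

	/* Pop until the completion ring is empty (non-zero return). */
	while (k3_udma_glue_pop_rx_chn(rx_chn, flow, &desc_dma) == 0) {
		struct cppi5_host_desc_t *desc = my_rx_desc_from_dma(desc_dma);

		/* Hand the completed buffer up, then refill the flow's FDQ
		 * with a fresh descriptor via k3_udma_glue_push_rx_chn(). */
		my_rx_refill(rx_chn, flow, desc);
	}
}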