1*4882a593Smuzhiyun /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ 2*4882a593Smuzhiyun /* QLogic qed NIC Driver 3*4882a593Smuzhiyun * Copyright (c) 2015-2017 QLogic Corporation 4*4882a593Smuzhiyun * Copyright (c) 2019-2020 Marvell International Ltd. 5*4882a593Smuzhiyun */ 6*4882a593Smuzhiyun 7*4882a593Smuzhiyun #ifndef _QED_L2_H 8*4882a593Smuzhiyun #define _QED_L2_H 9*4882a593Smuzhiyun #include <linux/types.h> 10*4882a593Smuzhiyun #include <linux/io.h> 11*4882a593Smuzhiyun #include <linux/kernel.h> 12*4882a593Smuzhiyun #include <linux/slab.h> 13*4882a593Smuzhiyun #include <linux/qed/qed_eth_if.h> 14*4882a593Smuzhiyun #include "qed.h" 15*4882a593Smuzhiyun #include "qed_hw.h" 16*4882a593Smuzhiyun #include "qed_sp.h" 17*4882a593Smuzhiyun struct qed_rss_params { 18*4882a593Smuzhiyun u8 update_rss_config; 19*4882a593Smuzhiyun u8 rss_enable; 20*4882a593Smuzhiyun u8 rss_eng_id; 21*4882a593Smuzhiyun u8 update_rss_capabilities; 22*4882a593Smuzhiyun u8 update_rss_ind_table; 23*4882a593Smuzhiyun u8 update_rss_key; 24*4882a593Smuzhiyun u8 rss_caps; 25*4882a593Smuzhiyun u8 rss_table_size_log; 26*4882a593Smuzhiyun 27*4882a593Smuzhiyun /* Indirection table consist of rx queue handles */ 28*4882a593Smuzhiyun void *rss_ind_table[QED_RSS_IND_TABLE_SIZE]; 29*4882a593Smuzhiyun u32 rss_key[QED_RSS_KEY_SIZE]; 30*4882a593Smuzhiyun }; 31*4882a593Smuzhiyun 32*4882a593Smuzhiyun struct qed_sge_tpa_params { 33*4882a593Smuzhiyun u8 max_buffers_per_cqe; 34*4882a593Smuzhiyun 35*4882a593Smuzhiyun u8 update_tpa_en_flg; 36*4882a593Smuzhiyun u8 tpa_ipv4_en_flg; 37*4882a593Smuzhiyun u8 tpa_ipv6_en_flg; 38*4882a593Smuzhiyun u8 tpa_ipv4_tunn_en_flg; 39*4882a593Smuzhiyun u8 tpa_ipv6_tunn_en_flg; 40*4882a593Smuzhiyun 41*4882a593Smuzhiyun u8 update_tpa_param_flg; 42*4882a593Smuzhiyun u8 tpa_pkt_split_flg; 43*4882a593Smuzhiyun u8 tpa_hdr_data_split_flg; 44*4882a593Smuzhiyun u8 tpa_gro_consistent_flg; 45*4882a593Smuzhiyun u8 tpa_max_aggs_num; 46*4882a593Smuzhiyun u16 tpa_max_size; 47*4882a593Smuzhiyun u16 tpa_min_size_to_start; 48*4882a593Smuzhiyun u16 tpa_min_size_to_cont; 49*4882a593Smuzhiyun }; 50*4882a593Smuzhiyun 51*4882a593Smuzhiyun enum qed_filter_opcode { 52*4882a593Smuzhiyun QED_FILTER_ADD, 53*4882a593Smuzhiyun QED_FILTER_REMOVE, 54*4882a593Smuzhiyun QED_FILTER_MOVE, 55*4882a593Smuzhiyun QED_FILTER_REPLACE, /* Delete all MACs and add new one instead */ 56*4882a593Smuzhiyun QED_FILTER_FLUSH, /* Removes all filters */ 57*4882a593Smuzhiyun }; 58*4882a593Smuzhiyun 59*4882a593Smuzhiyun enum qed_filter_ucast_type { 60*4882a593Smuzhiyun QED_FILTER_MAC, 61*4882a593Smuzhiyun QED_FILTER_VLAN, 62*4882a593Smuzhiyun QED_FILTER_MAC_VLAN, 63*4882a593Smuzhiyun QED_FILTER_INNER_MAC, 64*4882a593Smuzhiyun QED_FILTER_INNER_VLAN, 65*4882a593Smuzhiyun QED_FILTER_INNER_PAIR, 66*4882a593Smuzhiyun QED_FILTER_INNER_MAC_VNI_PAIR, 67*4882a593Smuzhiyun QED_FILTER_MAC_VNI_PAIR, 68*4882a593Smuzhiyun QED_FILTER_VNI, 69*4882a593Smuzhiyun }; 70*4882a593Smuzhiyun 71*4882a593Smuzhiyun struct qed_filter_ucast { 72*4882a593Smuzhiyun enum qed_filter_opcode opcode; 73*4882a593Smuzhiyun enum qed_filter_ucast_type type; 74*4882a593Smuzhiyun u8 is_rx_filter; 75*4882a593Smuzhiyun u8 is_tx_filter; 76*4882a593Smuzhiyun u8 vport_to_add_to; 77*4882a593Smuzhiyun u8 vport_to_remove_from; 78*4882a593Smuzhiyun unsigned char mac[ETH_ALEN]; 79*4882a593Smuzhiyun u8 assert_on_error; 80*4882a593Smuzhiyun u16 vlan; 81*4882a593Smuzhiyun u32 vni; 82*4882a593Smuzhiyun }; 83*4882a593Smuzhiyun 84*4882a593Smuzhiyun struct qed_filter_mcast { 85*4882a593Smuzhiyun /* MOVE is not supported for multicast */ 86*4882a593Smuzhiyun enum qed_filter_opcode opcode; 87*4882a593Smuzhiyun u8 vport_to_add_to; 88*4882a593Smuzhiyun u8 vport_to_remove_from; 89*4882a593Smuzhiyun u8 num_mc_addrs; 90*4882a593Smuzhiyun #define QED_MAX_MC_ADDRS 64 91*4882a593Smuzhiyun unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN]; 92*4882a593Smuzhiyun }; 93*4882a593Smuzhiyun 94*4882a593Smuzhiyun /** 95*4882a593Smuzhiyun * @brief qed_eth_rx_queue_stop - This ramrod closes an Rx queue 96*4882a593Smuzhiyun * 97*4882a593Smuzhiyun * @param p_hwfn 98*4882a593Smuzhiyun * @param p_rxq Handler of queue to close 99*4882a593Smuzhiyun * @param eq_completion_only If True completion will be on 100*4882a593Smuzhiyun * EQe, if False completion will be 101*4882a593Smuzhiyun * on EQe if p_hwfn opaque 102*4882a593Smuzhiyun * different from the RXQ opaque 103*4882a593Smuzhiyun * otherwise on CQe. 104*4882a593Smuzhiyun * @param cqe_completion If True completion will be 105*4882a593Smuzhiyun * receive on CQe. 106*4882a593Smuzhiyun * @return int 107*4882a593Smuzhiyun */ 108*4882a593Smuzhiyun int 109*4882a593Smuzhiyun qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, 110*4882a593Smuzhiyun void *p_rxq, 111*4882a593Smuzhiyun bool eq_completion_only, bool cqe_completion); 112*4882a593Smuzhiyun 113*4882a593Smuzhiyun /** 114*4882a593Smuzhiyun * @brief qed_eth_tx_queue_stop - closes a Tx queue 115*4882a593Smuzhiyun * 116*4882a593Smuzhiyun * @param p_hwfn 117*4882a593Smuzhiyun * @param p_txq - handle to Tx queue needed to be closed 118*4882a593Smuzhiyun * 119*4882a593Smuzhiyun * @return int 120*4882a593Smuzhiyun */ 121*4882a593Smuzhiyun int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_txq); 122*4882a593Smuzhiyun 123*4882a593Smuzhiyun enum qed_tpa_mode { 124*4882a593Smuzhiyun QED_TPA_MODE_NONE, 125*4882a593Smuzhiyun QED_TPA_MODE_UNUSED, 126*4882a593Smuzhiyun QED_TPA_MODE_GRO, 127*4882a593Smuzhiyun QED_TPA_MODE_MAX 128*4882a593Smuzhiyun }; 129*4882a593Smuzhiyun 130*4882a593Smuzhiyun struct qed_sp_vport_start_params { 131*4882a593Smuzhiyun enum qed_tpa_mode tpa_mode; 132*4882a593Smuzhiyun bool remove_inner_vlan; 133*4882a593Smuzhiyun bool tx_switching; 134*4882a593Smuzhiyun bool handle_ptp_pkts; 135*4882a593Smuzhiyun bool only_untagged; 136*4882a593Smuzhiyun bool drop_ttl0; 137*4882a593Smuzhiyun u8 max_buffers_per_cqe; 138*4882a593Smuzhiyun u32 concrete_fid; 139*4882a593Smuzhiyun u16 opaque_fid; 140*4882a593Smuzhiyun u8 vport_id; 141*4882a593Smuzhiyun u16 mtu; 142*4882a593Smuzhiyun bool check_mac; 143*4882a593Smuzhiyun bool check_ethtype; 144*4882a593Smuzhiyun }; 145*4882a593Smuzhiyun 146*4882a593Smuzhiyun int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, 147*4882a593Smuzhiyun struct qed_sp_vport_start_params *p_params); 148*4882a593Smuzhiyun 149*4882a593Smuzhiyun 150*4882a593Smuzhiyun struct qed_filter_accept_flags { 151*4882a593Smuzhiyun u8 update_rx_mode_config; 152*4882a593Smuzhiyun u8 update_tx_mode_config; 153*4882a593Smuzhiyun u8 rx_accept_filter; 154*4882a593Smuzhiyun u8 tx_accept_filter; 155*4882a593Smuzhiyun #define QED_ACCEPT_NONE 0x01 156*4882a593Smuzhiyun #define QED_ACCEPT_UCAST_MATCHED 0x02 157*4882a593Smuzhiyun #define QED_ACCEPT_UCAST_UNMATCHED 0x04 158*4882a593Smuzhiyun #define QED_ACCEPT_MCAST_MATCHED 0x08 159*4882a593Smuzhiyun #define QED_ACCEPT_MCAST_UNMATCHED 0x10 160*4882a593Smuzhiyun #define QED_ACCEPT_BCAST 0x20 161*4882a593Smuzhiyun #define QED_ACCEPT_ANY_VNI 0x40 162*4882a593Smuzhiyun }; 163*4882a593Smuzhiyun 164*4882a593Smuzhiyun struct qed_arfs_config_params { 165*4882a593Smuzhiyun bool tcp; 166*4882a593Smuzhiyun bool udp; 167*4882a593Smuzhiyun bool ipv4; 168*4882a593Smuzhiyun bool ipv6; 169*4882a593Smuzhiyun enum qed_filter_config_mode mode; 170*4882a593Smuzhiyun }; 171*4882a593Smuzhiyun 172*4882a593Smuzhiyun struct qed_sp_vport_update_params { 173*4882a593Smuzhiyun u16 opaque_fid; 174*4882a593Smuzhiyun u8 vport_id; 175*4882a593Smuzhiyun u8 update_vport_active_rx_flg; 176*4882a593Smuzhiyun u8 vport_active_rx_flg; 177*4882a593Smuzhiyun u8 update_vport_active_tx_flg; 178*4882a593Smuzhiyun u8 vport_active_tx_flg; 179*4882a593Smuzhiyun u8 update_inner_vlan_removal_flg; 180*4882a593Smuzhiyun u8 inner_vlan_removal_flg; 181*4882a593Smuzhiyun u8 silent_vlan_removal_flg; 182*4882a593Smuzhiyun u8 update_default_vlan_enable_flg; 183*4882a593Smuzhiyun u8 default_vlan_enable_flg; 184*4882a593Smuzhiyun u8 update_default_vlan_flg; 185*4882a593Smuzhiyun u16 default_vlan; 186*4882a593Smuzhiyun u8 update_tx_switching_flg; 187*4882a593Smuzhiyun u8 tx_switching_flg; 188*4882a593Smuzhiyun u8 update_approx_mcast_flg; 189*4882a593Smuzhiyun u8 update_anti_spoofing_en_flg; 190*4882a593Smuzhiyun u8 anti_spoofing_en; 191*4882a593Smuzhiyun u8 update_accept_any_vlan_flg; 192*4882a593Smuzhiyun u8 accept_any_vlan; 193*4882a593Smuzhiyun u32 bins[8]; 194*4882a593Smuzhiyun struct qed_rss_params *rss_params; 195*4882a593Smuzhiyun struct qed_filter_accept_flags accept_flags; 196*4882a593Smuzhiyun struct qed_sge_tpa_params *sge_tpa_params; 197*4882a593Smuzhiyun u8 update_ctl_frame_check; 198*4882a593Smuzhiyun u8 mac_chk_en; 199*4882a593Smuzhiyun u8 ethtype_chk_en; 200*4882a593Smuzhiyun }; 201*4882a593Smuzhiyun 202*4882a593Smuzhiyun int qed_sp_vport_update(struct qed_hwfn *p_hwfn, 203*4882a593Smuzhiyun struct qed_sp_vport_update_params *p_params, 204*4882a593Smuzhiyun enum spq_mode comp_mode, 205*4882a593Smuzhiyun struct qed_spq_comp_cb *p_comp_data); 206*4882a593Smuzhiyun 207*4882a593Smuzhiyun /** 208*4882a593Smuzhiyun * @brief qed_sp_vport_stop - 209*4882a593Smuzhiyun * 210*4882a593Smuzhiyun * This ramrod closes a VPort after all its RX and TX queues are terminated. 211*4882a593Smuzhiyun * An Assert is generated if any queues are left open. 212*4882a593Smuzhiyun * 213*4882a593Smuzhiyun * @param p_hwfn 214*4882a593Smuzhiyun * @param opaque_fid 215*4882a593Smuzhiyun * @param vport_id VPort ID 216*4882a593Smuzhiyun * 217*4882a593Smuzhiyun * @return int 218*4882a593Smuzhiyun */ 219*4882a593Smuzhiyun int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id); 220*4882a593Smuzhiyun 221*4882a593Smuzhiyun int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, 222*4882a593Smuzhiyun u16 opaque_fid, 223*4882a593Smuzhiyun struct qed_filter_ucast *p_filter_cmd, 224*4882a593Smuzhiyun enum spq_mode comp_mode, 225*4882a593Smuzhiyun struct qed_spq_comp_cb *p_comp_data); 226*4882a593Smuzhiyun 227*4882a593Smuzhiyun /** 228*4882a593Smuzhiyun * @brief qed_sp_rx_eth_queues_update - 229*4882a593Smuzhiyun * 230*4882a593Smuzhiyun * This ramrod updates an RX queue. It is used for setting the active state 231*4882a593Smuzhiyun * of the queue and updating the TPA and SGE parameters. 232*4882a593Smuzhiyun * 233*4882a593Smuzhiyun * @note At the moment - only used by non-linux VFs. 234*4882a593Smuzhiyun * 235*4882a593Smuzhiyun * @param p_hwfn 236*4882a593Smuzhiyun * @param pp_rxq_handlers An array of queue handlers to be updated. 237*4882a593Smuzhiyun * @param num_rxqs number of queues to update. 238*4882a593Smuzhiyun * @param complete_cqe_flg Post completion to the CQE Ring if set 239*4882a593Smuzhiyun * @param complete_event_flg Post completion to the Event Ring if set 240*4882a593Smuzhiyun * @param comp_mode 241*4882a593Smuzhiyun * @param p_comp_data 242*4882a593Smuzhiyun * 243*4882a593Smuzhiyun * @return int 244*4882a593Smuzhiyun */ 245*4882a593Smuzhiyun 246*4882a593Smuzhiyun int 247*4882a593Smuzhiyun qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, 248*4882a593Smuzhiyun void **pp_rxq_handlers, 249*4882a593Smuzhiyun u8 num_rxqs, 250*4882a593Smuzhiyun u8 complete_cqe_flg, 251*4882a593Smuzhiyun u8 complete_event_flg, 252*4882a593Smuzhiyun enum spq_mode comp_mode, 253*4882a593Smuzhiyun struct qed_spq_comp_cb *p_comp_data); 254*4882a593Smuzhiyun 255*4882a593Smuzhiyun void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats); 256*4882a593Smuzhiyun 257*4882a593Smuzhiyun void qed_reset_vport_stats(struct qed_dev *cdev); 258*4882a593Smuzhiyun 259*4882a593Smuzhiyun /** 260*4882a593Smuzhiyun * *@brief qed_arfs_mode_configure - 261*4882a593Smuzhiyun * 262*4882a593Smuzhiyun **Enable or disable rfs mode. It must accept atleast one of tcp or udp true 263*4882a593Smuzhiyun **and atleast one of ipv4 or ipv6 true to enable rfs mode. 264*4882a593Smuzhiyun * 265*4882a593Smuzhiyun **@param p_hwfn 266*4882a593Smuzhiyun **@param p_ptt 267*4882a593Smuzhiyun **@param p_cfg_params - arfs mode configuration parameters. 268*4882a593Smuzhiyun * 269*4882a593Smuzhiyun */ 270*4882a593Smuzhiyun void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, 271*4882a593Smuzhiyun struct qed_ptt *p_ptt, 272*4882a593Smuzhiyun struct qed_arfs_config_params *p_cfg_params); 273*4882a593Smuzhiyun 274*4882a593Smuzhiyun /** 275*4882a593Smuzhiyun * @brief - qed_configure_rfs_ntuple_filter 276*4882a593Smuzhiyun * 277*4882a593Smuzhiyun * This ramrod should be used to add or remove arfs hw filter 278*4882a593Smuzhiyun * 279*4882a593Smuzhiyun * @params p_hwfn 280*4882a593Smuzhiyun * @params p_cb - Used for QED_SPQ_MODE_CB,where client would initialize 281*4882a593Smuzhiyun * it with cookie and callback function address, if not 282*4882a593Smuzhiyun * using this mode then client must pass NULL. 283*4882a593Smuzhiyun * @params p_params 284*4882a593Smuzhiyun */ 285*4882a593Smuzhiyun int 286*4882a593Smuzhiyun qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, 287*4882a593Smuzhiyun struct qed_spq_comp_cb *p_cb, 288*4882a593Smuzhiyun struct qed_ntuple_filter_params *p_params); 289*4882a593Smuzhiyun 290*4882a593Smuzhiyun #define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8) 291*4882a593Smuzhiyun #define QED_QUEUE_CID_SELF (0xff) 292*4882a593Smuzhiyun 293*4882a593Smuzhiyun /* Almost identical to the qed_queue_start_common_params, 294*4882a593Smuzhiyun * but here we maintain the SB index in IGU CAM. 295*4882a593Smuzhiyun */ 296*4882a593Smuzhiyun struct qed_queue_cid_params { 297*4882a593Smuzhiyun u8 vport_id; 298*4882a593Smuzhiyun u16 queue_id; 299*4882a593Smuzhiyun u8 stats_id; 300*4882a593Smuzhiyun }; 301*4882a593Smuzhiyun 302*4882a593Smuzhiyun /* Additional parameters required for initialization of the queue_cid 303*4882a593Smuzhiyun * and are relevant only for a PF initializing one for its VFs. 304*4882a593Smuzhiyun */ 305*4882a593Smuzhiyun struct qed_queue_cid_vf_params { 306*4882a593Smuzhiyun /* Should match the VF's relative index */ 307*4882a593Smuzhiyun u8 vfid; 308*4882a593Smuzhiyun 309*4882a593Smuzhiyun /* 0-based queue index. Should reflect the relative qzone the 310*4882a593Smuzhiyun * VF thinks is associated with it [in its range]. 311*4882a593Smuzhiyun */ 312*4882a593Smuzhiyun u8 vf_qid; 313*4882a593Smuzhiyun 314*4882a593Smuzhiyun /* Indicates a VF is legacy, making it differ in several things: 315*4882a593Smuzhiyun * - Producers would be placed in a different place. 316*4882a593Smuzhiyun * - Makes assumptions regarding the CIDs. 317*4882a593Smuzhiyun */ 318*4882a593Smuzhiyun u8 vf_legacy; 319*4882a593Smuzhiyun 320*4882a593Smuzhiyun u8 qid_usage_idx; 321*4882a593Smuzhiyun }; 322*4882a593Smuzhiyun 323*4882a593Smuzhiyun struct qed_queue_cid { 324*4882a593Smuzhiyun /* For stats-id, the `rel' is actually absolute as well */ 325*4882a593Smuzhiyun struct qed_queue_cid_params rel; 326*4882a593Smuzhiyun struct qed_queue_cid_params abs; 327*4882a593Smuzhiyun 328*4882a593Smuzhiyun /* These have no 'relative' meaning */ 329*4882a593Smuzhiyun u16 sb_igu_id; 330*4882a593Smuzhiyun u8 sb_idx; 331*4882a593Smuzhiyun 332*4882a593Smuzhiyun u32 cid; 333*4882a593Smuzhiyun u16 opaque_fid; 334*4882a593Smuzhiyun 335*4882a593Smuzhiyun bool b_is_rx; 336*4882a593Smuzhiyun 337*4882a593Smuzhiyun /* VFs queues are mapped differently, so we need to know the 338*4882a593Smuzhiyun * relative queue associated with them [0-based]. 339*4882a593Smuzhiyun * Notice this is relevant on the *PF* queue-cid of its VF's queues, 340*4882a593Smuzhiyun * and not on the VF itself. 341*4882a593Smuzhiyun */ 342*4882a593Smuzhiyun u8 vfid; 343*4882a593Smuzhiyun u8 vf_qid; 344*4882a593Smuzhiyun 345*4882a593Smuzhiyun /* We need an additional index to differentiate between queues opened 346*4882a593Smuzhiyun * for same queue-zone, as VFs would have to communicate the info 347*4882a593Smuzhiyun * to the PF [otherwise PF has no way to differentiate]. 348*4882a593Smuzhiyun */ 349*4882a593Smuzhiyun u8 qid_usage_idx; 350*4882a593Smuzhiyun 351*4882a593Smuzhiyun u8 vf_legacy; 352*4882a593Smuzhiyun #define QED_QCID_LEGACY_VF_RX_PROD (BIT(0)) 353*4882a593Smuzhiyun #define QED_QCID_LEGACY_VF_CID (BIT(1)) 354*4882a593Smuzhiyun 355*4882a593Smuzhiyun struct qed_hwfn *p_owner; 356*4882a593Smuzhiyun }; 357*4882a593Smuzhiyun 358*4882a593Smuzhiyun int qed_l2_alloc(struct qed_hwfn *p_hwfn); 359*4882a593Smuzhiyun void qed_l2_setup(struct qed_hwfn *p_hwfn); 360*4882a593Smuzhiyun void qed_l2_free(struct qed_hwfn *p_hwfn); 361*4882a593Smuzhiyun 362*4882a593Smuzhiyun void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn, 363*4882a593Smuzhiyun struct qed_queue_cid *p_cid); 364*4882a593Smuzhiyun 365*4882a593Smuzhiyun struct qed_queue_cid * 366*4882a593Smuzhiyun qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, 367*4882a593Smuzhiyun u16 opaque_fid, 368*4882a593Smuzhiyun struct qed_queue_start_common_params *p_params, 369*4882a593Smuzhiyun bool b_is_rx, 370*4882a593Smuzhiyun struct qed_queue_cid_vf_params *p_vf_params); 371*4882a593Smuzhiyun 372*4882a593Smuzhiyun int 373*4882a593Smuzhiyun qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, 374*4882a593Smuzhiyun struct qed_sp_vport_start_params *p_params); 375*4882a593Smuzhiyun 376*4882a593Smuzhiyun /** 377*4882a593Smuzhiyun * @brief - Starts an Rx queue, when queue_cid is already prepared 378*4882a593Smuzhiyun * 379*4882a593Smuzhiyun * @param p_hwfn 380*4882a593Smuzhiyun * @param p_cid 381*4882a593Smuzhiyun * @param bd_max_bytes 382*4882a593Smuzhiyun * @param bd_chain_phys_addr 383*4882a593Smuzhiyun * @param cqe_pbl_addr 384*4882a593Smuzhiyun * @param cqe_pbl_size 385*4882a593Smuzhiyun * 386*4882a593Smuzhiyun * @return int 387*4882a593Smuzhiyun */ 388*4882a593Smuzhiyun int 389*4882a593Smuzhiyun qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, 390*4882a593Smuzhiyun struct qed_queue_cid *p_cid, 391*4882a593Smuzhiyun u16 bd_max_bytes, 392*4882a593Smuzhiyun dma_addr_t bd_chain_phys_addr, 393*4882a593Smuzhiyun dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size); 394*4882a593Smuzhiyun 395*4882a593Smuzhiyun /** 396*4882a593Smuzhiyun * @brief - Starts a Tx queue, where queue_cid is already prepared 397*4882a593Smuzhiyun * 398*4882a593Smuzhiyun * @param p_hwfn 399*4882a593Smuzhiyun * @param p_cid 400*4882a593Smuzhiyun * @param pbl_addr 401*4882a593Smuzhiyun * @param pbl_size 402*4882a593Smuzhiyun * @param p_pq_params - parameters for choosing the PQ for this Tx queue 403*4882a593Smuzhiyun * 404*4882a593Smuzhiyun * @return int 405*4882a593Smuzhiyun */ 406*4882a593Smuzhiyun int 407*4882a593Smuzhiyun qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, 408*4882a593Smuzhiyun struct qed_queue_cid *p_cid, 409*4882a593Smuzhiyun dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id); 410*4882a593Smuzhiyun 411*4882a593Smuzhiyun u8 qed_mcast_bin_from_mac(u8 *mac); 412*4882a593Smuzhiyun 413*4882a593Smuzhiyun int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, 414*4882a593Smuzhiyun struct qed_ptt *p_ptt, 415*4882a593Smuzhiyun u16 coalesce, struct qed_queue_cid *p_cid); 416*4882a593Smuzhiyun 417*4882a593Smuzhiyun int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, 418*4882a593Smuzhiyun struct qed_ptt *p_ptt, 419*4882a593Smuzhiyun u16 coalesce, struct qed_queue_cid *p_cid); 420*4882a593Smuzhiyun 421*4882a593Smuzhiyun int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn, 422*4882a593Smuzhiyun struct qed_ptt *p_ptt, 423*4882a593Smuzhiyun struct qed_queue_cid *p_cid, u16 *p_hw_coal); 424*4882a593Smuzhiyun 425*4882a593Smuzhiyun int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn, 426*4882a593Smuzhiyun struct qed_ptt *p_ptt, 427*4882a593Smuzhiyun struct qed_queue_cid *p_cid, u16 *p_hw_coal); 428*4882a593Smuzhiyun 429*4882a593Smuzhiyun #endif 430