/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __DPAA_H
#define __DPAA_H

#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <soc/fsl/qman.h>
#include <soc/fsl/bman.h>

#include "fman.h"
#include "mac.h"
#include "dpaa_eth_trace.h"

/* Number of prioritised traffic classes */
#define DPAA_TC_NUM		4
/* Number of Tx queues per traffic class */
#define DPAA_TC_TXQ_NUM		NR_CPUS
/* Total number of Tx queues */
#define DPAA_ETH_TXQ_NUM	(DPAA_TC_NUM * DPAA_TC_TXQ_NUM)
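
/* Example (illustrative, assuming a kernel built with NR_CPUS == 8): the
 * total comes to 4 * 8 = 32 egress frame queues per interface, plus one
 * confirmation queue for each of them; the egress_fqs[] and conf_fqs[]
 * arrays in struct dpaa_priv below are dimensioned with DPAA_ETH_TXQ_NUM
 * for this reason.
 */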

/* More detailed FQ types - used for fine-grained WQ assignments */
enum dpaa_fq_type {
	FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
	FQ_TYPE_RX_ERROR,	/* Rx Error FQs */
	FQ_TYPE_RX_PCD,		/* Rx Parse Classify Distribute FQs */
	FQ_TYPE_TX,		/* "Real" Tx FQs */
	FQ_TYPE_TX_CONFIRM,	/* Tx default Conf FQ (actually an Rx FQ) */
	FQ_TYPE_TX_CONF_MQ,	/* Tx conf FQs (one for each Tx FQ) */
	FQ_TYPE_TX_ERROR,	/* Tx Error FQs (these are actually Rx FQs) */
};

struct dpaa_fq {
	struct qman_fq fq_base;
	struct list_head list;
	struct net_device *net_dev;
	bool init;
	u32 fqid;
	u32 flags;
	u16 channel;
	u8 wq;
	enum dpaa_fq_type fq_type;
};

struct dpaa_fq_cbs {
	struct qman_fq rx_defq;
	struct qman_fq tx_defq;
	struct qman_fq rx_errq;
	struct qman_fq tx_errq;
	struct qman_fq egress_ern;
};

struct dpaa_priv;

struct dpaa_bp {
	/* used in the DMA mapping operations */
	struct dpaa_priv *priv;
	/* current number of buffers in the buffer pool allotted to each CPU */
	int __percpu *percpu_count;
	/* all buffers allocated for this pool have this raw size */
	size_t raw_size;
	/* all buffers in this pool have this same usable size */
	size_t size;
	/* the buffer pools are initialized with config_count buffers for each
	 * CPU; at runtime the number of buffers per CPU is constantly brought
	 * back to this level
	 */
	u16 config_count;
	u8 bpid;
	struct bman_pool *pool;
	/* bpool can be seeded before use by this cb */
	int (*seed_cb)(struct dpaa_bp *);
	/* bpool can be emptied before freeing by this cb */
	void (*free_buf_cb)(const struct dpaa_bp *, struct bm_buffer *);
	refcount_t refs;
};
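
/* Illustrative sketch (not a prescribed API flow): a pool is typically seeded
 * once through seed_cb before first use, drained through free_buf_cb on
 * teardown, and refs keeps the pool alive while it still has users, e.g.:
 *
 *	bp->seed_cb = dpaa_bp_seed;
 *	bp->free_buf_cb = dpaa_bp_free_pf;
 *	refcount_set(&bp->refs, 1);
 *
 * dpaa_bp_seed() and dpaa_bp_free_pf() are hypothetical callback names used
 * here only for illustration.
 */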

struct dpaa_rx_errors {
	u64 dme;		/* DMA Error */
	u64 fpe;		/* Frame Physical Error */
	u64 fse;		/* Frame Size Error */
	u64 phe;		/* Header Error */
};

/* Counters for QMan ERN frames - one counter per rejection code */
struct dpaa_ern_cnt {
	u64 cg_tdrop;		/* Congestion group taildrop */
	u64 wred;		/* WRED congestion */
	u64 err_cond;		/* Error condition */
	u64 early_window;	/* Order restoration, frame too early */
	u64 late_window;	/* Order restoration, frame too late */
	u64 fq_tdrop;		/* FQ taildrop */
	u64 fq_retired;		/* FQ is retired */
	u64 orp_zero;		/* ORP disabled */
};

struct dpaa_napi_portal {
	struct napi_struct napi;
	struct qman_portal *p;
	bool down;
};

struct dpaa_percpu_priv {
	struct net_device *net_dev;
	struct dpaa_napi_portal np;
	u64 in_interrupt;
	u64 tx_confirm;
	/* fragmented (non-linear) skbuffs received from the stack */
	u64 tx_frag_skbuffs;
	struct rtnl_link_stats64 stats;
	struct dpaa_rx_errors rx_errors;
	struct dpaa_ern_cnt ern_cnt;
};

struct dpaa_buffer_layout {
	u16 priv_data_size;
};

struct dpaa_priv {
	struct dpaa_percpu_priv __percpu *percpu_priv;
	struct dpaa_bp *dpaa_bp;
	/* Store here the needed Tx headroom for convenience and speed
	 * (even though it can be computed based on the fields of buf_layout)
	 */
	u16 tx_headroom;
	struct net_device *net_dev;
	struct mac_device *mac_dev;
	struct device *rx_dma_dev;
	struct device *tx_dma_dev;
	struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM];
	struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM];

	u16 channel;
	struct list_head dpaa_fq_list;

	u8 num_tc;
	bool keygen_in_use;
	u32 msg_enable;		/* net_device message level */

	struct {
		/* All egress queues to a given net device belong to one
		 * (and the same) congestion group.
		 */
		struct qman_cgr cgr;
		/* If congested, when it began. Used for performance stats. */
		u32 congestion_start_jiffies;
		/* Number of jiffies the Tx port was congested. */
		u32 congested_jiffies;
		/* Counter for the number of times the CGR
		 * entered congestion state
		 */
		u32 cgr_congested_count;
	} cgr_data;
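	/* Sketch of how the fields above are typically maintained from the
	 * CGR state-change notification (assuming a callback that receives a
	 * "congested" flag; the actual hook lives in the driver code, not in
	 * this header):
	 *
	 *	if (congested) {
	 *		cgr_data.congestion_start_jiffies = jiffies;
	 *		cgr_data.cgr_congested_count++;
	 *	} else {
	 *		cgr_data.congested_jiffies +=
	 *			jiffies - cgr_data.congestion_start_jiffies;
	 *	}
	 */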
	/* Use a per-port CGR for ingress traffic. */
	bool use_ingress_cgr;
	struct qman_cgr ingress_cgr;

	struct dpaa_buffer_layout buf_layout[2];
	u16 rx_headroom;

	bool tx_tstamp;		/* Tx timestamping enabled */
	bool rx_tstamp;		/* Rx timestamping enabled */
};

/* from dpaa_ethtool.c */
extern const struct ethtool_ops dpaa_ethtool_ops;

/* from dpaa_eth_sysfs.c */
void dpaa_eth_sysfs_remove(struct device *dev);
void dpaa_eth_sysfs_init(struct device *dev);
#endif	/* __DPAA_H */