1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0-only */ 2*4882a593Smuzhiyun /* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. 3*4882a593Smuzhiyun */ 4*4882a593Smuzhiyun 5*4882a593Smuzhiyun /* EMAC DMA HW engine uses three rings: 6*4882a593Smuzhiyun * Tx: 7*4882a593Smuzhiyun * TPD: Transmit Packet Descriptor ring. 8*4882a593Smuzhiyun * Rx: 9*4882a593Smuzhiyun * RFD: Receive Free Descriptor ring. 10*4882a593Smuzhiyun * Ring of descriptors with empty buffers to be filled by Rx HW. 11*4882a593Smuzhiyun * RRD: Receive Return Descriptor ring. 12*4882a593Smuzhiyun * Ring of descriptors with buffers filled with received data. 13*4882a593Smuzhiyun */ 14*4882a593Smuzhiyun 15*4882a593Smuzhiyun #ifndef _EMAC_HW_H_ 16*4882a593Smuzhiyun #define _EMAC_HW_H_ 17*4882a593Smuzhiyun 18*4882a593Smuzhiyun /* EMAC_CSR register offsets */ 19*4882a593Smuzhiyun #define EMAC_EMAC_WRAPPER_CSR1 0x000000 20*4882a593Smuzhiyun #define EMAC_EMAC_WRAPPER_CSR2 0x000004 21*4882a593Smuzhiyun #define EMAC_EMAC_WRAPPER_TX_TS_LO 0x000104 22*4882a593Smuzhiyun #define EMAC_EMAC_WRAPPER_TX_TS_HI 0x000108 23*4882a593Smuzhiyun #define EMAC_EMAC_WRAPPER_TX_TS_INX 0x00010c 24*4882a593Smuzhiyun 25*4882a593Smuzhiyun /* DMA Order Settings */ 26*4882a593Smuzhiyun enum emac_dma_order { 27*4882a593Smuzhiyun emac_dma_ord_in = 1, 28*4882a593Smuzhiyun emac_dma_ord_enh = 2, 29*4882a593Smuzhiyun emac_dma_ord_out = 4 30*4882a593Smuzhiyun }; 31*4882a593Smuzhiyun 32*4882a593Smuzhiyun enum emac_dma_req_block { 33*4882a593Smuzhiyun emac_dma_req_128 = 0, 34*4882a593Smuzhiyun emac_dma_req_256 = 1, 35*4882a593Smuzhiyun emac_dma_req_512 = 2, 36*4882a593Smuzhiyun emac_dma_req_1024 = 3, 37*4882a593Smuzhiyun emac_dma_req_2048 = 4, 38*4882a593Smuzhiyun emac_dma_req_4096 = 5 39*4882a593Smuzhiyun }; 40*4882a593Smuzhiyun 41*4882a593Smuzhiyun /* Returns the value of bits idx...idx+n_bits */ 42*4882a593Smuzhiyun #define BITS_GET(val, lo, hi) ((le32_to_cpu(val) & GENMASK((hi), (lo))) >> lo) 43*4882a593Smuzhiyun 
/* Set bits lo...hi (inclusive) of the little-endian word @val to @new_val.
 * NOTE: statement-like macro — it assigns to @val, and @val is evaluated
 * twice in the expansion, so do not pass an expression with side effects.
 */
#define BITS_SET(val, lo, hi, new_val) \
	val = cpu_to_le32((le32_to_cpu(val) & (~GENMASK((hi), (lo)))) | \
		(((new_val) << (lo)) & GENMASK((hi), (lo))))

/* RRD (Receive Return Descriptor) */
struct emac_rrd {
	u32	word[6];

/* number of RFD */
#define RRD_NOR(rrd)			BITS_GET((rrd)->word[0], 16, 19)
/* start consumer index of rfd-ring */
#define RRD_SI(rrd)			BITS_GET((rrd)->word[0], 20, 31)
/* vlan-tag (CVID, CFI and PRI) */
#define RRD_CVALN_TAG(rrd)		BITS_GET((rrd)->word[2], 0, 15)
/* length of the packet */
#define RRD_PKT_SIZE(rrd)		BITS_GET((rrd)->word[3], 0, 13)
/* L4(TCP/UDP) checksum failed */
#define RRD_L4F(rrd)			BITS_GET((rrd)->word[3], 14, 14)
/* vlan tagged */
#define RRD_CVTAG(rrd)			BITS_GET((rrd)->word[3], 16, 16)
/* When set, indicates that the descriptor is updated by the IP core.
 * When cleared, indicates that the descriptor is invalid.
 */
#define RRD_UPDT(rrd)			BITS_GET((rrd)->word[3], 31, 31)
#define RRD_UPDT_SET(rrd, val)		BITS_SET((rrd)->word[3], 31, 31, val)
/* timestamp low */
#define RRD_TS_LOW(rrd)			BITS_GET((rrd)->word[4], 0, 29)
/* timestamp high */
#define RRD_TS_HI(rrd)			le32_to_cpu((rrd)->word[5])
};

/* TPD (Transmit Packet Descriptor) */
struct emac_tpd {
	u32	word[4];

/* Number of bytes of the transmit packet. (include 4-byte CRC) */
#define TPD_BUF_LEN_SET(tpd, val)	BITS_SET((tpd)->word[0], 0, 15, val)
/* Custom Checksum Offload: When set, ask IP core to offload custom checksum */
#define TPD_CSX_SET(tpd, val)		BITS_SET((tpd)->word[1], 8, 8, val)
/* TCP Large Send Offload: When set, ask IP core to do offload TCP Large Send */
#define TPD_LSO(tpd)			BITS_GET((tpd)->word[1], 12, 12)
#define TPD_LSO_SET(tpd, val)		BITS_SET((tpd)->word[1], 12, 12, val)
/* Large Send Offload Version: When set, indicates this is an LSOv2
 * (for both IPv4 and IPv6). When cleared, indicates this is an LSOv1
 * (only for IPv4).
 */
#define TPD_LSOV_SET(tpd, val)		BITS_SET((tpd)->word[1], 13, 13, val)
/* IPv4 packet: When set, indicates this is an IPv4 packet, this bit is only
 * for LSOV2 format.
 */
#define TPD_IPV4_SET(tpd, val)		BITS_SET((tpd)->word[1], 16, 16, val)
/* 0: Ethernet frame (DA+SA+TYPE+DATA+CRC)
 * 1: IEEE 802.3 frame (DA+SA+LEN+DSAP+SSAP+CTL+ORG+TYPE+DATA+CRC)
 */
#define TPD_TYP_SET(tpd, val)		BITS_SET((tpd)->word[1], 17, 17, val)
/* Low-32bit Buffer Address */
#define TPD_BUFFER_ADDR_L_SET(tpd, val)	((tpd)->word[2] = cpu_to_le32(val))
/* CVLAN Tag to be inserted if INS_VLAN_TAG is set, CVLAN TPID based on global
 * register configuration.
 */
#define TPD_CVLAN_TAG_SET(tpd, val)	BITS_SET((tpd)->word[3], 0, 15, val)
/* Insert CVlan Tag: When set, ask MAC to insert CVLAN TAG to outgoing packet
 */
#define TPD_INSTC_SET(tpd, val)		BITS_SET((tpd)->word[3], 17, 17, val)
/* High-14bit Buffer Address, So, the 64b-bit address is
 * {DESC_CTRL_11_TX_DATA_HIADDR[17:0],(register) BUFFER_ADDR_H, BUFFER_ADDR_L}
 * Extend TPD_BUFFER_ADDR_H to [31, 18], because we never enable timestamping.
 */
#define TPD_BUFFER_ADDR_H_SET(tpd, val)	BITS_SET((tpd)->word[3], 18, 31, val)
/* Format D. Word offset from the 1st byte of this packet to start to calculate
 * the custom checksum.
 */
#define TPD_PAYLOAD_OFFSET_SET(tpd, val) BITS_SET((tpd)->word[1], 0, 7, val)
/* Format D. Word offset from the 1st byte of this packet to fill the custom
 * checksum to
 */
#define TPD_CXSUM_OFFSET_SET(tpd, val)	BITS_SET((tpd)->word[1], 18, 25, val)

/* Format C. TCP Header offset from the 1st byte of this packet. (byte unit) */
#define TPD_TCPHDR_OFFSET_SET(tpd, val)	BITS_SET((tpd)->word[1], 0, 7, val)
/* Format C. MSS (Maximum Segment Size) got from the protocol layer. (byte unit)
 */
#define TPD_MSS_SET(tpd, val)		BITS_SET((tpd)->word[1], 18, 30, val)
/* packet length in ext tpd */
#define TPD_PKT_LEN_SET(tpd, val)	((tpd)->word[2] = cpu_to_le32(val))
};

/* emac_ring_header represents a single, contiguous block of DMA space
 * mapped for the three descriptor rings (tpd, rfd, rrd)
 */
struct emac_ring_header {
	void			*v_addr;	/* virtual address */
	dma_addr_t		 dma_addr;	/* dma address */
	size_t			 size;		/* length in bytes */
	size_t			 used;
};

/* emac_buffer is wrapper around a pointer to a socket buffer
 * so a DMA handle can be stored along with the skb
 */
struct emac_buffer {
	struct sk_buff		*skb;		/* socket buffer */
	u16			 length;	/* rx buffer length */
	dma_addr_t		 dma_addr;	/* dma address */
};

/* receive free descriptor (rfd) ring */
struct emac_rfd_ring {
	struct emac_buffer	*rfbuff;
	u32			*v_addr;	/* virtual address */
	dma_addr_t		 dma_addr;	/* dma address */
	size_t			 size;		/* length in bytes */
	unsigned int		 count;		/* number of desc in the ring */
	unsigned int		 produce_idx;
	unsigned int		 process_idx;
	unsigned int		 consume_idx;	/* unused */
};

/* Receive Return Descriptor (RRD) ring */
struct emac_rrd_ring {
	u32			*v_addr;	/* virtual address */
	dma_addr_t		 dma_addr;	/* physical address */
	size_t			 size;		/* length in bytes */
	unsigned int		 count;		/* number of desc in the ring */
	unsigned int		 produce_idx;	/* unused */
	unsigned int		 consume_idx;
};

/* Rx queue */
struct emac_rx_queue {
	struct net_device	*netdev;	/* netdev ring belongs to */
	struct emac_rrd_ring	 rrd;
	struct emac_rfd_ring	 rfd;
	struct napi_struct	 napi;
	struct emac_irq		*irq;

	u32			 intr;
	u32			 produce_mask;
	u32			 process_mask;
	u32			 consume_mask;

	u16			 produce_reg;
	u16			 process_reg;
	u16			 consume_reg;

	u8			 produce_shift;
	u8			 process_shft;
	u8			 consume_shift;
};

/* Transmit Packet Descriptor (tpd) ring */
struct emac_tpd_ring {
	struct emac_buffer	*tpbuff;
	u32			*v_addr;	/* virtual address */
	dma_addr_t		 dma_addr;	/* dma address */

	size_t			 size;		/* length in bytes */
	unsigned int		 count;		/* number of desc in the ring */
	unsigned int		 produce_idx;
	unsigned int		 consume_idx;
	unsigned int		 last_produce_idx;
};

/* Tx queue */
struct emac_tx_queue {
	struct emac_tpd_ring	 tpd;

	u32			 produce_mask;
	u32			 consume_mask;

	u16			 max_packets;	/* max packets per interrupt */
	u16			 produce_reg;
	u16			 consume_reg;

	u8			 produce_shift;
	u8			 consume_shift;
};

struct emac_adapter;

/* MAC/DMA control and data-path entry points (implemented in the
 * corresponding .c file, not visible here).
 */
int  emac_mac_up(struct emac_adapter *adpt);
void emac_mac_down(struct emac_adapter *adpt);
void emac_mac_reset(struct emac_adapter *adpt);
void emac_mac_stop(struct emac_adapter *adpt);
void emac_mac_mode_config(struct emac_adapter *adpt);
void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q,
			 int *num_pkts, int max_pkts);
netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
				 struct emac_tx_queue *tx_q,
				 struct sk_buff *skb);
void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q);
void emac_mac_rx_tx_ring_init_all(struct platform_device *pdev,
				  struct emac_adapter *adpt);
int  emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt);
void emac_mac_rx_tx_rings_free_all(struct emac_adapter *adpt);
void emac_mac_multicast_addr_clear(struct emac_adapter *adpt);
void emac_mac_multicast_addr_set(struct emac_adapter *adpt, u8 *addr);

#endif /*_EMAC_HW_H_*/