/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2019 Google, Inc.
 */

/* GVE Transmit Descriptor formats */

#ifndef _GVE_DESC_H_
#define _GVE_DESC_H_

#include <linux/build_bug.h>

/* A note on seg_addrs
 *
 * Base addresses encoded in seg_addr are not assumed to be physical
 * addresses. The ring format assumes these come from some linear address
 * space. This could be physical memory, kernel virtual memory, or user
 * virtual memory. gVNIC uses lists of registered pages. Each queue is
 * assumed to be associated with a single such linear address space to
 * ensure a consistent meaning for seg_addrs posted to its rings.
 */

struct gve_tx_pkt_desc {
	u8	type_flags;     /* desc type is lower 4 bits, flags upper */
	u8	l4_csum_offset; /* relative offset of L4 csum word */
	u8	l4_hdr_offset;  /* Offset of start of L4 headers in packet */
	u8	desc_cnt;       /* Total descriptors for this packet */
	__be16	len;            /* Total length of this packet (in bytes) */
	__be16	seg_len;        /* Length of this descriptor's segment */
	__be64	seg_addr;       /* Base address (see note) of this segment */
} __packed;

struct gve_tx_seg_desc {
	u8	type_flags; /* type is lower 4 bits, flags upper */
	u8	l3_offset;  /* TSO: 2-byte units to start of IPH */
	__be16	reserved;
	__be16	mss;        /* TSO MSS */
	__be16	seg_len;
	__be64	seg_addr;
} __packed;

/* GVE Transmit Descriptor Types */
#define GVE_TXD_STD (0x0 << 4) /* Std with Host Address */
#define GVE_TXD_TSO (0x1 << 4) /* TSO with Host Address */
#define GVE_TXD_SEG (0x2 << 4) /* Seg with Host Address */

/* GVE Transmit Descriptor Flags for Std Pkts */
#define GVE_TXF_L4CSUM BIT(0) /* Need csum offload */
#define GVE_TXF_TSTAMP BIT(2) /* Timestamp required */

/* GVE Transmit Descriptor Flags for TSO Segs */
#define GVE_TXSF_IPV6 BIT(1) /* IPv6 TSO */
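
/* Illustrative sketch, not part of the descriptor ABI above: one plausible
 * way to fill a single-descriptor standard packet with L4 checksum offload.
 * It assumes the two offset fields are expressed in 2-byte words, as the
 * "csum word" wording above suggests; the function name and the *_bytes
 * parameters are hypothetical, and seg_addr must come from the queue's
 * registered linear address space (see the seg_addrs note).
 */
static inline void gve_example_fill_std_desc(struct gve_tx_pkt_desc *desc,
					     u16 pkt_len, u64 seg_addr,
					     u8 csum_offset_bytes,
					     u8 hdr_offset_bytes)
{
	desc->type_flags = GVE_TXD_STD | GVE_TXF_L4CSUM;
	desc->l4_csum_offset = csum_offset_bytes >> 1; /* bytes -> 2-byte words */
	desc->l4_hdr_offset = hdr_offset_bytes >> 1;
	desc->desc_cnt = 1;                   /* whole packet in one segment */
	desc->len = cpu_to_be16(pkt_len);
	desc->seg_len = cpu_to_be16(pkt_len); /* single segment covers it */
	desc->seg_addr = cpu_to_be64(seg_addr);
}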

/* GVE Receive Packet Descriptor */
/* The start of an ethernet packet comes 2 bytes into the rx buffer.
 * gVNIC adds this padding so that both the DMA and the L3/L4 protocol header
 * accesses are aligned.
 */
#define GVE_RX_PAD 2

struct gve_rx_desc {
	u8	padding[48];
	__be32	rss_hash;  /* Receive-side scaling hash (Toeplitz for gVNIC) */
	__be16	mss;
	__be16	reserved;  /* Reserved to zero */
	u8	hdr_len;   /* Header length (L2-L4) including padding */
	u8	hdr_off;   /* 64-byte-scaled offset into RX_DATA entry */
	__sum16	csum;      /* 1's-complement partial checksum of L3+ bytes */
	__be16	len;       /* Length of the received packet */
	__be16	flags_seq; /* Flags [15:3] and sequence number [2:0] (1-7) */
} __packed;
static_assert(sizeof(struct gve_rx_desc) == 64);

/* As with the Tx ring format, the qpl_offset entries below are offsets into
 * an ordered list of registered pages.
 */
struct gve_rx_data_slot {
	/* byte offset into the rx registered segment of this slot */
	__be64 qpl_offset;
};

/* GVE Receive Packet Descriptor Seq No */
#define GVE_SEQNO(x) (be16_to_cpu(x) & 0x7)

/* GVE Receive Packet Descriptor Flags */
#define GVE_RXFLG(x) cpu_to_be16(1 << (3 + (x)))
#define GVE_RXF_FRAG GVE_RXFLG(3) /* IP Fragment */
#define GVE_RXF_IPV4 GVE_RXFLG(4) /* IPv4 */
#define GVE_RXF_IPV6 GVE_RXFLG(5) /* IPv6 */
#define GVE_RXF_TCP  GVE_RXFLG(6) /* TCP Packet */
#define GVE_RXF_UDP  GVE_RXFLG(7) /* UDP Packet */
#define GVE_RXF_ERR  GVE_RXFLG(8) /* Packet Error Detected */

/* GVE IRQ */
#define GVE_IRQ_ACK   BIT(31)
#define GVE_IRQ_MASK  BIT(30)
#define GVE_IRQ_EVENT BIT(29)

static inline bool gve_needs_rss(__be16 flag)
{
	if (flag & GVE_RXF_FRAG)
		return false;
	if (flag & (GVE_RXF_IPV4 | GVE_RXF_IPV6))
		return true;
	return false;
}

static inline u8 gve_next_seqno(u8 seq)
{
	return (seq + 1) == 8 ? 1 : seq + 1;
}
#endif /* _GVE_DESC_H_ */
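
/* Illustrative usage sketch, kept after the header guard purely for
 * exposition and not part of the gve ABI; gve_example_rx_poll() and its
 * parameters are hypothetical. It shows how a completion loop might consume
 * gve_rx_desc entries: the device stamps each descriptor with a 1-7
 * sequence number, so a slot is ready once its seqno matches the value the
 * driver expects next, with the expectation advancing 7 -> 1 via
 * gve_next_seqno().
 */
static inline int gve_example_rx_poll(const struct gve_rx_desc *ring, u32 cnt,
				      u32 *idx, u8 *expected_seqno)
{
	int work_done = 0;

	while (GVE_SEQNO(ring[*idx].flags_seq) == *expected_seqno) {
		const struct gve_rx_desc *desc = &ring[*idx];

		if (desc->flags_seq & GVE_RXF_ERR) {
			/* device flagged an error; drop this packet */
		} else if (gve_needs_rss(desc->flags_seq)) {
			/* non-fragmented IPv4/IPv6: rss_hash is usable */
		}
		/* The Ethernet frame starts GVE_RX_PAD bytes into the posted
		 * buffer; be16_to_cpu(desc->len) gives the received length.
		 */

		*expected_seqno = gve_next_seqno(*expected_seqno);
		*idx = (*idx + 1) & (cnt - 1); /* ring size assumed power of 2 */
		work_done++;
	}
	return work_done;
}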