/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FORE200E_H
#define _FORE200E_H

#ifdef __KERNEL__

/* rx buffer sizes */

#define SMALL_BUFFER_SIZE    384     /* size of small buffers (multiple of 48 (PCA) and 64 (SBA) bytes) */
#define LARGE_BUFFER_SIZE    4032    /* size of large buffers (multiple of 48 (PCA) and 64 (SBA) bytes) */

#define RBD_BLK_SIZE         32      /* nbr of supplied rx buffers per rbd */

#define MAX_PDU_SIZE         65535   /* maximum PDU size supported by AALs */

#define BUFFER_S1_SIZE       SMALL_BUFFER_SIZE    /* size of small buffers, scheme 1 */
#define BUFFER_L1_SIZE       LARGE_BUFFER_SIZE    /* size of large buffers, scheme 1 */

#define BUFFER_S2_SIZE       SMALL_BUFFER_SIZE    /* size of small buffers, scheme 2 */
#define BUFFER_L2_SIZE       LARGE_BUFFER_SIZE    /* size of large buffers, scheme 2 */

#define BUFFER_S1_NBR        (RBD_BLK_SIZE * 6)
#define BUFFER_L1_NBR        (RBD_BLK_SIZE * 4)

#define BUFFER_S2_NBR        (RBD_BLK_SIZE * 6)
#define BUFFER_L2_NBR        (RBD_BLK_SIZE * 4)

#define QUEUE_SIZE_CMD       16      /* command queue capacity       */
#define QUEUE_SIZE_RX        64      /* receive queue capacity       */
#define QUEUE_SIZE_TX        256     /* transmit queue capacity      */
#define QUEUE_SIZE_BS        32      /* buffer supply queue capacity */

#define FORE200E_VPI_BITS     0
#define FORE200E_VCI_BITS    10
#define NBR_CONNECT          (1 << (FORE200E_VPI_BITS + FORE200E_VCI_BITS))    /* number of connections */

#define TSD_FIXED            2
#define TSD_EXTENSION        0
#define TSD_NBR              (TSD_FIXED + TSD_EXTENSION)
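
/* Worked check (not part of the original header): SMALL_BUFFER_SIZE and
 * LARGE_BUFFER_SIZE satisfy the constraint stated above, since
 * 384 = 8 * 48 = 6 * 64 and 4032 = 84 * 48 = 63 * 64. With RBD_BLK_SIZE = 32,
 * the counts above also work out to 192 small and 128 large buffers per
 * buffer scheme. */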

/* the cp starts putting a received PDU into one *small* buffer,
   then it uses a number of *large* buffers for the trailing data.
   we compute here the total number of receive segment descriptors
   required to hold the largest possible PDU */

#define RSD_REQUIRED  (((MAX_PDU_SIZE - SMALL_BUFFER_SIZE + LARGE_BUFFER_SIZE) / LARGE_BUFFER_SIZE) + 1)

#define RSD_FIXED  3

/* RSD_REQUIRED receive segment descriptors are enough to describe a max-sized PDU,
   but we have to keep the size of the receive PDU descriptor a multiple of 32 bytes,
   so we add one extra RSD to RSD_EXTENSION
   (WARNING: THIS MAY CHANGE IF BUFFER SIZES ARE MODIFIED) */

#define RSD_EXTENSION  ((RSD_REQUIRED - RSD_FIXED) + 1)
#define RSD_NBR        (RSD_FIXED + RSD_EXTENSION)
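
/* Worked example (not part of the original header): with the sizes above,
 * RSD_REQUIRED = ((65535 - 384 + 4032) / 4032) + 1 = 17 + 1 = 18, i.e. one
 * small buffer plus at most 17 large buffers cover a maximum-sized PDU
 * (384 + 17 * 4032 = 68928 >= 65535). RSD_EXTENSION = (18 - 3) + 1 = 16,
 * so RSD_NBR = 19 and sizeof(struct rpd), defined below, is
 * 4 + 4 + 19 * 8 = 160 bytes, a multiple of 32; without the extra RSD it
 * would be 152 bytes, which is why the "+ 1" is there. */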

#define FORE200E_DEV(d)    ((struct fore200e*)((d)->dev_data))
#define FORE200E_VCC(d)    ((struct fore200e_vcc*)((d)->dev_data))

/* bitfields endian games */

#if defined(__LITTLE_ENDIAN_BITFIELD)
#define BITFIELD2(b1, b2)                    b1; b2;
#define BITFIELD3(b1, b2, b3)                b1; b2; b3;
#define BITFIELD4(b1, b2, b3, b4)            b1; b2; b3; b4;
#define BITFIELD5(b1, b2, b3, b4, b5)        b1; b2; b3; b4; b5;
#define BITFIELD6(b1, b2, b3, b4, b5, b6)    b1; b2; b3; b4; b5; b6;
#elif defined(__BIG_ENDIAN_BITFIELD)
#define BITFIELD2(b1, b2)                    b2; b1;
#define BITFIELD3(b1, b2, b3)                b3; b2; b1;
#define BITFIELD4(b1, b2, b3, b4)            b4; b3; b2; b1;
#define BITFIELD5(b1, b2, b3, b4, b5)        b5; b4; b3; b2; b1;
#define BITFIELD6(b1, b2, b3, b4, b5, b6)    b6; b5; b4; b3; b2; b1;
#else
#error unknown bitfield endianness
#endif

/* ATM cell header (minus HEC byte) */

typedef struct atm_header {
    BITFIELD5(
        u32 clp :  1,    /* cell loss priority         */
        u32 plt :  3,    /* payload type               */
        u32 vci : 16,    /* virtual channel identifier */
        u32 vpi :  8,    /* virtual path identifier    */
        u32 gfc :  4     /* generic flow control       */
    )
} atm_header_t;

/* ATM adaptation layer id */

typedef enum fore200e_aal {
    FORE200E_AAL0  = 0,
    FORE200E_AAL34 = 4,
    FORE200E_AAL5  = 5,
} fore200e_aal_t;

/* transmit PDU descriptor specification */

typedef struct tpd_spec {
    BITFIELD4(
        u32               length : 16,    /* total PDU length            */
        u32               nseg   :  8,    /* number of transmit segments */
        enum fore200e_aal aal    :  4,    /* adaptation layer            */
        u32               intr   :  4     /* interrupt requested         */
    )
} tpd_spec_t;

/* transmit PDU rate control */

typedef struct tpd_rate {
    BITFIELD2(
        u32 idle_cells : 16,    /* number of idle cells to insert   */
        u32 data_cells : 16     /* number of data cells to transmit */
    )
} tpd_rate_t;

/* transmit segment descriptor */

typedef struct tsd {
    u32 buffer;    /* transmit buffer DMA address */
    u32 length;    /* number of bytes in buffer   */
} tsd_t;

/* transmit PDU descriptor */

typedef struct tpd {
    struct atm_header atm_header;        /* ATM header minus HEC byte    */
    struct tpd_spec   spec;              /* tpd specification            */
    struct tpd_rate   rate;              /* tpd rate control             */
    u32               pad;               /* reserved                     */
    struct tsd        tsd[ TSD_NBR ];    /* transmit segment descriptors */
} tpd_t;

/* receive segment descriptor */

typedef struct rsd {
    u32 handle;    /* host supplied receive buffer handle */
    u32 length;    /* number of bytes in buffer           */
} rsd_t;

/* receive PDU descriptor */

typedef struct rpd {
    struct atm_header atm_header;        /* ATM header minus HEC byte   */
    u32               nseg;              /* number of receive segments  */
    struct rsd        rsd[ RSD_NBR ];    /* receive segment descriptors */
} rpd_t;

/* buffer scheme */

typedef enum buffer_scheme {
    BUFFER_SCHEME_ONE,
    BUFFER_SCHEME_TWO,
    BUFFER_SCHEME_NBR    /* always last */
} buffer_scheme_t;

/* buffer magnitude */

typedef enum buffer_magn {
    BUFFER_MAGN_SMALL,
    BUFFER_MAGN_LARGE,
    BUFFER_MAGN_NBR    /* always last */
} buffer_magn_t;

/* receive buffer descriptor */

typedef struct rbd {
    u32 handle;          /* host supplied handle            */
    u32 buffer_haddr;    /* host DMA address of host buffer */
} rbd_t;

/* receive buffer descriptor block */

typedef struct rbd_block {
    struct rbd rbd[ RBD_BLK_SIZE ];    /* receive buffer descriptors */
} rbd_block_t;

/* tpd DMA address */

typedef struct tpd_haddr {
    BITFIELD3(
        u32 size  :  4,    /* tpd size expressed in 32 byte blocks     */
        u32 pad   :  1,    /* reserved                                 */
        u32 haddr : 27     /* tpd DMA addr aligned on 32 byte boundary */
    )
} tpd_haddr_t;

#define TPD_HADDR_SHIFT  5    /* addr aligned on 32 byte boundary */
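
/* Illustrative sketch (not part of the original header): with TSD_NBR = 2,
 * sizeof(struct tpd) is 4 + 4 + 4 + 4 + 2 * 8 = 32 bytes, i.e. exactly one
 * 32 byte block. Because the descriptor sits on a 32 byte boundary, its DMA
 * address fits in the 27-bit 'haddr' field once shifted right by
 * TPD_HADDR_SHIFT. Assuming 'tpd_dma' holds such an aligned address, the
 * host side can pack the field roughly as follows (the transmit path in
 * fore200e.c does the equivalent):
 *
 *     struct tpd_haddr h;
 *
 *     h.size  = sizeof(struct tpd) / (1 << TPD_HADDR_SHIFT);
 *     h.pad   = 0;
 *     h.haddr = tpd_dma >> TPD_HADDR_SHIFT;
 */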

/* cp resident transmit queue entry */

typedef struct cp_txq_entry {
    struct tpd_haddr tpd_haddr;       /* host DMA address of tpd               */
    u32              status_haddr;    /* host DMA address of completion status */
} cp_txq_entry_t;

/* cp resident receive queue entry */

typedef struct cp_rxq_entry {
    u32 rpd_haddr;       /* host DMA address of rpd               */
    u32 status_haddr;    /* host DMA address of completion status */
} cp_rxq_entry_t;

/* cp resident buffer supply queue entry */

typedef struct cp_bsq_entry {
    u32 rbd_block_haddr;    /* host DMA address of rbd block         */
    u32 status_haddr;       /* host DMA address of completion status */
} cp_bsq_entry_t;

/* completion status */

typedef volatile enum status {
    STATUS_PENDING  = (1<<0),    /* initial status (written by host)  */
    STATUS_COMPLETE = (1<<1),    /* completion status (written by cp) */
    STATUS_FREE     = (1<<2),    /* initial status (written by host)  */
    STATUS_ERROR    = (1<<3)     /* completion status (written by cp) */
} status_t;

/* cp operation code */

typedef enum opcode {
    OPCODE_INITIALIZE = 1,          /* initialize board                       */
    OPCODE_ACTIVATE_VCIN,           /* activate incoming VCI                  */
    OPCODE_ACTIVATE_VCOUT,          /* activate outgoing VCI                  */
    OPCODE_DEACTIVATE_VCIN,         /* deactivate incoming VCI                */
    OPCODE_DEACTIVATE_VCOUT,        /* deactivate outgoing VCI                */
    OPCODE_GET_STATS,               /* get board statistics                   */
    OPCODE_SET_OC3,                 /* set OC-3 registers                     */
    OPCODE_GET_OC3,                 /* get OC-3 registers                     */
    OPCODE_RESET_STATS,             /* reset board statistics                 */
    OPCODE_GET_PROM,                /* get expansion PROM data (PCI specific) */
    OPCODE_SET_VPI_BITS,            /* set x bits of those decoded by the
                                       firmware to be low order bits from
                                       the VPI field of the ATM cell header   */
    OPCODE_REQUEST_INTR = (1<<7)    /* request interrupt                      */
} opcode_t;

/* virtual path / virtual channel identifiers */

typedef struct vpvc {
    BITFIELD3(
        u32 vci : 16,    /* virtual channel identifier */
        u32 vpi :  8,    /* virtual path identifier    */
        u32 pad :  8     /* reserved                   */
    )
} vpvc_t;

/* activate VC command opcode */

typedef struct activate_opcode {
    BITFIELD4(
        enum opcode        opcode : 8,    /* cp opcode        */
        enum fore200e_aal  aal    : 8,    /* adaptation layer */
        enum buffer_scheme scheme : 8,    /* buffer scheme    */
        u32                pad    : 8     /* reserved         */
    )
} activate_opcode_t;

/* activate VC command block */

typedef struct activate_block {
    struct activate_opcode opcode;    /* activate VC command opcode */
    struct vpvc            vpvc;      /* VPI/VCI                    */
    u32                    mtu;       /* for AAL0 only              */
} activate_block_t;

/* deactivate VC command opcode */

typedef struct deactivate_opcode {
    BITFIELD2(
        enum opcode opcode :  8,    /* cp opcode */
        u32         pad    : 24     /* reserved  */
    )
} deactivate_opcode_t;

/* deactivate VC command block */

typedef struct deactivate_block {
    struct deactivate_opcode opcode;    /* deactivate VC command opcode */
    struct vpvc              vpvc;      /* VPI/VCI                      */
} deactivate_block_t;

/* OC-3 registers */

typedef struct oc3_regs {
    u32 reg[ 128 ];    /* see the PMC Sierra PC5346 S/UNI-155-Lite
                          Saturn User Network Interface documentation
                          for a description of the OC-3 chip registers */
} oc3_regs_t;

/* set/get OC-3 regs command opcode */

typedef struct oc3_opcode {
    BITFIELD4(
        enum opcode opcode :  8,    /* cp opcode                           */
        u32         reg    :  8,    /* register index                      */
        u32         value  :  8,    /* register value                      */
        u32         mask   :  8     /* register mask that specifies which
                                       bits of the register value field
                                       are significant                     */
    )
} oc3_opcode_t;

/* set/get OC-3 regs command block */

typedef struct oc3_block {
    struct oc3_opcode opcode;        /* set/get OC-3 regs command opcode     */
    u32               regs_haddr;    /* host DMA address of OC-3 regs buffer */
} oc3_block_t;

/* physical encoding statistics */

typedef struct stats_phy {
    __be32 crc_header_errors;    /* cells received with bad header CRC */
    __be32 framing_errors;       /* cells received with bad framing    */
    __be32 pad[ 2 ];             /* i960 padding                       */
} stats_phy_t;

/* OC-3 statistics */

typedef struct stats_oc3 {
    __be32 section_bip8_errors;    /* section 8 bit interleaved parity    */
    __be32 path_bip8_errors;       /* path 8 bit interleaved parity       */
    __be32 line_bip24_errors;      /* line 24 bit interleaved parity      */
    __be32 line_febe_errors;       /* line far end block errors           */
    __be32 path_febe_errors;       /* path far end block errors           */
    __be32 corr_hcs_errors;        /* correctable header check sequence   */
    __be32 ucorr_hcs_errors;       /* uncorrectable header check sequence */
    __be32 pad[ 1 ];               /* i960 padding                        */
} stats_oc3_t;

/* ATM statistics */

typedef struct stats_atm {
    __be32 cells_transmitted;    /* cells transmitted                 */
    __be32 cells_received;       /* cells received                    */
    __be32 vpi_bad_range;        /* cell drops: VPI out of range      */
    __be32 vpi_no_conn;          /* cell drops: no connection for VPI */
    __be32 vci_bad_range;        /* cell drops: VCI out of range      */
    __be32 vci_no_conn;          /* cell drops: no connection for VCI */
    __be32 pad[ 2 ];             /* i960 padding                      */
} stats_atm_t;

/* AAL0 statistics */

typedef struct stats_aal0 {
    __be32 cells_transmitted;    /* cells transmitted */
    __be32 cells_received;       /* cells received    */
    __be32 cells_dropped;        /* cells dropped     */
    __be32 pad[ 1 ];             /* i960 padding      */
} stats_aal0_t;

/* AAL3/4 statistics */

typedef struct stats_aal34 {
    __be32 cells_transmitted;         /* cells transmitted from segmented PDUs */
    __be32 cells_received;            /* cells reassembled into PDUs           */
    __be32 cells_crc_errors;          /* payload CRC error count               */
    __be32 cells_protocol_errors;     /* SAR or CS layer protocol errors       */
    __be32 cells_dropped;             /* cells dropped: partial reassembly     */
    __be32 cspdus_transmitted;        /* CS PDUs transmitted                   */
    __be32 cspdus_received;           /* CS PDUs received                      */
    __be32 cspdus_protocol_errors;    /* CS layer protocol errors              */
    __be32 cspdus_dropped;            /* reassembled PDUs dropped (in cells)   */
    __be32 pad[ 3 ];                  /* i960 padding                          */
} stats_aal34_t;

/* AAL5 statistics */

typedef struct stats_aal5 {
    __be32 cells_transmitted;         /* cells transmitted from segmented SDUs */
    __be32 cells_received;            /* cells reassembled into SDUs           */
    __be32 cells_dropped;             /* reassembled PDUs dropped (in cells)   */
    __be32 congestion_experienced;    /* CRC error and length wrong            */
    __be32 cspdus_transmitted;        /* CS PDUs transmitted                   */
    __be32 cspdus_received;           /* CS PDUs received                      */
    __be32 cspdus_crc_errors;         /* CS PDUs CRC errors                    */
    __be32 cspdus_protocol_errors;    /* CS layer protocol errors              */
    __be32 cspdus_dropped;            /* reassembled PDUs dropped              */
    __be32 pad[ 3 ];                  /* i960 padding                          */
} stats_aal5_t;

/* auxiliary statistics */

typedef struct stats_aux {
    __be32 small_b1_failed;     /* receive BD allocation failures  */
    __be32 large_b1_failed;     /* receive BD allocation failures  */
    __be32 small_b2_failed;     /* receive BD allocation failures  */
    __be32 large_b2_failed;     /* receive BD allocation failures  */
    __be32 rpd_alloc_failed;    /* receive PDU allocation failures */
    __be32 receive_carrier;     /* no carrier = 0, carrier = 1     */
    __be32 pad[ 2 ];            /* i960 padding                    */
} stats_aux_t;

/* whole statistics buffer */

typedef struct stats {
    struct stats_phy   phy;      /* physical encoding statistics */
    struct stats_oc3   oc3;      /* OC-3 statistics              */
    struct stats_atm   atm;      /* ATM statistics               */
    struct stats_aal0  aal0;     /* AAL0 statistics              */
    struct stats_aal34 aal34;    /* AAL3/4 statistics            */
    struct stats_aal5  aal5;     /* AAL5 statistics              */
    struct stats_aux   aux;      /* auxiliary statistics         */
} stats_t;

/* get statistics command opcode */

typedef struct stats_opcode {
    BITFIELD2(
        enum opcode opcode :  8,    /* cp opcode */
        u32         pad    : 24     /* reserved  */
    )
} stats_opcode_t;

/* get statistics command block */

typedef struct stats_block {
    struct stats_opcode opcode;         /* get statistics command opcode    */
    u32                 stats_haddr;    /* host DMA address of stats buffer */
} stats_block_t;

/* expansion PROM data (PCI specific) */

typedef struct prom_data {
    u32 hw_revision;      /* hardware revision   */
    u32 serial_number;    /* board serial number */
    u8  mac_addr[ 8 ];    /* board MAC address   */
} prom_data_t;

/* get expansion PROM data command opcode */

typedef struct prom_opcode {
    BITFIELD2(
        enum opcode opcode :  8,    /* cp opcode */
        u32         pad    : 24     /* reserved  */
    )
} prom_opcode_t;

/* get expansion PROM data command block */

typedef struct prom_block {
    struct prom_opcode opcode;        /* get PROM data command opcode    */
    u32                prom_haddr;    /* host DMA address of PROM buffer */
} prom_block_t;

/* cp command */

typedef union cmd {
    enum   opcode           opcode;              /* operation code          */
    struct activate_block   activate_block;      /* activate VC             */
    struct deactivate_block deactivate_block;    /* deactivate VC           */
    struct stats_block      stats_block;         /* get statistics          */
    struct prom_block       prom_block;          /* get expansion PROM data */
    struct oc3_block        oc3_block;           /* get/set OC-3 registers  */
    u32                     pad[ 4 ];            /* i960 padding            */
} cmd_t;

/* cp resident command queue */

typedef struct cp_cmdq_entry {
    union cmd cmd;             /* command                               */
    u32       status_haddr;    /* host DMA address of completion status */
    u32       pad[ 3 ];        /* i960 padding                          */
} cp_cmdq_entry_t;

/* host resident transmit queue entry */

typedef struct host_txq_entry {
    struct cp_txq_entry __iomem *cp_entry;    /* addr of cp resident tx queue entry       */
    enum   status*          status;           /* addr of host resident status             */
    struct tpd*             tpd;              /* addr of transmit PDU descriptor          */
    u32                     tpd_dma;          /* DMA address of tpd                       */
    struct sk_buff*         skb;              /* related skb                              */
    void*                   data;             /* copy of misaligned data                  */
    unsigned long           incarn;           /* vc_map incarnation when submitted for tx */
    struct fore200e_vc_map* vc_map;
} host_txq_entry_t;

/* host resident receive queue entry */

typedef struct host_rxq_entry {
    struct cp_rxq_entry __iomem *cp_entry;    /* addr of cp resident rx queue entry */
    enum   status* status;                    /* addr of host resident status       */
    struct rpd*    rpd;                       /* addr of receive PDU descriptor     */
    u32            rpd_dma;                   /* DMA address of rpd                 */
} host_rxq_entry_t;

/* host resident buffer supply queue entry */

typedef struct host_bsq_entry {
    struct cp_bsq_entry __iomem *cp_entry;    /* addr of cp resident buffer supply queue entry */
    enum   status*     status;                /* addr of host resident status                  */
    struct rbd_block*  rbd_block;             /* addr of receive buffer descriptor block       */
    u32                rbd_block_dma;         /* DMA address of rbd block                      */
} host_bsq_entry_t;
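
/* Illustrative sketch (not part of the original header) of the completion
 * handshake implied by 'enum status' and the host_*q_entry structures: the
 * host primes the status word, points the cp resident entry at the
 * descriptor's DMA address, and later checks whether the cp has turned the
 * word into STATUS_COMPLETE (or STATUS_ERROR). Assuming a populated
 * host_rxq_entry called 'entry', a consumer might do something like:
 *
 *     if (*entry->status & STATUS_COMPLETE) {
 *             ... process entry->rpd, then recycle the slot ...
 *             *entry->status = STATUS_FREE;
 *     }
 *
 * The actual policy lives in fore200e.c; this is only meant to show how the
 * pieces declared above fit together. */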

/* host resident command queue entry */

typedef struct host_cmdq_entry {
    struct cp_cmdq_entry __iomem *cp_entry;    /* addr of cp resident cmd queue entry */
    enum status *status;                       /* addr of host resident status        */
} host_cmdq_entry_t;

/* chunk of memory */

typedef struct chunk {
    void*      alloc_addr;    /* base address of allocated chunk */
    void*      align_addr;    /* base address of aligned chunk   */
    dma_addr_t dma_addr;      /* DMA address of aligned chunk    */
    int        direction;     /* direction of DMA mapping        */
    u32        alloc_size;    /* length of allocated chunk       */
    u32        align_size;    /* length of aligned chunk         */
} chunk_t;

#define dma_size align_size    /* DMA useable size */

/* host resident receive buffer */

typedef struct buffer {
    struct buffer*       next;      /* next receive buffer */
    enum   buffer_scheme scheme;    /* buffer scheme       */
    enum   buffer_magn   magn;      /* buffer magnitude    */
    struct chunk         data;      /* data buffer         */
#ifdef FORE200E_BSQ_DEBUG
    unsigned long        index;       /* buffer # in queue      */
    int                  supplied;    /* 'buffer supplied' flag */
#endif
} buffer_t;

#if (BITS_PER_LONG == 32)
#define FORE200E_BUF2HDL(buffer)    ((u32)(buffer))
#define FORE200E_HDL2BUF(handle)    ((struct buffer*)(handle))
#else    /* deal with 64 bit pointers */
#define FORE200E_BUF2HDL(buffer)    ((u32)((u64)(buffer)))
#define FORE200E_HDL2BUF(handle)    ((struct buffer*)(((u64)(handle)) | PAGE_OFFSET))
#endif
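
/* Note (not part of the original header): the rbd 'handle' field is only
 * 32 bits wide, so on 64 bit hosts FORE200E_BUF2HDL() truncates the kernel
 * virtual address of the struct buffer. FORE200E_HDL2BUF() can rebuild the
 * pointer by OR-ing PAGE_OFFSET back in only because these buffers are
 * allocated from the kernel's directly mapped region, whose upper address
 * bits are those of PAGE_OFFSET. For instance, assuming a PAGE_OFFSET whose
 * low 32 bits are zero, a buffer at PAGE_OFFSET + 0x1234000 round-trips
 * through the 32 bit handle 0x01234000. */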

/* host resident command queue */

typedef struct host_cmdq {
    struct host_cmdq_entry host_entry[ QUEUE_SIZE_CMD ];    /* host resident cmd queue entries */
    int                    head;                            /* head of cmd queue               */
    struct chunk           status;                          /* array of completion status      */
} host_cmdq_t;

/* host resident transmit queue */

typedef struct host_txq {
    struct host_txq_entry host_entry[ QUEUE_SIZE_TX ];    /* host resident tx queue entries     */
    int                   head;                           /* head of tx queue                   */
    int                   tail;                           /* tail of tx queue                   */
    struct chunk          tpd;                            /* array of tpds                      */
    struct chunk          status;                         /* array of completion status         */
    int                   txing;                          /* number of pending PDUs in tx queue */
} host_txq_t;

/* host resident receive queue */

typedef struct host_rxq {
    struct host_rxq_entry host_entry[ QUEUE_SIZE_RX ];    /* host resident rx queue entries */
    int                   head;                           /* head of rx queue               */
    struct chunk          rpd;                            /* array of rpds                  */
    struct chunk          status;                         /* array of completion status     */
} host_rxq_t;

/* host resident buffer supply queues */

typedef struct host_bsq {
    struct host_bsq_entry host_entry[ QUEUE_SIZE_BS ];    /* host resident buffer supply queue entries */
    int                   head;                           /* head of buffer supply queue               */
    struct chunk          rbd_block;                      /* array of rbds                             */
    struct chunk          status;                         /* array of completion status                */
    struct buffer*        buffer;                         /* array of rx buffers                       */
    struct buffer*        freebuf;                        /* list of free rx buffers                   */
    volatile int          freebuf_count;                  /* count of free rx buffers                  */
} host_bsq_t;

/* header of the firmware image */

typedef struct fw_header {
    __le32 magic;           /* magic number                               */
    __le32 version;         /* firmware version id                        */
    __le32 load_offset;     /* fw load offset in board memory             */
    __le32 start_offset;    /* fw execution start address in board memory */
} fw_header_t;

#define FW_HEADER_MAGIC  0x65726f66    /* 'fore' */

/* receive buffer supply queues scheme specification */

typedef struct bs_spec {
    u32 queue_length;      /* queue capacity                     */
    u32 buffer_size;       /* host buffer size                   */
    u32 pool_size;         /* number of rbds                     */
    u32 supply_blksize;    /* num of rbds in I/O block (multiple
                              of 4 between 4 and 124 inclusive)  */
} bs_spec_t;

/* initialization command block (one-time command, not in cmd queue) */

typedef struct init_block {
    enum opcode    opcode;               /* initialize command             */
    enum status    status;               /* related status word            */
    u32            receive_threshold;    /* not used                       */
    u32            num_connect;          /* ATM connections                */
    u32            cmd_queue_len;        /* length of command queue        */
    u32            tx_queue_len;         /* length of transmit queue       */
    u32            rx_queue_len;         /* length of receive queue        */
    u32            rsd_extension;        /* number of extra 32 byte blocks */
    u32            tsd_extension;        /* number of extra 32 byte blocks */
    u32            conless_vpvc;         /* not used                       */
    u32            pad[ 2 ];             /* force quad alignment           */
    struct bs_spec bs_spec[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ];    /* buffer supply queues spec */
} init_block_t;

typedef enum media_type {
    MEDIA_TYPE_CAT5_UTP  = 0x06,    /* unshielded twisted pair */
    MEDIA_TYPE_MM_OC3_ST = 0x16,    /* multimode fiber ST      */
    MEDIA_TYPE_MM_OC3_SC = 0x26,    /* multimode fiber SC      */
    MEDIA_TYPE_SM_OC3_ST = 0x36,    /* single-mode fiber ST    */
    MEDIA_TYPE_SM_OC3_SC = 0x46     /* single-mode fiber SC    */
} media_type_t;

#define FORE200E_MEDIA_INDEX(media_type)    ((media_type) >> 4)

/* cp resident queues */

typedef struct cp_queues {
    u32 cp_cmdq;           /* command queue                    */
    u32 cp_txq;            /* transmit queue                   */
    u32 cp_rxq;            /* receive queue                    */
    u32 cp_bsq[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ];    /* buffer supply queues */
    u32 imask;             /* 1 enables cp to host interrupts  */
    u32 istat;             /* 1 for interrupt posted           */
    u32 heap_base;         /* offset from beginning of ram     */
    u32 heap_size;         /* space available for queues       */
    u32 hlogger;           /* non zero for host logging        */
    u32 heartbeat;         /* cp heartbeat                     */
    u32 fw_release;        /* firmware version                 */
    u32 mon960_release;    /* i960 monitor version             */
    u32 tq_plen;           /* transmit throughput measurements */
    /* make sure the init block remains on a quad word boundary */
    struct init_block init;          /* one time cmd, not in cmd queue */
    enum   media_type media_type;    /* media type id                  */
    u32    oc3_revision;             /* OC-3 revision number           */
} cp_queues_t;

/* boot status */

typedef enum boot_status {
    BSTAT_COLD_START    = (u32) 0xc01dc01d,    /* cold start              */
    BSTAT_SELFTEST_OK   = (u32) 0x02201958,    /* self-test ok            */
    BSTAT_SELFTEST_FAIL = (u32) 0xadbadbad,    /* self-test failed        */
    BSTAT_CP_RUNNING    = (u32) 0xce11feed,    /* cp is running           */
    BSTAT_MON_TOO_BIG   = (u32) 0x10aded00     /* i960 monitor is too big */
} boot_status_t;

/* software UART */

typedef struct soft_uart {
    u32 send;    /* write register */
    u32 recv;    /* read register  */
} soft_uart_t;

#define FORE200E_CP_MONITOR_UART_FREE     0x00000000
#define FORE200E_CP_MONITOR_UART_AVAIL    0x01000000

/* i960 monitor */

typedef struct cp_monitor {
    struct soft_uart soft_uart;      /* software UART           */
    enum boot_status bstat;          /* boot status             */
    u32              app_base;       /* application base offset */
    u32              mon_version;    /* i960 monitor version    */
} cp_monitor_t;

/* device state */

typedef enum fore200e_state {
    FORE200E_STATE_BLANK,         /* initial state                     */
    FORE200E_STATE_REGISTER,      /* device registered                 */
    FORE200E_STATE_CONFIGURE,     /* bus interface configured          */
    FORE200E_STATE_MAP,           /* board space mapped in host memory */
    FORE200E_STATE_RESET,         /* board reset                       */
    FORE200E_STATE_START_FW,      /* firmware started                  */
    FORE200E_STATE_INITIALIZE,    /* initialize command successful     */
    FORE200E_STATE_INIT_CMDQ,     /* command queue initialized         */
    FORE200E_STATE_INIT_TXQ,      /* transmit queue initialized        */
    FORE200E_STATE_INIT_RXQ,      /* receive queue initialized         */
    FORE200E_STATE_INIT_BSQ,      /* buffer supply queue initialized   */
    FORE200E_STATE_ALLOC_BUF,     /* receive buffers allocated         */
    FORE200E_STATE_IRQ,           /* host interrupt requested          */
    FORE200E_STATE_COMPLETE       /* initialization completed          */
} fore200e_state;

/* PCA-200E registers */

typedef struct fore200e_pca_regs {
    volatile u32 __iomem * hcr;    /* address of host control register        */
    volatile u32 __iomem * imr;    /* address of host interrupt mask register */
    volatile u32 __iomem * psr;    /* address of PCI specific register        */
} fore200e_pca_regs_t;

/* SBA-200E registers */

typedef struct fore200e_sba_regs {
    u32 __iomem *hcr;    /* address of host control register              */
    u32 __iomem *bsr;    /* address of burst transfer size register       */
    u32 __iomem *isr;    /* address of interrupt level selection register */
} fore200e_sba_regs_t;

/* model-specific registers */

typedef union fore200e_regs {
    struct fore200e_pca_regs pca;    /* PCA-200E registers */
    struct fore200e_sba_regs sba;    /* SBA-200E registers */
} fore200e_regs;

struct fore200e;

/* bus-dependent data */

typedef struct fore200e_bus {
    char* model_name;          /* board model name                       */
    char* proc_name;           /* board name under /proc/atm             */
    int   descr_alignment;     /* tpd/rpd/rbd DMA alignment requirement  */
    int   buffer_alignment;    /* rx buffers DMA alignment requirement   */
    int   status_alignment;    /* status words DMA alignment requirement */
    u32   (*read)(volatile u32 __iomem *);
    void  (*write)(u32, volatile u32 __iomem *);
    int   (*configure)(struct fore200e*);
    int   (*map)(struct fore200e*);
    void  (*reset)(struct fore200e*);
    int   (*prom_read)(struct fore200e*, struct prom_data*);
    void  (*unmap)(struct fore200e*);
    void  (*irq_enable)(struct fore200e*);
    int   (*irq_check)(struct fore200e*);
    void  (*irq_ack)(struct fore200e*);
    int   (*proc_read)(struct fore200e*, char*);
} fore200e_bus_t;

/* vc mapping */

typedef struct fore200e_vc_map {
    struct atm_vcc* vcc;       /* vcc entry              */
    unsigned long   incarn;    /* vcc incarnation number */
} fore200e_vc_map_t;

#define FORE200E_VC_MAP(fore200e, vpi, vci)  \
        (& (fore200e)->vc_map[ ((vpi) << FORE200E_VCI_BITS) | (vci) ])
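
/* Illustrative note (not part of the original header): with
 * FORE200E_VPI_BITS = 0 and FORE200E_VCI_BITS = 10, vc_map[] holds
 * NBR_CONNECT = 1 << 10 = 1024 entries, and only a VPI of 0 keeps the
 * computed index in range, so for example
 *
 *     struct fore200e_vc_map* map = FORE200E_VC_MAP(fore200e, 0, 42);
 *
 * simply resolves to &fore200e->vc_map[ 42 ]. */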

/* per-device data */

typedef struct fore200e {
    struct       list_head     entry;        /* next device                 */
    const struct fore200e_bus* bus;          /* bus-dependent code and data */
    union        fore200e_regs regs;         /* bus-dependent registers     */
    struct       atm_dev*      atm_dev;      /* ATM device                  */

    enum fore200e_state        state;        /* device state                */

    char                       name[16];     /* device name                 */
    struct device              *dev;
    int                        irq;          /* irq number                  */
    unsigned long              phys_base;    /* physical base address       */
    void __iomem *             virt_base;    /* virtual base address        */

    unsigned char              esi[ ESI_LEN ];    /* end system identifier  */

    struct cp_monitor __iomem * cp_monitor;    /* i960 monitor address      */
    struct cp_queues __iomem *  cp_queues;     /* cp resident queues        */
    struct host_cmdq            host_cmdq;     /* host resident cmd queue   */
    struct host_txq             host_txq;      /* host resident tx queue    */
    struct host_rxq             host_rxq;      /* host resident rx queue    */
    /* host resident buffer supply queues */
    struct host_bsq             host_bsq[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ];

    u32                        available_cell_rate;    /* remaining pseudo-CBR bw on link */

    int                        loop_mode;              /* S/UNI loopback mode             */

    struct stats*              stats;                  /* last snapshot of the stats      */

    struct mutex               rate_mtx;               /* protects rate reservation ops   */
    spinlock_t                 q_lock;                 /* protects queue ops              */
#ifdef FORE200E_USE_TASKLET
    struct tasklet_struct      tx_tasklet;             /* performs tx interrupt work      */
    struct tasklet_struct      rx_tasklet;             /* performs rx interrupt work      */
#endif
    unsigned long              tx_sat;                 /* tx queue saturation count       */

    unsigned long              incarn_count;
    struct fore200e_vc_map     vc_map[ NBR_CONNECT ];  /* vc mapping                      */
} fore200e_t;

/* per-vcc data */

typedef struct fore200e_vcc {
    enum buffer_scheme scheme;        /* rx buffer scheme                 */
    struct tpd_rate    rate;          /* tx rate control data             */
    int                rx_min_pdu;    /* size of smallest PDU received    */
    int                rx_max_pdu;    /* size of largest PDU received     */
    int                tx_min_pdu;    /* size of smallest PDU transmitted */
    int                tx_max_pdu;    /* size of largest PDU transmitted  */
    unsigned long      tx_pdu;        /* nbr of tx pdus                   */
    unsigned long      rx_pdu;        /* nbr of rx pdus                   */
} fore200e_vcc_t;

/* 200E-series common memory layout */

#define FORE200E_CP_MONITOR_OFFSET    0x00000400    /* i960 monitor interface */
#define FORE200E_CP_QUEUES_OFFSET     0x00004d40    /* cp resident queues     */

/* PCA-200E memory layout */

#define PCA200E_IOSPACE_LENGTH    0x00200000

#define PCA200E_HCR_OFFSET        0x00100000    /* board control register */
#define PCA200E_IMR_OFFSET        0x00100004    /* host IRQ mask register */
#define PCA200E_PSR_OFFSET        0x00100008    /* PCI specific register  */

/* PCA-200E host control register */

#define PCA200E_HCR_RESET     (1<<0)    /* read / write */
#define PCA200E_HCR_HOLD_LOCK (1<<1)    /* read / write */
#define PCA200E_HCR_I960FAIL  (1<<2)    /* read         */
#define PCA200E_HCR_INTRB     (1<<2)    /* write        */
#define PCA200E_HCR_HOLD_ACK  (1<<3)    /* read         */
#define PCA200E_HCR_INTRA     (1<<3)    /* write        */
#define PCA200E_HCR_OUTFULL   (1<<4)    /* read         */
#define PCA200E_HCR_CLRINTR   (1<<4)    /* write        */
#define PCA200E_HCR_ESPHOLD   (1<<5)    /* read         */
#define PCA200E_HCR_INFULL    (1<<6)    /* read         */
#define PCA200E_HCR_TESTMODE  (1<<7)    /* read         */

/* PCA-200E PCI bus interface regs (offsets in PCI config space) */

#define PCA200E_PCI_LATENCY      0x40    /* maximum slave latency            */
#define PCA200E_PCI_MASTER_CTRL  0x41    /* master control                   */
#define PCA200E_PCI_THRESHOLD    0x42    /* burst / continuous req threshold */

/* PBI master control register */

#define PCA200E_CTRL_DIS_CACHE_RD      (1<<0)    /* disable cache-line reads                         */
#define PCA200E_CTRL_DIS_WRT_INVAL     (1<<1)    /* disable writes and invalidates                   */
#define PCA200E_CTRL_2_CACHE_WRT_INVAL (1<<2)    /* require 2 cache-lines for writes and invalidates */
#define PCA200E_CTRL_IGN_LAT_TIMER     (1<<3)    /* ignore the latency timer                         */
#define PCA200E_CTRL_ENA_CONT_REQ_MODE (1<<4)    /* enable continuous request mode                   */
#define PCA200E_CTRL_LARGE_PCI_BURSTS  (1<<5)    /* force large PCI bus bursts                       */
#define PCA200E_CTRL_CONVERT_ENDIAN    (1<<6)    /* convert endianness of slave RAM accesses         */
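
/* Illustrative sketch (not part of the original header): the three offsets
 * above live in the board's PCI configuration space, so they are programmed
 * with the generic config accessors rather than MMIO. For example, assuming
 * 'pci_dev' is the device handed to the PCI probe routine and that large
 * bursts are wanted (error handling omitted):
 *
 *     u8 master_ctrl;
 *
 *     pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);
 *     master_ctrl |= PCA200E_CTRL_LARGE_PCI_BURSTS;
 *     pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);
 *
 * Which control bits are appropriate is board and host-bridge specific; the
 * PCA-200E setup code in fore200e.c holds the actual policy. */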

#define SBA200E_PROM_NAME  "FORE,sba-200e"    /* device name in openprom tree */

/* size of SBA-200E registers */

#define SBA200E_HCR_LENGTH    4
#define SBA200E_BSR_LENGTH    4
#define SBA200E_ISR_LENGTH    4
#define SBA200E_RAM_LENGTH    0x40000

/* SBA-200E SBUS burst transfer size register */

#define SBA200E_BSR_BURST4    0x04
#define SBA200E_BSR_BURST8    0x08
#define SBA200E_BSR_BURST16   0x10

/* SBA-200E host control register */

#define SBA200E_HCR_RESET        (1<<0)    /* read / write (sticky) */
#define SBA200E_HCR_HOLD_LOCK    (1<<1)    /* read / write (sticky) */
#define SBA200E_HCR_I960FAIL     (1<<2)    /* read                  */
#define SBA200E_HCR_I960SETINTR  (1<<2)    /* write                 */
#define SBA200E_HCR_OUTFULL      (1<<3)    /* read                  */
#define SBA200E_HCR_INTR_CLR     (1<<3)    /* write                 */
#define SBA200E_HCR_INTR_ENA     (1<<4)    /* read / write (sticky) */
#define SBA200E_HCR_ESPHOLD      (1<<5)    /* read                  */
#define SBA200E_HCR_INFULL       (1<<6)    /* read                  */
#define SBA200E_HCR_TESTMODE     (1<<7)    /* read                  */
#define SBA200E_HCR_INTR_REQ     (1<<8)    /* read                  */

#define SBA200E_HCR_STICKY       (SBA200E_HCR_RESET | SBA200E_HCR_HOLD_LOCK | SBA200E_HCR_INTR_ENA)


#endif /* __KERNEL__ */
#endif /* _FORE200E_H */