1 /* 2 * Generic Broadcom Home Networking Division (HND) DMA engine HW interface 3 * This supports the following chips: BCM42xx, 44xx, 47xx . 4 * 5 * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation 6 * 7 * Copyright (C) 1999-2017, Broadcom Corporation 8 * 9 * Unless you and Broadcom execute a separate written software license 10 * agreement governing use of this software, this software is licensed to you 11 * under the terms of the GNU General Public License version 2 (the "GPL"), 12 * available at http://www.broadcom.com/licenses/GPLv2.php, with the 13 * following added to such license: 14 * 15 * As a special exception, the copyright holders of this software give you 16 * permission to link this software with independent modules, and to copy and 17 * distribute the resulting executable under terms of your choice, provided that 18 * you also meet, for each linked independent module, the terms and conditions of 19 * the license of that module. An independent module is a module which is not 20 * derived from this software. The special exception does not apply to any 21 * modifications of the software. 22 * 23 * Notwithstanding the above, under no circumstances may you combine this 24 * software in any way with any other Broadcom software provided under a license 25 * other than the GPL, without Broadcom's express prior written consent. 26 * 27 * 28 * <<Broadcom-WL-IPTag/Open:>> 29 * 30 * $Id: sbhnddma.h 694506 2017-04-13 05:10:05Z $ 31 */ 32 33 #ifndef _sbhnddma_h_ 34 #define _sbhnddma_h_ 35 36 /* DMA structure: 37 * support two DMA engines: 32 bits address or 64 bit addressing 38 * basic DMA register set is per channel(transmit or receive) 39 * a pair of channels is defined for convenience 40 */ 41 42 /* 32 bits addressing */ 43 44 /** dma registers per channel(xmt or rcv) */ 45 typedef volatile struct { 46 uint32 control; /**< enable, et al */ 47 uint32 addr; /**< descriptor ring base address (4K aligned) */ 48 uint32 ptr; /**< last descriptor posted to chip */ 49 uint32 status; /**< current active descriptor, et al */ 50 } dma32regs_t; 51 52 typedef volatile struct { 53 dma32regs_t xmt; /**< dma tx channel */ 54 dma32regs_t rcv; /**< dma rx channel */ 55 } dma32regp_t; 56 57 typedef volatile struct { /* diag access */ 58 uint32 fifoaddr; /**< diag address */ 59 uint32 fifodatalow; /**< low 32bits of data */ 60 uint32 fifodatahigh; /**< high 32bits of data */ 61 uint32 pad; /**< reserved */ 62 } dma32diag_t; 63 64 /** 65 * DMA Descriptor 66 * Descriptors are only read by the hardware, never written back. 67 */ 68 typedef volatile struct { 69 uint32 ctrl; /**< misc control bits & bufcount */ 70 uint32 addr; /**< data buffer address */ 71 } dma32dd_t; 72 73 /** Each descriptor ring must be 4096byte aligned, and fit within a single 4096byte page. */ 74 #define D32RINGALIGN_BITS 12 75 #define D32MAXRINGSZ (1 << D32RINGALIGN_BITS) 76 #define D32RINGALIGN (1 << D32RINGALIGN_BITS) 77 78 #define D32MAXDD (D32MAXRINGSZ / sizeof (dma32dd_t)) 79 80 /* transmit channel control */ 81 #define XC_XE ((uint32)1 << 0) /**< transmit enable */ 82 #define XC_SE ((uint32)1 << 1) /**< transmit suspend request */ 83 #define XC_LE ((uint32)1 << 2) /**< loopback enable */ 84 #define XC_FL ((uint32)1 << 4) /**< flush request */ 85 #define XC_MR_MASK 0x000001C0 /**< Multiple outstanding reads */ 86 #define XC_MR_SHIFT 6 87 #define XC_PD ((uint32)1 << 11) /**< parity check disable */ 88 #define XC_AE ((uint32)3 << 16) /**< address extension bits */ 89 #define XC_AE_SHIFT 16 90 #define XC_BL_MASK 0x001C0000 /**< BurstLen bits */ 91 #define XC_BL_SHIFT 18 92 #define XC_PC_MASK 0x00E00000 /**< Prefetch control */ 93 #define XC_PC_SHIFT 21 94 #define XC_PT_MASK 0x03000000 /**< Prefetch threshold */ 95 #define XC_PT_SHIFT 24 96 97 /** Multiple outstanding reads */ 98 #define DMA_MR_1 0 99 #define DMA_MR_2 1 100 #define DMA_MR_4 2 101 #define DMA_MR_8 3 102 #define DMA_MR_12 4 103 #define DMA_MR_16 5 104 #define DMA_MR_20 6 105 #define DMA_MR_32 7 106 107 /** DMA Burst Length in bytes */ 108 #define DMA_BL_16 0 109 #define DMA_BL_32 1 110 #define DMA_BL_64 2 111 #define DMA_BL_128 3 112 #define DMA_BL_256 4 113 #define DMA_BL_512 5 114 #define DMA_BL_1024 6 115 116 /** Prefetch control */ 117 #define DMA_PC_0 0 118 #define DMA_PC_4 1 119 #define DMA_PC_8 2 120 #define DMA_PC_16 3 121 #define DMA_PC_32 4 122 /* others: reserved */ 123 124 /** Prefetch threshold */ 125 #define DMA_PT_1 0 126 #define DMA_PT_2 1 127 #define DMA_PT_4 2 128 #define DMA_PT_8 3 129 130 /** Channel Switch */ 131 #define DMA_CS_OFF 0 132 #define DMA_CS_ON 1 133 134 /* transmit descriptor table pointer */ 135 #define XP_LD_MASK 0xfff /**< last valid descriptor */ 136 137 /* transmit channel status */ 138 #define XS_CD_MASK 0x0fff /**< current descriptor pointer */ 139 #define XS_XS_MASK 0xf000 /**< transmit state */ 140 #define XS_XS_SHIFT 12 141 #define XS_XS_DISABLED 0x0000 /**< disabled */ 142 #define XS_XS_ACTIVE 0x1000 /**< active */ 143 #define XS_XS_IDLE 0x2000 /**< idle wait */ 144 #define XS_XS_STOPPED 0x3000 /**< stopped */ 145 #define XS_XS_SUSP 0x4000 /**< suspend pending */ 146 #define XS_XE_MASK 0xf0000 /**< transmit errors */ 147 #define XS_XE_SHIFT 16 148 #define XS_XE_NOERR 0x00000 /**< no error */ 149 #define XS_XE_DPE 0x10000 /**< descriptor protocol error */ 150 #define XS_XE_DFU 0x20000 /**< data fifo underrun */ 151 #define XS_XE_BEBR 0x30000 /**< bus error on buffer read */ 152 #define XS_XE_BEDA 0x40000 /**< bus error on descriptor access */ 153 #define XS_AD_MASK 0xfff00000 /**< active descriptor */ 154 #define XS_AD_SHIFT 20 155 156 /* receive channel control */ 157 #define RC_RE ((uint32)1 << 0) /**< receive enable */ 158 #define RC_RO_MASK 0xfe /**< receive frame offset */ 159 #define RC_RO_SHIFT 1 160 #define RC_FM ((uint32)1 << 8) /**< direct fifo receive (pio) mode */ 161 #define RC_SH ((uint32)1 << 9) /**< separate rx header descriptor enable */ 162 #define RC_OC ((uint32)1 << 10) /**< overflow continue */ 163 #define RC_PD ((uint32)1 << 11) /**< parity check disable */ 164 #define RC_AE ((uint32)3 << 16) /**< address extension bits */ 165 #define RC_AE_SHIFT 16 166 #define RC_BL_MASK 0x001C0000 /**< BurstLen bits */ 167 #define RC_BL_SHIFT 18 168 #define RC_PC_MASK 0x00E00000 /**< Prefetch control */ 169 #define RC_PC_SHIFT 21 170 #define RC_PT_MASK 0x03000000 /**< Prefetch threshold */ 171 #define RC_PT_SHIFT 24 172 #define RC_WAITCMP_MASK 0x00001000 173 #define RC_WAITCMP_SHIFT 12 174 /* receive descriptor table pointer */ 175 #define RP_LD_MASK 0xfff /**< last valid descriptor */ 176 177 /* receive channel status */ 178 #define RS_CD_MASK 0x0fff /**< current descriptor pointer */ 179 #define RS_RS_MASK 0xf000 /**< receive state */ 180 #define RS_RS_SHIFT 12 181 #define RS_RS_DISABLED 0x0000 /**< disabled */ 182 #define RS_RS_ACTIVE 0x1000 /**< active */ 183 #define RS_RS_IDLE 0x2000 /**< idle wait */ 184 #define RS_RS_STOPPED 0x3000 /**< reserved */ 185 #define RS_RE_MASK 0xf0000 /**< receive errors */ 186 #define RS_RE_SHIFT 16 187 #define RS_RE_NOERR 0x00000 /**< no error */ 188 #define RS_RE_DPE 0x10000 /**< descriptor protocol error */ 189 #define RS_RE_DFO 0x20000 /**< data fifo overflow */ 190 #define RS_RE_BEBW 0x30000 /**< bus error on buffer write */ 191 #define RS_RE_BEDA 0x40000 /**< bus error on descriptor access */ 192 #define RS_AD_MASK 0xfff00000 /**< active descriptor */ 193 #define RS_AD_SHIFT 20 194 195 /* fifoaddr */ 196 #define FA_OFF_MASK 0xffff /**< offset */ 197 #define FA_SEL_MASK 0xf0000 /**< select */ 198 #define FA_SEL_SHIFT 16 199 #define FA_SEL_XDD 0x00000 /**< transmit dma data */ 200 #define FA_SEL_XDP 0x10000 /**< transmit dma pointers */ 201 #define FA_SEL_RDD 0x40000 /**< receive dma data */ 202 #define FA_SEL_RDP 0x50000 /**< receive dma pointers */ 203 #define FA_SEL_XFD 0x80000 /**< transmit fifo data */ 204 #define FA_SEL_XFP 0x90000 /**< transmit fifo pointers */ 205 #define FA_SEL_RFD 0xc0000 /**< receive fifo data */ 206 #define FA_SEL_RFP 0xd0000 /**< receive fifo pointers */ 207 #define FA_SEL_RSD 0xe0000 /**< receive frame status data */ 208 #define FA_SEL_RSP 0xf0000 /**< receive frame status pointers */ 209 210 /* descriptor control flags */ 211 #define CTRL_BC_MASK 0x00001fff /**< buffer byte count, real data len must <= 4KB */ 212 #define CTRL_AE ((uint32)3 << 16) /**< address extension bits */ 213 #define CTRL_AE_SHIFT 16 214 #define CTRL_PARITY ((uint32)3 << 18) /**< parity bit */ 215 #define CTRL_EOT ((uint32)1 << 28) /**< end of descriptor table */ 216 #define CTRL_IOC ((uint32)1 << 29) /**< interrupt on completion */ 217 #define CTRL_EOF ((uint32)1 << 30) /**< end of frame */ 218 #define CTRL_SOF ((uint32)1 << 31) /**< start of frame */ 219 220 /** control flags in the range [27:20] are core-specific and not defined here */ 221 #define CTRL_CORE_MASK 0x0ff00000 222 223 /* 64 bits addressing */ 224 225 /** dma registers per channel(xmt or rcv) */ 226 typedef volatile struct { 227 uint32 control; /**< enable, et al */ 228 uint32 ptr; /**< last descriptor posted to chip */ 229 uint32 addrlow; /**< descriptor ring base address low 32-bits (8K aligned) */ 230 uint32 addrhigh; /**< descriptor ring base address bits 63:32 (8K aligned) */ 231 uint32 status0; /**< current descriptor, xmt state */ 232 uint32 status1; /**< active descriptor, xmt error */ 233 } dma64regs_t; 234 235 typedef volatile struct { 236 dma64regs_t tx; /**< dma64 tx channel */ 237 dma64regs_t rx; /**< dma64 rx channel */ 238 } dma64regp_t; 239 240 typedef volatile struct { /**< diag access */ 241 uint32 fifoaddr; /**< diag address */ 242 uint32 fifodatalow; /**< low 32bits of data */ 243 uint32 fifodatahigh; /**< high 32bits of data */ 244 uint32 pad; /**< reserved */ 245 } dma64diag_t; 246 247 /** 248 * DMA Descriptor 249 * Descriptors are only read by the hardware, never written back. 250 */ 251 typedef volatile struct { 252 uint32 ctrl1; /**< misc control bits */ 253 uint32 ctrl2; /**< buffer count and address extension */ 254 uint32 addrlow; /**< memory address of the date buffer, bits 31:0 */ 255 uint32 addrhigh; /**< memory address of the date buffer, bits 63:32 */ 256 } dma64dd_t; 257 258 /** 259 * Each descriptor ring must be 8kB aligned, and fit within a contiguous 8kB physical addresss. 260 */ 261 #define D64RINGALIGN_BITS 13 262 #define D64MAXRINGSZ (1 << D64RINGALIGN_BITS) 263 #define D64RINGBOUNDARY (1 << D64RINGALIGN_BITS) 264 265 #define D64MAXDD (D64MAXRINGSZ / sizeof (dma64dd_t)) 266 267 /** for cores with large descriptor ring support, descriptor ring size can be up to 4096 */ 268 #define D64MAXDD_LARGE ((1 << 16) / sizeof (dma64dd_t)) 269 270 /** 271 * for cores with large descriptor ring support (4k descriptors), descriptor ring cannot cross 272 * 64K boundary 273 */ 274 #define D64RINGBOUNDARY_LARGE (1 << 16) 275 276 /* 277 * Default DMA Burstlen values for USBRev >= 12 and SDIORev >= 11. 278 * When this field contains the value N, the burst length is 2**(N + 4) bytes. 279 */ 280 #define D64_DEF_USBBURSTLEN 2 281 #define D64_DEF_SDIOBURSTLEN 1 282 283 #ifndef D64_USBBURSTLEN 284 #define D64_USBBURSTLEN DMA_BL_64 285 #endif // endif 286 #ifndef D64_SDIOBURSTLEN 287 #define D64_SDIOBURSTLEN DMA_BL_32 288 #endif // endif 289 290 /* transmit channel control */ 291 #define D64_XC_XE 0x00000001 /**< transmit enable */ 292 #define D64_XC_SE 0x00000002 /**< transmit suspend request */ 293 #define D64_XC_LE 0x00000004 /**< loopback enable */ 294 #define D64_XC_FL 0x00000010 /**< flush request */ 295 #define D64_XC_MR_MASK 0x000001C0 /**< Multiple outstanding reads */ 296 #define D64_XC_MR_SHIFT 6 297 #define D64_XC_CS_SHIFT 9 /**< channel switch enable */ 298 #define D64_XC_CS_MASK 0x00000200 /**< channel switch enable */ 299 #define D64_XC_PD 0x00000800 /**< parity check disable */ 300 #define D64_XC_AE 0x00030000 /**< address extension bits */ 301 #define D64_XC_AE_SHIFT 16 302 #define D64_XC_BL_MASK 0x001C0000 /**< BurstLen bits */ 303 #define D64_XC_BL_SHIFT 18 304 #define D64_XC_PC_MASK 0x00E00000 /**< Prefetch control */ 305 #define D64_XC_PC_SHIFT 21 306 #define D64_XC_PT_MASK 0x03000000 /**< Prefetch threshold */ 307 #define D64_XC_PT_SHIFT 24 308 #define D64_XC_CO_MASK 0x04000000 /**< coherent transactions for descriptors */ 309 #define D64_XC_CO_SHIFT 26 310 311 /* transmit descriptor table pointer */ 312 #define D64_XP_LD_MASK 0x00001fff /**< last valid descriptor */ 313 314 /* transmit channel status */ 315 #define D64_XS0_CD_MASK (di->d64_xs0_cd_mask) /**< current descriptor pointer */ 316 #define D64_XS0_XS_MASK 0xf0000000 /**< transmit state */ 317 #define D64_XS0_XS_SHIFT 28 318 #define D64_XS0_XS_DISABLED 0x00000000 /**< disabled */ 319 #define D64_XS0_XS_ACTIVE 0x10000000 /**< active */ 320 #define D64_XS0_XS_IDLE 0x20000000 /**< idle wait */ 321 #define D64_XS0_XS_STOPPED 0x30000000 /**< stopped */ 322 #define D64_XS0_XS_SUSP 0x40000000 /**< suspend pending */ 323 324 #define D64_XS1_AD_MASK (di->d64_xs1_ad_mask) /**< active descriptor */ 325 #define D64_XS1_XE_MASK 0xf0000000 /**< transmit errors */ 326 #define D64_XS1_XE_SHIFT 28 327 #define D64_XS1_XE_NOERR 0x00000000 /**< no error */ 328 #define D64_XS1_XE_DPE 0x10000000 /**< descriptor protocol error */ 329 #define D64_XS1_XE_DFU 0x20000000 /**< data fifo underrun */ 330 #define D64_XS1_XE_DTE 0x30000000 /**< data transfer error */ 331 #define D64_XS1_XE_DESRE 0x40000000 /**< descriptor read error */ 332 #define D64_XS1_XE_COREE 0x50000000 /**< core error */ 333 334 /* receive channel control */ 335 #define D64_RC_RE 0x00000001 /**< receive enable */ 336 #define D64_RC_RO_MASK 0x000000fe /**< receive frame offset */ 337 #define D64_RC_RO_SHIFT 1 338 #define D64_RC_FM 0x00000100 /**< direct fifo receive (pio) mode */ 339 #define D64_RC_SH 0x00000200 /**< separate rx header descriptor enable */ 340 #define D64_RC_SHIFT 9 /**< separate rx header descriptor enable */ 341 #define D64_RC_OC 0x00000400 /**< overflow continue */ 342 #define D64_RC_PD 0x00000800 /**< parity check disable */ 343 #define D64_RC_WAITCMP_MASK 0x00001000 344 #define D64_RC_WAITCMP_SHIFT 12 345 #define D64_RC_SA 0x00002000 /**< select active */ 346 #define D64_RC_GE 0x00004000 /**< Glom enable */ 347 #define D64_RC_AE 0x00030000 /**< address extension bits */ 348 #define D64_RC_AE_SHIFT 16 349 #define D64_RC_BL_MASK 0x001C0000 /**< BurstLen bits */ 350 #define D64_RC_BL_SHIFT 18 351 #define D64_RC_PC_MASK 0x00E00000 /**< Prefetch control */ 352 #define D64_RC_PC_SHIFT 21 353 #define D64_RC_PT_MASK 0x03000000 /**< Prefetch threshold */ 354 #define D64_RC_PT_SHIFT 24 355 #define D64_RC_CO_MASK 0x04000000 /**< coherent transactions for descriptors */ 356 #define D64_RC_CO_SHIFT 26 357 #define D64_RC_ROEXT_MASK 0x08000000 /**< receive frame offset extension bit */ 358 #define D64_RC_ROEXT_SHIFT 27 359 360 /* flags for dma controller */ 361 #define DMA_CTRL_PEN (1 << 0) /**< partity enable */ 362 #define DMA_CTRL_ROC (1 << 1) /**< rx overflow continue */ 363 #define DMA_CTRL_RXMULTI (1 << 2) /**< allow rx scatter to multiple descriptors */ 364 #define DMA_CTRL_UNFRAMED (1 << 3) /**< Unframed Rx/Tx data */ 365 #define DMA_CTRL_USB_BOUNDRY4KB_WAR (1 << 4) 366 #define DMA_CTRL_DMA_AVOIDANCE_WAR (1 << 5) /**< DMA avoidance WAR for 4331 */ 367 #define DMA_CTRL_RXSINGLE (1 << 6) /**< always single buffer */ 368 #define DMA_CTRL_SDIO_RXGLOM (1 << 7) /**< DMA Rx glome is enabled */ 369 #define DMA_CTRL_DESC_ONLY_FLAG (1 << 8) /**< For DMA which posts only descriptors, 370 * no packets 371 */ 372 #define DMA_CTRL_DESC_CD_WAR (1 << 9) /**< WAR for descriptor only DMA's CD not being 373 * updated correctly by HW in CT mode. 374 */ 375 #define DMA_CTRL_CS (1 << 10) /* channel switch enable */ 376 #define DMA_CTRL_ROEXT (1 << 11) /* receive frame offset extension support */ 377 #define DMA_CTRL_RX_ALIGN_8BYTE (1 << 12) /* RXDMA address 8-byte aligned for 43684A0 */ 378 379 /* receive descriptor table pointer */ 380 #define D64_RP_LD_MASK 0x00001fff /**< last valid descriptor */ 381 382 /* receive channel status */ 383 #define D64_RS0_CD_MASK (di->d64_rs0_cd_mask) /**< current descriptor pointer */ 384 #define D64_RS0_RS_MASK 0xf0000000 /**< receive state */ 385 #define D64_RS0_RS_SHIFT 28 386 #define D64_RS0_RS_DISABLED 0x00000000 /**< disabled */ 387 #define D64_RS0_RS_ACTIVE 0x10000000 /**< active */ 388 #define D64_RS0_RS_IDLE 0x20000000 /**< idle wait */ 389 #define D64_RS0_RS_STOPPED 0x30000000 /**< stopped */ 390 #define D64_RS0_RS_SUSP 0x40000000 /**< suspend pending */ 391 392 #define D64_RS1_AD_MASK (di->d64_rs1_ad_mask) /* active descriptor pointer */ 393 #define D64_RS1_RE_MASK 0xf0000000 /* receive errors */ 394 #define D64_RS1_RE_SHIFT 28 395 #define D64_RS1_RE_NOERR 0x00000000 /**< no error */ 396 #define D64_RS1_RE_DPO 0x10000000 /**< descriptor protocol error */ 397 #define D64_RS1_RE_DFU 0x20000000 /**< data fifo overflow */ 398 #define D64_RS1_RE_DTE 0x30000000 /**< data transfer error */ 399 #define D64_RS1_RE_DESRE 0x40000000 /**< descriptor read error */ 400 #define D64_RS1_RE_COREE 0x50000000 /**< core error */ 401 402 /* fifoaddr */ 403 #define D64_FA_OFF_MASK 0xffff /**< offset */ 404 #define D64_FA_SEL_MASK 0xf0000 /**< select */ 405 #define D64_FA_SEL_SHIFT 16 406 #define D64_FA_SEL_XDD 0x00000 /**< transmit dma data */ 407 #define D64_FA_SEL_XDP 0x10000 /**< transmit dma pointers */ 408 #define D64_FA_SEL_RDD 0x40000 /**< receive dma data */ 409 #define D64_FA_SEL_RDP 0x50000 /**< receive dma pointers */ 410 #define D64_FA_SEL_XFD 0x80000 /**< transmit fifo data */ 411 #define D64_FA_SEL_XFP 0x90000 /**< transmit fifo pointers */ 412 #define D64_FA_SEL_RFD 0xc0000 /**< receive fifo data */ 413 #define D64_FA_SEL_RFP 0xd0000 /**< receive fifo pointers */ 414 #define D64_FA_SEL_RSD 0xe0000 /**< receive frame status data */ 415 #define D64_FA_SEL_RSP 0xf0000 /**< receive frame status pointers */ 416 417 /* descriptor control flags 1 */ 418 #define D64_CTRL_COREFLAGS 0x0ff00000 /**< core specific flags */ 419 #define D64_CTRL1_COHERENT ((uint32)1 << 17) /* cache coherent per transaction */ 420 #define D64_CTRL1_NOTPCIE ((uint32)1 << 18) /**< buirst size control */ 421 #define D64_CTRL1_EOT ((uint32)1 << 28) /**< end of descriptor table */ 422 #define D64_CTRL1_IOC ((uint32)1 << 29) /**< interrupt on completion */ 423 #define D64_CTRL1_EOF ((uint32)1 << 30) /**< end of frame */ 424 #define D64_CTRL1_SOF ((uint32)1 << 31) /**< start of frame */ 425 426 /* descriptor control flags 2 */ 427 #define D64_CTRL2_MAX_LEN 0x0000fff7 /* Max transfer length (buffer byte count) <= 65527 */ 428 #define D64_CTRL2_BC_MASK 0x0000ffff /**< mask for buffer byte count */ 429 #define D64_CTRL2_AE 0x00030000 /**< address extension bits */ 430 #define D64_CTRL2_AE_SHIFT 16 431 #define D64_CTRL2_PARITY 0x00040000 /* parity bit */ 432 433 /** control flags in the range [27:20] are core-specific and not defined here */ 434 #define D64_CTRL_CORE_MASK 0x0ff00000 435 436 #define D64_RX_FRM_STS_LEN 0x0000ffff /**< frame length mask */ 437 #define D64_RX_FRM_STS_OVFL 0x00800000 /**< RxOverFlow */ 438 #define D64_RX_FRM_STS_DSCRCNT 0x0f000000 /**< no. of descriptors used - 1, d11corerev >= 22 */ 439 #define D64_RX_FRM_STS_DSCRCNT_SHIFT 24 /* Shift for no .of dma descriptor field */ 440 #define D64_RX_FRM_STS_DATATYPE 0xf0000000 /**< core-dependent data type */ 441 442 #define BCM_D64_CTRL2_BOUND_DMA_LENGTH(len) \ 443 (((len) > D64_CTRL2_MAX_LEN) ? D64_CTRL2_MAX_LEN : (len)) 444 445 /** receive frame status */ 446 typedef volatile struct { 447 uint16 len; 448 uint16 flags; 449 } dma_rxh_t; 450 451 #endif /* _sbhnddma_h_ */ 452