/*
 *  GRUB  --  GRand Unified Bootloader
 *  Copyright (C) 1999,2000,2001,2002,2003,2004  Free Software Foundation, Inc.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
 * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 */

#ifndef ZFS_SPA_HEADER
#define	ZFS_SPA_HEADER 1

/*
 * General-purpose 32-bit and 64-bit bitfield encodings.
 *
 * BFxx_SET() updates a field in place with an XOR trick: XOR the old and
 * new field values, re-encode that difference at the field's position and
 * XOR it back in.  Bits outside the field are untouched; "val" must fit
 * in "len" bits.  (P2PHASE() is the power-of-2 modulus helper defined
 * elsewhere in this port.)
 */
#define	BF32_DECODE(x, low, len)	P2PHASE((x) >> (low), 1U << (len))
#define	BF64_DECODE(x, low, len)	P2PHASE((x) >> (low), 1ULL << (len))
#define	BF32_ENCODE(x, low, len)	(P2PHASE((x), 1U << (len)) << (low))
#define	BF64_ENCODE(x, low, len)	(P2PHASE((x), 1ULL << (len)) << (low))

#define	BF32_GET(x, low, len)		BF32_DECODE(x, low, len)
#define	BF64_GET(x, low, len)		BF64_DECODE(x, low, len)

/*
 * "x" and "low" are fully parenthesized in the shift below so that
 * composite argument expressions (e.g. "a | b" as "low") expand with the
 * intended grouping rather than being split by operator precedence.
 */
#define	BF32_SET(x, low, len, val) \
	((x) ^= BF32_ENCODE(((x) >> (low)) ^ (val), low, len))
#define	BF64_SET(x, low, len, val) \
	((x) ^= BF64_ENCODE(((x) >> (low)) ^ (val), low, len))

/*
 * Shifted/biased variants: the stored field is ((value >> shift) - bias),
 * used below to keep sizes in units of 512-byte sectors.
 */
#define	BF32_GET_SB(x, low, len, shift, bias) \
	((BF32_GET(x, low, len) + (bias)) << (shift))
#define	BF64_GET_SB(x, low, len, shift, bias) \
	((BF64_GET(x, low, len) + (bias)) << (shift))

#define	BF32_SET_SB(x, low, len, shift, bias, val) \
	BF32_SET(x, low, len, ((val) >> (shift)) - (bias))
#define	BF64_SET_SB(x, low, len, shift, bias, val) \
	BF64_SET(x, low, len, ((val) >> (shift)) - (bias))

/*
 * We currently support nine block sizes, from 512 bytes to 128K.
 * We could go higher, but the benefits are near-zero and the cost
 * of COWing a giant block to modify one byte would become excessive.
 */
#define	SPA_MINBLOCKSHIFT	9
#define	SPA_MAXBLOCKSHIFT	17
#define	SPA_MINBLOCKSIZE	(1ULL << SPA_MINBLOCKSHIFT)
#define	SPA_MAXBLOCKSIZE	(1ULL << SPA_MAXBLOCKSHIFT)

#define	SPA_BLOCKSIZES		(SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1)

/*
 * Size of block to hold the configuration data (a packed nvlist)
 */
#define	SPA_CONFIG_BLOCKSIZE	(1 << 14)

/*
 * The DVA size encodings for LSIZE and PSIZE support blocks up to 32MB.
 * The ASIZE encoding should be at least 64 times larger (6 more bits)
 * to support up to 4-way RAID-Z mirror mode with worst-case gang block
 * overhead, three DVAs per bp, plus one more bit in case we do anything
 * else that expands the ASIZE.
 */
#define	SPA_LSIZEBITS		16	/* LSIZE up to 32M (2^16 * 512)	*/
#define	SPA_PSIZEBITS		16	/* PSIZE up to 32M (2^16 * 512)	*/
#define	SPA_ASIZEBITS		24	/* ASIZE up to 64 times larger	*/

/*
 * All SPA data is represented by 128-bit data virtual addresses (DVAs).
 * The members of the dva_t should be considered opaque outside the SPA.
 */
typedef struct dva {
	uint64_t	dva_word[2];
} dva_t;

/*
 * Each block has a 256-bit checksum -- strong enough for cryptographic hashes.
 */
typedef struct zio_cksum {
	uint64_t	zc_word[4];
} zio_cksum_t;

/*
 * Each block is described by its DVAs, time of birth, checksum, etc.
 * The word-by-word, bit-by-bit layout of the blkptr is as follows:
 *
 *	64	56	48	40	32	24	16	8	0
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * 0	|		vdev1		| GRID  |	  ASIZE		|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * 1	|G|			 offset1				|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * 2	|		vdev2		| GRID  |	  ASIZE		|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * 3	|G|			 offset2				|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * 4	|		vdev3		| GRID  |	  ASIZE		|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * 5	|G|			 offset3				|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * 6	|BDX|lvl| type	| cksum | comp	|     PSIZE	|     LSIZE	|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * 7	|			padding					|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * 8	|			padding					|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * 9	|		physical birth txg				|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * a	|		logical birth txg				|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * b	|			fill count				|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * c	|			checksum[0]				|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * d	|			checksum[1]				|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * e	|			checksum[2]				|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * f	|			checksum[3]				|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 *
 * Legend:
 *
 * vdev		virtual device ID
 * offset	offset into virtual device
 * LSIZE	logical size
 * PSIZE	physical size (after compression)
 * ASIZE	allocated size (including RAID-Z parity and gang block headers)
 * GRID		RAID-Z layout information (reserved for future use)
 * cksum	checksum function
 * comp		compression function
 * G		gang block indicator
 * B		byteorder (endianness)
 * D		dedup
 * X		unused
 * lvl		level of indirection
 * type		DMU object type
 * phys birth	txg of block allocation; zero if same as logical birth txg
 * log. birth	transaction group in which the block was logically born
 * fill count	number of non-zero blocks under this bp
 * checksum[4]	256-bit checksum of the data this bp describes
 */
#define	SPA_BLKPTRSHIFT	7		/* blkptr_t is 128 bytes	*/
#define	SPA_DVAS_PER_BP	3		/* Number of DVAs in a bp	*/

typedef struct blkptr {
	dva_t		blk_dva[SPA_DVAS_PER_BP]; /* Data Virtual Addresses */
	uint64_t	blk_prop;	/* size, compression, type, etc	    */
	uint64_t	blk_pad[2];	/* Extra space for the future	    */
	uint64_t	blk_phys_birth;	/* txg when block was allocated	    */
	uint64_t	blk_birth;	/* transaction group at birth	    */
	uint64_t	blk_fill;	/* fill count			    */
	zio_cksum_t	blk_cksum;	/* 256-bit checksum		    */
} blkptr_t;

/*
 * Macros to get and set fields in a bp or DVA.
 */

/* ASIZE is stored in units of 512-byte sectors (no bias). */
#define	DVA_GET_ASIZE(dva)	\
	BF64_GET_SB((dva)->dva_word[0], 0, 24, SPA_MINBLOCKSHIFT, 0)
#define	DVA_SET_ASIZE(dva, x)	\
	BF64_SET_SB((dva)->dva_word[0], 0, 24, SPA_MINBLOCKSHIFT, 0, x)

#define	DVA_GET_GRID(dva)	BF64_GET((dva)->dva_word[0], 24, 8)
#define	DVA_SET_GRID(dva, x)	BF64_SET((dva)->dva_word[0], 24, 8, x)

#define	DVA_GET_VDEV(dva)	BF64_GET((dva)->dva_word[0], 32, 32)
#define	DVA_SET_VDEV(dva, x)	BF64_SET((dva)->dva_word[0], 32, 32, x)

#define	DVA_GET_GANG(dva)	BF64_GET((dva)->dva_word[1], 63, 1)
#define	DVA_SET_GANG(dva, x)	BF64_SET((dva)->dva_word[1], 63, 1, x)

/* LSIZE is stored biased by one sector: an encoded 0 means 512 bytes. */
#define	BP_GET_LSIZE(bp)	\
	BF64_GET_SB((bp)->blk_prop, 0, 16, SPA_MINBLOCKSHIFT, 1)
#define	BP_SET_LSIZE(bp, x)	\
	BF64_SET_SB((bp)->blk_prop, 0, 16, SPA_MINBLOCKSHIFT, 1, x)

#define	BP_GET_COMPRESS(bp)	BF64_GET((bp)->blk_prop, 32, 8)
#define	BP_SET_COMPRESS(bp, x)	BF64_SET((bp)->blk_prop, 32, 8, x)

#define	BP_GET_CHECKSUM(bp)	BF64_GET((bp)->blk_prop, 40, 8)
#define	BP_SET_CHECKSUM(bp, x)	BF64_SET((bp)->blk_prop, 40, 8, x)

#define	BP_GET_TYPE(bp)		BF64_GET((bp)->blk_prop, 48, 8)
#define	BP_SET_TYPE(bp, x)	BF64_SET((bp)->blk_prop, 48, 8, x)

#define	BP_GET_LEVEL(bp)	BF64_GET((bp)->blk_prop, 56, 5)
#define	BP_SET_LEVEL(bp, x)	BF64_SET((bp)->blk_prop, 56, 5, x)

#define	BP_GET_PROP_BIT_61(bp)		BF64_GET((bp)->blk_prop, 61, 1)
#define	BP_SET_PROP_BIT_61(bp, x)	BF64_SET((bp)->blk_prop, 61, 1, x)

#define	BP_GET_DEDUP(bp)	BF64_GET((bp)->blk_prop, 62, 1)
#define	BP_SET_DEDUP(bp, x)	BF64_SET((bp)->blk_prop, 62, 1, x)

/* Yields 0 for native byte order, all-ones (0 - 1) for byteswapped. */
#define	BP_GET_BYTEORDER(bp)	(0 - BF64_GET((bp)->blk_prop, 63, 1))
#define	BP_SET_BYTEORDER(bp, x)	BF64_SET((bp)->blk_prop, 63, 1, x)

/* A zero physical birth means "same txg as logical birth". */
#define	BP_PHYSICAL_BIRTH(bp)	\
	((bp)->blk_phys_birth ? (bp)->blk_phys_birth : (bp)->blk_birth)

/*
 * Multi-statement macros use do/while (0) so that "MACRO(bp, ...);" is a
 * single statement and behaves correctly inside if/else bodies.
 */
#define	BP_SET_BIRTH(bp, logical, physical)	\
	do {	\
		(bp)->blk_birth = (logical);	\
		(bp)->blk_phys_birth = ((logical) == (physical) ? \
		    0 : (physical));	\
	} while (0)

#define	BP_GET_ASIZE(bp)	\
	(DVA_GET_ASIZE(&(bp)->blk_dva[0]) + DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
	DVA_GET_ASIZE(&(bp)->blk_dva[2]))

/*
 * Uncompressed size on disk: metadata and indirect blocks stay PSIZE,
 * data blocks decompress to LSIZE.  (dmu_ot[] and BP_GET_PSIZE() are
 * declared elsewhere.)  The stray trailing ';' of the original macro is
 * removed so the macro can be used in expressions.
 */
#define	BP_GET_UCSIZE(bp) \
	((BP_GET_LEVEL(bp) > 0 || dmu_ot[BP_GET_TYPE(bp)].ot_metadata) ? \
	BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp))

/* A DVA slot is in use iff its ASIZE is non-zero; count the used slots. */
#define	BP_GET_NDVAS(bp)	\
	(!!DVA_GET_ASIZE(&(bp)->blk_dva[0]) + \
	!!DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
	!!DVA_GET_ASIZE(&(bp)->blk_dva[2]))

#define	BP_COUNT_GANG(bp)	\
	(DVA_GET_GANG(&(bp)->blk_dva[0]) + \
	DVA_GET_GANG(&(bp)->blk_dva[1]) + \
	DVA_GET_GANG(&(bp)->blk_dva[2]))

#define	DVA_EQUAL(dva1, dva2)	\
	((dva1)->dva_word[1] == (dva2)->dva_word[1] && \
	(dva1)->dva_word[0] == (dva2)->dva_word[0])

#define	BP_EQUAL(bp1, bp2)	\
	(BP_PHYSICAL_BIRTH(bp1) == BP_PHYSICAL_BIRTH(bp2) &&	\
	DVA_EQUAL(&(bp1)->blk_dva[0], &(bp2)->blk_dva[0]) &&	\
	DVA_EQUAL(&(bp1)->blk_dva[1], &(bp2)->blk_dva[1]) &&	\
	DVA_EQUAL(&(bp1)->blk_dva[2], &(bp2)->blk_dva[2]))

/* Branch-free equality: OR of the word differences is 0 iff all match. */
#define	ZIO_CHECKSUM_EQUAL(zc1, zc2) \
	(0 == (((zc1).zc_word[0] - (zc2).zc_word[0]) | \
	((zc1).zc_word[1] - (zc2).zc_word[1]) | \
	((zc1).zc_word[2] - (zc2).zc_word[2]) | \
	((zc1).zc_word[3] - (zc2).zc_word[3])))

#define	DVA_IS_VALID(dva)	(DVA_GET_ASIZE(dva) != 0)

#define	ZIO_SET_CHECKSUM(zcp, w0, w1, w2, w3)	\
	do {	\
		(zcp)->zc_word[0] = w0;	\
		(zcp)->zc_word[1] = w1;	\
		(zcp)->zc_word[2] = w2;	\
		(zcp)->zc_word[3] = w3;	\
	} while (0)

#define	BP_IDENTITY(bp)		(&(bp)->blk_dva[0])
#define	BP_IS_GANG(bp)		DVA_GET_GANG(BP_IDENTITY(bp))
#define	BP_IS_HOLE(bp)		((bp)->blk_birth == 0)

/* BP_IS_RAIDZ(bp) assumes no block compression */
#define	BP_IS_RAIDZ(bp)		(DVA_GET_ASIZE(&(bp)->blk_dva[0]) > \
				BP_GET_PSIZE(bp))

#define	BP_ZERO(bp)	\
	do {	\
		(bp)->blk_dva[0].dva_word[0] = 0;	\
		(bp)->blk_dva[0].dva_word[1] = 0;	\
		(bp)->blk_dva[1].dva_word[0] = 0;	\
		(bp)->blk_dva[1].dva_word[1] = 0;	\
		(bp)->blk_dva[2].dva_word[0] = 0;	\
		(bp)->blk_dva[2].dva_word[1] = 0;	\
		(bp)->blk_prop = 0;	\
		(bp)->blk_pad[0] = 0;	\
		(bp)->blk_pad[1] = 0;	\
		(bp)->blk_phys_birth = 0;	\
		(bp)->blk_birth = 0;	\
		(bp)->blk_fill = 0;	\
		ZIO_SET_CHECKSUM(&(bp)->blk_cksum, 0, 0, 0, 0);	\
	} while (0)

#define	BP_SPRINTF_LEN	320

#endif	/* ! ZFS_SPA_HEADER */