/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#ifndef _LINUX_DM_BLOCK_MANAGER_H
#define _LINUX_DM_BLOCK_MANAGER_H

#include <linux/types.h>
#include <linux/blkdev.h>

/*----------------------------------------------------------------*/

/*
 * Block number.
 */
typedef uint64_t dm_block_t;
struct dm_block;

dm_block_t dm_block_location(struct dm_block *b);
void *dm_block_data(struct dm_block *b);

/*----------------------------------------------------------------*/

/*
 * @name should be a unique identifier for the block manager, no longer
 * than 32 chars.
 *
 * @max_held_per_thread should be the maximum number of locks, read or
 * write, that an individual thread holds at any one time.
 */
struct dm_block_manager;
struct dm_block_manager *dm_block_manager_create(
	struct block_device *bdev, unsigned block_size,
	unsigned max_held_per_thread);
void dm_block_manager_destroy(struct dm_block_manager *bm);

unsigned dm_bm_block_size(struct dm_block_manager *bm);
dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm);

/*----------------------------------------------------------------*/

/*
 * The validator allows the caller to verify newly-read data and modify
 * the data just before writing, e.g. to calculate checksums.  It's
 * important to be consistent with your use of validators.  The only time
 * you can change validators is if you call dm_bm_write_lock_zero.
 */
struct dm_block_validator {
	const char *name;
	void (*prepare_for_write)(struct dm_block_validator *v, struct dm_block *b, size_t block_size);

	/*
	 * Return 0 if the checksum is valid or < 0 on error.
	 */
	int (*check)(struct dm_block_validator *v, struct dm_block *b, size_t block_size);
};
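
/*
 * Illustrative sketch only (not part of this interface, and not built):
 * one way a client might fill in dm_block_validator, assuming a
 * hypothetical on-disk layout that keeps a little-endian checksum in the
 * first four bytes of the block.  The example_* names are invented for
 * this sketch; real users define their own disk structures and follow the
 * same pattern.  dm_bm_checksum() is declared later in this header.
 */
#if 0
struct example_disk_header {
	__le32 csum;	/* checksum of everything after this field */
	__le32 flags;
};

static void example_prepare_for_write(struct dm_block_validator *v,
				      struct dm_block *b, size_t block_size)
{
	struct example_disk_header *h = dm_block_data(b);

	h->csum = cpu_to_le32(dm_bm_checksum(&h->flags,
					     block_size - sizeof(__le32), 0));
}

static int example_check(struct dm_block_validator *v,
			 struct dm_block *b, size_t block_size)
{
	struct example_disk_header *h = dm_block_data(b);
	__le32 csum = cpu_to_le32(dm_bm_checksum(&h->flags,
						 block_size - sizeof(__le32), 0));

	if (csum != h->csum)
		return -EILSEQ;

	return 0;
}

static struct dm_block_validator example_validator = {
	.name = "example",
	.prepare_for_write = example_prepare_for_write,
	.check = example_check,
};
#endif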

/*----------------------------------------------------------------*/

/*
 * You can have multiple concurrent readers or a single writer holding a
 * block lock.
 */

/*
 * dm_bm_lock() locks a block and returns through @result a pointer to
 * memory that holds a copy of that block.  If you have write-locked the
 * block then any changes you make to memory pointed to by @result will be
 * written back to the disk sometime after dm_bm_unlock is called.
 */
int dm_bm_read_lock(struct dm_block_manager *bm, dm_block_t b,
		    struct dm_block_validator *v,
		    struct dm_block **result);

int dm_bm_write_lock(struct dm_block_manager *bm, dm_block_t b,
		     struct dm_block_validator *v,
		     struct dm_block **result);

/*
 * The *_try_lock variants return -EWOULDBLOCK if the block isn't
 * available immediately.
 */
int dm_bm_read_try_lock(struct dm_block_manager *bm, dm_block_t b,
			struct dm_block_validator *v,
			struct dm_block **result);

/*
 * Use dm_bm_write_lock_zero() when you know you're going to
 * overwrite the block completely.  It saves a disk read.
 */
int dm_bm_write_lock_zero(struct dm_block_manager *bm, dm_block_t b,
			  struct dm_block_validator *v,
			  struct dm_block **result);

void dm_bm_unlock(struct dm_block *b);

/*
 * It's a common idiom to have a superblock that should be committed last.
 *
 * @superblock should be write-locked on entry.  It will be unlocked during
 * this function.  All dirty blocks are guaranteed to be written and flushed
 * before the superblock.
 *
 * This method always blocks.
 */
int dm_bm_flush(struct dm_block_manager *bm);

/*
 * Request that the given block's data is prefetched into the cache.
 */
void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);

/*
 * Switches the bm to read-only mode.  Once read-only mode has been
 * entered the following functions will return -EPERM:
 *
 *   dm_bm_write_lock
 *   dm_bm_write_lock_zero
 *   dm_bm_flush
 *
 * Additionally you should not use dm_bm_unlock_move, however no error will
 * be returned if you do.
 */
bool dm_bm_is_read_only(struct dm_block_manager *bm);
void dm_bm_set_read_only(struct dm_block_manager *bm);
void dm_bm_set_read_write(struct dm_block_manager *bm);

u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor);

/*----------------------------------------------------------------*/

#endif	/* _LINUX_DM_BLOCK_MANAGER_H */
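
/*
 * Illustrative usage sketch only (not built): create a block manager on an
 * already-opened block device, zero one block and flush it out.  The
 * 4096-byte block size, the example_ name and the NULL validator are
 * assumptions made for brevity; IS_ERR/PTR_ERR and memset come from the
 * usual kernel headers.
 */
#if 0
static int example_zero_block(struct block_device *bdev, dm_block_t where)
{
	struct dm_block_manager *bm;
	struct dm_block *blk;
	int r;

	/* This thread never holds more than one lock at a time. */
	bm = dm_block_manager_create(bdev, 4096, 1);
	if (IS_ERR(bm))
		return PTR_ERR(bm);

	/*
	 * The block is overwritten completely, so the zero variant is
	 * used and no read from disk is needed.
	 */
	r = dm_bm_write_lock_zero(bm, where, NULL, &blk);
	if (r) {
		dm_block_manager_destroy(bm);
		return r;
	}

	memset(dm_block_data(blk), 0, dm_bm_block_size(bm));
	dm_bm_unlock(blk);

	/* Make sure the new contents have reached the disk. */
	r = dm_bm_flush(bm);

	dm_block_manager_destroy(bm);
	return r;
}
#endif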