/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2004 - 2008 Red Hat, Inc. All rights reserved.
 *
 * Device-Mapper low-level I/O.
 *
 * This file is released under the GPL.
 */

#ifndef _LINUX_DM_IO_H
#define _LINUX_DM_IO_H

#ifdef __KERNEL__

#include <linux/types.h>

/*
 * A contiguous run of sectors on one block device that an I/O request
 * targets.
 */
struct dm_io_region {
	struct block_device *bdev;
	sector_t sector;
	sector_t count;		/* If this is zero the region is ignored. */
};

/* Singly-linked list of pages used as an I/O data source/destination. */
struct page_list {
	struct page_list *next;
	struct page *page;
};

/*
 * Completion callback for asynchronous requests.  'error' is a bitset with
 * one bit per region (see dm_io() below); 'context' is the caller-supplied
 * cookie from struct dm_io_notify.
 */
typedef void (*io_notify_fn)(unsigned long error, void *context);

/* Kind of memory described by struct dm_io_memory. */
enum dm_io_mem_type {
	DM_IO_PAGE_LIST,	/* Page list */
	DM_IO_BIO,		/* Bio vector */
	DM_IO_VMA,		/* Virtual memory area */
	DM_IO_KMEM,		/* Kernel memory */
};

/*
 * Describes the memory the I/O is performed to/from.  Only the union
 * member selected by 'type' is meaningful.
 */
struct dm_io_memory {
	enum dm_io_mem_type type;

	unsigned offset;	/* Byte offset into the first page/buffer */

	union {
		struct page_list *pl;	/* DM_IO_PAGE_LIST */
		struct bio *bio;	/* DM_IO_BIO */
		void *vma;		/* DM_IO_VMA */
		void *addr;		/* DM_IO_KMEM */
	} ptr;
};

/* Asynchronous-completion notification; leave fn NULL for synchronous I/O. */
struct dm_io_notify {
	io_notify_fn fn;	/* Callback for asynchronous requests */
	void *context;		/* Passed to callback */
};

/*
 * IO request structure
 */
struct dm_io_client;
struct dm_io_request {
	int bi_op;			/* REQ_OP */
	int bi_op_flags;		/* req_flag_bits */
	struct dm_io_memory mem;	/* Memory to use for io */
	struct dm_io_notify notify;	/* Synchronous if notify.fn is NULL */
	struct dm_io_client *client;	/* Client memory handler */
};

/*
 * For async io calls, users can alternatively use the dm_io() function below
 * and dm_io_client_create() to create private mempools for the client.
 *
 * Create/destroy may block.
 */
struct dm_io_client *dm_io_client_create(void);
void dm_io_client_destroy(struct dm_io_client *client);

/*
 * IO interface using private per-client pools.
 * Each bit in the optional 'sync_error_bits' bitset indicates whether an
 * error occurred doing io to the corresponding region.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *region, unsigned long *sync_error_bits);

#endif	/* __KERNEL__ */
#endif	/* _LINUX_DM_IO_H */