/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#ifndef _LINUX_DM_BUFIO_H
#define _LINUX_DM_BUFIO_H

#include <linux/blkdev.h>
#include <linux/types.h>

/*----------------------------------------------------------------*/

struct dm_bufio_client;
struct dm_buffer;

/*
 * Create a buffered IO cache on a given device.
 */
struct dm_bufio_client *
dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
		       unsigned reserved_buffers, unsigned aux_size,
		       void (*alloc_callback)(struct dm_buffer *),
		       void (*write_callback)(struct dm_buffer *));

/*
 * Release a buffered IO cache.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c);
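
/*
 * Example (illustrative sketch, not part of the original header): creating
 * a client from a target constructor and tearing it down again.  "dev" is a
 * hypothetical struct dm_dev obtained with dm_get_device(); dm-bufio signals
 * failure with an ERR_PTR-encoded error.
 *
 *	struct dm_bufio_client *c;
 *
 *	c = dm_bufio_client_create(dev->bdev, 4096, 1, 0, NULL, NULL);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *
 *	... use the client ...
 *
 *	dm_bufio_client_destroy(c);
 */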

/*
 * Set the sector range.
 * When this function is called, there must be no I/O in progress on the bufio
 * client.
 */
void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start);

/*
 * WARNING: to avoid deadlocks, these conditions must be observed:
 *
 * - At most one thread can hold at most "reserved_buffers" buffers
 *   simultaneously.
 * - Every other thread can hold at most one buffer.
 * - Threads which call only dm_bufio_get can hold an unlimited number of
 *   buffers.
 */

/*
 * Read a given block from disk.  Returns a pointer to the block's data and,
 * through *bp, a pointer to the dm_buffer that can be used to release the
 * buffer or to mark it dirty.
 */
void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp);
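
/*
 * Example (illustrative sketch, not part of the original header): the usual
 * read/use/release pattern.  "c" and "block" are assumed to exist; a failed
 * read is reported as an ERR_PTR-encoded error.
 *
 *	struct dm_buffer *b;
 *	u8 *data;
 *
 *	data = dm_bufio_read(c, block, &b);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *
 *	... read data[0 .. block_size - 1] ...
 *
 *	dm_bufio_release(b);
 */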

/*
 * Like dm_bufio_read, but only return the buffer if it is already in the
 * cache; no I/O is issued.  If the block is not cached, return NULL.
 */
void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp);
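
/*
 * Example (illustrative sketch, not part of the original header): probing
 * the cache without triggering any I/O.  The names and the -ENODATA return
 * are assumptions.
 *
 *	struct dm_buffer *b;
 *	void *data;
 *
 *	data = dm_bufio_get(c, block, &b);
 *	if (!data)
 *		return -ENODATA;
 *
 *	... use the cached data ...
 *
 *	dm_bufio_release(b);
 */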

/*
 * Like dm_bufio_read, but don't read anything from the disk.  It is
 * expected that the caller initializes the buffer and marks it dirty.
 */
void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp);
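
/*
 * Example (illustrative sketch, not part of the original header): allocating
 * a brand-new block, initializing it and scheduling it for writeback.  "c"
 * and "block" are assumptions.
 *
 *	struct dm_buffer *b;
 *	void *data;
 *
 *	data = dm_bufio_new(c, block, &b);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *
 *	memset(data, 0, dm_bufio_get_block_size(c));
 *	dm_bufio_mark_buffer_dirty(b);
 *	dm_bufio_release(b);
 */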

/*
 * Prefetch the specified blocks into the cache.
 * The function starts reading the blocks and returns without waiting for
 * the I/O to finish.
 */
void dm_bufio_prefetch(struct dm_bufio_client *c,
		       sector_t block, unsigned n_blocks);
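
/*
 * Example (illustrative sketch, not part of the original header): warming
 * the cache before a batch of reads.  "first_block" and "count" are
 * hypothetical.
 *
 *	unsigned i;
 *
 *	dm_bufio_prefetch(c, first_block, count);
 *
 *	for (i = 0; i < count; i++) {
 *		struct dm_buffer *b;
 *		void *data = dm_bufio_read(c, first_block + i, &b);
 *
 *		if (IS_ERR(data))
 *			return PTR_ERR(data);
 *		... process one block ...
 *		dm_bufio_release(b);
 *	}
 */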

/*
 * Release a reference obtained with dm_bufio_{read,get,new}.  The data
 * pointer and the dm_buffer pointer are no longer valid after this call.
 */
void dm_bufio_release(struct dm_buffer *b);

/*
 * Mark a buffer dirty.  It should be called after the buffer is modified.
 *
 * Under memory pressure, the buffer may be written back after
 * dm_bufio_mark_buffer_dirty but before dm_bufio_write_dirty_buffers is
 * called.  dm_bufio_write_dirty_buffers only guarantees that the buffer is
 * on disk when it returns; the actual write may have happened earlier.
 */
void dm_bufio_mark_buffer_dirty(struct dm_buffer *b);

/*
 * Mark a part of the buffer dirty.
 *
 * The specified part of the buffer is scheduled to be written.  dm-bufio may
 * write the specified part of the buffer or it may write a larger superset.
 */
void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
					unsigned start, unsigned end);
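
/*
 * Example (illustrative sketch, not part of the original header): updating a
 * single 64-byte entry inside a cached block and only scheduling that byte
 * range for writeback.  "entry", "new_entry", "b" and "data" are
 * hypothetical, and the sketch assumes "start" is inclusive and "end"
 * exclusive.
 *
 *	unsigned offset = entry * 64;
 *
 *	memcpy((char *)data + offset, new_entry, 64);
 *	dm_bufio_mark_partial_buffer_dirty(b, offset, offset + 64);
 */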

/*
 * Initiate writing of dirty buffers, without waiting for completion.
 */
void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c);

/*
 * Write all dirty buffers.  Guarantees that all dirty buffers created prior
 * to this call are on disk when this call exits.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c);
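
/*
 * Example (illustrative sketch, not part of the original header): a simple
 * commit sequence that modifies a block and makes sure it reaches stable
 * storage.  The final flush assumes the device has a volatile write cache;
 * "c" and "block" are assumptions.
 *
 *	struct dm_buffer *b;
 *	void *data;
 *	int r;
 *
 *	data = dm_bufio_read(c, block, &b);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *
 *	... modify the data ...
 *
 *	dm_bufio_mark_buffer_dirty(b);
 *	dm_bufio_release(b);
 *
 *	r = dm_bufio_write_dirty_buffers(c);
 *	if (!r)
 *		r = dm_bufio_issue_flush(c);
 *	return r;
 */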

/*
 * Send an empty write barrier to the device to flush the hardware disk cache.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c);

/*
 * Send a discard request to the underlying device.
 */
int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count);

/*
 * Like dm_bufio_release but also move the buffer to the new
 * block.  dm_bufio_write_dirty_buffers is needed to commit the new block.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block);
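
/*
 * Example (illustrative sketch, not part of the original header): relocating
 * the contents of "old_block" to "new_block" as a block-copy primitive,
 * following the semantics described above.  The block numbers, "c", "b" and
 * "data" are hypothetical.
 *
 *	data = dm_bufio_read(c, old_block, &b);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *
 *	dm_bufio_release_move(b, new_block);
 *	return dm_bufio_write_dirty_buffers(c);
 */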

/*
 * Free the given buffer.
 * This is only a hint; if the buffer is in use or dirty, this function
 * does nothing.
 */
void dm_bufio_forget(struct dm_bufio_client *c, sector_t block);

/*
 * Free the given range of buffers.
 * This is only a hint; if a buffer in the range is in use or dirty, this
 * function does nothing for it.
 */
void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks);
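
/*
 * Example (illustrative sketch, not part of the original header): dropping
 * cached copies after discarding a region so stale data is not served from
 * the cache.  "block", "n_blocks" and "r" are assumptions, and the sketch
 * assumes both calls take the same block-granular units.
 *
 *	r = dm_bufio_issue_discard(c, block, n_blocks);
 *	if (r)
 *		return r;
 *	dm_bufio_forget_buffers(c, block, n_blocks);
 */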

/*
 * Set the minimum number of buffers before cleanup happens.
 */
void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n);

unsigned dm_bufio_get_block_size(struct dm_bufio_client *c);
sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c);
sector_t dm_bufio_get_block_number(struct dm_buffer *b);
void *dm_bufio_get_block_data(struct dm_buffer *b);
void *dm_bufio_get_aux_data(struct dm_buffer *b);
struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b);
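
/*
 * Example (illustrative sketch, not part of the original header): using
 * per-buffer auxiliary data.  The client is created with a non-zero aux_size
 * so each buffer carries caller-private state; "struct my_aux",
 * "my_alloc_callback" and the verification flag are hypothetical.
 *
 *	struct my_aux {
 *		int verified;
 *	};
 *
 *	static void my_alloc_callback(struct dm_buffer *buf)
 *	{
 *		struct my_aux *aux = dm_bufio_get_aux_data(buf);
 *
 *		aux->verified = 0;
 *	}
 *
 *	...
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, sizeof(struct my_aux),
 *				   my_alloc_callback, NULL);
 *	...
 *	data = dm_bufio_read(c, block, &b);
 *	if (!IS_ERR(data)) {
 *		struct my_aux *aux = dm_bufio_get_aux_data(b);
 *
 *		if (!aux->verified) {
 *			... verify the block contents ...
 *			aux->verified = 1;
 *		}
 *		dm_bufio_release(b);
 *	}
 */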

/*----------------------------------------------------------------*/

#endif