Lines matching references to the symbol bh in fs/buffer.c; each entry shows the source line number, the matching line, and the enclosing function (declaration sites are tagged argument, local, or member).
55 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
60 inline void touch_buffer(struct buffer_head *bh) in touch_buffer() argument
62 trace_block_touch_buffer(bh); in touch_buffer()
63 mark_page_accessed(bh->b_page); in touch_buffer()
67 void __lock_buffer(struct buffer_head *bh) in __lock_buffer() argument
69 wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE); in __lock_buffer()
73 void unlock_buffer(struct buffer_head *bh) in unlock_buffer() argument
75 clear_bit_unlock(BH_Lock, &bh->b_state); in unlock_buffer()
77 wake_up_bit(&bh->b_state, BH_Lock); in unlock_buffer()
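These helpers implement the BH_Lock bit protocol: __lock_buffer() sleeps uninterruptibly on the bit, unlock_buffer() clears it and wakes waiters. A minimal sketch of the usual pairing, assuming the caller already holds a buffer reference; update_block() and its arguments are hypothetical:

    #include <linux/buffer_head.h>
    #include <linux/string.h>

    static void update_block(struct buffer_head *bh, const void *src, size_t len)
    {
            lock_buffer(bh);                /* sleeps in __lock_buffer() if contended */
            memcpy(bh->b_data, src, len);   /* caller guarantees len <= bh->b_size */
            mark_buffer_dirty(bh);
            unlock_buffer(bh);              /* clear_bit_unlock() + wake_up_bit() */
    }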
89 struct buffer_head *head, *bh; in buffer_check_dirty_writeback() local
102 bh = head; in buffer_check_dirty_writeback()
104 if (buffer_locked(bh)) in buffer_check_dirty_writeback()
107 if (buffer_dirty(bh)) in buffer_check_dirty_writeback()
110 bh = bh->b_this_page; in buffer_check_dirty_writeback()
111 } while (bh != head); in buffer_check_dirty_writeback()
120 void __wait_on_buffer(struct buffer_head * bh) in __wait_on_buffer() argument
122 wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE); in __wait_on_buffer()
126 static void buffer_io_error(struct buffer_head *bh, char *msg) in buffer_io_error() argument
128 if (!test_bit(BH_Quiet, &bh->b_state)) in buffer_io_error()
131 bh->b_bdev, (unsigned long long)bh->b_blocknr, msg); in buffer_io_error()
142 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate) in __end_buffer_read_notouch() argument
145 set_buffer_uptodate(bh); in __end_buffer_read_notouch()
148 clear_buffer_uptodate(bh); in __end_buffer_read_notouch()
150 unlock_buffer(bh); in __end_buffer_read_notouch()
157 void end_buffer_read_sync(struct buffer_head *bh, int uptodate) in end_buffer_read_sync() argument
159 __end_buffer_read_notouch(bh, uptodate); in end_buffer_read_sync()
160 put_bh(bh); in end_buffer_read_sync()
164 void end_buffer_write_sync(struct buffer_head *bh, int uptodate) in end_buffer_write_sync() argument
167 set_buffer_uptodate(bh); in end_buffer_write_sync()
169 buffer_io_error(bh, ", lost sync page write"); in end_buffer_write_sync()
170 mark_buffer_write_io_error(bh); in end_buffer_write_sync()
171 clear_buffer_uptodate(bh); in end_buffer_write_sync()
173 unlock_buffer(bh); in end_buffer_write_sync()
174 put_bh(bh); in end_buffer_write_sync()
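end_buffer_read_sync() and end_buffer_write_sync() are the completion handlers for hand-rolled synchronous I/O. A sketch of the write-side pattern they support (write_dirty_buffer() and __sync_dirty_buffer() further down are the canonical in-tree versions); write_block_sync() is hypothetical:

    #include <linux/buffer_head.h>

    static int write_block_sync(struct buffer_head *bh)
    {
            lock_buffer(bh);
            if (!test_clear_buffer_dirty(bh)) {
                    unlock_buffer(bh);
                    return 0;               /* clean, nothing to write */
            }
            get_bh(bh);                     /* dropped by put_bh() in the endio */
            bh->b_end_io = end_buffer_write_sync;
            submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
            wait_on_buffer(bh);
            /* end_buffer_write_sync() clears uptodate on I/O error */
            return buffer_uptodate(bh) ? 0 : -EIO;
    }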
195 struct buffer_head *bh; in __find_get_block_slow() local
210 bh = head; in __find_get_block_slow()
212 if (!buffer_mapped(bh)) in __find_get_block_slow()
214 else if (bh->b_blocknr == block) { in __find_get_block_slow()
215 ret = bh; in __find_get_block_slow()
216 get_bh(bh); in __find_get_block_slow()
219 bh = bh->b_this_page; in __find_get_block_slow()
220 } while (bh != head); in __find_get_block_slow()
233 (unsigned long long)bh->b_blocknr, in __find_get_block_slow()
234 bh->b_state, bh->b_size, bdev, in __find_get_block_slow()
244 static void end_buffer_async_read(struct buffer_head *bh, int uptodate) in end_buffer_async_read() argument
252 BUG_ON(!buffer_async_read(bh)); in end_buffer_async_read()
254 page = bh->b_page; in end_buffer_async_read()
256 set_buffer_uptodate(bh); in end_buffer_async_read()
258 clear_buffer_uptodate(bh); in end_buffer_async_read()
259 buffer_io_error(bh, ", async page read"); in end_buffer_async_read()
270 clear_buffer_async_read(bh); in end_buffer_async_read()
271 unlock_buffer(bh); in end_buffer_async_read()
272 tmp = bh; in end_buffer_async_read()
281 } while (tmp != bh); in end_buffer_async_read()
300 struct buffer_head *bh; member
307 struct buffer_head *bh = ctx->bh; in decrypt_bh() local
310 err = fscrypt_decrypt_pagecache_blocks(bh->b_page, bh->b_size, in decrypt_bh()
311 bh_offset(bh)); in decrypt_bh()
312 end_buffer_async_read(bh, err == 0); in decrypt_bh()
320 static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate) in end_buffer_async_read_io() argument
324 fscrypt_inode_uses_fs_layer_crypto(bh->b_page->mapping->host)) { in end_buffer_async_read_io()
329 ctx->bh = bh; in end_buffer_async_read_io()
335 end_buffer_async_read(bh, uptodate); in end_buffer_async_read_io()
342 void end_buffer_async_write(struct buffer_head *bh, int uptodate) in end_buffer_async_write() argument
349 BUG_ON(!buffer_async_write(bh)); in end_buffer_async_write()
351 page = bh->b_page; in end_buffer_async_write()
353 set_buffer_uptodate(bh); in end_buffer_async_write()
355 buffer_io_error(bh, ", lost async page write"); in end_buffer_async_write()
356 mark_buffer_write_io_error(bh); in end_buffer_async_write()
357 clear_buffer_uptodate(bh); in end_buffer_async_write()
364 clear_buffer_async_write(bh); in end_buffer_async_write()
365 unlock_buffer(bh); in end_buffer_async_write()
366 tmp = bh->b_this_page; in end_buffer_async_write()
367 while (tmp != bh) { in end_buffer_async_write()
405 static void mark_buffer_async_read(struct buffer_head *bh) in mark_buffer_async_read() argument
407 bh->b_end_io = end_buffer_async_read_io; in mark_buffer_async_read()
408 set_buffer_async_read(bh); in mark_buffer_async_read()
411 static void mark_buffer_async_write_endio(struct buffer_head *bh, in mark_buffer_async_write_endio() argument
414 bh->b_end_io = handler; in mark_buffer_async_write_endio()
415 set_buffer_async_write(bh); in mark_buffer_async_write_endio()
418 void mark_buffer_async_write(struct buffer_head *bh) in mark_buffer_async_write() argument
420 mark_buffer_async_write_endio(bh, end_buffer_async_write); in mark_buffer_async_write()
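mark_buffer_async_read()/mark_buffer_async_write() install the async completion handlers above. A condensed sketch of how the write side is driven by __block_write_full_page() (listed further down); write_page_buffers() is hypothetical and omits the error paths:

    #include <linux/buffer_head.h>

    static void write_page_buffers(struct buffer_head *head)
    {
            struct buffer_head *bh = head;

            do {                            /* phase 1: mark, page still locked */
                    lock_buffer(bh);
                    if (test_clear_buffer_dirty(bh))
                            mark_buffer_async_write(bh);    /* sets b_end_io */
                    else
                            unlock_buffer(bh);
                    bh = bh->b_this_page;
            } while (bh != head);

            /* the real caller sets page writeback and unlocks the page here */

            do {                            /* phase 2: submit */
                    struct buffer_head *next = bh->b_this_page;

                    if (buffer_async_write(bh))
                            submit_bh(REQ_OP_WRITE, 0, bh);
                    bh = next;
            } while (bh != head);
    }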
477 static void __remove_assoc_queue(struct buffer_head *bh) in __remove_assoc_queue() argument
479 list_del_init(&bh->b_assoc_buffers); in __remove_assoc_queue()
480 WARN_ON(!bh->b_assoc_map); in __remove_assoc_queue()
481 bh->b_assoc_map = NULL; in __remove_assoc_queue()
501 struct buffer_head *bh; in osync_buffers_list() local
508 bh = BH_ENTRY(p); in osync_buffers_list()
509 if (buffer_locked(bh)) { in osync_buffers_list()
510 get_bh(bh); in osync_buffers_list()
512 wait_on_buffer(bh); in osync_buffers_list()
513 if (!buffer_uptodate(bh)) in osync_buffers_list()
515 brelse(bh); in osync_buffers_list()
562 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize); in write_boundary_block() local
563 if (bh) { in write_boundary_block()
564 if (buffer_dirty(bh)) in write_boundary_block()
565 ll_rw_block(REQ_OP_WRITE, 0, 1, &bh); in write_boundary_block()
566 put_bh(bh); in write_boundary_block()
570 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode) in mark_buffer_dirty_inode() argument
573 struct address_space *buffer_mapping = bh->b_page->mapping; in mark_buffer_dirty_inode()
575 mark_buffer_dirty(bh); in mark_buffer_dirty_inode()
581 if (!bh->b_assoc_map) { in mark_buffer_dirty_inode()
583 list_move_tail(&bh->b_assoc_buffers, in mark_buffer_dirty_inode()
585 bh->b_assoc_map = mapping; in mark_buffer_dirty_inode()
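mark_buffer_dirty_inode() both dirties the buffer and links it onto the owning inode's associated-buffer list (b_assoc_map), so fsync_buffers_list() below flushes it on fsync(). A sketch; update_indirect() and the slot layout are hypothetical:

    #include <linux/buffer_head.h>

    static void update_indirect(struct inode *inode, struct buffer_head *bh,
                                unsigned slot, __le32 blkno)
    {
            ((__le32 *)bh->b_data)[slot] = blkno;
            mark_buffer_dirty_inode(bh, inode); /* queue for per-inode fsync */
    }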
652 struct buffer_head *bh = head; in __set_page_dirty_buffers() local
655 set_buffer_dirty(bh); in __set_page_dirty_buffers()
656 bh = bh->b_this_page; in __set_page_dirty_buffers()
657 } while (bh != head); in __set_page_dirty_buffers()
700 struct buffer_head *bh; in fsync_buffers_list() local
711 bh = BH_ENTRY(list->next); in fsync_buffers_list()
712 mapping = bh->b_assoc_map; in fsync_buffers_list()
713 __remove_assoc_queue(bh); in fsync_buffers_list()
717 if (buffer_dirty(bh) || buffer_locked(bh)) { in fsync_buffers_list()
718 list_add(&bh->b_assoc_buffers, &tmp); in fsync_buffers_list()
719 bh->b_assoc_map = mapping; in fsync_buffers_list()
720 if (buffer_dirty(bh)) { in fsync_buffers_list()
721 get_bh(bh); in fsync_buffers_list()
730 write_dirty_buffer(bh, REQ_SYNC); in fsync_buffers_list()
738 brelse(bh); in fsync_buffers_list()
749 bh = BH_ENTRY(tmp.prev); in fsync_buffers_list()
750 get_bh(bh); in fsync_buffers_list()
751 mapping = bh->b_assoc_map; in fsync_buffers_list()
752 __remove_assoc_queue(bh); in fsync_buffers_list()
756 if (buffer_dirty(bh)) { in fsync_buffers_list()
757 list_add(&bh->b_assoc_buffers, in fsync_buffers_list()
759 bh->b_assoc_map = mapping; in fsync_buffers_list()
762 wait_on_buffer(bh); in fsync_buffers_list()
763 if (!buffer_uptodate(bh)) in fsync_buffers_list()
765 brelse(bh); in fsync_buffers_list()
818 struct buffer_head *bh = BH_ENTRY(list->next); in remove_inode_buffers() local
819 if (buffer_dirty(bh)) { in remove_inode_buffers()
823 __remove_assoc_queue(bh); in remove_inode_buffers()
842 struct buffer_head *bh, *head; in alloc_page_buffers() local
856 bh = alloc_buffer_head(gfp); in alloc_page_buffers()
857 if (!bh) in alloc_page_buffers()
860 bh->b_this_page = head; in alloc_page_buffers()
861 bh->b_blocknr = -1; in alloc_page_buffers()
862 head = bh; in alloc_page_buffers()
864 bh->b_size = size; in alloc_page_buffers()
867 set_bh_page(bh, page, offset); in alloc_page_buffers()
879 bh = head; in alloc_page_buffers()
881 free_buffer_head(bh); in alloc_page_buffers()
892 struct buffer_head *bh, *tail; in link_dev_buffers() local
894 bh = head; in link_dev_buffers()
896 tail = bh; in link_dev_buffers()
897 bh = bh->b_this_page; in link_dev_buffers()
898 } while (bh); in link_dev_buffers()
923 struct buffer_head *bh = head; in init_page_buffers() local
928 if (!buffer_mapped(bh)) { in init_page_buffers()
929 bh->b_end_io = NULL; in init_page_buffers()
930 bh->b_private = NULL; in init_page_buffers()
931 bh->b_bdev = bdev; in init_page_buffers()
932 bh->b_blocknr = block; in init_page_buffers()
934 set_buffer_uptodate(bh); in init_page_buffers()
936 set_buffer_mapped(bh); in init_page_buffers()
939 bh = bh->b_this_page; in init_page_buffers()
940 } while (bh != head); in init_page_buffers()
959 struct buffer_head *bh; in grow_dev_page() local
979 bh = page_buffers(page); in grow_dev_page()
980 if (bh->b_size == size) { in grow_dev_page()
993 bh = alloc_page_buffers(page, size, true); in grow_dev_page()
1001 link_dev_buffers(page, bh); in grow_dev_page()
1063 struct buffer_head *bh; in __getblk_slow() local
1066 bh = __find_get_block(bdev, block, size); in __getblk_slow()
1067 if (bh) in __getblk_slow()
1068 return bh; in __getblk_slow()
1111 void mark_buffer_dirty(struct buffer_head *bh) in mark_buffer_dirty() argument
1113 WARN_ON_ONCE(!buffer_uptodate(bh)); in mark_buffer_dirty()
1115 trace_block_dirty_buffer(bh); in mark_buffer_dirty()
1123 if (buffer_dirty(bh)) { in mark_buffer_dirty()
1125 if (buffer_dirty(bh)) in mark_buffer_dirty()
1129 if (!test_set_buffer_dirty(bh)) { in mark_buffer_dirty()
1130 struct page *page = bh->b_page; in mark_buffer_dirty()
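mark_buffer_dirty() only flags the buffer (and its page) dirty; the actual writeback happens later. The classic read-modify-write cycle it belongs to, using sb_bread(), which wraps __bread(); touch_magic() and MY_MAGIC_OFF are hypothetical:

    #include <linux/buffer_head.h>

    #define MY_MAGIC_OFF 0  /* hypothetical on-disk offset */

    static int touch_magic(struct super_block *sb, sector_t block)
    {
            struct buffer_head *bh = sb_bread(sb, block);   /* read + ref */

            if (!bh)
                    return -EIO;
            lock_buffer(bh);
            *(__le32 *)(bh->b_data + MY_MAGIC_OFF) = cpu_to_le32(0x1234);
            unlock_buffer(bh);
            mark_buffer_dirty(bh);  /* WARN_ON_ONCE fires if bh not uptodate */
            brelse(bh);             /* drop the reference from sb_bread() */
            return 0;
    }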
1146 void mark_buffer_write_io_error(struct buffer_head *bh) in mark_buffer_write_io_error() argument
1150 set_buffer_write_io_error(bh); in mark_buffer_write_io_error()
1152 if (bh->b_page && bh->b_page->mapping) in mark_buffer_write_io_error()
1153 mapping_set_error(bh->b_page->mapping, -EIO); in mark_buffer_write_io_error()
1154 if (bh->b_assoc_map) in mark_buffer_write_io_error()
1155 mapping_set_error(bh->b_assoc_map, -EIO); in mark_buffer_write_io_error()
1157 sb = READ_ONCE(bh->b_bdev->bd_super); in mark_buffer_write_io_error()
1185 void __bforget(struct buffer_head *bh) in __bforget() argument
1187 clear_buffer_dirty(bh); in __bforget()
1188 if (bh->b_assoc_map) { in __bforget()
1189 struct address_space *buffer_mapping = bh->b_page->mapping; in __bforget()
1192 list_del_init(&bh->b_assoc_buffers); in __bforget()
1193 bh->b_assoc_map = NULL; in __bforget()
1196 __brelse(bh); in __bforget()
1200 static struct buffer_head *__bread_slow(struct buffer_head *bh) in __bread_slow() argument
1202 lock_buffer(bh); in __bread_slow()
1203 if (buffer_uptodate(bh)) { in __bread_slow()
1204 unlock_buffer(bh); in __bread_slow()
1205 return bh; in __bread_slow()
1207 get_bh(bh); in __bread_slow()
1208 bh->b_end_io = end_buffer_read_sync; in __bread_slow()
1209 submit_bh(REQ_OP_READ, 0, bh); in __bread_slow()
1210 wait_on_buffer(bh); in __bread_slow()
1211 if (buffer_uptodate(bh)) in __bread_slow()
1212 return bh; in __bread_slow()
1214 brelse(bh); in __bread_slow()
1260 static void bh_lru_install(struct buffer_head *bh) in bh_lru_install() argument
1262 struct buffer_head *evictee = bh; in bh_lru_install()
1281 if (evictee == bh) { in bh_lru_install()
1287 get_bh(bh); in bh_lru_install()
1304 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]); in lookup_bh_lru() local
1306 if (bh && bh->b_blocknr == block && bh->b_bdev == bdev && in lookup_bh_lru()
1307 bh->b_size == size) { in lookup_bh_lru()
1314 __this_cpu_write(bh_lrus.bhs[0], bh); in lookup_bh_lru()
1316 get_bh(bh); in lookup_bh_lru()
1317 ret = bh; in lookup_bh_lru()
1333 struct buffer_head *bh = lookup_bh_lru(bdev, block, size); in __find_get_block() local
1335 if (bh == NULL) { in __find_get_block()
1337 bh = __find_get_block_slow(bdev, block); in __find_get_block()
1338 if (bh) in __find_get_block()
1339 bh_lru_install(bh); in __find_get_block()
1341 touch_buffer(bh); in __find_get_block()
1343 return bh; in __find_get_block()
1359 struct buffer_head *bh = __find_get_block(bdev, block, size); in __getblk_gfp() local
1362 if (bh == NULL) in __getblk_gfp()
1363 bh = __getblk_slow(bdev, block, size, gfp); in __getblk_gfp()
1364 return bh; in __getblk_gfp()
1373 struct buffer_head *bh = __getblk(bdev, block, size); in __breadahead() local
1374 if (likely(bh)) { in __breadahead()
1375 ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh); in __breadahead()
1376 brelse(bh); in __breadahead()
1384 struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp); in __breadahead_gfp() local
1385 if (likely(bh)) { in __breadahead_gfp()
1386 ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh); in __breadahead_gfp()
1387 brelse(bh); in __breadahead_gfp()
1408 struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp); in __bread_gfp() local
1410 if (likely(bh) && !buffer_uptodate(bh)) in __bread_gfp()
1411 bh = __bread_slow(bh); in __bread_gfp()
1412 return bh; in __bread_gfp()
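The lookup trio divides the work: __find_get_block() only checks the per-CPU bh LRU and the page cache, __getblk() additionally creates the buffer if needed, and __bread() additionally reads it from disk. A sketch of choosing between the last two; fill_two_blocks() is hypothetical:

    #include <linux/buffer_head.h>

    static void fill_two_blocks(struct block_device *bdev, sector_t blocknr,
                                unsigned blocksize)
    {
            /* contents matter: read from disk (NULL on I/O error) */
            struct buffer_head *old_bh = __bread(bdev, blocknr, blocksize);

            /* block will be fully overwritten: no read I/O needed */
            struct buffer_head *new_bh = __getblk(bdev, blocknr + 1, blocksize);

            if (new_bh) {
                    lock_buffer(new_bh);
                    memset(new_bh->b_data, 0, new_bh->b_size);
                    set_buffer_uptodate(new_bh);    /* we supplied the contents */
                    mark_buffer_dirty(new_bh);
                    unlock_buffer(new_bh);
                    brelse(new_bh);
            }
            brelse(old_bh);     /* brelse(NULL) is a no-op */
    }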
1471 void set_bh_page(struct buffer_head *bh, in set_bh_page() argument
1474 bh->b_page = page; in set_bh_page()
1480 bh->b_data = (char *)(0 + offset); in set_bh_page()
1482 bh->b_data = page_address(page) + offset; in set_bh_page()
1495 static void discard_buffer(struct buffer_head * bh) in discard_buffer() argument
1499 lock_buffer(bh); in discard_buffer()
1500 clear_buffer_dirty(bh); in discard_buffer()
1501 bh->b_bdev = NULL; in discard_buffer()
1502 b_state = bh->b_state; in discard_buffer()
1504 b_state_old = cmpxchg(&bh->b_state, b_state, in discard_buffer()
1510 unlock_buffer(bh); in discard_buffer()
1532 struct buffer_head *head, *bh, *next; in block_invalidatepage() local
1546 bh = head; in block_invalidatepage()
1548 unsigned int next_off = curr_off + bh->b_size; in block_invalidatepage()
1549 next = bh->b_this_page; in block_invalidatepage()
1561 discard_buffer(bh); in block_invalidatepage()
1563 bh = next; in block_invalidatepage()
1564 } while (bh != head); in block_invalidatepage()
1587 struct buffer_head *bh, *head, *tail; in create_empty_buffers() local
1590 bh = head; in create_empty_buffers()
1592 bh->b_state |= b_state; in create_empty_buffers()
1593 tail = bh; in create_empty_buffers()
1594 bh = bh->b_this_page; in create_empty_buffers()
1595 } while (bh); in create_empty_buffers()
1600 bh = head; in create_empty_buffers()
1603 set_buffer_dirty(bh); in create_empty_buffers()
1605 set_buffer_uptodate(bh); in create_empty_buffers()
1606 bh = bh->b_this_page; in create_empty_buffers()
1607 } while (bh != head); in create_empty_buffers()
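create_empty_buffers() attaches a circular ring of unmapped buffers to a page; most block_* helpers in this file create it lazily. A sketch of the usual guard, assuming the page is locked; get_page_buffers() is hypothetical:

    #include <linux/buffer_head.h>

    static struct buffer_head *get_page_buffers(struct page *page,
                                                unsigned blocksize)
    {
            BUG_ON(!PageLocked(page));
            if (!page_has_buffers(page))
                    create_empty_buffers(page, blocksize, 0);
            return page_buffers(page);  /* head of the b_this_page ring */
    }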
1642 struct buffer_head *bh; in clean_bdev_aliases() local
1664 bh = head; in clean_bdev_aliases()
1666 if (!buffer_mapped(bh) || (bh->b_blocknr < block)) in clean_bdev_aliases()
1668 if (bh->b_blocknr >= block + len) in clean_bdev_aliases()
1670 clear_buffer_dirty(bh); in clean_bdev_aliases()
1671 wait_on_buffer(bh); in clean_bdev_aliases()
1672 clear_buffer_req(bh); in clean_bdev_aliases()
1674 bh = bh->b_this_page; in clean_bdev_aliases()
1675 } while (bh != head); in clean_bdev_aliases()
1747 struct buffer_head *bh, *head; in __block_write_full_page() local
1765 bh = head; in __block_write_full_page()
1766 blocksize = bh->b_size; in __block_write_full_page()
1786 clear_buffer_dirty(bh); in __block_write_full_page()
1787 set_buffer_uptodate(bh); in __block_write_full_page()
1788 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) && in __block_write_full_page()
1789 buffer_dirty(bh)) { in __block_write_full_page()
1790 WARN_ON(bh->b_size != blocksize); in __block_write_full_page()
1791 err = get_block(inode, block, bh, 1); in __block_write_full_page()
1794 clear_buffer_delay(bh); in __block_write_full_page()
1795 if (buffer_new(bh)) { in __block_write_full_page()
1797 clear_buffer_new(bh); in __block_write_full_page()
1798 clean_bdev_bh_alias(bh); in __block_write_full_page()
1801 bh = bh->b_this_page; in __block_write_full_page()
1803 } while (bh != head); in __block_write_full_page()
1806 if (!buffer_mapped(bh)) in __block_write_full_page()
1816 lock_buffer(bh); in __block_write_full_page()
1817 } else if (!trylock_buffer(bh)) { in __block_write_full_page()
1821 if (test_clear_buffer_dirty(bh)) { in __block_write_full_page()
1822 mark_buffer_async_write_endio(bh, handler); in __block_write_full_page()
1824 unlock_buffer(bh); in __block_write_full_page()
1826 } while ((bh = bh->b_this_page) != head); in __block_write_full_page()
1836 struct buffer_head *next = bh->b_this_page; in __block_write_full_page()
1837 if (buffer_async_write(bh)) { in __block_write_full_page()
1838 submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, in __block_write_full_page()
1842 bh = next; in __block_write_full_page()
1843 } while (bh != head); in __block_write_full_page()
1870 bh = head; in __block_write_full_page()
1873 if (buffer_mapped(bh) && buffer_dirty(bh) && in __block_write_full_page()
1874 !buffer_delay(bh)) { in __block_write_full_page()
1875 lock_buffer(bh); in __block_write_full_page()
1876 mark_buffer_async_write_endio(bh, handler); in __block_write_full_page()
1882 clear_buffer_dirty(bh); in __block_write_full_page()
1884 } while ((bh = bh->b_this_page) != head); in __block_write_full_page()
1890 struct buffer_head *next = bh->b_this_page; in __block_write_full_page()
1891 if (buffer_async_write(bh)) { in __block_write_full_page()
1892 clear_buffer_dirty(bh); in __block_write_full_page()
1893 submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, in __block_write_full_page()
1897 bh = next; in __block_write_full_page()
1898 } while (bh != head); in __block_write_full_page()
1912 struct buffer_head *head, *bh; in page_zero_new_buffers() local
1918 bh = head = page_buffers(page); in page_zero_new_buffers()
1921 block_end = block_start + bh->b_size; in page_zero_new_buffers()
1923 if (buffer_new(bh)) { in page_zero_new_buffers()
1932 set_buffer_uptodate(bh); in page_zero_new_buffers()
1935 clear_buffer_new(bh); in page_zero_new_buffers()
1936 mark_buffer_dirty(bh); in page_zero_new_buffers()
1941 bh = bh->b_this_page; in page_zero_new_buffers()
1942 } while (bh != head); in page_zero_new_buffers()
1947 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh, in iomap_to_bh() argument
1952 bh->b_bdev = iomap->bdev; in iomap_to_bh()
1969 if (!buffer_uptodate(bh) || in iomap_to_bh()
1971 set_buffer_new(bh); in iomap_to_bh()
1974 if (!buffer_uptodate(bh) || in iomap_to_bh()
1976 set_buffer_new(bh); in iomap_to_bh()
1977 set_buffer_uptodate(bh); in iomap_to_bh()
1978 set_buffer_mapped(bh); in iomap_to_bh()
1979 set_buffer_delay(bh); in iomap_to_bh()
1987 set_buffer_new(bh); in iomap_to_bh()
1988 set_buffer_unwritten(bh); in iomap_to_bh()
1993 set_buffer_new(bh); in iomap_to_bh()
1994 bh->b_blocknr = (iomap->addr + offset - iomap->offset) >> in iomap_to_bh()
1996 set_buffer_mapped(bh); in iomap_to_bh()
2011 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; in __block_write_begin_int() local
2024 for(bh = head, block_start = 0; bh != head || !block_start; in __block_write_begin_int()
2025 block++, block_start=block_end, bh = bh->b_this_page) { in __block_write_begin_int()
2029 if (!buffer_uptodate(bh)) in __block_write_begin_int()
2030 set_buffer_uptodate(bh); in __block_write_begin_int()
2034 if (buffer_new(bh)) in __block_write_begin_int()
2035 clear_buffer_new(bh); in __block_write_begin_int()
2036 if (!buffer_mapped(bh)) { in __block_write_begin_int()
2037 WARN_ON(bh->b_size != blocksize); in __block_write_begin_int()
2039 err = get_block(inode, block, bh, 1); in __block_write_begin_int()
2043 iomap_to_bh(inode, block, bh, iomap); in __block_write_begin_int()
2046 if (buffer_new(bh)) { in __block_write_begin_int()
2047 clean_bdev_bh_alias(bh); in __block_write_begin_int()
2049 clear_buffer_new(bh); in __block_write_begin_int()
2050 set_buffer_uptodate(bh); in __block_write_begin_int()
2051 mark_buffer_dirty(bh); in __block_write_begin_int()
2062 if (!buffer_uptodate(bh)) in __block_write_begin_int()
2063 set_buffer_uptodate(bh); in __block_write_begin_int()
2066 if (!buffer_uptodate(bh) && !buffer_delay(bh) && in __block_write_begin_int()
2067 !buffer_unwritten(bh) && in __block_write_begin_int()
2069 ll_rw_block(REQ_OP_READ, 0, 1, &bh); in __block_write_begin_int()
2070 *wait_bh++=bh; in __block_write_begin_int()
2099 struct buffer_head *bh, *head; in __block_commit_write() local
2101 bh = head = page_buffers(page); in __block_commit_write()
2102 blocksize = bh->b_size; in __block_commit_write()
2108 if (!buffer_uptodate(bh)) in __block_commit_write()
2111 set_buffer_uptodate(bh); in __block_commit_write()
2112 mark_buffer_dirty(bh); in __block_commit_write()
2114 clear_buffer_new(bh); in __block_commit_write()
2117 bh = bh->b_this_page; in __block_commit_write()
2118 } while (bh != head); in __block_commit_write()
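__block_commit_write() is the tail half of the buffered-write path: __block_write_begin() maps and reads the affected buffers, the caller copies in the data, and the commit marks the buffers uptodate and dirty. Filesystems normally reach both through the exported wrappers; myfs_write_begin() and myfs_get_block() are hypothetical (myfs_get_block is sketched after block_read_full_page() below):

    #include <linux/buffer_head.h>

    static int myfs_get_block(struct inode *, sector_t,
                              struct buffer_head *, int);

    static int myfs_write_begin(struct file *file, struct address_space *mapping,
                                loff_t pos, unsigned len, unsigned flags,
                                struct page **pagep, void **fsdata)
    {
            return block_write_begin(mapping, pos, len, flags, pagep,
                                     myfs_get_block);
    }

    /* .write_end can simply be generic_write_end(), which funnels into
     * block_write_end() and __block_commit_write(). */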
2247 struct buffer_head *bh, *head; in block_is_partially_uptodate() local
2260 bh = head; in block_is_partially_uptodate()
2265 if (!buffer_uptodate(bh)) { in block_is_partially_uptodate()
2273 bh = bh->b_this_page; in block_is_partially_uptodate()
2274 } while (bh != head); in block_is_partially_uptodate()
2291 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; in block_read_full_page() local
2302 bh = head; in block_read_full_page()
2307 if (buffer_uptodate(bh)) in block_read_full_page()
2310 if (!buffer_mapped(bh)) { in block_read_full_page()
2315 WARN_ON(bh->b_size != blocksize); in block_read_full_page()
2316 err = get_block(inode, iblock, bh, 0); in block_read_full_page()
2320 if (!buffer_mapped(bh)) { in block_read_full_page()
2323 set_buffer_uptodate(bh); in block_read_full_page()
2330 if (buffer_uptodate(bh)) in block_read_full_page()
2333 arr[nr++] = bh; in block_read_full_page()
2334 } while (i++, iblock++, (bh = bh->b_this_page) != head); in block_read_full_page()
2352 bh = arr[i]; in block_read_full_page()
2353 lock_buffer(bh); in block_read_full_page()
2354 mark_buffer_async_read(bh); in block_read_full_page()
2363 bh = arr[i]; in block_read_full_page()
2364 if (buffer_uptodate(bh)) in block_read_full_page()
2365 end_buffer_async_read(bh, 1); in block_read_full_page()
2367 submit_bh(REQ_OP_READ, 0, bh); in block_read_full_page()
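block_read_full_page() maps each buffer on the page via the callback and submits read I/O only for those not already uptodate. Wiring it into .readpage takes one get_block_t; myfs_readpage(), myfs_get_block() and myfs_ino_to_blk() are hypothetical:

    #include <linux/buffer_head.h>

    static int myfs_get_block(struct inode *inode, sector_t iblock,
                              struct buffer_head *bh_result, int create)
    {
            sector_t phys = myfs_ino_to_blk(inode, iblock); /* fs-specific map */

            if (phys) {
                    map_bh(bh_result, inode->i_sb, phys);   /* mapped + blocknr */
                    return 0;
            }
            return create ? -ENOSPC : 0;    /* hole: leave the bh unmapped */
    }

    static int myfs_readpage(struct file *file, struct page *page)
    {
            return block_read_full_page(page, myfs_get_block);
    }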
2571 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate) in end_buffer_read_nobh() argument
2573 __end_buffer_read_notouch(bh, uptodate); in end_buffer_read_nobh()
2583 struct buffer_head *bh; in attach_nobh_buffers() local
2588 bh = head; in attach_nobh_buffers()
2591 set_buffer_dirty(bh); in attach_nobh_buffers()
2592 if (!bh->b_this_page) in attach_nobh_buffers()
2593 bh->b_this_page = head; in attach_nobh_buffers()
2594 bh = bh->b_this_page; in attach_nobh_buffers()
2595 } while (bh != head); in attach_nobh_buffers()
2613 struct buffer_head *head, *bh; in nobh_write_begin() local
2666 for (block_start = 0, block_in_page = 0, bh = head; in nobh_write_begin()
2668 block_in_page++, block_start += blocksize, bh = bh->b_this_page) { in nobh_write_begin()
2672 bh->b_state = 0; in nobh_write_begin()
2677 bh, create); in nobh_write_begin()
2680 if (!buffer_mapped(bh)) in nobh_write_begin()
2682 if (buffer_new(bh)) in nobh_write_begin()
2683 clean_bdev_bh_alias(bh); in nobh_write_begin()
2685 set_buffer_uptodate(bh); in nobh_write_begin()
2688 if (buffer_new(bh) || !buffer_mapped(bh)) { in nobh_write_begin()
2693 if (buffer_uptodate(bh)) in nobh_write_begin()
2696 lock_buffer(bh); in nobh_write_begin()
2697 bh->b_end_io = end_buffer_read_nobh; in nobh_write_begin()
2698 submit_bh(REQ_OP_READ, 0, bh); in nobh_write_begin()
2709 for (bh = head; bh; bh = bh->b_this_page) { in nobh_write_begin()
2710 wait_on_buffer(bh); in nobh_write_begin()
2711 if (!buffer_uptodate(bh)) in nobh_write_begin()
2752 struct buffer_head *bh; in nobh_write_end() local
2772 bh = head; in nobh_write_end()
2774 free_buffer_head(bh); in nobh_write_end()
2911 struct buffer_head *bh; in block_truncate_page() local
2933 bh = page_buffers(page); in block_truncate_page()
2936 bh = bh->b_this_page; in block_truncate_page()
2942 if (!buffer_mapped(bh)) { in block_truncate_page()
2943 WARN_ON(bh->b_size != blocksize); in block_truncate_page()
2944 err = get_block(inode, iblock, bh, 0); in block_truncate_page()
2948 if (!buffer_mapped(bh)) in block_truncate_page()
2954 set_buffer_uptodate(bh); in block_truncate_page()
2956 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { in block_truncate_page()
2958 ll_rw_block(REQ_OP_READ, 0, 1, &bh); in block_truncate_page()
2959 wait_on_buffer(bh); in block_truncate_page()
2961 if (!buffer_uptodate(bh)) in block_truncate_page()
2966 mark_buffer_dirty(bh); in block_truncate_page()
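block_truncate_page() zeroes the tail of the partial block that the new EOF lands in, so stale data cannot reappear if the file later grows. A sketch of the call site in a truncate path; myfs_truncate() is hypothetical and myfs_get_block is as sketched above:

    #include <linux/buffer_head.h>
    #include <linux/mm.h>

    static int myfs_truncate(struct inode *inode, loff_t newsize)
    {
            int err = block_truncate_page(inode->i_mapping, newsize,
                                          myfs_get_block);
            if (err)
                    return err;
            truncate_setsize(inode, newsize);   /* i_size + pagecache trim */
            /* ... release blocks past newsize (fs-specific) ... */
            return 0;
    }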
3028 struct buffer_head *bh = bio->bi_private; in end_bio_bh_io_sync() local
3031 set_bit(BH_Quiet, &bh->b_state); in end_bio_bh_io_sync()
3033 bh->b_end_io(bh, !bio->bi_status); in end_bio_bh_io_sync()
3037 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, in submit_bh_wbc() argument
3042 BUG_ON(!buffer_locked(bh)); in submit_bh_wbc()
3043 BUG_ON(!buffer_mapped(bh)); in submit_bh_wbc()
3044 BUG_ON(!bh->b_end_io); in submit_bh_wbc()
3045 BUG_ON(buffer_delay(bh)); in submit_bh_wbc()
3046 BUG_ON(buffer_unwritten(bh)); in submit_bh_wbc()
3051 if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE)) in submit_bh_wbc()
3052 clear_buffer_write_io_error(bh); in submit_bh_wbc()
3056 fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO); in submit_bh_wbc()
3058 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); in submit_bh_wbc()
3059 bio_set_dev(bio, bh->b_bdev); in submit_bh_wbc()
3062 bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); in submit_bh_wbc()
3063 BUG_ON(bio->bi_iter.bi_size != bh->b_size); in submit_bh_wbc()
3066 bio->bi_private = bh; in submit_bh_wbc()
3068 if (buffer_meta(bh)) in submit_bh_wbc()
3070 if (buffer_prio(bh)) in submit_bh_wbc()
3079 wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size); in submit_bh_wbc()
3086 int submit_bh(int op, int op_flags, struct buffer_head *bh) in submit_bh() argument
3088 return submit_bh_wbc(op, op_flags, bh, 0, NULL); in submit_bh()
3123 struct buffer_head *bh = bhs[i]; in ll_rw_block() local
3125 if (!trylock_buffer(bh)) in ll_rw_block()
3128 if (test_clear_buffer_dirty(bh)) { in ll_rw_block()
3129 bh->b_end_io = end_buffer_write_sync; in ll_rw_block()
3130 get_bh(bh); in ll_rw_block()
3131 submit_bh(op, op_flags, bh); in ll_rw_block()
3135 if (!buffer_uptodate(bh)) { in ll_rw_block()
3136 bh->b_end_io = end_buffer_read_sync; in ll_rw_block()
3137 get_bh(bh); in ll_rw_block()
3138 submit_bh(op, op_flags, bh); in ll_rw_block()
3142 unlock_buffer(bh); in ll_rw_block()
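ll_rw_block() is best-effort: buffers it cannot trylock, or that are already clean (for writes) or uptodate (for reads), are silently skipped, so it suits readahead hints but not integrity writes. A batched sketch mirroring __breadahead() above; readahead_blocks() is hypothetical:

    #include <linux/buffer_head.h>

    static void readahead_blocks(struct block_device *bdev, sector_t start,
                                 int nr, unsigned blocksize)
    {
            struct buffer_head *bhs[16];
            int i, got = 0;

            for (i = 0; i < nr && i < 16; i++) {
                    bhs[got] = __getblk(bdev, start + i, blocksize);
                    if (bhs[got])
                            got++;
            }
            ll_rw_block(REQ_OP_READ, REQ_RAHEAD, got, bhs);
            for (i = 0; i < got; i++)
                    brelse(bhs[i]);     /* ll_rw_block holds its own refs */
    }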
3147 void write_dirty_buffer(struct buffer_head *bh, int op_flags) in write_dirty_buffer() argument
3149 lock_buffer(bh); in write_dirty_buffer()
3150 if (!test_clear_buffer_dirty(bh)) { in write_dirty_buffer()
3151 unlock_buffer(bh); in write_dirty_buffer()
3154 bh->b_end_io = end_buffer_write_sync; in write_dirty_buffer()
3155 get_bh(bh); in write_dirty_buffer()
3156 submit_bh(REQ_OP_WRITE, op_flags, bh); in write_dirty_buffer()
3165 int __sync_dirty_buffer(struct buffer_head *bh, int op_flags) in __sync_dirty_buffer() argument
3169 WARN_ON(atomic_read(&bh->b_count) < 1); in __sync_dirty_buffer()
3170 lock_buffer(bh); in __sync_dirty_buffer()
3171 if (test_clear_buffer_dirty(bh)) { in __sync_dirty_buffer()
3176 if (!buffer_mapped(bh)) { in __sync_dirty_buffer()
3177 unlock_buffer(bh); in __sync_dirty_buffer()
3181 get_bh(bh); in __sync_dirty_buffer()
3182 bh->b_end_io = end_buffer_write_sync; in __sync_dirty_buffer()
3183 ret = submit_bh(REQ_OP_WRITE, op_flags, bh); in __sync_dirty_buffer()
3184 wait_on_buffer(bh); in __sync_dirty_buffer()
3185 if (!ret && !buffer_uptodate(bh)) in __sync_dirty_buffer()
3188 unlock_buffer(bh); in __sync_dirty_buffer()
3194 int sync_dirty_buffer(struct buffer_head *bh) in sync_dirty_buffer() argument
3196 return __sync_dirty_buffer(bh, REQ_SYNC); in sync_dirty_buffer()
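sync_dirty_buffer() is the one-call version of the lock/clear-dirty/submit/wait sequence, issued with REQ_SYNC. Typical use is a metadata block that must be durable before the caller proceeds; flush_sb_block() is hypothetical:

    #include <linux/buffer_head.h>

    static int flush_sb_block(struct buffer_head *sb_bh)
    {
            mark_buffer_dirty(sb_bh);
            return sync_dirty_buffer(sb_bh);    /* 0 on success, -EIO on error */
    }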
3220 static inline int buffer_busy(struct buffer_head *bh) in buffer_busy() argument
3222 return atomic_read(&bh->b_count) | in buffer_busy()
3223 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); in buffer_busy()
3230 struct buffer_head *bh; in drop_buffers() local
3232 bh = head; in drop_buffers()
3234 if (buffer_busy(bh)) in drop_buffers()
3236 bh = bh->b_this_page; in drop_buffers()
3237 } while (bh != head); in drop_buffers()
3240 struct buffer_head *next = bh->b_this_page; in drop_buffers()
3242 if (bh->b_assoc_map) in drop_buffers()
3243 __remove_assoc_queue(bh); in drop_buffers()
3244 bh = next; in drop_buffers()
3245 } while (bh != head); in drop_buffers()
3290 struct buffer_head *bh = buffers_to_free; in try_to_free_buffers() local
3293 struct buffer_head *next = bh->b_this_page; in try_to_free_buffers()
3294 free_buffer_head(bh); in try_to_free_buffers()
3295 bh = next; in try_to_free_buffers()
3296 } while (bh != buffers_to_free); in try_to_free_buffers()
3377 void free_buffer_head(struct buffer_head *bh) in free_buffer_head() argument
3379 BUG_ON(!list_empty(&bh->b_assoc_buffers)); in free_buffer_head()
3380 kmem_cache_free(bh_cachep, bh); in free_buffer_head()
3409 int bh_uptodate_or_lock(struct buffer_head *bh) in bh_uptodate_or_lock() argument
3411 if (!buffer_uptodate(bh)) { in bh_uptodate_or_lock()
3412 lock_buffer(bh); in bh_uptodate_or_lock()
3413 if (!buffer_uptodate(bh)) in bh_uptodate_or_lock()
3415 unlock_buffer(bh); in bh_uptodate_or_lock()
3427 int bh_submit_read(struct buffer_head *bh) in bh_submit_read() argument
3429 BUG_ON(!buffer_locked(bh)); in bh_submit_read()
3431 if (buffer_uptodate(bh)) { in bh_submit_read()
3432 unlock_buffer(bh); in bh_submit_read()
3436 get_bh(bh); in bh_submit_read()
3437 bh->b_end_io = end_buffer_read_sync; in bh_submit_read()
3438 submit_bh(REQ_OP_READ, 0, bh); in bh_submit_read()
3439 wait_on_buffer(bh); in bh_submit_read()
3440 if (buffer_uptodate(bh)) in bh_submit_read()
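bh_uptodate_or_lock() and bh_submit_read() are designed as a pair: the first either reports the buffer already uptodate or returns with it locked, ready for the second to submit the read and wait. A sketch of the combined fast path; read_block_once() is hypothetical:

    #include <linux/buffer_head.h>

    static int read_block_once(struct buffer_head *bh)
    {
            if (bh_uptodate_or_lock(bh))
                    return 0;           /* already uptodate, no I/O */
            return bh_submit_read(bh);  /* submits, waits; 0 or -EIO */
    }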