Lines Matching refs:mapping (uses of the identifier "mapping" in fs/buffer.c; each entry shows the source line number, the matched code, and the enclosing function, with "argument"/"local" marking parameter and local-variable declarations)
324 fscrypt_inode_uses_fs_layer_crypto(bh->b_page->mapping->host)) { in end_buffer_async_read_io()
541 int sync_mapping_buffers(struct address_space *mapping) in sync_mapping_buffers() argument
543 struct address_space *buffer_mapping = mapping->private_data; in sync_mapping_buffers()
545 if (buffer_mapping == NULL || list_empty(&mapping->private_list)) in sync_mapping_buffers()
549 &mapping->private_list); in sync_mapping_buffers()
572 struct address_space *mapping = inode->i_mapping; in mark_buffer_dirty_inode() local
573 struct address_space *buffer_mapping = bh->b_page->mapping; in mark_buffer_dirty_inode()
576 if (!mapping->private_data) { in mark_buffer_dirty_inode()
577 mapping->private_data = buffer_mapping; in mark_buffer_dirty_inode()
579 BUG_ON(mapping->private_data != buffer_mapping); in mark_buffer_dirty_inode()
584 &mapping->private_list); in mark_buffer_dirty_inode()
585 bh->b_assoc_map = mapping; in mark_buffer_dirty_inode()
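The two groups above, sync_mapping_buffers() and mark_buffer_dirty_inode(), are the per-inode buffer bookkeeping pair: mark_buffer_dirty_inode() dirties a metadata buffer and links it onto the owning inode's mapping->private_list, and sync_mapping_buffers() walks that list at fsync time. A minimal sketch of how a filesystem might use them; the myfs_* names, the index-block update, and its layout are hypothetical.

#include <linux/fs.h>
#include <linux/buffer_head.h>

/*
 * Hypothetical helper: update an on-disk index block that belongs to
 * @inode but is not in the inode's own page cache (e.g. an indirect
 * block).  mark_buffer_dirty_inode() marks the buffer dirty and puts
 * it on the inode's mapping->private_list so fsync can find it.
 */
static int myfs_update_index_block(struct inode *inode, sector_t blocknr,
				   unsigned int offset, __le32 value)
{
	struct buffer_head *bh;

	bh = sb_bread(inode->i_sb, blocknr);
	if (!bh)
		return -EIO;

	lock_buffer(bh);
	*(__le32 *)(bh->b_data + offset) = value;
	unlock_buffer(bh);

	mark_buffer_dirty_inode(bh, inode);	/* attach to private_list */
	brelse(bh);
	return 0;
}

/* Hypothetical fsync: flush data pages, then the associated metadata
 * buffers collected above (datasync handling omitted for brevity). */
static int myfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err, err2;

	err = file_write_and_wait_range(file, start, end);
	err2 = sync_mapping_buffers(inode->i_mapping);
	return err ? err : err2;
}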
600 void __set_page_dirty(struct page *page, struct address_space *mapping, in __set_page_dirty() argument
605 xa_lock_irqsave(&mapping->i_pages, flags); in __set_page_dirty()
606 if (page->mapping) { /* Race with truncate? */ in __set_page_dirty()
608 account_page_dirtied(page, mapping); in __set_page_dirty()
609 __xa_set_mark(&mapping->i_pages, page_index(page), in __set_page_dirty()
612 xa_unlock_irqrestore(&mapping->i_pages, flags); in __set_page_dirty()
644 struct address_space *mapping = page_mapping(page); in __set_page_dirty_buffers() local
646 if (unlikely(!mapping)) in __set_page_dirty_buffers()
649 spin_lock(&mapping->private_lock); in __set_page_dirty_buffers()
665 spin_unlock(&mapping->private_lock); in __set_page_dirty_buffers()
668 __set_page_dirty(page, mapping, 1); in __set_page_dirty_buffers()
673 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); in __set_page_dirty_buffers()
702 struct address_space *mapping; in fsync_buffers_list() local
712 mapping = bh->b_assoc_map; in fsync_buffers_list()
719 bh->b_assoc_map = mapping; in fsync_buffers_list()
751 mapping = bh->b_assoc_map; in fsync_buffers_list()
758 &mapping->private_list); in fsync_buffers_list()
759 bh->b_assoc_map = mapping; in fsync_buffers_list()
789 struct address_space *mapping = &inode->i_data; in invalidate_inode_buffers() local
790 struct list_head *list = &mapping->private_list; in invalidate_inode_buffers()
791 struct address_space *buffer_mapping = mapping->private_data; in invalidate_inode_buffers()
812 struct address_space *mapping = &inode->i_data; in remove_inode_buffers() local
813 struct list_head *list = &mapping->private_list; in remove_inode_buffers()
814 struct address_space *buffer_mapping = mapping->private_data; in remove_inode_buffers()
1131 struct address_space *mapping = NULL; in mark_buffer_dirty() local
1135 mapping = page_mapping(page); in mark_buffer_dirty()
1136 if (mapping) in mark_buffer_dirty()
1137 __set_page_dirty(page, mapping, 0); in mark_buffer_dirty()
1140 if (mapping) in mark_buffer_dirty()
1141 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); in mark_buffer_dirty()
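mark_buffer_dirty() above is the plain variant, used for metadata buffers that are not tied to a particular inode, the superblock being the classic case. A hedged sketch of that pattern; myfs_sync_super() and its arguments are hypothetical.

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Hypothetical: @sbh holds the on-disk superblock, already modified in memory. */
static void myfs_sync_super(struct super_block *sb, struct buffer_head *sbh,
			    int wait)
{
	mark_buffer_dirty(sbh);		/* mark the buffer (and its page) dirty */
	if (wait)
		sync_dirty_buffer(sbh);	/* write it out synchronously */
}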
1152 if (bh->b_page && bh->b_page->mapping) in mark_buffer_write_io_error()
1153 mapping_set_error(bh->b_page->mapping, -EIO); in mark_buffer_write_io_error()
1189 struct address_space *buffer_mapping = bh->b_page->mapping; in __bforget()
1598 spin_lock(&page->mapping->private_lock); in create_empty_buffers()
1610 spin_unlock(&page->mapping->private_lock); in create_empty_buffers()
1887 mapping_set_error(page->mapping, err); in __block_write_full_page()
2006 struct inode *inode = page->mapping->host; in __block_write_begin_int()
2137 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, in block_write_begin() argument
2144 page = grab_cache_page_write_begin(mapping, index, flags); in block_write_begin()
2160 int block_write_end(struct file *file, struct address_space *mapping, in block_write_end() argument
2164 struct inode *inode = mapping->host; in block_write_end()
2196 int generic_write_end(struct file *file, struct address_space *mapping, in generic_write_end() argument
2200 struct inode *inode = mapping->host; in generic_write_end()
2204 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); in generic_write_end()
2289 struct inode *inode = page->mapping->host; in block_read_full_page()
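block_write_begin(), block_write_end()/generic_write_end() and block_read_full_page() are the helpers a simple block-based filesystem wires directly into its address_space_operations; the filesystem's get_block callback is the only fs-specific piece. A minimal sketch under that assumption: everything named myfs_* is hypothetical, and the 1:1 get_block exists only to keep the example self-contained.

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Purely illustrative get_block: file block N maps 1:1 to device block N. */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create)
{
	map_bh(bh_result, inode->i_sb, iblock);
	return 0;
}

static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, flags, pagep,
				 myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,
	.write_begin	= myfs_write_begin,
	.write_end	= generic_write_end,	/* defined at line 2196 above */
};

This mirrors the shape used by simple block-based filesystems in this kernel version, where write_end can be generic_write_end verbatim when no post-write fixup is needed.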
2379 struct address_space *mapping = inode->i_mapping; in generic_cont_expand_simple() local
2388 err = pagecache_write_begin(NULL, mapping, size, 0, in generic_cont_expand_simple()
2393 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata); in generic_cont_expand_simple()
2401 static int cont_expand_zero(struct file *file, struct address_space *mapping, in cont_expand_zero() argument
2404 struct inode *inode = mapping->host; in cont_expand_zero()
2424 err = pagecache_write_begin(file, mapping, curpos, len, 0, in cont_expand_zero()
2429 err = pagecache_write_end(file, mapping, curpos, len, len, in cont_expand_zero()
2436 balance_dirty_pages_ratelimited(mapping); in cont_expand_zero()
2457 err = pagecache_write_begin(file, mapping, curpos, len, 0, in cont_expand_zero()
2462 err = pagecache_write_end(file, mapping, curpos, len, len, in cont_expand_zero()
2477 int cont_write_begin(struct file *file, struct address_space *mapping, in cont_write_begin() argument
2482 struct inode *inode = mapping->host; in cont_write_begin()
2487 err = cont_expand_zero(file, mapping, pos, bytes); in cont_write_begin()
2497 return block_write_begin(mapping, pos, len, flags, pagep, get_block); in cont_write_begin()
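cont_write_begin() (together with cont_expand_zero() above) is the write_begin variant for filesystems that cannot represent holes and must zero-fill on disk up to the write position; the final loff_t pointer refers to the filesystem's record of how far the file is already allocated and zeroed. A hedged sketch with a hypothetical myfs_inode_info whose i_zeroed_size field plays that role.

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Hypothetical per-inode info: i_zeroed_size tracks how far the file has
 * been zero-filled/allocated on disk. */
struct myfs_inode_info {
	loff_t		i_zeroed_size;
	struct inode	vfs_inode;
};

static inline struct myfs_inode_info *MYFS_I(struct inode *inode)
{
	return container_of(inode, struct myfs_inode_info, vfs_inode);
}

extern int myfs_get_block(struct inode *, sector_t,
			  struct buffer_head *, int);	/* hypothetical */

static int myfs_cont_write_begin(struct file *file,
				 struct address_space *mapping,
				 loff_t pos, unsigned len, unsigned flags,
				 struct page **pagep, void **fsdata)
{
	/* cont_write_begin() zero-fills from i_zeroed_size up to @pos first */
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block,
				&MYFS_I(mapping->host)->i_zeroed_size);
}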
2503 struct inode *inode = page->mapping->host; in block_commit_write()
2538 if ((page->mapping != inode->i_mapping) || in block_page_mkwrite()
2587 spin_lock(&page->mapping->private_lock); in attach_nobh_buffers()
2597 spin_unlock(&page->mapping->private_lock); in attach_nobh_buffers()
2605 int nobh_write_begin(struct address_space *mapping, in nobh_write_begin() argument
2610 struct inode *inode = mapping->host; in nobh_write_begin()
2628 page = grab_cache_page_write_begin(mapping, index, flags); in nobh_write_begin()
2746 int nobh_write_end(struct file *file, struct address_space *mapping, in nobh_write_end() argument
2750 struct inode *inode = page->mapping->host; in nobh_write_end()
2758 return generic_write_end(file, mapping, pos, len, in nobh_write_end()
2789 struct inode * const inode = page->mapping->host; in nobh_writepage()
2823 int nobh_truncate_page(struct address_space *mapping, in nobh_truncate_page() argument
2831 struct inode *inode = mapping->host; in nobh_truncate_page()
2846 page = grab_cache_page(mapping, index); in nobh_truncate_page()
2855 return block_truncate_page(mapping, from, get_block); in nobh_truncate_page()
2876 err = mapping->a_ops->readpage(NULL, page); in nobh_truncate_page()
2901 int block_truncate_page(struct address_space *mapping, in block_truncate_page() argument
2909 struct inode *inode = mapping->host; in block_truncate_page()
2924 page = grab_cache_page(mapping, index); in block_truncate_page()
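block_truncate_page() zeroes the tail of the partial block at the new end of file so stale data is not exposed if the file later grows again. A sketch of where it would sit in a shrink path, assuming a hypothetical myfs_get_block and leaving the actual on-disk block freeing elided.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>

extern int myfs_get_block(struct inode *, sector_t,
			  struct buffer_head *, int);	/* hypothetical */

static int myfs_shrink(struct inode *inode, loff_t newsize)
{
	int err;

	/* zero the partial block at @newsize before cutting i_size */
	err = block_truncate_page(inode->i_mapping, newsize, myfs_get_block);
	if (err)
		return err;

	truncate_setsize(inode, newsize);
	/* ...free the on-disk blocks beyond newsize here... */
	return 0;
}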
2983 struct inode * const inode = page->mapping->host; in block_write_full_page()
3013 sector_t generic_block_bmap(struct address_space *mapping, sector_t block, in generic_block_bmap() argument
3016 struct inode *inode = mapping->host; in generic_block_bmap()
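block_write_full_page() and generic_block_bmap() round out the same pattern: the writepage and bmap address_space operations usually just forward to these helpers with the filesystem's get_block. A sketch, again with a hypothetical myfs_get_block.

#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>

extern int myfs_get_block(struct inode *, sector_t,
			  struct buffer_head *, int);	/* hypothetical */

static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}

static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}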
3255 struct address_space * const mapping = page->mapping; in try_to_free_buffers() local
3263 if (mapping == NULL) { /* can this still happen? */ in try_to_free_buffers()
3268 spin_lock(&mapping->private_lock); in try_to_free_buffers()
3287 spin_unlock(&mapping->private_lock); in try_to_free_buffers()
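Finally, try_to_free_buffers() is what lets reclaim drop a page-cache page that still has buffer heads attached: it detaches and frees them if none are dirty or under I/O. Filesystems without a releasepage hook get this behaviour through try_to_release_page()'s fallback; those that define one typically call it after a filesystem-specific check. A hedged sketch of such a hook, where the PageChecked test is purely illustrative.

#include <linux/fs.h>
#include <linux/buffer_head.h>

static int myfs_releasepage(struct page *page, gfp_t gfp)
{
	if (PageChecked(page))		/* hypothetical fs-specific veto */
		return 0;
	return try_to_free_buffers(page);
}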