Lines Matching +full:data +full:-mapping (matched lines from fs/xfs/xfs_aops.c; the number before each line is its position in that file)

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * Copyright (c) 2016-2018 Christoph Hellwig.
34 * Fast and loose check if this write could update the on-disk inode size.
38 return ioend->io_offset + ioend->io_size > in xfs_ioend_is_append()
39 XFS_I(ioend->io_inode)->i_d.di_size; in xfs_ioend_is_append()
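The two matched lines above are the entire body of this helper. A hedged reconstruction, assuming the v5.10-era layout where the on-disk size lives in i_d.di_size (later kernels renamed it i_disk_size):

static inline bool
xfs_ioend_is_append(struct iomap_ioend *ioend)
{
    /* true if this ioend ends past the current on-disk inode size */
    return ioend->io_offset + ioend->io_size >
        XFS_I(ioend->io_inode)->i_d.di_size;
}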
46 struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount; in xfs_setfilesize_trans_alloc()
50 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp); in xfs_setfilesize_trans_alloc()
54 ioend->io_private = tp; in xfs_setfilesize_trans_alloc()
60 __sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS); in xfs_setfilesize_trans_alloc()
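Read together, these fragments outline the whole helper: reserve a tr_fsyncts transaction, stash it on the ioend for completion time, and hand freeze protection (and the transaction's per-task nofs state) over to the completion thread. A hedged reconstruction for kernels of this vintage:

STATIC int
xfs_setfilesize_trans_alloc(
    struct iomap_ioend *ioend)
{
    struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
    struct xfs_trans *tp;
    int error;

    error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
    if (error)
        return error;

    ioend->io_private = tp;

    /*
     * We may pass freeze protection with a transaction, so tell lockdep
     * we released it.
     */
    __sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
    /*
     * The transaction is handed to the completion thread now, so clear
     * the per-task nofs flag here.
     */
    current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
    return 0;
}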
70 * Update on-disk file size now that data has been written to disk.
91 ip->i_d.di_size = isize; in __xfs_setfilesize()
104 struct xfs_mount *mp = ip->i_mount; in xfs_setfilesize()
108 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp); in xfs_setfilesize()
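Both xfs_setfilesize() and the ioend completion path funnel into __xfs_setfilesize(), which owns the ip->i_d.di_size store shown above: take the ILOCK, recompute the new EOF under the lock (a racing truncate may have made the update unnecessary), then log and commit the inode. A hedged reconstruction:

STATIC int
__xfs_setfilesize(
    struct xfs_inode *ip,
    struct xfs_trans *tp,
    xfs_off_t offset,
    size_t size)
{
    xfs_fsize_t isize;

    xfs_ilock(ip, XFS_ILOCK_EXCL);
    isize = xfs_new_eof(ip, offset + size);
    if (!isize) {
        /* raced with truncate: nothing to update */
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        xfs_trans_cancel(tp);
        return 0;
    }

    trace_xfs_setfilesize(ip, offset, size);

    ip->i_d.di_size = isize;
    xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
    xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

    return xfs_trans_commit(tp);
}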
120 struct xfs_inode *ip = XFS_I(ioend->io_inode); in xfs_setfilesize_ioend()
121 struct xfs_trans *tp = ioend->io_private; in xfs_setfilesize_ioend()
129 __sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS); in xfs_setfilesize_ioend()
137 return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size); in xfs_setfilesize_ioend()
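The completion-side wrapper reclaims the state that xfs_setfilesize_trans_alloc() handed off at submission time, then commits or cancels. A hedged reconstruction for kernels of this vintage:

STATIC int
xfs_setfilesize_ioend(
    struct iomap_ioend *ioend,
    int error)
{
    struct xfs_inode *ip = XFS_I(ioend->io_inode);
    struct xfs_trans *tp = ioend->io_private;

    /*
     * The transaction may have been allocated in the I/O submission
     * thread, so mark ourselves as being in a transaction manually.
     * Similarly for freeze protection.
     */
    current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
    __sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

    /* abort the size update if there was an I/O error */
    if (error) {
        xfs_trans_cancel(tp);
        return error;
    }

    return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}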
147 struct xfs_inode *ip = XFS_I(ioend->io_inode); in xfs_end_ioend()
148 struct xfs_mount *mp = ip->i_mount; in xfs_end_ioend()
149 xfs_off_t offset = ioend->io_offset; in xfs_end_ioend()
150 size_t size = ioend->io_size; in xfs_end_ioend()
157 * task-wide nofs context for the following operations. in xfs_end_ioend()
162 * Just clean up the in-memory structures if the fs has been shut down. in xfs_end_ioend()
165 error = -EIO; in xfs_end_ioend()
170 * Clean up all COW blocks and underlying data fork delalloc blocks on in xfs_end_ioend()
176 error = blk_status_to_errno(ioend->io_bio->bi_status); in xfs_end_ioend()
178 if (ioend->io_flags & IOMAP_F_SHARED) { in xfs_end_ioend()
190 if (ioend->io_flags & IOMAP_F_SHARED) in xfs_end_ioend()
192 else if (ioend->io_type == IOMAP_UNWRITTEN) in xfs_end_ioend()
195 ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_private); in xfs_end_ioend()
198 if (ioend->io_private) in xfs_end_ioend()
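Taken together, these fragments cover the whole ioend completion path. A condensed, hedged skeleton of the control flow; the delalloc punch in the error leg follows the stable-series fix that the "underlying data fork delalloc blocks" comment refers to, and its exact arguments vary by release:

    unsigned int nofs_flag = memalloc_nofs_save();

    /* just clean up in-memory state if the fs has been shut down */
    if (XFS_FORCED_SHUTDOWN(mp)) {
        error = -EIO;
        goto done;
    }

    error = blk_status_to_errno(ioend->io_bio->bi_status);
    if (unlikely(error)) {
        /* on error, throw away COW staging and the delalloc blocks */
        if (ioend->io_flags & IOMAP_F_SHARED) {
            xfs_reflink_cancel_cow_range(ip, offset, size, true);
            xfs_bmap_punch_delalloc_range(ip,
                    XFS_B_TO_FSBT(mp, offset),
                    XFS_B_TO_FSB(mp, size));
        }
        goto done;
    }

    /* success: remap COW staging blocks or convert unwritten extents */
    if (ioend->io_flags & IOMAP_F_SHARED)
        error = xfs_reflink_end_cow(ip, offset, size);
    else if (ioend->io_type == IOMAP_UNWRITTEN)
        error = xfs_iomap_write_unwritten(ip, offset, size, false);
    else
        ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_private);

done:
    if (ioend->io_private)
        error = xfs_setfilesize_ioend(ioend, error);
    iomap_finish_ioends(ioend, error);
    memalloc_nofs_restore(nofs_flag);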
215 if (!ioend->io_private) { in xfs_ioend_merge_private()
216 ioend->io_private = next->io_private; in xfs_ioend_merge_private()
217 next->io_private = NULL; in xfs_ioend_merge_private()
219 xfs_setfilesize_ioend(next, -ECANCELED); in xfs_ioend_merge_private()
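Merging keeps at most one size-update transaction per merged chain: an ioend with no transaction adopts its neighbour's, otherwise the surplus transaction is cancelled through the -ECANCELED path. A hedged reconstruction:

static void
xfs_ioend_merge_private(
    struct iomap_ioend *ioend,
    struct iomap_ioend *next)
{
    if (!ioend->io_private) {
        ioend->io_private = next->io_private;
        next->io_private = NULL;
    } else {
        /* cancel the surplus size-update transaction */
        xfs_setfilesize_ioend(next, -ECANCELED);
    }
}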
234 spin_lock_irqsave(&ip->i_ioend_lock, flags); in xfs_end_io()
235 list_replace_init(&ip->i_ioend_list, &tmp); in xfs_end_io()
236 spin_unlock_irqrestore(&ip->i_ioend_lock, flags); in xfs_end_io()
241 list_del_init(&ioend->io_list); in xfs_end_io()
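The worker splices the pending ioends onto a private list under the irq-safe lock, then sorts, merges, and completes them with the lock dropped. A hedged reconstruction around the fragments above (the merge callback argument to iomap_ioend_try_merge() is specific to kernels of this vintage):

STATIC void
xfs_end_io(
    struct work_struct *work)
{
    struct xfs_inode *ip =
        container_of(work, struct xfs_inode, i_ioend_work);
    struct iomap_ioend *ioend;
    struct list_head tmp;
    unsigned long flags;

    spin_lock_irqsave(&ip->i_ioend_lock, flags);
    list_replace_init(&ip->i_ioend_list, &tmp);
    spin_unlock_irqrestore(&ip->i_ioend_lock, flags);

    iomap_sort_ioends(&tmp);
    while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
            io_list))) {
        list_del_init(&ioend->io_list);
        iomap_ioend_try_merge(ioend, &tmp, xfs_ioend_merge_private);
        xfs_end_ioend(ioend);
    }
}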
249 return ioend->io_private || in xfs_ioend_needs_workqueue()
250 ioend->io_type == IOMAP_UNWRITTEN || in xfs_ioend_needs_workqueue()
251 (ioend->io_flags & IOMAP_F_SHARED); in xfs_ioend_needs_workqueue()
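These three lines are the complete predicate: completion is punted to a workqueue whenever there is work (a pending size-update transaction, unwritten-extent conversion, or COW remapping) that cannot run in bio completion context. Reassembled as a hedged sketch:

static inline bool
xfs_ioend_needs_workqueue(struct iomap_ioend *ioend)
{
    return ioend->io_private ||
        ioend->io_type == IOMAP_UNWRITTEN ||
        (ioend->io_flags & IOMAP_F_SHARED);
}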
258 struct iomap_ioend *ioend = bio->bi_private; in xfs_end_bio()
259 struct xfs_inode *ip = XFS_I(ioend->io_inode); in xfs_end_bio()
264 spin_lock_irqsave(&ip->i_ioend_lock, flags); in xfs_end_bio()
265 if (list_empty(&ip->i_ioend_list)) in xfs_end_bio()
266 WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue, in xfs_end_bio()
267 &ip->i_ioend_work)); in xfs_end_bio()
268 list_add_tail(&ioend->io_list, &ip->i_ioend_list); in xfs_end_bio()
269 spin_unlock_irqrestore(&ip->i_ioend_lock, flags); in xfs_end_bio()
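The bio completion handler batches work: the work item is queued only on the empty-to-non-empty transition of the per-inode list, so a single xfs_end_io() run drains every ioend added since. A hedged reconstruction:

STATIC void
xfs_end_bio(
    struct bio *bio)
{
    struct iomap_ioend *ioend = bio->bi_private;
    struct xfs_inode *ip = XFS_I(ioend->io_inode);
    unsigned long flags;

    spin_lock_irqsave(&ip->i_ioend_lock, flags);
    if (list_empty(&ip->i_ioend_list))
        WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue,
                &ip->i_ioend_work));
    list_add_tail(&ioend->io_list, &ip->i_ioend_list);
    spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
}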
273 * Fast revalidation of the cached writeback mapping. Return true if the current
274 * mapping is valid, false otherwise.
282 if (offset < wpc->iomap.offset || in xfs_imap_valid()
283 offset >= wpc->iomap.offset + wpc->iomap.length) in xfs_imap_valid()
286 * If this is a COW mapping, it is sufficient to check that the mapping in xfs_imap_valid()
288 * can revalidate a COW mapping without updating the data seqno. in xfs_imap_valid()
290 if (wpc->iomap.flags & IOMAP_F_SHARED) in xfs_imap_valid()
294 * This is not a COW mapping. Check the sequence number of the data fork in xfs_imap_valid()
300 if (XFS_WPC(wpc)->data_seq != READ_ONCE(ip->i_df.if_seq)) in xfs_imap_valid()
303 XFS_WPC(wpc)->cow_seq != READ_ONCE(ip->i_cowfp->if_seq)) in xfs_imap_valid()
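Assembled, revalidation is a range check followed by sequence-number checks, with COW mappings trusted on the range check alone. A hedged reconstruction (xfs_inode_has_cow_data() guards the i_cowfp dereference):

static bool
xfs_imap_valid(
    struct iomap_writepage_ctx *wpc,
    struct xfs_inode *ip,
    loff_t offset)
{
    if (offset < wpc->iomap.offset ||
        offset >= wpc->iomap.offset + wpc->iomap.length)
        return false;
    /*
     * COW mappings only need to cover the offset; check this first
     * because callers can revalidate a COW mapping without updating
     * the data seqno.
     */
    if (wpc->iomap.flags & IOMAP_F_SHARED)
        return true;

    /*
     * Not COW: the data fork seqno catches concurrent changes to the
     * extent, and the COW fork seqno catches overlapping blocks added
     * since the last lookup found nothing here.
     */
    if (XFS_WPC(wpc)->data_seq != READ_ONCE(ip->i_df.if_seq))
        return false;
    if (xfs_inode_has_cow_data(ip) &&
        XFS_WPC(wpc)->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
        return false;
    return true;
}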
310 * extent that maps offset_fsb in wpc->iomap.
313 * backing offset_fsb, although it could have moved from the COW to the data
327 seq = &XFS_WPC(wpc)->cow_seq; in xfs_convert_blocks()
329 seq = &XFS_WPC(wpc)->data_seq; in xfs_convert_blocks()
333 * and put the result into wpc->iomap. Allocate in a loop because it in xfs_convert_blocks()
339 &wpc->iomap, seq); in xfs_convert_blocks()
342 } while (wpc->iomap.offset + wpc->iomap.length <= offset); in xfs_convert_blocks()
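The conversion helper picks the matching fork's sequence counter, then loops on xfs_bmapi_convert_delalloc(): with fragmented free space a single call may convert only part of the delalloc extent, so it retries until the resulting mapping covers the offset being written back. A hedged reconstruction:

static int
xfs_convert_blocks(
    struct iomap_writepage_ctx *wpc,
    struct xfs_inode *ip,
    int whichfork,
    loff_t offset)
{
    unsigned int *seq;
    int error;

    if (whichfork == XFS_COW_FORK)
        seq = &XFS_WPC(wpc)->cow_seq;
    else
        seq = &XFS_WPC(wpc)->data_seq;

    /*
     * Attempt to allocate whatever delalloc extent currently backs
     * offset and put the result into wpc->iomap.
     */
    do {
        error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
                &wpc->iomap, seq);
        if (error)
            return error;
    } while (wpc->iomap.offset + wpc->iomap.length <= offset);

    return 0;
}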
354 struct xfs_mount *mp = ip->i_mount; in xfs_map_blocks()
366 return -EIO; in xfs_map_blocks()
369 * COW fork blocks can overlap data fork blocks even if the blocks in xfs_map_blocks()
371 * check for overlap on reflink inodes unless the mapping is already a in xfs_map_blocks()
396 ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE || in xfs_map_blocks()
397 (ip->i_df.if_flags & XFS_IFEXTENTS)); in xfs_map_blocks()
401 * it directly instead of looking up anything in the data fork. in xfs_map_blocks()
404 xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap)) in xfs_map_blocks()
407 XFS_WPC(wpc)->cow_seq = READ_ONCE(ip->i_cowfp->if_seq); in xfs_map_blocks()
416 * ->cow_seq. If the data mapping is still valid, we're done. in xfs_map_blocks()
428 if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) in xfs_map_blocks()
430 XFS_WPC(wpc)->data_seq = READ_ONCE(ip->i_df.if_seq); in xfs_map_blocks()
435 imap.br_blockcount = imap.br_startoff - offset_fsb; in xfs_map_blocks()
444 * subsequent blocks in the mapping; however, the requirement to treat in xfs_map_blocks()
449 imap.br_blockcount = cow_fsb - imap.br_startoff; in xfs_map_blocks()
456 xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0); in xfs_map_blocks()
464 * raced with a COW to data fork conversion or truncate. in xfs_map_blocks()
465 * Restart the lookup to catch the extent in the data fork for in xfs_map_blocks()
469 if (error == -EAGAIN && whichfork == XFS_COW_FORK && !retries++) in xfs_map_blocks()
471 ASSERT(error != -EAGAIN); in xfs_map_blocks()
478 * boundary again to force a re-lookup. in xfs_map_blocks()
483 if (cow_offset < wpc->iomap.offset + wpc->iomap.length) in xfs_map_blocks()
484 wpc->iomap.length = cow_offset - wpc->iomap.offset; in xfs_map_blocks()
487 ASSERT(wpc->iomap.offset <= offset); in xfs_map_blocks()
488 ASSERT(wpc->iomap.offset + wpc->iomap.length > offset); in xfs_map_blocks()
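The clipping step near the end of xfs_map_blocks() is what the "re-lookup" comment above refers to: a freshly allocated data-fork mapping must not extend past a pending COW extent, so its length is trimmed at the COW boundary and the next block triggers a new lookup. A hedged sketch of that step:

    if (whichfork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
        loff_t cow_offset = XFS_FSB_TO_B(mp, cow_fsb);

        /* clip at the COW boundary to force a re-lookup there */
        if (cow_offset < wpc->iomap.offset + wpc->iomap.length)
            wpc->iomap.length = cow_offset - wpc->iomap.offset;
    }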
503 * task-wide nofs context for the following operations. in xfs_prepare_ioend()
508 if (!status && (ioend->io_flags & IOMAP_F_SHARED)) { in xfs_prepare_ioend()
509 status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode), in xfs_prepare_ioend()
510 ioend->io_offset, ioend->io_size); in xfs_prepare_ioend()
513 /* Reserve log space if we might write beyond the on-disk inode size. */ in xfs_prepare_ioend()
515 ((ioend->io_flags & IOMAP_F_SHARED) || in xfs_prepare_ioend()
516 ioend->io_type != IOMAP_UNWRITTEN) && in xfs_prepare_ioend()
518 !ioend->io_private) in xfs_prepare_ioend()
524 ioend->io_bio->bi_end_io = xfs_end_bio; in xfs_prepare_ioend()
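Submission-side preparation, assembled from the fragments above: convert COW staging extents, reserve the size-update transaction only when the write may extend the on-disk size and no unwritten conversion will handle that update, and install the workqueue-punting bio end_io handler when needed. A hedged reconstruction:

STATIC int
xfs_prepare_ioend(
    struct iomap_ioend *ioend,
    int status)
{
    unsigned int nofs_flag;

    /* set the task-wide nofs context for the allocations below */
    nofs_flag = memalloc_nofs_save();

    /* convert CoW extents to regular */
    if (!status && (ioend->io_flags & IOMAP_F_SHARED)) {
        status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
                ioend->io_offset, ioend->io_size);
    }

    /* reserve log space if we might write beyond the on-disk size */
    if (!status &&
        ((ioend->io_flags & IOMAP_F_SHARED) ||
         ioend->io_type != IOMAP_UNWRITTEN) &&
        xfs_ioend_is_append(ioend) &&
        !ioend->io_private)
        status = xfs_setfilesize_trans_alloc(ioend);

    memalloc_nofs_restore(nofs_flag);

    if (xfs_ioend_needs_workqueue(ioend))
        ioend->io_bio->bi_end_io = xfs_end_bio;
    return status;
}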
530 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
534 * they are delalloc, we can do this without needing a transaction. Indeed - if
544 struct inode *inode = page->mapping->host; in xfs_discard_page()
546 struct xfs_mount *mp = ip->i_mount; in xfs_discard_page()
557 page, ip->i_ino, fileoff); in xfs_discard_page()
560 i_blocks_per_page(inode, page) - pageoff_fsb); in xfs_discard_page()
562 xfs_alert(mp, "page discard unable to remove delalloc mapping."); in xfs_discard_page()
564 iomap_invalidatepage(page, pageoff, PAGE_SIZE - pageoff); in xfs_discard_page()
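Assembled, the discard path: skip the punch if the fs is already shut down, emit a rate-limited alert, punch out the delalloc blocks backing the rest of the page, and invalidate from the failing offset to the end of the page. A hedged reconstruction for kernels of this vintage:

STATIC void
xfs_discard_page(
    struct page *page,
    loff_t fileoff)
{
    struct inode *inode = page->mapping->host;
    struct xfs_inode *ip = XFS_I(inode);
    struct xfs_mount *mp = ip->i_mount;
    unsigned int pageoff = offset_in_page(fileoff);
    xfs_fileoff_t start_fsb = XFS_B_TO_FSBT(mp, fileoff);
    xfs_fileoff_t pageoff_fsb = XFS_B_TO_FSBT(mp, pageoff);
    int error;

    if (XFS_FORCED_SHUTDOWN(mp))
        goto out_invalidate;

    xfs_alert_ratelimited(mp,
        "page discard on page "PTR_FMT", inode 0x%llx, pos %llu.",
            page, ip->i_ino, fileoff);

    error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
            i_blocks_per_page(inode, page) - pageoff_fsb);
    if (error && !XFS_FORCED_SHUTDOWN(mp))
        xfs_alert(mp, "page discard unable to remove delalloc mapping.");
out_invalidate:
    iomap_invalidatepage(page, pageoff, PAGE_SIZE - pageoff);
}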
580 if (WARN_ON_ONCE(current->journal_info)) { in xfs_vm_writepage()
591 struct address_space *mapping, in xfs_vm_writepages() argument
597 * Writing back data in a transaction context can result in recursive in xfs_vm_writepages()
600 if (WARN_ON_ONCE(current->journal_info)) in xfs_vm_writepages()
603 xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED); in xfs_vm_writepages()
604 return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops); in xfs_vm_writepages()
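These fragments are nearly the whole of xfs_vm_writepages(): refuse to write back from a transaction context (which could recurse into another transaction), clear the truncate flag, and delegate to iomap. A hedged reconstruction:

STATIC int
xfs_vm_writepages(
    struct address_space *mapping,
    struct writeback_control *wbc)
{
    struct xfs_writepage_ctx wpc = { };

    /* writing back data in a transaction context can recurse */
    if (WARN_ON_ONCE(current->journal_info))
        return 0;

    xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
    return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);
}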
609 struct address_space *mapping, in xfs_dax_writepages() argument
612 struct xfs_inode *ip = XFS_I(mapping->host); in xfs_dax_writepages()
615 return dax_writeback_mapping_range(mapping, in xfs_dax_writepages()
616 xfs_inode_buftarg(ip)->bt_daxdev, wbc); in xfs_dax_writepages()
621 struct address_space *mapping, in xfs_vm_bmap() argument
624 struct xfs_inode *ip = XFS_I(mapping->host); in xfs_vm_bmap()
629 * The swap code (ab-)uses ->bmap to get a block mapping and then in xfs_vm_bmap()
639 return iomap_bmap(mapping, block, &xfs_read_iomap_ops); in xfs_vm_bmap()
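Reassembled, ->bmap refuses to hand out block mappings for reflink and realtime inodes, returning 0, the bmap convention for "no mapping", since callers such as the legacy swap code would bypass the filesystem for I/O. A hedged reconstruction:

STATIC sector_t
xfs_vm_bmap(
    struct address_space *mapping,
    sector_t block)
{
    struct xfs_inode *ip = XFS_I(mapping->host);

    trace_xfs_vm_bmap(ip);

    /*
     * Block mappings must not be handed out for COW (reflink) inodes,
     * and realtime files lack the blockdev info bmap callers expect.
     */
    if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
        return 0;
    return iomap_bmap(mapping, block, &xfs_read_iomap_ops);
}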
663 sis->bdev = xfs_inode_buftarg(XFS_I(file_inode(swap_file)))->bt_bdev; in xfs_iomap_swapfile_activate()