/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#ifndef __XFS_DEFER_H__
#define __XFS_DEFER_H__

struct xfs_btree_cur;
struct xfs_defer_op_type;
struct xfs_defer_capture;

/*
 * Header for deferred operation list.
 */
enum xfs_defer_ops_type {
	XFS_DEFER_OPS_TYPE_BMAP,
	XFS_DEFER_OPS_TYPE_REFCOUNT,
	XFS_DEFER_OPS_TYPE_RMAP,
	XFS_DEFER_OPS_TYPE_FREE,
	XFS_DEFER_OPS_TYPE_AGFL_FREE,
	XFS_DEFER_OPS_TYPE_MAX,
};

/*
 * Save a log intent item and a list of extents, so that we can replay
 * whatever action had to happen to the extent list and file the log done
 * item.
 */
struct xfs_defer_pending {
	struct list_head		dfp_list;	/* pending items */
	struct list_head		dfp_work;	/* work items */
	struct xfs_log_item		*dfp_intent;	/* log intent item */
	struct xfs_log_item		*dfp_done;	/* log done item */
	unsigned int			dfp_count;	/* # extent items */
	enum xfs_defer_ops_type		dfp_type;
};

void xfs_defer_add(struct xfs_trans *tp, enum xfs_defer_ops_type type,
		struct list_head *h);
int xfs_defer_finish_noroll(struct xfs_trans **tp);
int xfs_defer_finish(struct xfs_trans **tp);
void xfs_defer_cancel(struct xfs_trans *);
void xfs_defer_move(struct xfs_trans *dtp, struct xfs_trans *stp);
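
/*
 * Illustrative sketch, not part of the original header: a caller that has
 * dirtied a transaction queues deferred work with xfs_defer_add() and then
 * finishes the whole chain with xfs_defer_finish(), which rolls the
 * transaction as needed, before committing.  The work item and its embedded
 * list_head below are hypothetical placeholders for a real intent work item
 * (for example struct xfs_extent_free_item).
 *
 *	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &work_item->list);
 *	error = xfs_defer_finish(&tp);
 *	if (error)
 *		goto out_cancel;
 *	error = xfs_trans_commit(tp);
 */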

/* Description of a deferred type. */
struct xfs_defer_op_type {
	struct xfs_log_item *(*create_intent)(struct xfs_trans *tp,
			struct list_head *items, unsigned int count, bool sort);
	void (*abort_intent)(struct xfs_log_item *intent);
	struct xfs_log_item *(*create_done)(struct xfs_trans *tp,
			struct xfs_log_item *intent, unsigned int count);
	int (*finish_item)(struct xfs_trans *tp, struct xfs_log_item *done,
			struct list_head *item, struct xfs_btree_cur **state);
	void (*finish_cleanup)(struct xfs_trans *tp,
			struct xfs_btree_cur *state, int error);
	void (*cancel_item)(struct list_head *item);
	unsigned int		max_items;
};

extern const struct xfs_defer_op_type xfs_bmap_update_defer_type;
extern const struct xfs_defer_op_type xfs_refcount_update_defer_type;
extern const struct xfs_defer_op_type xfs_rmap_update_defer_type;
extern const struct xfs_defer_op_type xfs_extent_free_defer_type;
extern const struct xfs_defer_op_type xfs_agfl_free_defer_type;

/*
 * This structure enables a dfops user to detach the chain of deferred
 * operations from a transaction so that they can be continued later.
 */
struct xfs_defer_capture {
	/* List of other capture structures. */
	struct list_head	dfc_list;

	/* Deferred ops state saved from the transaction. */
	struct list_head	dfc_dfops;
	unsigned int		dfc_tpflags;

	/* Block reservations for the data and rt devices. */
	unsigned int		dfc_blkres;
	unsigned int		dfc_rtxres;

	/* Log reservation saved from the transaction. */
	unsigned int		dfc_logres;

	/*
	 * An inode reference that must be maintained to complete the deferred
	 * work.
	 */
	struct xfs_inode	*dfc_capture_ip;
};

/*
 * Functions to capture a chain of deferred operations and continue them
 * later.  This doesn't normally happen except during log recovery.
 */
int xfs_defer_ops_capture_and_commit(struct xfs_trans *tp,
		struct xfs_inode *capture_ip, struct list_head *capture_list);
void xfs_defer_ops_continue(struct xfs_defer_capture *d, struct xfs_trans *tp,
		struct xfs_inode **captured_ipp);
void xfs_defer_ops_release(struct xfs_mount *mp, struct xfs_defer_capture *d);

#endif /* __XFS_DEFER_H__ */