xref: /OK3568_Linux_fs/kernel/fs/xfs/xfs_log.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
4*4882a593Smuzhiyun  * All Rights Reserved.
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun #ifndef	__XFS_LOG_H__
7*4882a593Smuzhiyun #define __XFS_LOG_H__
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun struct xfs_cil_ctx;
10*4882a593Smuzhiyun 
/*
 * A log vector: the in-memory, formatted representation of one log item's
 * changes.  Vectors are chained through lv_next while a list is being
 * built (see lv_next's comment); the payload lives in lv_buf and is
 * described by the lv_iovecp array.
 */
struct xfs_log_vec {
	struct xfs_log_vec	*lv_next;	/* next lv in build list */
	int			lv_niovecs;	/* number of iovecs in lv */
	struct xfs_log_iovec	*lv_iovecp;	/* iovec array */
	struct xfs_log_item	*lv_item;	/* owner */
	char			*lv_buf;	/* formatted buffer */
	int			lv_bytes;	/* accounted space in buffer */
	int			lv_buf_len;	/* aligned size of buffer */
	int			lv_size;	/* size of allocated lv */
};

/*
 * Sentinel value for "ordered" log vectors that carry no formatted
 * payload.  NOTE(review): the consumers of this constant are outside
 * this header — confirm against the CIL code that uses it.
 */
#define XFS_LOG_VEC_ORDERED	(-1)
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun static inline void *
xlog_prepare_iovec(struct xfs_log_vec * lv,struct xfs_log_iovec ** vecp,uint type)25*4882a593Smuzhiyun xlog_prepare_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
26*4882a593Smuzhiyun 		uint type)
27*4882a593Smuzhiyun {
28*4882a593Smuzhiyun 	struct xfs_log_iovec *vec = *vecp;
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun 	if (vec) {
31*4882a593Smuzhiyun 		ASSERT(vec - lv->lv_iovecp < lv->lv_niovecs);
32*4882a593Smuzhiyun 		vec++;
33*4882a593Smuzhiyun 	} else {
34*4882a593Smuzhiyun 		vec = &lv->lv_iovecp[0];
35*4882a593Smuzhiyun 	}
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun 	vec->i_type = type;
38*4882a593Smuzhiyun 	vec->i_addr = lv->lv_buf + lv->lv_buf_len;
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun 	ASSERT(IS_ALIGNED((unsigned long)vec->i_addr, sizeof(uint64_t)));
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun 	*vecp = vec;
43*4882a593Smuzhiyun 	return vec->i_addr;
44*4882a593Smuzhiyun }
45*4882a593Smuzhiyun 
46*4882a593Smuzhiyun /*
47*4882a593Smuzhiyun  * We need to make sure the next buffer is naturally aligned for the biggest
48*4882a593Smuzhiyun  * basic data type we put into it.  We already accounted for this padding when
49*4882a593Smuzhiyun  * sizing the buffer.
50*4882a593Smuzhiyun  *
51*4882a593Smuzhiyun  * However, this padding does not get written into the log, and hence we have to
52*4882a593Smuzhiyun  * track the space used by the log vectors separately to prevent log space hangs
53*4882a593Smuzhiyun  * due to inaccurate accounting (i.e. a leak) of the used log space through the
54*4882a593Smuzhiyun  * CIL context ticket.
55*4882a593Smuzhiyun  */
56*4882a593Smuzhiyun static inline void
xlog_finish_iovec(struct xfs_log_vec * lv,struct xfs_log_iovec * vec,int len)57*4882a593Smuzhiyun xlog_finish_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec *vec, int len)
58*4882a593Smuzhiyun {
59*4882a593Smuzhiyun 	lv->lv_buf_len += round_up(len, sizeof(uint64_t));
60*4882a593Smuzhiyun 	lv->lv_bytes += len;
61*4882a593Smuzhiyun 	vec->i_len = len;
62*4882a593Smuzhiyun }
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun static inline void *
xlog_copy_iovec(struct xfs_log_vec * lv,struct xfs_log_iovec ** vecp,uint type,void * data,int len)65*4882a593Smuzhiyun xlog_copy_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
66*4882a593Smuzhiyun 		uint type, void *data, int len)
67*4882a593Smuzhiyun {
68*4882a593Smuzhiyun 	void *buf;
69*4882a593Smuzhiyun 
70*4882a593Smuzhiyun 	buf = xlog_prepare_iovec(lv, vecp, type);
71*4882a593Smuzhiyun 	memcpy(buf, data, len);
72*4882a593Smuzhiyun 	xlog_finish_iovec(lv, *vecp, len);
73*4882a593Smuzhiyun 	return buf;
74*4882a593Smuzhiyun }
75*4882a593Smuzhiyun 
76*4882a593Smuzhiyun /*
77*4882a593Smuzhiyun  * By comparing each component, we don't have to worry about extra
78*4882a593Smuzhiyun  * endian issues in treating two 32 bit numbers as one 64 bit number
79*4882a593Smuzhiyun  */
_lsn_cmp(xfs_lsn_t lsn1,xfs_lsn_t lsn2)80*4882a593Smuzhiyun static inline xfs_lsn_t	_lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
81*4882a593Smuzhiyun {
82*4882a593Smuzhiyun 	if (CYCLE_LSN(lsn1) != CYCLE_LSN(lsn2))
83*4882a593Smuzhiyun 		return (CYCLE_LSN(lsn1)<CYCLE_LSN(lsn2))? -999 : 999;
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun 	if (BLOCK_LSN(lsn1) != BLOCK_LSN(lsn2))
86*4882a593Smuzhiyun 		return (BLOCK_LSN(lsn1)<BLOCK_LSN(lsn2))? -999 : 999;
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun 	return 0;
89*4882a593Smuzhiyun }
90*4882a593Smuzhiyun 
91*4882a593Smuzhiyun #define	XFS_LSN_CMP(x,y) _lsn_cmp(x,y)
92*4882a593Smuzhiyun 
/*
 * Flags to xfs_log_force()
 *
 *	XFS_LOG_SYNC:	Synchronous force in-core log to disk
 */
#define XFS_LOG_SYNC		0x1

/* Log manager interfaces */
struct xfs_mount;
struct xlog_in_core;
struct xlog_ticket;
struct xfs_log_item;
struct xfs_item_ops;
struct xfs_trans;

/* Force the in-core log out; XFS_LOG_SYNC makes the force synchronous. */
int	  xfs_log_force(struct xfs_mount *mp, uint flags);
/*
 * Force the log up to CIL sequence @seq.  NOTE(review): presumably
 * *log_forced reports whether a force was actually issued — confirm
 * against the definition in xfs_log.c.
 */
int	  xfs_log_force_seq(struct xfs_mount *mp, xfs_csn_t seq, uint flags,
		int *log_forced);
/* Mount-time log initialisation over num_bblocks starting at start_block. */
int	  xfs_log_mount(struct xfs_mount	*mp,
			struct xfs_buftarg	*log_target,
			xfs_daddr_t		start_block,
			int		 	num_bblocks);
int	  xfs_log_mount_finish(struct xfs_mount *mp);
void	xfs_log_mount_cancel(struct xfs_mount *);
xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
/* Wake waiters blocked on log space becoming available. */
void	  xfs_log_space_wake(struct xfs_mount *mp);
void	  xfs_log_release_iclog(struct xlog_in_core *iclog);
/*
 * Reserve log space, returning a ticket in *ticket.  @permanent selects
 * a permanent (multi-use) reservation; @count is the reservation count.
 */
int	  xfs_log_reserve(struct xfs_mount *mp,
			  int		   length,
			  int		   count,
			  struct xlog_ticket **ticket,
			  uint8_t		   clientid,
			  bool		   permanent);
/* Re-grant space on an existing ticket. */
int	  xfs_log_regrant(struct xfs_mount *mp, struct xlog_ticket *tic);
void      xfs_log_unmount(struct xfs_mount *mp);
/* Shut the log down on error; @logerror flags a log I/O error shutdown. */
int	  xfs_log_force_umount(struct xfs_mount *mp, int logerror);
bool	xfs_log_writable(struct xfs_mount *mp);

/* Ticket reference counting: get takes a reference, put drops one. */
struct xlog_ticket *xfs_log_ticket_get(struct xlog_ticket *ticket);
void	  xfs_log_ticket_put(struct xlog_ticket *ticket);

void	xlog_cil_process_committed(struct list_head *list);
bool	xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);

/* Background log work (periodic covering/flushing) queue and quiesce. */
void	xfs_log_work_queue(struct xfs_mount *mp);
void	xfs_log_quiesce(struct xfs_mount *mp);
bool	xfs_log_check_lsn(struct xfs_mount *, xfs_lsn_t);
bool	xfs_log_in_recovery(struct xfs_mount *);

/* Compute the LSN to push the AIL to for @need_bytes of grant space. */
xfs_lsn_t xlog_grant_push_threshold(struct xlog *log, int need_bytes);
145*4882a593Smuzhiyun #endif	/* __XFS_LOG_H__ */
146