// SPDX-License-Identifier: GPL-2.0+
/*
 * gcinode.c - dummy inodes to buffer blocks for garbage collection
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Seiji Kihara, Amagai Yoshiji, and Ryusuke Konishi.
 * Revised by Ryusuke Konishi.
 *
 */
/*
 * This file implements the cache of on-disk blocks to be moved in
 * garbage collection.  The disk blocks are held by dummy inodes
 * (called gcinodes), and this file provides a lookup function for the
 * dummy inodes and a read function for their buffers.
 *
 * Buffers and pages held by the dummy inodes are released each time
 * after they are copied to a new log.  Dirty blocks made on the
 * current generation and the blocks to be moved by GC never overlap,
 * because the dirty blocks form a new generation; the two sets must
 * be written out separately.
 */
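
/*
 * A rough sketch of how a GC pass is expected to drive this cache
 * (illustrative only; the real caller lives in the GC ioctl path and
 * handles locking, list management, and error recovery):
 *
 *	for each block selected for collection:
 *		look up or create the gc inode for the owning file;
 *		if (data block)
 *			nilfs_gccache_submit_read_data(inode, blkoff,
 *						       pbn, vbn, &bh);
 *		else
 *			nilfs_gccache_submit_read_node(inode, pbn, vbn, &bh);
 *	for each buffer head obtained above:
 *		nilfs_gccache_wait_and_mark_dirty(bh);
 *
 * The dirtied gc buffers are then picked up and written to a new log
 * by the segment constructor.
 */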

#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include "nilfs.h"
#include "btree.h"
#include "btnode.h"
#include "page.h"
#include "mdt.h"
#include "dat.h"
#include "ifile.h"

/*
 * nilfs_gccache_submit_read_data() - add data buffer and submit read request
 * @inode - gc inode
 * @blkoff - dummy offset treated as the key for the page cache
 * @pbn - physical block number of the block
 * @vbn - virtual block number of the block, 0 for non-virtual block
 * @out_bh - indirect pointer to a buffer_head struct to receive the results
 *
 * Description: nilfs_gccache_submit_read_data() registers the data buffer
 * specified by @pbn to the GC pagecache with the key @blkoff.
 * This function sets @vbn (@pbn if @vbn is zero) in b_blocknr of the buffer.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - The block specified with @vbn does not exist.
 */
int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
				   sector_t pbn, __u64 vbn,
				   struct buffer_head **out_bh)
{
	struct buffer_head *bh;
	int err;

	bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
	if (unlikely(!bh))
		return -ENOMEM;

	if (buffer_uptodate(bh))
		goto out;

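	/*
	 * A zero @pbn means the caller supplied only the virtual block
	 * number; look up the current physical location in the DAT file.
	 */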
	if (pbn == 0) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		err = nilfs_dat_translate(nilfs->ns_dat, vbn, &pbn);
		if (unlikely(err)) { /* -EIO, -ENOMEM, -ENOENT */
			brelse(bh);
			goto failed;
		}
	}

	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		goto out;
	}

	if (!buffer_mapped(bh)) {
		bh->b_bdev = inode->i_sb->s_bdev;
		set_buffer_mapped(bh);
	}
	bh->b_blocknr = pbn;
	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	submit_bh(REQ_OP_READ, 0, bh);
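	/*
	 * After submission, expose the virtual block number in b_blocknr
	 * so the buffer carries the virtual address, as documented above,
	 * rather than the physical address used for the read.
	 */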
	if (vbn)
		bh->b_blocknr = vbn;
 out:
	err = 0;
	*out_bh = bh;

 failed:
	unlock_page(bh->b_page);
	put_page(bh->b_page);
	return err;
}

/*
 * nilfs_gccache_submit_read_node() - add node buffer and submit read request
 * @inode - gc inode
 * @pbn - physical block number for the block
 * @vbn - virtual block number for the block
 * @out_bh - indirect pointer to a buffer_head struct to receive the results
 *
 * Description: nilfs_gccache_submit_read_node() registers the node buffer
 * specified by @vbn to the GC pagecache.  @pbn can be supplied by the
 * caller to avoid translation of the disk block address.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn,
				   __u64 vbn, struct buffer_head **out_bh)
{
	struct inode *btnc_inode = NILFS_I(inode)->i_assoc_inode;
	int ret;

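	/*
	 * Key the btnode cache on the virtual block number, falling back
	 * to @pbn when no virtual address is given.
	 */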
	ret = nilfs_btnode_submit_block(btnc_inode->i_mapping,
					vbn ? : pbn, pbn, REQ_OP_READ, 0,
					out_bh, &pbn);
	if (ret == -EEXIST) /* internal code (cache hit) */
		ret = 0;
	return ret;
}

int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *bh)
{
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		struct inode *inode = bh->b_page->mapping->host;

		nilfs_err(inode->i_sb,
			  "I/O error reading %s block for GC (ino=%lu, vblocknr=%llu)",
			  buffer_nilfs_node(bh) ? "node" : "data",
			  inode->i_ino, (unsigned long long)bh->b_blocknr);
		return -EIO;
	}
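	/* The buffer is already dirty; report it with the internal -EEXIST code. */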
	if (buffer_dirty(bh))
		return -EEXIST;

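	/*
	 * Sanity-check b-tree node blocks before dirtying them so that a
	 * corrupted node is not carried over into the new log.
	 */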
	if (buffer_nilfs_node(bh) && nilfs_btree_broken_node_block(bh)) {
		clear_buffer_uptodate(bh);
		return -EIO;
	}
	mark_buffer_dirty(bh);
	return 0;
}

int nilfs_init_gcinode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

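	/*
	 * GC inodes are in-memory only: give them a plain regular-file
	 * mode, NOFS page-cache allocations, and no real address-space
	 * operations.
	 */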
	inode->i_mode = S_IFREG;
	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	inode->i_mapping->a_ops = &empty_aops;

	ii->i_flags = 0;
	nilfs_bmap_init_gc(ii->i_bmap);

	return nilfs_attach_btree_node_cache(inode);
}

/**
 * nilfs_remove_all_gcinodes() - remove all unprocessed gc inodes
 * @nilfs: NILFS file system instance
 */
void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs)
{
	struct list_head *head = &nilfs->ns_gc_inodes;
	struct nilfs_inode_info *ii;

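	/*
	 * Drop every gc inode left on the list: detach it, invalidate its
	 * data pages and btnode cache, and put the inode reference.
	 */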
	while (!list_empty(head)) {
		ii = list_first_entry(head, struct nilfs_inode_info, i_dirty);
		list_del_init(&ii->i_dirty);
		truncate_inode_pages(&ii->vfs_inode.i_data, 0);
		nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
		iput(&ii->vfs_inode);
	}
}