// SPDX-License-Identifier: GPL-2.0-or-later
/**
 * attrib.c - NTFS attribute operations. Part of the Linux-NTFS project.
 *
 * Copyright (c) 2001-2012 Anton Altaparmakov and Tuxera Inc.
 * Copyright (c) 2002 Richard Russon
 */

#include <linux/buffer_head.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>

#include "attrib.h"
#include "debug.h"
#include "layout.h"
#include "lcnalloc.h"
#include "malloc.h"
#include "mft.h"
#include "ntfs.h"
#include "types.h"

/**
 * ntfs_map_runlist_nolock - map (a part of) a runlist of an ntfs inode
 * @ni:		ntfs inode for which to map (part of) a runlist
 * @vcn:	map runlist part containing this vcn
 * @ctx:	active attribute search context if present or NULL if not
 *
 * Map the part of a runlist containing the @vcn of the ntfs inode @ni.
 *
 * If @ctx is specified, it is an active search context of @ni and its base mft
 * record. This is needed when ntfs_map_runlist_nolock() encounters unmapped
 * runlist fragments and allows their mapping. If you do not have the mft
 * record mapped, you can specify @ctx as NULL and ntfs_map_runlist_nolock()
 * will perform the necessary mapping and unmapping.
 *
 * Note, ntfs_map_runlist_nolock() saves the state of @ctx on entry and
 * restores it before returning. Thus, @ctx will be left pointing to the same
 * attribute on return as on entry. However, the actual pointers in @ctx may
 * point to different memory locations on return, so you must remember to reset
 * any cached pointers from the @ctx, i.e. after the call to
 * ntfs_map_runlist_nolock(), you will probably want to do:
 *	m = ctx->mrec;
 *	a = ctx->attr;
 * Assuming you cache ctx->attr in a variable @a of type ATTR_RECORD * and that
 * you cache ctx->mrec in a variable @m of type MFT_RECORD *.
 *
 * Return 0 on success and -errno on error. There is one special error code
 * which is not an error as such. This is -ENOENT. It means that @vcn is out
 * of bounds of the runlist.
 *
 * Note the runlist can be NULL after this function returns if @vcn is zero and
 * the attribute has zero allocated size, i.e. there simply is no runlist.
 *
 * WARNING: If @ctx is supplied, regardless of whether success or failure is
 *	    returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx
 *	    is no longer valid, i.e. you need to either call
 *	    ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
 *	    In that case PTR_ERR(@ctx->mrec) will give you the error code for
 *	    why the mapping of the old inode failed.
 *
 * Locking: - The runlist described by @ni must be locked for writing on entry
 *	      and is locked on return. Note the runlist will be modified.
 *	    - If @ctx is NULL, the base mft record of @ni must not be mapped on
 *	      entry and it will be left unmapped on return.
 *	    - If @ctx is not NULL, the base mft record must be mapped on entry
 *	      and it will be left mapped on return.
 */
int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
{
	VCN end_vcn;
	unsigned long flags;
	ntfs_inode *base_ni;
	MFT_RECORD *m;
	ATTR_RECORD *a;
	runlist_element *rl;
	struct page *put_this_page = NULL;
	int err = 0;
	bool ctx_is_temporary, ctx_needs_reset;
	ntfs_attr_search_ctx old_ctx = { NULL, };

	ntfs_debug("Mapping runlist part containing vcn 0x%llx.",
			(unsigned long long)vcn);
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	if (!ctx) {
		ctx_is_temporary = ctx_needs_reset = true;
		m = map_mft_record(base_ni);
		if (IS_ERR(m))
			return PTR_ERR(m);
		ctx = ntfs_attr_get_search_ctx(base_ni, m);
		if (unlikely(!ctx)) {
			err = -ENOMEM;
			goto err_out;
		}
	} else {
		VCN allocated_size_vcn;

		BUG_ON(IS_ERR(ctx->mrec));
		a = ctx->attr;
		BUG_ON(!a->non_resident);
		ctx_is_temporary = false;
		end_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
		read_lock_irqsave(&ni->size_lock, flags);
		allocated_size_vcn = ni->allocated_size >>
				ni->vol->cluster_size_bits;
		read_unlock_irqrestore(&ni->size_lock, flags);
		if (!a->data.non_resident.lowest_vcn && end_vcn <= 0)
			end_vcn = allocated_size_vcn - 1;
		/*
		 * If we already have the attribute extent containing @vcn in
		 * @ctx, no need to look it up again. We slightly cheat in
		 * that if vcn exceeds the allocated size, we will refuse to
		 * map the runlist below, so there is definitely no need to get
		 * the right attribute extent.
		 */
		if (vcn >= allocated_size_vcn || (a->type == ni->type &&
				a->name_length == ni->name_len &&
				!memcmp((u8*)a + le16_to_cpu(a->name_offset),
				ni->name, ni->name_len) &&
				sle64_to_cpu(a->data.non_resident.lowest_vcn)
				<= vcn && end_vcn >= vcn))
			ctx_needs_reset = false;
		else {
			/* Save the old search context. */
			old_ctx = *ctx;
			/*
			 * If the currently mapped (extent) inode is not the
			 * base inode we will unmap it when we reinitialize the
			 * search context which means we need to get a
			 * reference to the page containing the mapped mft
			 * record so we do not accidentally drop changes to the
			 * mft record when it has not been marked dirty yet.
			 */
			if (old_ctx.base_ntfs_ino && old_ctx.ntfs_ino !=
					old_ctx.base_ntfs_ino) {
				put_this_page = old_ctx.ntfs_ino->page;
				get_page(put_this_page);
			}
			/*
			 * Reinitialize the search context so we can lookup the
			 * needed attribute extent.
			 */
			ntfs_attr_reinit_search_ctx(ctx);
			ctx_needs_reset = true;
		}
	}
	if (ctx_needs_reset) {
		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, vcn, NULL, 0, ctx);
		if (unlikely(err)) {
			if (err == -ENOENT)
				err = -EIO;
			goto err_out;
		}
		BUG_ON(!ctx->attr->non_resident);
	}
	a = ctx->attr;
	/*
	 * Only decompress the mapping pairs if @vcn is inside it. Otherwise
	 * we get into problems when we try to map an out of bounds vcn because
	 * we then try to map the already mapped runlist fragment and
	 * ntfs_mapping_pairs_decompress() fails.
	 */
	end_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn) + 1;
	if (unlikely(vcn && vcn >= end_vcn)) {
		err = -ENOENT;
		goto err_out;
	}
	rl = ntfs_mapping_pairs_decompress(ni->vol, a, ni->runlist.rl);
	if (IS_ERR(rl))
		err = PTR_ERR(rl);
	else
		ni->runlist.rl = rl;
err_out:
	if (ctx_is_temporary) {
		if (likely(ctx))
			ntfs_attr_put_search_ctx(ctx);
		unmap_mft_record(base_ni);
	} else if (ctx_needs_reset) {
		/*
		 * If there is no attribute list, restoring the search context
		 * is accomplished simply by copying the saved context back over
		 * the caller supplied context. If there is an attribute list,
		 * things are more complicated as we need to deal with mapping
		 * of mft records and resulting potential changes in pointers.
		 */
		if (NInoAttrList(base_ni)) {
			/*
			 * If the currently mapped (extent) inode is not the
			 * one we had before, we need to unmap it and map the
			 * old one.
			 */
			if (ctx->ntfs_ino != old_ctx.ntfs_ino) {
				/*
				 * If the currently mapped inode is not the
				 * base inode, unmap it.
				 */
				if (ctx->base_ntfs_ino && ctx->ntfs_ino !=
						ctx->base_ntfs_ino) {
					unmap_extent_mft_record(ctx->ntfs_ino);
					ctx->mrec = ctx->base_mrec;
					BUG_ON(!ctx->mrec);
				}
				/*
				 * If the old mapped inode is not the base
				 * inode, map it.
				 */
				if (old_ctx.base_ntfs_ino &&
						old_ctx.ntfs_ino !=
						old_ctx.base_ntfs_ino) {
retry_map:
					ctx->mrec = map_mft_record(
							old_ctx.ntfs_ino);
					/*
					 * Something bad has happened. If out
					 * of memory retry till it succeeds.
					 * Any other errors are fatal and we
					 * return the error code in ctx->mrec.
					 * Let the caller deal with it... We
					 * just need to fudge things so the
					 * caller can reinit and/or put the
					 * search context safely.
					 */
					if (IS_ERR(ctx->mrec)) {
						if (PTR_ERR(ctx->mrec) ==
								-ENOMEM) {
							schedule();
							goto retry_map;
						} else
							old_ctx.ntfs_ino =
								old_ctx.
								base_ntfs_ino;
					}
				}
			}
			/* Update the changed pointers in the saved context. */
			if (ctx->mrec != old_ctx.mrec) {
				if (!IS_ERR(ctx->mrec))
					old_ctx.attr = (ATTR_RECORD*)(
							(u8*)ctx->mrec +
							((u8*)old_ctx.attr -
							(u8*)old_ctx.mrec));
				old_ctx.mrec = ctx->mrec;
			}
		}
		/* Restore the search context to the saved one. */
		*ctx = old_ctx;
		/*
		 * We drop the reference on the page we took earlier. In the
		 * case that IS_ERR(ctx->mrec) is true this means we might lose
		 * some changes to the mft record that had been made between
		 * the last time it was marked dirty/written out and now. This
		 * at this stage is not a problem as the mapping error is fatal
		 * enough that the mft record cannot be written out anyway and
		 * the caller is very likely to shutdown the whole inode
		 * immediately and mark the volume dirty for chkdsk to pick up
		 * the pieces anyway.
		 */
		if (put_this_page)
			put_page(put_this_page);
	}
	return err;
}
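
/*
 * Illustrative sketch (not part of the original driver) of the calling
 * convention documented above: the caller holds the runlist lock for writing
 * and, when it passed in a search context, re-fetches any cached pointers and
 * checks IS_ERR(ctx->mrec) regardless of the return value. Variable names
 * other than the function's own parameters are hypothetical.
 *
 *	down_write(&ni->runlist.lock);
 *	err = ntfs_map_runlist_nolock(ni, vcn, ctx);
 *	if (ctx && IS_ERR(ctx->mrec))
 *		ntfs_attr_reinit_search_ctx(ctx);	(ctx no longer valid)
 *	else if (ctx) {
 *		m = ctx->mrec;				(refresh cached copies)
 *		a = ctx->attr;
 *	}
 *	up_write(&ni->runlist.lock);
 */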

/**
 * ntfs_map_runlist - map (a part of) a runlist of an ntfs inode
 * @ni:		ntfs inode for which to map (part of) a runlist
 * @vcn:	map runlist part containing this vcn
 *
 * Map the part of a runlist containing the @vcn of the ntfs inode @ni.
 *
 * Return 0 on success and -errno on error. There is one special error code
 * which is not an error as such. This is -ENOENT. It means that @vcn is out
 * of bounds of the runlist.
 *
 * Locking: - The runlist must be unlocked on entry and is unlocked on return.
 *	    - This function takes the runlist lock for writing and may modify
 *	      the runlist.
 */
int ntfs_map_runlist(ntfs_inode *ni, VCN vcn)
{
	int err = 0;

	down_write(&ni->runlist.lock);
	/* Make sure someone else didn't do the work while we were sleeping. */
	if (likely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) <=
			LCN_RL_NOT_MAPPED))
		err = ntfs_map_runlist_nolock(ni, vcn, NULL);
	up_write(&ni->runlist.lock);
	return err;
}
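
/*
 * Illustrative sketch (an assumption, not code from this file): a caller that
 * treats -ENOENT from ntfs_map_runlist() as "@vcn lies beyond the end of the
 * runlist" rather than as a hard failure; the zero-fill step is hypothetical.
 *
 *	err = ntfs_map_runlist(ni, vcn);
 *	if (err == -ENOENT)
 *		... @vcn is out of bounds, e.g. zero-fill the page ...
 *	else if (err)
 *		return err;
 */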

/**
 * ntfs_attr_vcn_to_lcn_nolock - convert a vcn into a lcn given an ntfs inode
 * @ni:			ntfs inode of the attribute whose runlist to search
 * @vcn:		vcn to convert
 * @write_locked:	true if the runlist is locked for writing
 *
 * Find the virtual cluster number @vcn in the runlist of the ntfs attribute
 * described by the ntfs inode @ni and return the corresponding logical cluster
 * number (lcn).
 *
 * If the @vcn is not mapped yet, the attempt is made to map the attribute
 * extent containing the @vcn and the vcn to lcn conversion is retried.
 *
 * If @write_locked is true the caller has locked the runlist for writing and
 * if false for reading.
 *
 * Since lcns must be >= 0, we use negative return codes with special meaning:
 *
 * Return code	Meaning / Description
 * ==========================================
 * LCN_HOLE	Hole / not allocated on disk.
 * LCN_ENOENT	There is no such vcn in the runlist, i.e. @vcn is out of bounds.
 * LCN_ENOMEM	Not enough memory to map runlist.
 * LCN_EIO	Critical error (runlist/file is corrupt, i/o error, etc).
 *
 * Locking: - The runlist must be locked on entry and is left locked on return.
 *	    - If @write_locked is 'false', i.e. the runlist is locked for
 *	      reading, the lock may be dropped inside the function so you
 *	      cannot rely on the runlist still being the same when this
 *	      function returns.
 */
LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,
		const bool write_locked)
{
	LCN lcn;
	unsigned long flags;
	bool is_retry = false;

	BUG_ON(!ni);
	ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, %s_locked.",
			ni->mft_no, (unsigned long long)vcn,
			write_locked ? "write" : "read");
	BUG_ON(!NInoNonResident(ni));
	BUG_ON(vcn < 0);
	if (!ni->runlist.rl) {
		read_lock_irqsave(&ni->size_lock, flags);
		if (!ni->allocated_size) {
			read_unlock_irqrestore(&ni->size_lock, flags);
			return LCN_ENOENT;
		}
		read_unlock_irqrestore(&ni->size_lock, flags);
	}
retry_remap:
	/* Convert vcn to lcn. If that fails map the runlist and retry once. */
	lcn = ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn);
	if (likely(lcn >= LCN_HOLE)) {
		ntfs_debug("Done, lcn 0x%llx.", (long long)lcn);
		return lcn;
	}
	if (lcn != LCN_RL_NOT_MAPPED) {
		if (lcn != LCN_ENOENT)
			lcn = LCN_EIO;
	} else if (!is_retry) {
		int err;

		if (!write_locked) {
			up_read(&ni->runlist.lock);
			down_write(&ni->runlist.lock);
			if (unlikely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) !=
					LCN_RL_NOT_MAPPED)) {
				up_write(&ni->runlist.lock);
				down_read(&ni->runlist.lock);
				goto retry_remap;
			}
		}
		err = ntfs_map_runlist_nolock(ni, vcn, NULL);
		if (!write_locked) {
			up_write(&ni->runlist.lock);
			down_read(&ni->runlist.lock);
		}
		if (likely(!err)) {
			is_retry = true;
			goto retry_remap;
		}
		if (err == -ENOENT)
			lcn = LCN_ENOENT;
		else if (err == -ENOMEM)
			lcn = LCN_ENOMEM;
		else
			lcn = LCN_EIO;
	}
	if (lcn != LCN_ENOENT)
		ntfs_error(ni->vol->sb, "Failed with error code %lli.",
				(long long)lcn);
	return lcn;
}
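
/*
 * Illustrative sketch (an assumption, not code from this file) of how a
 * caller holding the runlist lock for reading typically interprets the
 * special return codes listed above:
 *
 *	down_read(&ni->runlist.lock);
 *	lcn = ntfs_attr_vcn_to_lcn_nolock(ni, vcn, false);
 *	up_read(&ni->runlist.lock);
 *	if (lcn >= 0)
 *		... cluster is allocated, convert lcn to a device block ...
 *	else if (lcn == LCN_HOLE)
 *		... sparse region: return zeroes on read ...
 *	else if (lcn == LCN_ENOENT)
 *		... @vcn is beyond the end of the attribute ...
 *	else
 *		... LCN_ENOMEM or LCN_EIO, treat as an error ...
 */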

/**
 * ntfs_attr_find_vcn_nolock - find a vcn in the runlist of an ntfs inode
 * @ni:		ntfs inode describing the runlist to search
 * @vcn:	vcn to find
 * @ctx:	active attribute search context if present or NULL if not
 *
 * Find the virtual cluster number @vcn in the runlist described by the ntfs
 * inode @ni and return the address of the runlist element containing the @vcn.
 *
 * If the @vcn is not mapped yet, the attempt is made to map the attribute
 * extent containing the @vcn and the vcn to lcn conversion is retried.
 *
 * If @ctx is specified, it is an active search context of @ni and its base mft
 * record. This is needed when ntfs_attr_find_vcn_nolock() encounters unmapped
 * runlist fragments and allows their mapping. If you do not have the mft
 * record mapped, you can specify @ctx as NULL and ntfs_attr_find_vcn_nolock()
 * will perform the necessary mapping and unmapping.
 *
 * Note, ntfs_attr_find_vcn_nolock() saves the state of @ctx on entry and
 * restores it before returning. Thus, @ctx will be left pointing to the same
 * attribute on return as on entry. However, the actual pointers in @ctx may
 * point to different memory locations on return, so you must remember to reset
 * any cached pointers from the @ctx, i.e. after the call to
 * ntfs_attr_find_vcn_nolock(), you will probably want to do:
 *	m = ctx->mrec;
 *	a = ctx->attr;
 * Assuming you cache ctx->attr in a variable @a of type ATTR_RECORD * and that
 * you cache ctx->mrec in a variable @m of type MFT_RECORD *.
 * Note you need to distinguish between the lcn of the returned runlist element
 * being >= 0 and LCN_HOLE. In the latter case you have to return zeroes on
 * read and allocate clusters on write.
 *
 * Return the runlist element containing the @vcn on success and
 * ERR_PTR(-errno) on error. You need to test the return value with IS_ERR()
 * to decide if the return is success or failure and PTR_ERR() to get to the
 * error code if IS_ERR() is true.
 *
 * The possible error return codes are:
 *	-ENOENT - No such vcn in the runlist, i.e. @vcn is out of bounds.
 *	-ENOMEM - Not enough memory to map runlist.
 *	-EIO	- Critical error (runlist/file is corrupt, i/o error, etc).
 *
 * WARNING: If @ctx is supplied, regardless of whether success or failure is
 *	    returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx
 *	    is no longer valid, i.e. you need to either call
 *	    ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
 *	    In that case PTR_ERR(@ctx->mrec) will give you the error code for
 *	    why the mapping of the old inode failed.
 *
 * Locking: - The runlist described by @ni must be locked for writing on entry
 *	      and is locked on return. Note the runlist may be modified when
 *	      needed runlist fragments need to be mapped.
 *	    - If @ctx is NULL, the base mft record of @ni must not be mapped on
 *	      entry and it will be left unmapped on return.
 *	    - If @ctx is not NULL, the base mft record must be mapped on entry
 *	      and it will be left mapped on return.
 */
runlist_element *ntfs_attr_find_vcn_nolock(ntfs_inode *ni, const VCN vcn,
		ntfs_attr_search_ctx *ctx)
{
	unsigned long flags;
	runlist_element *rl;
	int err = 0;
	bool is_retry = false;

	BUG_ON(!ni);
	ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, with%s ctx.",
			ni->mft_no, (unsigned long long)vcn, ctx ? "" : "out");
	BUG_ON(!NInoNonResident(ni));
	BUG_ON(vcn < 0);
	if (!ni->runlist.rl) {
		read_lock_irqsave(&ni->size_lock, flags);
		if (!ni->allocated_size) {
			read_unlock_irqrestore(&ni->size_lock, flags);
			return ERR_PTR(-ENOENT);
		}
		read_unlock_irqrestore(&ni->size_lock, flags);
	}
retry_remap:
	rl = ni->runlist.rl;
	if (likely(rl && vcn >= rl[0].vcn)) {
		while (likely(rl->length)) {
			if (unlikely(vcn < rl[1].vcn)) {
				if (likely(rl->lcn >= LCN_HOLE)) {
					ntfs_debug("Done.");
					return rl;
				}
				break;
			}
			rl++;
		}
		if (likely(rl->lcn != LCN_RL_NOT_MAPPED)) {
			if (likely(rl->lcn == LCN_ENOENT))
				err = -ENOENT;
			else
				err = -EIO;
		}
	}
	if (!err && !is_retry) {
		/*
		 * If the search context is invalid we cannot map the unmapped
		 * region.
		 */
		if (IS_ERR(ctx->mrec))
			err = PTR_ERR(ctx->mrec);
		else {
			/*
			 * The @vcn is in an unmapped region, map the runlist
			 * and retry.
			 */
			err = ntfs_map_runlist_nolock(ni, vcn, ctx);
			if (likely(!err)) {
				is_retry = true;
				goto retry_remap;
			}
		}
		if (err == -EINVAL)
			err = -EIO;
	} else if (!err)
		err = -EIO;
	if (err != -ENOENT)
		ntfs_error(ni->vol->sb, "Failed with error code %i.", err);
	return ERR_PTR(err);
}
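
/*
 * Illustrative sketch (an assumption, not code from this file): the runlist
 * element returned above must still be checked for LCN_HOLE, as the comment
 * explains, before its lcn is used.
 *
 *	rl = ntfs_attr_find_vcn_nolock(ni, vcn, ctx);
 *	if (IS_ERR(rl))
 *		return PTR_ERR(rl);
 *	if (rl->lcn >= 0)
 *		lcn = rl->lcn + (vcn - rl->vcn);	(cluster is allocated)
 *	else
 *		... rl->lcn == LCN_HOLE: zeroes on read, allocate on write ...
 */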

/**
 * ntfs_attr_find - find (next) attribute in mft record
 * @type:	attribute type to find
 * @name:	attribute name to find (optional, i.e. NULL means don't care)
 * @name_len:	attribute name length (only needed if @name present)
 * @ic:		IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
 * @val:	attribute value to find (optional, resident attributes only)
 * @val_len:	attribute value length
 * @ctx:	search context with mft record and attribute to search from
 *
 * You should not need to call this function directly. Use ntfs_attr_lookup()
 * instead.
 *
 * ntfs_attr_find() takes a search context @ctx as parameter and searches the
 * mft record specified by @ctx->mrec, beginning at @ctx->attr, for an
 * attribute of @type, optionally @name and @val.
 *
 * If the attribute is found, ntfs_attr_find() returns 0 and @ctx->attr will
 * point to the found attribute.
 *
 * If the attribute is not found, ntfs_attr_find() returns -ENOENT and
 * @ctx->attr will point to the attribute before which the attribute being
 * searched for would need to be inserted if such an action were to be desired.
 *
 * On actual error, ntfs_attr_find() returns -EIO. In this case @ctx->attr is
 * undefined and in particular do not rely on it not changing.
 *
 * If @ctx->is_first is 'true', the search begins with @ctx->attr itself. If it
 * is 'false', the search begins after @ctx->attr.
 *
 * If @ic is IGNORE_CASE, the @name comparison is not case sensitive and
 * @ctx->ntfs_ino must be set to the ntfs inode to which the mft record
 * @ctx->mrec belongs. This is so we can get at the ntfs volume and hence at
 * the upcase table. If @ic is CASE_SENSITIVE, the comparison is case
 * sensitive. When @name is present, @name_len is the @name length in Unicode
 * characters.
 *
 * If @name is not present (NULL), we assume that the unnamed attribute is
 * being searched for.
 *
 * Finally, the resident attribute value @val is looked for, if present. If
 * @val is not present (NULL), @val_len is ignored.
 *
 * ntfs_attr_find() only searches the specified mft record and it ignores the
 * presence of an attribute list attribute (unless it is the one being searched
 * for, obviously). If you need to take attribute lists into consideration,
 * use ntfs_attr_lookup() instead (see below). This also means that you cannot
 * use ntfs_attr_find() to search for extent records of non-resident
 * attributes, as extents with lowest_vcn != 0 are usually described by the
 * attribute list attribute only. - Note that it is possible that the first
 * extent is only in the attribute list while the last extent is in the base
 * mft record, so do not rely on being able to find the first extent in the
 * base mft record.
 *
 * Warning: Never use @val when looking for attribute types which can be
 *	    non-resident as this most likely will result in a crash!
 */
static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
		const u32 name_len, const IGNORE_CASE_BOOL ic,
		const u8 *val, const u32 val_len, ntfs_attr_search_ctx *ctx)
{
	ATTR_RECORD *a;
	ntfs_volume *vol = ctx->ntfs_ino->vol;
	ntfschar *upcase = vol->upcase;
	u32 upcase_len = vol->upcase_len;

	/*
	 * Iterate over attributes in the mft record, starting at @ctx->attr
	 * if @ctx->is_first is 'true', or at the attribute following it
	 * otherwise.
	 */
	if (ctx->is_first) {
		a = ctx->attr;
		ctx->is_first = false;
	} else
		a = (ATTR_RECORD*)((u8*)ctx->attr +
				le32_to_cpu(ctx->attr->length));
	for (;; a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
		u8 *mrec_end = (u8 *)ctx->mrec +
				le32_to_cpu(ctx->mrec->bytes_allocated);
		u8 *name_end;

		/* check whether the ATTR_RECORD wraps */
		if ((u8 *)a < (u8 *)ctx->mrec)
			break;

		/* check whether Attribute Record Header is within bounds */
		if ((u8 *)a > mrec_end ||
				(u8 *)a + sizeof(ATTR_RECORD) > mrec_end)
			break;

		/* check whether ATTR_RECORD's name is within bounds */
		name_end = (u8 *)a + le16_to_cpu(a->name_offset) +
				a->name_length * sizeof(ntfschar);
		if (name_end > mrec_end)
			break;

		ctx->attr = a;
		if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
				a->type == AT_END))
			return -ENOENT;
		if (unlikely(!a->length))
			break;

		/* check whether ATTR_RECORD's length wraps */
		if ((u8 *)a + le32_to_cpu(a->length) < (u8 *)a)
			break;
		/* check whether ATTR_RECORD's length is within bounds */
		if ((u8 *)a + le32_to_cpu(a->length) > mrec_end)
			break;

		if (a->type != type)
			continue;
		/*
		 * If @name is present, compare the two names. If @name is
		 * missing, assume we want an unnamed attribute.
		 */
		if (!name) {
			/* The search failed if the found attribute is named. */
			if (a->name_length)
				return -ENOENT;
		} else if (!ntfs_are_names_equal(name, name_len,
				(ntfschar*)((u8*)a + le16_to_cpu(a->name_offset)),
				a->name_length, ic, upcase, upcase_len)) {
			register int rc;

			rc = ntfs_collate_names(name, name_len,
					(ntfschar*)((u8*)a +
					le16_to_cpu(a->name_offset)),
					a->name_length, 1, IGNORE_CASE,
					upcase, upcase_len);
			/*
			 * If @name collates before a->name, there is no
			 * matching attribute.
			 */
			if (rc == -1)
				return -ENOENT;
			/* If the strings are not equal, continue search. */
			if (rc)
				continue;
			rc = ntfs_collate_names(name, name_len,
					(ntfschar*)((u8*)a +
					le16_to_cpu(a->name_offset)),
					a->name_length, 1, CASE_SENSITIVE,
					upcase, upcase_len);
			if (rc == -1)
				return -ENOENT;
			if (rc)
				continue;
		}
		/*
		 * The names match or @name not present and attribute is
		 * unnamed. If no @val specified, we have found the attribute
		 * and are done.
		 */
		if (!val)
			return 0;
		/* @val is present; compare values. */
		else {
			register int rc;

			rc = memcmp(val, (u8*)a + le16_to_cpu(
					a->data.resident.value_offset),
					min_t(u32, val_len, le32_to_cpu(
					a->data.resident.value_length)));
			/*
			 * If @val collates before the current attribute's
			 * value, there is no matching attribute.
			 */
			if (!rc) {
				register u32 avl;

				avl = le32_to_cpu(
						a->data.resident.value_length);
				if (val_len == avl)
					return 0;
				if (val_len < avl)
					return -ENOENT;
			} else if (rc < 0)
				return -ENOENT;
		}
	}
	ntfs_error(vol->sb, "Inode is corrupt. Run chkdsk.");
	NVolSetErrors(vol);
	return -EIO;
}
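
/*
 * Illustrative sketch (an assumption, not code from this file) of the lookup
 * interface the comment above points to: finding the unnamed $DATA attribute
 * through ntfs_attr_lookup() rather than calling ntfs_attr_find() directly.
 *
 *	ctx = ntfs_attr_get_search_ctx(ni, m);
 *	if (!ctx)
 *		return -ENOMEM;
 *	err = ntfs_attr_lookup(AT_DATA, NULL, 0, CASE_SENSITIVE, 0, NULL, 0,
 *			ctx);
 *	if (!err)
 *		a = ctx->attr;		(the found attribute record)
 *	ntfs_attr_put_search_ctx(ctx);
 */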

/**
 * load_attribute_list - load an attribute list into memory
 * @vol:		ntfs volume from which to read
 * @runlist:		runlist of the attribute list
 * @al_start:		destination buffer
 * @size:		size of the destination buffer in bytes
 * @initialized_size:	initialized size of the attribute list
 *
 * Walk the runlist @runlist and load all clusters from it copying them into
 * the linear buffer @al. The maximum number of bytes copied to @al is @size
 * bytes. Note, @size does not need to be a multiple of the cluster size. If
 * @initialized_size is less than @size, the region in @al between
 * @initialized_size and @size will be zeroed and not read from disk.
 *
 * Return 0 on success or -errno on error.
 */
int load_attribute_list(ntfs_volume *vol, runlist *runlist, u8 *al_start,
		const s64 size, const s64 initialized_size)
{
	LCN lcn;
	u8 *al = al_start;
	u8 *al_end = al + initialized_size;
	runlist_element *rl;
	struct buffer_head *bh;
	struct super_block *sb;
	unsigned long block_size;
	unsigned long block, max_block;
	int err = 0;
	unsigned char block_size_bits;

	ntfs_debug("Entering.");
	if (!vol || !runlist || !al || size <= 0 || initialized_size < 0 ||
			initialized_size > size)
		return -EINVAL;
	if (!initialized_size) {
		memset(al, 0, size);
		return 0;
	}
	sb = vol->sb;
	block_size = sb->s_blocksize;
	block_size_bits = sb->s_blocksize_bits;
	down_read(&runlist->lock);
	rl = runlist->rl;
	if (!rl) {
		ntfs_error(sb, "Cannot read attribute list since runlist is "
				"missing.");
		goto err_out;
	}
	/* Read all clusters specified by the runlist one run at a time. */
	while (rl->length) {
		lcn = ntfs_rl_vcn_to_lcn(rl, rl->vcn);
		ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
				(unsigned long long)rl->vcn,
				(unsigned long long)lcn);
		/* The attribute list cannot be sparse. */
		if (lcn < 0) {
			ntfs_error(sb, "ntfs_rl_vcn_to_lcn() failed. Cannot "
					"read attribute list.");
			goto err_out;
		}
		block = lcn << vol->cluster_size_bits >> block_size_bits;
		/* Read the run from device in chunks of block_size bytes. */
		max_block = block + (rl->length << vol->cluster_size_bits >>
				block_size_bits);
		ntfs_debug("max_block = 0x%lx.", max_block);
		do {
			ntfs_debug("Reading block = 0x%lx.", block);
			bh = sb_bread(sb, block);
			if (!bh) {
				ntfs_error(sb, "sb_bread() failed. Cannot "
						"read attribute list.");
				goto err_out;
			}
			if (al + block_size >= al_end)
				goto do_final;
			memcpy(al, bh->b_data, block_size);
			brelse(bh);
			al += block_size;
		} while (++block < max_block);
		rl++;
	}
	if (initialized_size < size) {
initialize:
		memset(al_start + initialized_size, 0, size - initialized_size);
	}
done:
	up_read(&runlist->lock);
	return err;
do_final:
	if (al < al_end) {
		/*
		 * Partial block.
		 *
		 * Note: The attribute list can be smaller than its allocation
		 * by multiple clusters. This has been encountered by at least
		 * two people running Windows XP, thus we cannot do any
		 * truncation sanity checking here. (AIA)
		 */
		memcpy(al, bh->b_data, al_end - al);
		brelse(bh);
		if (initialized_size < size)
			goto initialize;
		goto done;
	}
	brelse(bh);
	/* Real overflow! */
	ntfs_error(sb, "Attribute list buffer overflow. Read attribute list "
			"is truncated.");
err_out:
	err = -EIO;
	goto done;
}
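
/*
 * Illustrative sketch (an assumption, not code from this file): loading an
 * inode's attribute list into a freshly allocated buffer. The size values
 * would come from the $ATTRIBUTE_LIST attribute record; the variable names
 * are hypothetical.
 *
 *	al = ntfs_malloc_nofs(al_size);
 *	if (!al)
 *		return -ENOMEM;
 *	err = load_attribute_list(vol, &ni->attr_list_rl, al, al_size,
 *			al_initialized_size);
 *	if (err) {
 *		ntfs_free(al);
 *		return err;
 *	}
 */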

/**
 * ntfs_external_attr_find - find an attribute in the attribute list of an inode
 * @type:	attribute type to find
 * @name:	attribute name to find (optional, i.e. NULL means don't care)
 * @name_len:	attribute name length (only needed if @name present)
 * @ic:		IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
 * @lowest_vcn:	lowest vcn to find (optional, non-resident attributes only)
 * @val:	attribute value to find (optional, resident attributes only)
 * @val_len:	attribute value length
 * @ctx:	search context with mft record and attribute to search from
 *
 * You should not need to call this function directly. Use ntfs_attr_lookup()
 * instead.
 *
 * Find an attribute by searching the attribute list for the corresponding
 * attribute list entry. Having found the entry, map the mft record if the
 * attribute is in a different mft record/inode, ntfs_attr_find() the attribute
 * in there and return it.
 *
 * On first search @ctx->ntfs_ino must be the base mft record and @ctx must
 * have been obtained from a call to ntfs_attr_get_search_ctx(). On subsequent
 * calls @ctx->ntfs_ino can be any extent inode, too (@ctx->base_ntfs_ino is
 * then the base inode).
 *
 * After finishing with the attribute/mft record you need to call
 * ntfs_attr_put_search_ctx() to cleanup the search context (unmapping any
 * mapped inodes, etc).
 *
 * If the attribute is found, ntfs_external_attr_find() returns 0 and
 * @ctx->attr will point to the found attribute. @ctx->mrec will point to the
 * mft record in which @ctx->attr is located and @ctx->al_entry will point to
 * the attribute list entry for the attribute.
 *
 * If the attribute is not found, ntfs_external_attr_find() returns -ENOENT and
 * @ctx->attr will point to the attribute in the base mft record before which
 * the attribute being searched for would need to be inserted if such an action
 * were to be desired. @ctx->mrec will point to the mft record in which
 * @ctx->attr is located and @ctx->al_entry will point to the attribute list
 * entry of the attribute before which the attribute being searched for would
 * need to be inserted if such an action were to be desired.
 *
 * Thus to insert the not found attribute, one wants to add the attribute to
 * @ctx->mrec (the base mft record) and if there is not enough space, the
 * attribute should be placed in a newly allocated extent mft record. The
 * attribute list entry for the inserted attribute should be inserted in the
 * attribute list attribute at @ctx->al_entry.
 *
 * On actual error, ntfs_external_attr_find() returns -EIO. In this case
 * @ctx->attr is undefined and in particular do not rely on it not changing.
 */
static int ntfs_external_attr_find(const ATTR_TYPE type,
		const ntfschar *name, const u32 name_len,
		const IGNORE_CASE_BOOL ic, const VCN lowest_vcn,
		const u8 *val, const u32 val_len, ntfs_attr_search_ctx *ctx)
{
	ntfs_inode *base_ni, *ni;
	ntfs_volume *vol;
	ATTR_LIST_ENTRY *al_entry, *next_al_entry;
	u8 *al_start, *al_end;
	ATTR_RECORD *a;
	ntfschar *al_name;
	u32 al_name_len;
	int err = 0;
	static const char *es = " Unmount and run chkdsk.";

	ni = ctx->ntfs_ino;
	base_ni = ctx->base_ntfs_ino;
	ntfs_debug("Entering for inode 0x%lx, type 0x%x.", ni->mft_no, type);
	if (!base_ni) {
		/* First call happens with the base mft record. */
		base_ni = ctx->base_ntfs_ino = ctx->ntfs_ino;
		ctx->base_mrec = ctx->mrec;
	}
	if (ni == base_ni)
		ctx->base_attr = ctx->attr;
	if (type == AT_END)
		goto not_found;
	vol = base_ni->vol;
	al_start = base_ni->attr_list;
	al_end = al_start + base_ni->attr_list_size;
	if (!ctx->al_entry)
		ctx->al_entry = (ATTR_LIST_ENTRY*)al_start;
	/*
	 * Iterate over entries in the attribute list, starting at
	 * @ctx->al_entry if @ctx->is_first is 'true', or at the entry
	 * following it otherwise.
	 */
	if (ctx->is_first) {
		al_entry = ctx->al_entry;
		ctx->is_first = false;
	} else
		al_entry = (ATTR_LIST_ENTRY*)((u8*)ctx->al_entry +
				le16_to_cpu(ctx->al_entry->length));
	for (;; al_entry = next_al_entry) {
		/* Out of bounds check. */
		if ((u8*)al_entry < base_ni->attr_list ||
				(u8*)al_entry > al_end)
			break;	/* Inode is corrupt. */
		ctx->al_entry = al_entry;
		/* Catch the end of the attribute list. */
		if ((u8*)al_entry == al_end)
			goto not_found;
		if (!al_entry->length)
			break;
		if ((u8*)al_entry + 6 > al_end || (u8*)al_entry +
				le16_to_cpu(al_entry->length) > al_end)
			break;
		next_al_entry = (ATTR_LIST_ENTRY*)((u8*)al_entry +
				le16_to_cpu(al_entry->length));
		if (le32_to_cpu(al_entry->type) > le32_to_cpu(type))
			goto not_found;
		if (type != al_entry->type)
			continue;
		/*
		 * If @name is present, compare the two names. If @name is
		 * missing, assume we want an unnamed attribute.
		 */
		al_name_len = al_entry->name_length;
		al_name = (ntfschar*)((u8*)al_entry + al_entry->name_offset);
		if (!name) {
			if (al_name_len)
				goto not_found;
		} else if (!ntfs_are_names_equal(al_name, al_name_len, name,
				name_len, ic, vol->upcase, vol->upcase_len)) {
			register int rc;

			rc = ntfs_collate_names(name, name_len, al_name,
					al_name_len, 1, IGNORE_CASE,
					vol->upcase, vol->upcase_len);
			/*
			 * If @name collates before al_name, there is no
			 * matching attribute.
			 */
			if (rc == -1)
				goto not_found;
			/* If the strings are not equal, continue search. */
			if (rc)
				continue;
			/*
			 * FIXME: Reverse engineering showed 0, IGNORE_CASE but
			 * that is inconsistent with ntfs_attr_find(). The
			 * subsequent rc checks were also different. Perhaps I
			 * made a mistake in one of the two. Need to recheck
			 * which is correct or at least see what is going on...
			 * (AIA)
			 */
			rc = ntfs_collate_names(name, name_len, al_name,
					al_name_len, 1, CASE_SENSITIVE,
					vol->upcase, vol->upcase_len);
			if (rc == -1)
				goto not_found;
			if (rc)
				continue;
		}
		/*
		 * The names match or @name not present and attribute is
		 * unnamed. Now check @lowest_vcn. Continue search if the
		 * next attribute list entry still fits @lowest_vcn. Otherwise
		 * we have reached the right one or the search has failed.
		 */
		if (lowest_vcn && (u8*)next_al_entry >= al_start &&
				(u8*)next_al_entry + 6 < al_end &&
				(u8*)next_al_entry + le16_to_cpu(
					next_al_entry->length) <= al_end &&
				sle64_to_cpu(next_al_entry->lowest_vcn) <=
					lowest_vcn &&
				next_al_entry->type == al_entry->type &&
				next_al_entry->name_length == al_name_len &&
				ntfs_are_names_equal((ntfschar*)((u8*)
					next_al_entry +
					next_al_entry->name_offset),
					next_al_entry->name_length,
					al_name, al_name_len, CASE_SENSITIVE,
					vol->upcase, vol->upcase_len))
			continue;
		if (MREF_LE(al_entry->mft_reference) == ni->mft_no) {
			if (MSEQNO_LE(al_entry->mft_reference) != ni->seq_no) {
				ntfs_error(vol->sb, "Found stale mft "
						"reference in attribute list "
						"of base inode 0x%lx.%s",
						base_ni->mft_no, es);
				err = -EIO;
				break;
			}
		} else { /* Mft references do not match. */
			/* If there is a mapped record unmap it first. */
			if (ni != base_ni)
				unmap_extent_mft_record(ni);
			/* Do we want the base record back? */
			if (MREF_LE(al_entry->mft_reference) ==
					base_ni->mft_no) {
				ni = ctx->ntfs_ino = base_ni;
				ctx->mrec = ctx->base_mrec;
			} else {
				/* We want an extent record. */
				ctx->mrec = map_extent_mft_record(base_ni,
						le64_to_cpu(
						al_entry->mft_reference), &ni);
				if (IS_ERR(ctx->mrec)) {
					ntfs_error(vol->sb, "Failed to map "
							"extent mft record "
							"0x%lx of base inode "
							"0x%lx.%s",
							MREF_LE(al_entry->
							mft_reference),
							base_ni->mft_no, es);
					err = PTR_ERR(ctx->mrec);
					if (err == -ENOENT)
						err = -EIO;
					/* Cause @ctx to be sanitized below. */
					ni = NULL;
					break;
				}
				ctx->ntfs_ino = ni;
			}
			ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
					le16_to_cpu(ctx->mrec->attrs_offset));
		}
		/*
		 * ctx->vfs_ino, ctx->mrec, and ctx->attr now point to the
		 * mft record containing the attribute represented by the
		 * current al_entry.
		 */
		/*
		 * We could call into ntfs_attr_find() to find the right
		 * attribute in this mft record but this would be less
		 * efficient and not quite accurate as ntfs_attr_find() ignores
		 * the attribute instance numbers for example which become
		 * important when one plays with attribute lists. Also,
		 * because a proper match has been found in the attribute list
		 * entry above, the comparison can now be optimized. So it is
		 * worth re-implementing a simplified ntfs_attr_find() here.
		 */
		a = ctx->attr;
		/*
		 * Use a manual loop so we can still use break and continue
		 * with the same meanings as above.
		 */
do_next_attr_loop:
		if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
				le32_to_cpu(ctx->mrec->bytes_allocated))
			break;
		if (a->type == AT_END)
			break;
		if (!a->length)
			break;
		if (al_entry->instance != a->instance)
			goto do_next_attr;
		/*
		 * If the type and/or the name are mismatched between the
		 * attribute list entry and the attribute record, there is
		 * corruption so we break and return error EIO.
		 */
		if (al_entry->type != a->type)
			break;
		if (!ntfs_are_names_equal((ntfschar*)((u8*)a +
				le16_to_cpu(a->name_offset)), a->name_length,
				al_name, al_name_len, CASE_SENSITIVE,
				vol->upcase, vol->upcase_len))
			break;
		ctx->attr = a;
		/*
		 * If no @val specified or @val specified and it matches, we
		 * have found it!
		 */
		if (!val || (!a->non_resident && le32_to_cpu(
				a->data.resident.value_length) == val_len &&
				!memcmp((u8*)a +
				le16_to_cpu(a->data.resident.value_offset),
				val, val_len))) {
			ntfs_debug("Done, found.");
			return 0;
		}
do_next_attr:
		/* Proceed to the next attribute in the current mft record. */
		a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length));
		goto do_next_attr_loop;
	}
	if (!err) {
		ntfs_error(vol->sb, "Base inode 0x%lx contains corrupt "
				"attribute list attribute.%s", base_ni->mft_no,
				es);
		err = -EIO;
	}
	if (ni != base_ni) {
		if (ni)
			unmap_extent_mft_record(ni);
		ctx->ntfs_ino = base_ni;
		ctx->mrec = ctx->base_mrec;
		ctx->attr = ctx->base_attr;
	}
	if (err != -ENOMEM)
		NVolSetErrors(vol);
	return err;
not_found:
	/*
	 * If we were looking for AT_END, we reset the search context @ctx and
	 * use ntfs_attr_find() to seek to the end of the base mft record.
	 */
	if (type == AT_END) {
		ntfs_attr_reinit_search_ctx(ctx);
1117*4882a593Smuzhiyun return ntfs_attr_find(AT_END, name, name_len, ic, val, val_len,
1118*4882a593Smuzhiyun ctx);
1119*4882a593Smuzhiyun }
1120*4882a593Smuzhiyun /*
1121*4882a593Smuzhiyun * The attribute was not found. Before we return, we want to ensure
1122*4882a593Smuzhiyun * @ctx->mrec and @ctx->attr indicate the position at which the
1123*4882a593Smuzhiyun * attribute should be inserted in the base mft record. Since we also
1124*4882a593Smuzhiyun * want to preserve @ctx->al_entry we cannot reinitialize the search
1125*4882a593Smuzhiyun * context using ntfs_attr_reinit_search_ctx() as this would set
1126*4882a593Smuzhiyun * @ctx->al_entry to NULL. Thus we do the necessary bits manually (see
1127*4882a593Smuzhiyun * ntfs_attr_init_search_ctx() below). Note, we _only_ preserve
1128*4882a593Smuzhiyun * @ctx->al_entry as the remaining fields (base_*) are identical to
1129*4882a593Smuzhiyun * their non base_ counterparts and we cannot set @ctx->base_attr
1130*4882a593Smuzhiyun * correctly yet as we do not know what @ctx->attr will be set to by
1131*4882a593Smuzhiyun * the call to ntfs_attr_find() below.
1132*4882a593Smuzhiyun */
1133*4882a593Smuzhiyun if (ni != base_ni)
1134*4882a593Smuzhiyun unmap_extent_mft_record(ni);
1135*4882a593Smuzhiyun ctx->mrec = ctx->base_mrec;
1136*4882a593Smuzhiyun ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
1137*4882a593Smuzhiyun le16_to_cpu(ctx->mrec->attrs_offset));
1138*4882a593Smuzhiyun ctx->is_first = true;
1139*4882a593Smuzhiyun ctx->ntfs_ino = base_ni;
1140*4882a593Smuzhiyun ctx->base_ntfs_ino = NULL;
1141*4882a593Smuzhiyun ctx->base_mrec = NULL;
1142*4882a593Smuzhiyun ctx->base_attr = NULL;
1143*4882a593Smuzhiyun /*
1144*4882a593Smuzhiyun 	 * In case there are multiple matches in the base mft record, we need
1145*4882a593Smuzhiyun 	 * to keep enumerating until we get an attribute not found response (or
1146*4882a593Smuzhiyun * another error), otherwise we would keep returning the same attribute
1147*4882a593Smuzhiyun * over and over again and all programs using us for enumeration would
1148*4882a593Smuzhiyun * lock up in a tight loop.
1149*4882a593Smuzhiyun */
1150*4882a593Smuzhiyun do {
1151*4882a593Smuzhiyun err = ntfs_attr_find(type, name, name_len, ic, val, val_len,
1152*4882a593Smuzhiyun ctx);
1153*4882a593Smuzhiyun } while (!err);
1154*4882a593Smuzhiyun ntfs_debug("Done, not found.");
1155*4882a593Smuzhiyun return err;
1156*4882a593Smuzhiyun }
1157*4882a593Smuzhiyun
1158*4882a593Smuzhiyun /**
1159*4882a593Smuzhiyun * ntfs_attr_lookup - find an attribute in an ntfs inode
1160*4882a593Smuzhiyun * @type: attribute type to find
1161*4882a593Smuzhiyun * @name: attribute name to find (optional, i.e. NULL means don't care)
1162*4882a593Smuzhiyun * @name_len: attribute name length (only needed if @name present)
1163*4882a593Smuzhiyun * @ic: IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
1164*4882a593Smuzhiyun * @lowest_vcn: lowest vcn to find (optional, non-resident attributes only)
1165*4882a593Smuzhiyun * @val: attribute value to find (optional, resident attributes only)
1166*4882a593Smuzhiyun * @val_len: attribute value length
1167*4882a593Smuzhiyun * @ctx: search context with mft record and attribute to search from
1168*4882a593Smuzhiyun *
1169*4882a593Smuzhiyun * Find an attribute in an ntfs inode. On first search @ctx->ntfs_ino must
1170*4882a593Smuzhiyun * be the base mft record and @ctx must have been obtained from a call to
1171*4882a593Smuzhiyun * ntfs_attr_get_search_ctx().
1172*4882a593Smuzhiyun *
1173*4882a593Smuzhiyun * This function transparently handles attribute lists and @ctx is used to
1174*4882a593Smuzhiyun  * continue searches where they were left off.
1175*4882a593Smuzhiyun *
1176*4882a593Smuzhiyun * After finishing with the attribute/mft record you need to call
1177*4882a593Smuzhiyun * ntfs_attr_put_search_ctx() to cleanup the search context (unmapping any
1178*4882a593Smuzhiyun * mapped inodes, etc).
1179*4882a593Smuzhiyun *
1180*4882a593Smuzhiyun * Return 0 if the search was successful and -errno if not.
1181*4882a593Smuzhiyun *
1182*4882a593Smuzhiyun * When 0, @ctx->attr is the found attribute and it is in mft record
1183*4882a593Smuzhiyun * @ctx->mrec. If an attribute list attribute is present, @ctx->al_entry is
1184*4882a593Smuzhiyun * the attribute list entry of the found attribute.
1185*4882a593Smuzhiyun *
1186*4882a593Smuzhiyun * When -ENOENT, @ctx->attr is the attribute which collates just after the
1187*4882a593Smuzhiyun * attribute being searched for, i.e. if one wants to add the attribute to the
1188*4882a593Smuzhiyun * mft record this is the correct place to insert it into. If an attribute
1189*4882a593Smuzhiyun * list attribute is present, @ctx->al_entry is the attribute list entry which
1190*4882a593Smuzhiyun * collates just after the attribute list entry of the attribute being searched
1191*4882a593Smuzhiyun * for, i.e. if one wants to add the attribute to the mft record this is the
1192*4882a593Smuzhiyun * correct place to insert its attribute list entry into.
1193*4882a593Smuzhiyun *
1194*4882a593Smuzhiyun * When -errno != -ENOENT, an error occurred during the lookup. @ctx->attr is
1195*4882a593Smuzhiyun * then undefined and in particular you should not rely on it not changing.
1196*4882a593Smuzhiyun */
1197*4882a593Smuzhiyun int ntfs_attr_lookup(const ATTR_TYPE type, const ntfschar *name,
1198*4882a593Smuzhiyun const u32 name_len, const IGNORE_CASE_BOOL ic,
1199*4882a593Smuzhiyun const VCN lowest_vcn, const u8 *val, const u32 val_len,
1200*4882a593Smuzhiyun ntfs_attr_search_ctx *ctx)
1201*4882a593Smuzhiyun {
1202*4882a593Smuzhiyun ntfs_inode *base_ni;
1203*4882a593Smuzhiyun
1204*4882a593Smuzhiyun ntfs_debug("Entering.");
1205*4882a593Smuzhiyun BUG_ON(IS_ERR(ctx->mrec));
1206*4882a593Smuzhiyun if (ctx->base_ntfs_ino)
1207*4882a593Smuzhiyun base_ni = ctx->base_ntfs_ino;
1208*4882a593Smuzhiyun else
1209*4882a593Smuzhiyun base_ni = ctx->ntfs_ino;
1210*4882a593Smuzhiyun /* Sanity check, just for debugging really. */
1211*4882a593Smuzhiyun BUG_ON(!base_ni);
1212*4882a593Smuzhiyun if (!NInoAttrList(base_ni) || type == AT_ATTRIBUTE_LIST)
1213*4882a593Smuzhiyun return ntfs_attr_find(type, name, name_len, ic, val, val_len,
1214*4882a593Smuzhiyun ctx);
1215*4882a593Smuzhiyun return ntfs_external_attr_find(type, name, name_len, ic, lowest_vcn,
1216*4882a593Smuzhiyun val, val_len, ctx);
1217*4882a593Smuzhiyun }
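
/*
 * Editor's note: a minimal usage sketch (not part of the original source) of
 * the lookup life cycle described above.  It assumes @ni is an already loaded
 * base ntfs inode and looks up its unnamed $DATA attribute; error handling is
 * abbreviated.  The found attribute lives inside ctx->mrec and must not be
 * dereferenced after the search context has been put.
 *
 *	MFT_RECORD *m;
 *	ntfs_attr_search_ctx *ctx;
 *	ATTR_RECORD *a;
 *	int err;
 *
 *	m = map_mft_record(ni);
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	ctx = ntfs_attr_get_search_ctx(ni, m);
 *	if (!ctx) {
 *		unmap_mft_record(ni);
 *		return -ENOMEM;
 *	}
 *	err = ntfs_attr_lookup(AT_DATA, NULL, 0, CASE_SENSITIVE, 0, NULL, 0,
 *			ctx);
 *	if (!err)
 *		a = ctx->attr;
 *	ntfs_attr_put_search_ctx(ctx);
 *	unmap_mft_record(ni);
 */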
1218*4882a593Smuzhiyun
1219*4882a593Smuzhiyun /**
1220*4882a593Smuzhiyun * ntfs_attr_init_search_ctx - initialize an attribute search context
1221*4882a593Smuzhiyun * @ctx: attribute search context to initialize
1222*4882a593Smuzhiyun * @ni: ntfs inode with which to initialize the search context
1223*4882a593Smuzhiyun * @mrec: mft record with which to initialize the search context
1224*4882a593Smuzhiyun *
1225*4882a593Smuzhiyun * Initialize the attribute search context @ctx with @ni and @mrec.
1226*4882a593Smuzhiyun */
1227*4882a593Smuzhiyun static inline void ntfs_attr_init_search_ctx(ntfs_attr_search_ctx *ctx,
1228*4882a593Smuzhiyun ntfs_inode *ni, MFT_RECORD *mrec)
1229*4882a593Smuzhiyun {
1230*4882a593Smuzhiyun *ctx = (ntfs_attr_search_ctx) {
1231*4882a593Smuzhiyun .mrec = mrec,
1232*4882a593Smuzhiyun /* Sanity checks are performed elsewhere. */
1233*4882a593Smuzhiyun .attr = (ATTR_RECORD*)((u8*)mrec +
1234*4882a593Smuzhiyun le16_to_cpu(mrec->attrs_offset)),
1235*4882a593Smuzhiyun .is_first = true,
1236*4882a593Smuzhiyun .ntfs_ino = ni,
1237*4882a593Smuzhiyun };
1238*4882a593Smuzhiyun }
1239*4882a593Smuzhiyun
1240*4882a593Smuzhiyun /**
1241*4882a593Smuzhiyun * ntfs_attr_reinit_search_ctx - reinitialize an attribute search context
1242*4882a593Smuzhiyun * @ctx: attribute search context to reinitialize
1243*4882a593Smuzhiyun *
1244*4882a593Smuzhiyun * Reinitialize the attribute search context @ctx, unmapping an associated
1245*4882a593Smuzhiyun * extent mft record if present, and initialize the search context again.
1246*4882a593Smuzhiyun *
1247*4882a593Smuzhiyun * This is used when a search for a new attribute is being started to reset
1248*4882a593Smuzhiyun * the search context to the beginning.
1249*4882a593Smuzhiyun */
1250*4882a593Smuzhiyun void ntfs_attr_reinit_search_ctx(ntfs_attr_search_ctx *ctx)
1251*4882a593Smuzhiyun {
1252*4882a593Smuzhiyun if (likely(!ctx->base_ntfs_ino)) {
1253*4882a593Smuzhiyun /* No attribute list. */
1254*4882a593Smuzhiyun ctx->is_first = true;
1255*4882a593Smuzhiyun /* Sanity checks are performed elsewhere. */
1256*4882a593Smuzhiyun ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
1257*4882a593Smuzhiyun le16_to_cpu(ctx->mrec->attrs_offset));
1258*4882a593Smuzhiyun /*
1259*4882a593Smuzhiyun * This needs resetting due to ntfs_external_attr_find() which
1260*4882a593Smuzhiyun * can leave it set despite having zeroed ctx->base_ntfs_ino.
1261*4882a593Smuzhiyun */
1262*4882a593Smuzhiyun ctx->al_entry = NULL;
1263*4882a593Smuzhiyun return;
1264*4882a593Smuzhiyun } /* Attribute list. */
1265*4882a593Smuzhiyun if (ctx->ntfs_ino != ctx->base_ntfs_ino)
1266*4882a593Smuzhiyun unmap_extent_mft_record(ctx->ntfs_ino);
1267*4882a593Smuzhiyun ntfs_attr_init_search_ctx(ctx, ctx->base_ntfs_ino, ctx->base_mrec);
1268*4882a593Smuzhiyun return;
1269*4882a593Smuzhiyun }
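
/*
 * Editor's note: a sketch (not from the original source) of reusing one
 * search context for two unrelated lookups.  The second lookup must start
 * from the beginning of the base mft record again, hence the reinit.  @ctx
 * is assumed to have been obtained as in the sketch following
 * ntfs_attr_lookup() above.
 *
 *	err = ntfs_attr_lookup(AT_STANDARD_INFORMATION, NULL, 0,
 *			CASE_SENSITIVE, 0, NULL, 0, ctx);
 *	if (err)
 *		goto out;
 *	... use ctx->attr ...
 *	ntfs_attr_reinit_search_ctx(ctx);
 *	err = ntfs_attr_lookup(AT_FILE_NAME, NULL, 0, CASE_SENSITIVE, 0,
 *			NULL, 0, ctx);
 */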
1270*4882a593Smuzhiyun
1271*4882a593Smuzhiyun /**
1272*4882a593Smuzhiyun * ntfs_attr_get_search_ctx - allocate/initialize a new attribute search context
1273*4882a593Smuzhiyun * @ni: ntfs inode with which to initialize the search context
1274*4882a593Smuzhiyun * @mrec: mft record with which to initialize the search context
1275*4882a593Smuzhiyun *
1276*4882a593Smuzhiyun * Allocate a new attribute search context, initialize it with @ni and @mrec,
1277*4882a593Smuzhiyun * and return it. Return NULL if allocation failed.
1278*4882a593Smuzhiyun */
1279*4882a593Smuzhiyun ntfs_attr_search_ctx *ntfs_attr_get_search_ctx(ntfs_inode *ni, MFT_RECORD *mrec)
1280*4882a593Smuzhiyun {
1281*4882a593Smuzhiyun ntfs_attr_search_ctx *ctx;
1282*4882a593Smuzhiyun
1283*4882a593Smuzhiyun ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, GFP_NOFS);
1284*4882a593Smuzhiyun if (ctx)
1285*4882a593Smuzhiyun ntfs_attr_init_search_ctx(ctx, ni, mrec);
1286*4882a593Smuzhiyun return ctx;
1287*4882a593Smuzhiyun }
1288*4882a593Smuzhiyun
1289*4882a593Smuzhiyun /**
1290*4882a593Smuzhiyun * ntfs_attr_put_search_ctx - release an attribute search context
1291*4882a593Smuzhiyun * @ctx: attribute search context to free
1292*4882a593Smuzhiyun *
1293*4882a593Smuzhiyun * Release the attribute search context @ctx, unmapping an associated extent
1294*4882a593Smuzhiyun * mft record if present.
1295*4882a593Smuzhiyun */
1296*4882a593Smuzhiyun void ntfs_attr_put_search_ctx(ntfs_attr_search_ctx *ctx)
1297*4882a593Smuzhiyun {
1298*4882a593Smuzhiyun if (ctx->base_ntfs_ino && ctx->ntfs_ino != ctx->base_ntfs_ino)
1299*4882a593Smuzhiyun unmap_extent_mft_record(ctx->ntfs_ino);
1300*4882a593Smuzhiyun kmem_cache_free(ntfs_attr_ctx_cache, ctx);
1301*4882a593Smuzhiyun return;
1302*4882a593Smuzhiyun }
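
/*
 * Editor's note: a sketch (not from the original source) of the enumeration
 * pattern the lookup code above is designed for: keep calling
 * ntfs_attr_lookup() with the same parameters until it returns -ENOENT; each
 * successful call leaves the next matching attribute in ctx->attr.  Any
 * error other than -ENOENT is a real failure.
 *
 *	do {
 *		err = ntfs_attr_lookup(AT_DATA, NULL, 0, CASE_SENSITIVE, 0,
 *				NULL, 0, ctx);
 *		if (err)
 *			break;
 *		... process ctx->attr ...
 *	} while (1);
 *	if (err != -ENOENT)
 *		goto err_out;
 *	ntfs_attr_put_search_ctx(ctx);
 */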
1303*4882a593Smuzhiyun
1304*4882a593Smuzhiyun #ifdef NTFS_RW
1305*4882a593Smuzhiyun
1306*4882a593Smuzhiyun /**
1307*4882a593Smuzhiyun * ntfs_attr_find_in_attrdef - find an attribute in the $AttrDef system file
1308*4882a593Smuzhiyun * @vol: ntfs volume to which the attribute belongs
1309*4882a593Smuzhiyun * @type: attribute type which to find
1310*4882a593Smuzhiyun *
1311*4882a593Smuzhiyun * Search for the attribute definition record corresponding to the attribute
1312*4882a593Smuzhiyun * @type in the $AttrDef system file.
1313*4882a593Smuzhiyun *
1314*4882a593Smuzhiyun * Return the attribute type definition record if found and NULL if not found.
1315*4882a593Smuzhiyun */
1316*4882a593Smuzhiyun static ATTR_DEF *ntfs_attr_find_in_attrdef(const ntfs_volume *vol,
1317*4882a593Smuzhiyun const ATTR_TYPE type)
1318*4882a593Smuzhiyun {
1319*4882a593Smuzhiyun ATTR_DEF *ad;
1320*4882a593Smuzhiyun
1321*4882a593Smuzhiyun BUG_ON(!vol->attrdef);
1322*4882a593Smuzhiyun BUG_ON(!type);
1323*4882a593Smuzhiyun for (ad = vol->attrdef; (u8*)ad - (u8*)vol->attrdef <
1324*4882a593Smuzhiyun vol->attrdef_size && ad->type; ++ad) {
1325*4882a593Smuzhiyun /* We have not found it yet, carry on searching. */
1326*4882a593Smuzhiyun if (likely(le32_to_cpu(ad->type) < le32_to_cpu(type)))
1327*4882a593Smuzhiyun continue;
1328*4882a593Smuzhiyun /* We found the attribute; return it. */
1329*4882a593Smuzhiyun if (likely(ad->type == type))
1330*4882a593Smuzhiyun return ad;
1331*4882a593Smuzhiyun /* We have gone too far already. No point in continuing. */
1332*4882a593Smuzhiyun break;
1333*4882a593Smuzhiyun }
1334*4882a593Smuzhiyun /* Attribute not found. */
1335*4882a593Smuzhiyun ntfs_debug("Attribute type 0x%x not found in $AttrDef.",
1336*4882a593Smuzhiyun le32_to_cpu(type));
1337*4882a593Smuzhiyun return NULL;
1338*4882a593Smuzhiyun }
1339*4882a593Smuzhiyun
1340*4882a593Smuzhiyun /**
1341*4882a593Smuzhiyun * ntfs_attr_size_bounds_check - check a size of an attribute type for validity
1342*4882a593Smuzhiyun * @vol: ntfs volume to which the attribute belongs
1343*4882a593Smuzhiyun * @type: attribute type which to check
1344*4882a593Smuzhiyun * @size: size which to check
1345*4882a593Smuzhiyun *
1346*4882a593Smuzhiyun * Check whether the @size in bytes is valid for an attribute of @type on the
1347*4882a593Smuzhiyun * ntfs volume @vol. This information is obtained from $AttrDef system file.
1348*4882a593Smuzhiyun *
1349*4882a593Smuzhiyun * Return 0 if valid, -ERANGE if not valid, or -ENOENT if the attribute is not
1350*4882a593Smuzhiyun * listed in $AttrDef.
1351*4882a593Smuzhiyun */
1352*4882a593Smuzhiyun int ntfs_attr_size_bounds_check(const ntfs_volume *vol, const ATTR_TYPE type,
1353*4882a593Smuzhiyun const s64 size)
1354*4882a593Smuzhiyun {
1355*4882a593Smuzhiyun ATTR_DEF *ad;
1356*4882a593Smuzhiyun
1357*4882a593Smuzhiyun BUG_ON(size < 0);
1358*4882a593Smuzhiyun /*
1359*4882a593Smuzhiyun * $ATTRIBUTE_LIST has a maximum size of 256kiB, but this is not
1360*4882a593Smuzhiyun * listed in $AttrDef.
1361*4882a593Smuzhiyun */
1362*4882a593Smuzhiyun if (unlikely(type == AT_ATTRIBUTE_LIST && size > 256 * 1024))
1363*4882a593Smuzhiyun return -ERANGE;
1364*4882a593Smuzhiyun /* Get the $AttrDef entry for the attribute @type. */
1365*4882a593Smuzhiyun ad = ntfs_attr_find_in_attrdef(vol, type);
1366*4882a593Smuzhiyun if (unlikely(!ad))
1367*4882a593Smuzhiyun return -ENOENT;
1368*4882a593Smuzhiyun /* Do the bounds check. */
1369*4882a593Smuzhiyun if (((sle64_to_cpu(ad->min_size) > 0) &&
1370*4882a593Smuzhiyun size < sle64_to_cpu(ad->min_size)) ||
1371*4882a593Smuzhiyun ((sle64_to_cpu(ad->max_size) > 0) && size >
1372*4882a593Smuzhiyun sle64_to_cpu(ad->max_size)))
1373*4882a593Smuzhiyun return -ERANGE;
1374*4882a593Smuzhiyun return 0;
1375*4882a593Smuzhiyun }
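
/*
 * Editor's note: a sketch (not from the original source) of how a caller may
 * validate a proposed size before resizing an attribute.  The translation of
 * the error codes into POSIX conformant ones mirrors what
 * ntfs_attr_extend_allocation() does further below.
 *
 *	err = ntfs_attr_size_bounds_check(vol, ni->type, new_size);
 *	if (err == -ERANGE)
 *		return -EFBIG;
 *	if (err)
 *		return -EIO;
 */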
1376*4882a593Smuzhiyun
1377*4882a593Smuzhiyun /**
1378*4882a593Smuzhiyun * ntfs_attr_can_be_non_resident - check if an attribute can be non-resident
1379*4882a593Smuzhiyun * @vol: ntfs volume to which the attribute belongs
1380*4882a593Smuzhiyun * @type: attribute type which to check
1381*4882a593Smuzhiyun *
1382*4882a593Smuzhiyun * Check whether the attribute of @type on the ntfs volume @vol is allowed to
1383*4882a593Smuzhiyun * be non-resident. This information is obtained from $AttrDef system file.
1384*4882a593Smuzhiyun *
1385*4882a593Smuzhiyun * Return 0 if the attribute is allowed to be non-resident, -EPERM if not, and
1386*4882a593Smuzhiyun * -ENOENT if the attribute is not listed in $AttrDef.
1387*4882a593Smuzhiyun */
1388*4882a593Smuzhiyun int ntfs_attr_can_be_non_resident(const ntfs_volume *vol, const ATTR_TYPE type)
1389*4882a593Smuzhiyun {
1390*4882a593Smuzhiyun ATTR_DEF *ad;
1391*4882a593Smuzhiyun
1392*4882a593Smuzhiyun /* Find the attribute definition record in $AttrDef. */
1393*4882a593Smuzhiyun ad = ntfs_attr_find_in_attrdef(vol, type);
1394*4882a593Smuzhiyun if (unlikely(!ad))
1395*4882a593Smuzhiyun return -ENOENT;
1396*4882a593Smuzhiyun /* Check the flags and return the result. */
1397*4882a593Smuzhiyun if (ad->flags & ATTR_DEF_RESIDENT)
1398*4882a593Smuzhiyun return -EPERM;
1399*4882a593Smuzhiyun return 0;
1400*4882a593Smuzhiyun }
1401*4882a593Smuzhiyun
1402*4882a593Smuzhiyun /**
1403*4882a593Smuzhiyun * ntfs_attr_can_be_resident - check if an attribute can be resident
1404*4882a593Smuzhiyun * @vol: ntfs volume to which the attribute belongs
1405*4882a593Smuzhiyun * @type: attribute type which to check
1406*4882a593Smuzhiyun *
1407*4882a593Smuzhiyun * Check whether the attribute of @type on the ntfs volume @vol is allowed to
1408*4882a593Smuzhiyun * be resident. This information is derived from our ntfs knowledge and may
1409*4882a593Smuzhiyun * not be completely accurate, especially when user defined attributes are
1410*4882a593Smuzhiyun * present. Basically we allow everything to be resident except for index
1411*4882a593Smuzhiyun * allocation and $EA attributes.
1412*4882a593Smuzhiyun *
1413*4882a593Smuzhiyun  * Return 0 if the attribute is allowed to be resident and -EPERM if not.
1414*4882a593Smuzhiyun *
1415*4882a593Smuzhiyun * Warning: In the system file $MFT the attribute $Bitmap must be non-resident
1416*4882a593Smuzhiyun  *	    otherwise Windows will not boot (blue screen of death)!  We cannot
1417*4882a593Smuzhiyun * check for this here as we do not know which inode's $Bitmap is
1418*4882a593Smuzhiyun * being asked about so the caller needs to special case this.
1419*4882a593Smuzhiyun */
1420*4882a593Smuzhiyun int ntfs_attr_can_be_resident(const ntfs_volume *vol, const ATTR_TYPE type)
1421*4882a593Smuzhiyun {
1422*4882a593Smuzhiyun if (type == AT_INDEX_ALLOCATION)
1423*4882a593Smuzhiyun return -EPERM;
1424*4882a593Smuzhiyun return 0;
1425*4882a593Smuzhiyun }
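
/*
 * Editor's note: a sketch (not from the original source) showing both
 * residency checks used before converting an attribute.  -EPERM means the
 * conversion is not allowed and, for the non-resident check only, -ENOENT
 * means the attribute type is not listed in $AttrDef.  @want_non_resident is
 * a hypothetical flag used purely for illustration.
 *
 *	if (want_non_resident)
 *		err = ntfs_attr_can_be_non_resident(vol, ni->type);
 *	else
 *		err = ntfs_attr_can_be_resident(vol, ni->type);
 *	if (err)
 *		return err;
 */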
1426*4882a593Smuzhiyun
1427*4882a593Smuzhiyun /**
1428*4882a593Smuzhiyun * ntfs_attr_record_resize - resize an attribute record
1429*4882a593Smuzhiyun * @m: mft record containing attribute record
1430*4882a593Smuzhiyun * @a: attribute record to resize
1431*4882a593Smuzhiyun * @new_size: new size in bytes to which to resize the attribute record @a
1432*4882a593Smuzhiyun *
1433*4882a593Smuzhiyun * Resize the attribute record @a, i.e. the resident part of the attribute, in
1434*4882a593Smuzhiyun * the mft record @m to @new_size bytes.
1435*4882a593Smuzhiyun *
1436*4882a593Smuzhiyun * Return 0 on success and -errno on error. The following error codes are
1437*4882a593Smuzhiyun * defined:
1438*4882a593Smuzhiyun * -ENOSPC - Not enough space in the mft record @m to perform the resize.
1439*4882a593Smuzhiyun *
1440*4882a593Smuzhiyun * Note: On error, no modifications have been performed whatsoever.
1441*4882a593Smuzhiyun *
1442*4882a593Smuzhiyun * Warning: If you make a record smaller without having copied all the data you
1443*4882a593Smuzhiyun * are interested in the data may be overwritten.
1444*4882a593Smuzhiyun */
1445*4882a593Smuzhiyun int ntfs_attr_record_resize(MFT_RECORD *m, ATTR_RECORD *a, u32 new_size)
1446*4882a593Smuzhiyun {
1447*4882a593Smuzhiyun ntfs_debug("Entering for new_size %u.", new_size);
1448*4882a593Smuzhiyun /* Align to 8 bytes if it is not already done. */
1449*4882a593Smuzhiyun if (new_size & 7)
1450*4882a593Smuzhiyun new_size = (new_size + 7) & ~7;
1451*4882a593Smuzhiyun /* If the actual attribute length has changed, move things around. */
1452*4882a593Smuzhiyun if (new_size != le32_to_cpu(a->length)) {
1453*4882a593Smuzhiyun u32 new_muse = le32_to_cpu(m->bytes_in_use) -
1454*4882a593Smuzhiyun le32_to_cpu(a->length) + new_size;
1455*4882a593Smuzhiyun /* Not enough space in this mft record. */
1456*4882a593Smuzhiyun if (new_muse > le32_to_cpu(m->bytes_allocated))
1457*4882a593Smuzhiyun return -ENOSPC;
1458*4882a593Smuzhiyun /* Move attributes following @a to their new location. */
1459*4882a593Smuzhiyun memmove((u8*)a + new_size, (u8*)a + le32_to_cpu(a->length),
1460*4882a593Smuzhiyun le32_to_cpu(m->bytes_in_use) - ((u8*)a -
1461*4882a593Smuzhiyun (u8*)m) - le32_to_cpu(a->length));
1462*4882a593Smuzhiyun /* Adjust @m to reflect the change in used space. */
1463*4882a593Smuzhiyun m->bytes_in_use = cpu_to_le32(new_muse);
1464*4882a593Smuzhiyun /* Adjust @a to reflect the new size. */
1465*4882a593Smuzhiyun if (new_size >= offsetof(ATTR_REC, length) + sizeof(a->length))
1466*4882a593Smuzhiyun a->length = cpu_to_le32(new_size);
1467*4882a593Smuzhiyun }
1468*4882a593Smuzhiyun return 0;
1469*4882a593Smuzhiyun }
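
/*
 * Editor's note: a sketch (not from the original source) of growing an
 * attribute record in place by an illustrative 0x40 bytes.  @m and @a are
 * assumed to come from a previous successful lookup.  The requested size is
 * rounded up to an 8-byte multiple by ntfs_attr_record_resize() itself.  On
 * -ENOSPC the caller has to make room in the mft record or move the
 * attribute to an extent mft record before retrying.
 *
 *	err = ntfs_attr_record_resize(m, a, le32_to_cpu(a->length) + 0x40);
 *	if (err == -ENOSPC)
 *		return err;
 */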
1470*4882a593Smuzhiyun
1471*4882a593Smuzhiyun /**
1472*4882a593Smuzhiyun * ntfs_resident_attr_value_resize - resize the value of a resident attribute
1473*4882a593Smuzhiyun * @m: mft record containing attribute record
1474*4882a593Smuzhiyun * @a: attribute record whose value to resize
1475*4882a593Smuzhiyun * @new_size: new size in bytes to which to resize the attribute value of @a
1476*4882a593Smuzhiyun *
1477*4882a593Smuzhiyun * Resize the value of the attribute @a in the mft record @m to @new_size bytes.
1478*4882a593Smuzhiyun * If the value is made bigger, the newly allocated space is cleared.
1479*4882a593Smuzhiyun *
1480*4882a593Smuzhiyun * Return 0 on success and -errno on error. The following error codes are
1481*4882a593Smuzhiyun * defined:
1482*4882a593Smuzhiyun * -ENOSPC - Not enough space in the mft record @m to perform the resize.
1483*4882a593Smuzhiyun *
1484*4882a593Smuzhiyun * Note: On error, no modifications have been performed whatsoever.
1485*4882a593Smuzhiyun *
1486*4882a593Smuzhiyun * Warning: If you make a record smaller without having copied all the data you
1487*4882a593Smuzhiyun * are interested in the data may be overwritten.
1488*4882a593Smuzhiyun */
1489*4882a593Smuzhiyun int ntfs_resident_attr_value_resize(MFT_RECORD *m, ATTR_RECORD *a,
1490*4882a593Smuzhiyun const u32 new_size)
1491*4882a593Smuzhiyun {
1492*4882a593Smuzhiyun u32 old_size;
1493*4882a593Smuzhiyun
1494*4882a593Smuzhiyun /* Resize the resident part of the attribute record. */
1495*4882a593Smuzhiyun if (ntfs_attr_record_resize(m, a,
1496*4882a593Smuzhiyun le16_to_cpu(a->data.resident.value_offset) + new_size))
1497*4882a593Smuzhiyun return -ENOSPC;
1498*4882a593Smuzhiyun /*
1499*4882a593Smuzhiyun * The resize succeeded! If we made the attribute value bigger, clear
1500*4882a593Smuzhiyun * the area between the old size and @new_size.
1501*4882a593Smuzhiyun */
1502*4882a593Smuzhiyun old_size = le32_to_cpu(a->data.resident.value_length);
1503*4882a593Smuzhiyun if (new_size > old_size)
1504*4882a593Smuzhiyun memset((u8*)a + le16_to_cpu(a->data.resident.value_offset) +
1505*4882a593Smuzhiyun old_size, 0, new_size - old_size);
1506*4882a593Smuzhiyun /* Finally update the length of the attribute value. */
1507*4882a593Smuzhiyun a->data.resident.value_length = cpu_to_le32(new_size);
1508*4882a593Smuzhiyun return 0;
1509*4882a593Smuzhiyun }
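
/*
 * Editor's note: a sketch (not from the original source) of updating a
 * resident attribute value: grow the value, copy in the new bytes, then mark
 * the mft record dirty as the rest of this file does.  @buf and @new_len are
 * hypothetical; @m, @a, and @ctx come from a lookup as sketched earlier.
 *
 *	err = ntfs_resident_attr_value_resize(m, a, new_len);
 *	if (err)
 *		return err;
 *	memcpy((u8*)a + le16_to_cpu(a->data.resident.value_offset), buf,
 *			new_len);
 *	flush_dcache_mft_record_page(ctx->ntfs_ino);
 *	mark_mft_record_dirty(ctx->ntfs_ino);
 */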
1510*4882a593Smuzhiyun
1511*4882a593Smuzhiyun /**
1512*4882a593Smuzhiyun * ntfs_attr_make_non_resident - convert a resident to a non-resident attribute
1513*4882a593Smuzhiyun * @ni: ntfs inode describing the attribute to convert
1514*4882a593Smuzhiyun * @data_size: size of the resident data to copy to the non-resident attribute
1515*4882a593Smuzhiyun *
1516*4882a593Smuzhiyun * Convert the resident ntfs attribute described by the ntfs inode @ni to a
1517*4882a593Smuzhiyun * non-resident one.
1518*4882a593Smuzhiyun *
1519*4882a593Smuzhiyun * @data_size must be equal to the attribute value size. This is needed since
1520*4882a593Smuzhiyun * we need to know the size before we can map the mft record and our callers
1521*4882a593Smuzhiyun * always know it. The reason we cannot simply read the size from the vfs
1522*4882a593Smuzhiyun * inode i_size is that this is not necessarily uptodate. This happens when
1523*4882a593Smuzhiyun * ntfs_attr_make_non_resident() is called in the ->truncate call path(s).
1524*4882a593Smuzhiyun *
1525*4882a593Smuzhiyun * Return 0 on success and -errno on error. The following error return codes
1526*4882a593Smuzhiyun * are defined:
1527*4882a593Smuzhiyun * -EPERM - The attribute is not allowed to be non-resident.
1528*4882a593Smuzhiyun * -ENOMEM - Not enough memory.
1529*4882a593Smuzhiyun * -ENOSPC - Not enough disk space.
1530*4882a593Smuzhiyun * -EINVAL - Attribute not defined on the volume.
1531*4882a593Smuzhiyun  *	-EIO	- I/O error or other error.
1532*4882a593Smuzhiyun * Note that -ENOSPC is also returned in the case that there is not enough
1533*4882a593Smuzhiyun * space in the mft record to do the conversion. This can happen when the mft
1534*4882a593Smuzhiyun * record is already very full. The caller is responsible for trying to make
1535*4882a593Smuzhiyun * space in the mft record and trying again. FIXME: Do we need a separate
1536*4882a593Smuzhiyun * error return code for this kind of -ENOSPC or is it always worth trying
1537*4882a593Smuzhiyun * again in case the attribute may then fit in a resident state so no need to
1538*4882a593Smuzhiyun * make it non-resident at all? Ho-hum... (AIA)
1539*4882a593Smuzhiyun *
1540*4882a593Smuzhiyun * NOTE to self: No changes in the attribute list are required to move from
1541*4882a593Smuzhiyun * a resident to a non-resident attribute.
1542*4882a593Smuzhiyun *
1543*4882a593Smuzhiyun * Locking: - The caller must hold i_mutex on the inode.
1544*4882a593Smuzhiyun */
1545*4882a593Smuzhiyun int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
1546*4882a593Smuzhiyun {
1547*4882a593Smuzhiyun s64 new_size;
1548*4882a593Smuzhiyun struct inode *vi = VFS_I(ni);
1549*4882a593Smuzhiyun ntfs_volume *vol = ni->vol;
1550*4882a593Smuzhiyun ntfs_inode *base_ni;
1551*4882a593Smuzhiyun MFT_RECORD *m;
1552*4882a593Smuzhiyun ATTR_RECORD *a;
1553*4882a593Smuzhiyun ntfs_attr_search_ctx *ctx;
1554*4882a593Smuzhiyun struct page *page;
1555*4882a593Smuzhiyun runlist_element *rl;
1556*4882a593Smuzhiyun u8 *kaddr;
1557*4882a593Smuzhiyun unsigned long flags;
1558*4882a593Smuzhiyun int mp_size, mp_ofs, name_ofs, arec_size, err, err2;
1559*4882a593Smuzhiyun u32 attr_size;
1560*4882a593Smuzhiyun u8 old_res_attr_flags;
1561*4882a593Smuzhiyun
1562*4882a593Smuzhiyun /* Check that the attribute is allowed to be non-resident. */
1563*4882a593Smuzhiyun err = ntfs_attr_can_be_non_resident(vol, ni->type);
1564*4882a593Smuzhiyun if (unlikely(err)) {
1565*4882a593Smuzhiyun if (err == -EPERM)
1566*4882a593Smuzhiyun ntfs_debug("Attribute is not allowed to be "
1567*4882a593Smuzhiyun "non-resident.");
1568*4882a593Smuzhiyun else
1569*4882a593Smuzhiyun ntfs_debug("Attribute not defined on the NTFS "
1570*4882a593Smuzhiyun "volume!");
1571*4882a593Smuzhiyun return err;
1572*4882a593Smuzhiyun }
1573*4882a593Smuzhiyun /*
1574*4882a593Smuzhiyun * FIXME: Compressed and encrypted attributes are not supported when
1575*4882a593Smuzhiyun * writing and we should never have gotten here for them.
1576*4882a593Smuzhiyun */
1577*4882a593Smuzhiyun BUG_ON(NInoCompressed(ni));
1578*4882a593Smuzhiyun BUG_ON(NInoEncrypted(ni));
1579*4882a593Smuzhiyun /*
1580*4882a593Smuzhiyun * The size needs to be aligned to a cluster boundary for allocation
1581*4882a593Smuzhiyun * purposes.
1582*4882a593Smuzhiyun */
1583*4882a593Smuzhiyun new_size = (data_size + vol->cluster_size - 1) &
1584*4882a593Smuzhiyun ~(vol->cluster_size - 1);
1585*4882a593Smuzhiyun if (new_size > 0) {
1586*4882a593Smuzhiyun /*
1587*4882a593Smuzhiyun * Will need the page later and since the page lock nests
1588*4882a593Smuzhiyun * outside all ntfs locks, we need to get the page now.
1589*4882a593Smuzhiyun */
1590*4882a593Smuzhiyun page = find_or_create_page(vi->i_mapping, 0,
1591*4882a593Smuzhiyun mapping_gfp_mask(vi->i_mapping));
1592*4882a593Smuzhiyun if (unlikely(!page))
1593*4882a593Smuzhiyun return -ENOMEM;
1594*4882a593Smuzhiyun /* Start by allocating clusters to hold the attribute value. */
1595*4882a593Smuzhiyun rl = ntfs_cluster_alloc(vol, 0, new_size >>
1596*4882a593Smuzhiyun vol->cluster_size_bits, -1, DATA_ZONE, true);
1597*4882a593Smuzhiyun if (IS_ERR(rl)) {
1598*4882a593Smuzhiyun err = PTR_ERR(rl);
1599*4882a593Smuzhiyun ntfs_debug("Failed to allocate cluster%s, error code "
1600*4882a593Smuzhiyun "%i.", (new_size >>
1601*4882a593Smuzhiyun vol->cluster_size_bits) > 1 ? "s" : "",
1602*4882a593Smuzhiyun err);
1603*4882a593Smuzhiyun goto page_err_out;
1604*4882a593Smuzhiyun }
1605*4882a593Smuzhiyun } else {
1606*4882a593Smuzhiyun rl = NULL;
1607*4882a593Smuzhiyun page = NULL;
1608*4882a593Smuzhiyun }
1609*4882a593Smuzhiyun /* Determine the size of the mapping pairs array. */
1610*4882a593Smuzhiyun mp_size = ntfs_get_size_for_mapping_pairs(vol, rl, 0, -1);
1611*4882a593Smuzhiyun if (unlikely(mp_size < 0)) {
1612*4882a593Smuzhiyun err = mp_size;
1613*4882a593Smuzhiyun ntfs_debug("Failed to get size for mapping pairs array, error "
1614*4882a593Smuzhiyun "code %i.", err);
1615*4882a593Smuzhiyun goto rl_err_out;
1616*4882a593Smuzhiyun }
1617*4882a593Smuzhiyun down_write(&ni->runlist.lock);
1618*4882a593Smuzhiyun if (!NInoAttr(ni))
1619*4882a593Smuzhiyun base_ni = ni;
1620*4882a593Smuzhiyun else
1621*4882a593Smuzhiyun base_ni = ni->ext.base_ntfs_ino;
1622*4882a593Smuzhiyun m = map_mft_record(base_ni);
1623*4882a593Smuzhiyun if (IS_ERR(m)) {
1624*4882a593Smuzhiyun err = PTR_ERR(m);
1625*4882a593Smuzhiyun m = NULL;
1626*4882a593Smuzhiyun ctx = NULL;
1627*4882a593Smuzhiyun goto err_out;
1628*4882a593Smuzhiyun }
1629*4882a593Smuzhiyun ctx = ntfs_attr_get_search_ctx(base_ni, m);
1630*4882a593Smuzhiyun if (unlikely(!ctx)) {
1631*4882a593Smuzhiyun err = -ENOMEM;
1632*4882a593Smuzhiyun goto err_out;
1633*4882a593Smuzhiyun }
1634*4882a593Smuzhiyun err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1635*4882a593Smuzhiyun CASE_SENSITIVE, 0, NULL, 0, ctx);
1636*4882a593Smuzhiyun if (unlikely(err)) {
1637*4882a593Smuzhiyun if (err == -ENOENT)
1638*4882a593Smuzhiyun err = -EIO;
1639*4882a593Smuzhiyun goto err_out;
1640*4882a593Smuzhiyun }
1641*4882a593Smuzhiyun m = ctx->mrec;
1642*4882a593Smuzhiyun a = ctx->attr;
1643*4882a593Smuzhiyun BUG_ON(NInoNonResident(ni));
1644*4882a593Smuzhiyun BUG_ON(a->non_resident);
1645*4882a593Smuzhiyun /*
1646*4882a593Smuzhiyun * Calculate new offsets for the name and the mapping pairs array.
1647*4882a593Smuzhiyun */
1648*4882a593Smuzhiyun if (NInoSparse(ni) || NInoCompressed(ni))
1649*4882a593Smuzhiyun name_ofs = (offsetof(ATTR_REC,
1650*4882a593Smuzhiyun data.non_resident.compressed_size) +
1651*4882a593Smuzhiyun sizeof(a->data.non_resident.compressed_size) +
1652*4882a593Smuzhiyun 7) & ~7;
1653*4882a593Smuzhiyun else
1654*4882a593Smuzhiyun name_ofs = (offsetof(ATTR_REC,
1655*4882a593Smuzhiyun data.non_resident.compressed_size) + 7) & ~7;
1656*4882a593Smuzhiyun mp_ofs = (name_ofs + a->name_length * sizeof(ntfschar) + 7) & ~7;
1657*4882a593Smuzhiyun /*
1658*4882a593Smuzhiyun * Determine the size of the resident part of the now non-resident
1659*4882a593Smuzhiyun * attribute record.
1660*4882a593Smuzhiyun */
1661*4882a593Smuzhiyun arec_size = (mp_ofs + mp_size + 7) & ~7;
1662*4882a593Smuzhiyun /*
1663*4882a593Smuzhiyun * If the page is not uptodate bring it uptodate by copying from the
1664*4882a593Smuzhiyun * attribute value.
1665*4882a593Smuzhiyun */
1666*4882a593Smuzhiyun attr_size = le32_to_cpu(a->data.resident.value_length);
1667*4882a593Smuzhiyun BUG_ON(attr_size != data_size);
1668*4882a593Smuzhiyun if (page && !PageUptodate(page)) {
1669*4882a593Smuzhiyun kaddr = kmap_atomic(page);
1670*4882a593Smuzhiyun memcpy(kaddr, (u8*)a +
1671*4882a593Smuzhiyun le16_to_cpu(a->data.resident.value_offset),
1672*4882a593Smuzhiyun attr_size);
1673*4882a593Smuzhiyun memset(kaddr + attr_size, 0, PAGE_SIZE - attr_size);
1674*4882a593Smuzhiyun kunmap_atomic(kaddr);
1675*4882a593Smuzhiyun flush_dcache_page(page);
1676*4882a593Smuzhiyun SetPageUptodate(page);
1677*4882a593Smuzhiyun }
1678*4882a593Smuzhiyun /* Backup the attribute flag. */
1679*4882a593Smuzhiyun old_res_attr_flags = a->data.resident.flags;
1680*4882a593Smuzhiyun /* Resize the resident part of the attribute record. */
1681*4882a593Smuzhiyun err = ntfs_attr_record_resize(m, a, arec_size);
1682*4882a593Smuzhiyun if (unlikely(err))
1683*4882a593Smuzhiyun goto err_out;
1684*4882a593Smuzhiyun /*
1685*4882a593Smuzhiyun * Convert the resident part of the attribute record to describe a
1686*4882a593Smuzhiyun * non-resident attribute.
1687*4882a593Smuzhiyun */
1688*4882a593Smuzhiyun a->non_resident = 1;
1689*4882a593Smuzhiyun /* Move the attribute name if it exists and update the offset. */
1690*4882a593Smuzhiyun if (a->name_length)
1691*4882a593Smuzhiyun memmove((u8*)a + name_ofs, (u8*)a + le16_to_cpu(a->name_offset),
1692*4882a593Smuzhiyun a->name_length * sizeof(ntfschar));
1693*4882a593Smuzhiyun a->name_offset = cpu_to_le16(name_ofs);
1694*4882a593Smuzhiyun /* Setup the fields specific to non-resident attributes. */
1695*4882a593Smuzhiyun a->data.non_resident.lowest_vcn = 0;
1696*4882a593Smuzhiyun a->data.non_resident.highest_vcn = cpu_to_sle64((new_size - 1) >>
1697*4882a593Smuzhiyun vol->cluster_size_bits);
1698*4882a593Smuzhiyun a->data.non_resident.mapping_pairs_offset = cpu_to_le16(mp_ofs);
1699*4882a593Smuzhiyun memset(&a->data.non_resident.reserved, 0,
1700*4882a593Smuzhiyun sizeof(a->data.non_resident.reserved));
1701*4882a593Smuzhiyun a->data.non_resident.allocated_size = cpu_to_sle64(new_size);
1702*4882a593Smuzhiyun a->data.non_resident.data_size =
1703*4882a593Smuzhiyun a->data.non_resident.initialized_size =
1704*4882a593Smuzhiyun cpu_to_sle64(attr_size);
1705*4882a593Smuzhiyun if (NInoSparse(ni) || NInoCompressed(ni)) {
1706*4882a593Smuzhiyun a->data.non_resident.compression_unit = 0;
1707*4882a593Smuzhiyun if (NInoCompressed(ni) || vol->major_ver < 3)
1708*4882a593Smuzhiyun a->data.non_resident.compression_unit = 4;
1709*4882a593Smuzhiyun a->data.non_resident.compressed_size =
1710*4882a593Smuzhiyun a->data.non_resident.allocated_size;
1711*4882a593Smuzhiyun } else
1712*4882a593Smuzhiyun a->data.non_resident.compression_unit = 0;
1713*4882a593Smuzhiyun /* Generate the mapping pairs array into the attribute record. */
1714*4882a593Smuzhiyun err = ntfs_mapping_pairs_build(vol, (u8*)a + mp_ofs,
1715*4882a593Smuzhiyun arec_size - mp_ofs, rl, 0, -1, NULL);
1716*4882a593Smuzhiyun if (unlikely(err)) {
1717*4882a593Smuzhiyun ntfs_debug("Failed to build mapping pairs, error code %i.",
1718*4882a593Smuzhiyun err);
1719*4882a593Smuzhiyun goto undo_err_out;
1720*4882a593Smuzhiyun }
1721*4882a593Smuzhiyun /* Setup the in-memory attribute structure to be non-resident. */
1722*4882a593Smuzhiyun ni->runlist.rl = rl;
1723*4882a593Smuzhiyun write_lock_irqsave(&ni->size_lock, flags);
1724*4882a593Smuzhiyun ni->allocated_size = new_size;
1725*4882a593Smuzhiyun if (NInoSparse(ni) || NInoCompressed(ni)) {
1726*4882a593Smuzhiyun ni->itype.compressed.size = ni->allocated_size;
1727*4882a593Smuzhiyun if (a->data.non_resident.compression_unit) {
1728*4882a593Smuzhiyun ni->itype.compressed.block_size = 1U << (a->data.
1729*4882a593Smuzhiyun non_resident.compression_unit +
1730*4882a593Smuzhiyun vol->cluster_size_bits);
1731*4882a593Smuzhiyun ni->itype.compressed.block_size_bits =
1732*4882a593Smuzhiyun ffs(ni->itype.compressed.block_size) -
1733*4882a593Smuzhiyun 1;
1734*4882a593Smuzhiyun ni->itype.compressed.block_clusters = 1U <<
1735*4882a593Smuzhiyun a->data.non_resident.compression_unit;
1736*4882a593Smuzhiyun } else {
1737*4882a593Smuzhiyun ni->itype.compressed.block_size = 0;
1738*4882a593Smuzhiyun ni->itype.compressed.block_size_bits = 0;
1739*4882a593Smuzhiyun ni->itype.compressed.block_clusters = 0;
1740*4882a593Smuzhiyun }
1741*4882a593Smuzhiyun vi->i_blocks = ni->itype.compressed.size >> 9;
1742*4882a593Smuzhiyun } else
1743*4882a593Smuzhiyun vi->i_blocks = ni->allocated_size >> 9;
1744*4882a593Smuzhiyun write_unlock_irqrestore(&ni->size_lock, flags);
1745*4882a593Smuzhiyun /*
1746*4882a593Smuzhiyun * This needs to be last since the address space operations ->readpage
1747*4882a593Smuzhiyun * and ->writepage can run concurrently with us as they are not
1748*4882a593Smuzhiyun * serialized on i_mutex. Note, we are not allowed to fail once we flip
1749*4882a593Smuzhiyun * this switch, which is another reason to do this last.
1750*4882a593Smuzhiyun */
1751*4882a593Smuzhiyun NInoSetNonResident(ni);
1752*4882a593Smuzhiyun /* Mark the mft record dirty, so it gets written back. */
1753*4882a593Smuzhiyun flush_dcache_mft_record_page(ctx->ntfs_ino);
1754*4882a593Smuzhiyun mark_mft_record_dirty(ctx->ntfs_ino);
1755*4882a593Smuzhiyun ntfs_attr_put_search_ctx(ctx);
1756*4882a593Smuzhiyun unmap_mft_record(base_ni);
1757*4882a593Smuzhiyun up_write(&ni->runlist.lock);
1758*4882a593Smuzhiyun if (page) {
1759*4882a593Smuzhiyun set_page_dirty(page);
1760*4882a593Smuzhiyun unlock_page(page);
1761*4882a593Smuzhiyun put_page(page);
1762*4882a593Smuzhiyun }
1763*4882a593Smuzhiyun ntfs_debug("Done.");
1764*4882a593Smuzhiyun return 0;
1765*4882a593Smuzhiyun undo_err_out:
1766*4882a593Smuzhiyun /* Convert the attribute back into a resident attribute. */
1767*4882a593Smuzhiyun a->non_resident = 0;
1768*4882a593Smuzhiyun /* Move the attribute name if it exists and update the offset. */
1769*4882a593Smuzhiyun name_ofs = (offsetof(ATTR_RECORD, data.resident.reserved) +
1770*4882a593Smuzhiyun sizeof(a->data.resident.reserved) + 7) & ~7;
1771*4882a593Smuzhiyun if (a->name_length)
1772*4882a593Smuzhiyun memmove((u8*)a + name_ofs, (u8*)a + le16_to_cpu(a->name_offset),
1773*4882a593Smuzhiyun a->name_length * sizeof(ntfschar));
1774*4882a593Smuzhiyun mp_ofs = (name_ofs + a->name_length * sizeof(ntfschar) + 7) & ~7;
1775*4882a593Smuzhiyun a->name_offset = cpu_to_le16(name_ofs);
1776*4882a593Smuzhiyun arec_size = (mp_ofs + attr_size + 7) & ~7;
1777*4882a593Smuzhiyun /* Resize the resident part of the attribute record. */
1778*4882a593Smuzhiyun err2 = ntfs_attr_record_resize(m, a, arec_size);
1779*4882a593Smuzhiyun if (unlikely(err2)) {
1780*4882a593Smuzhiyun /*
1781*4882a593Smuzhiyun * This cannot happen (well if memory corruption is at work it
1782*4882a593Smuzhiyun * could happen in theory), but deal with it as well as we can.
1783*4882a593Smuzhiyun * If the old size is too small, truncate the attribute,
1784*4882a593Smuzhiyun * otherwise simply give it a larger allocated size.
1785*4882a593Smuzhiyun * FIXME: Should check whether chkdsk complains when the
1786*4882a593Smuzhiyun * allocated size is much bigger than the resident value size.
1787*4882a593Smuzhiyun */
1788*4882a593Smuzhiyun arec_size = le32_to_cpu(a->length);
1789*4882a593Smuzhiyun if ((mp_ofs + attr_size) > arec_size) {
1790*4882a593Smuzhiyun err2 = attr_size;
1791*4882a593Smuzhiyun attr_size = arec_size - mp_ofs;
1792*4882a593Smuzhiyun ntfs_error(vol->sb, "Failed to undo partial resident "
1793*4882a593Smuzhiyun "to non-resident attribute "
1794*4882a593Smuzhiyun "conversion. Truncating inode 0x%lx, "
1795*4882a593Smuzhiyun "attribute type 0x%x from %i bytes to "
1796*4882a593Smuzhiyun "%i bytes to maintain metadata "
1797*4882a593Smuzhiyun "consistency. THIS MEANS YOU ARE "
1798*4882a593Smuzhiyun "LOSING %i BYTES DATA FROM THIS %s.",
1799*4882a593Smuzhiyun vi->i_ino,
1800*4882a593Smuzhiyun (unsigned)le32_to_cpu(ni->type),
1801*4882a593Smuzhiyun err2, attr_size, err2 - attr_size,
1802*4882a593Smuzhiyun ((ni->type == AT_DATA) &&
1803*4882a593Smuzhiyun !ni->name_len) ? "FILE": "ATTRIBUTE");
1804*4882a593Smuzhiyun write_lock_irqsave(&ni->size_lock, flags);
1805*4882a593Smuzhiyun ni->initialized_size = attr_size;
1806*4882a593Smuzhiyun i_size_write(vi, attr_size);
1807*4882a593Smuzhiyun write_unlock_irqrestore(&ni->size_lock, flags);
1808*4882a593Smuzhiyun }
1809*4882a593Smuzhiyun }
1810*4882a593Smuzhiyun /* Setup the fields specific to resident attributes. */
1811*4882a593Smuzhiyun a->data.resident.value_length = cpu_to_le32(attr_size);
1812*4882a593Smuzhiyun a->data.resident.value_offset = cpu_to_le16(mp_ofs);
1813*4882a593Smuzhiyun a->data.resident.flags = old_res_attr_flags;
1814*4882a593Smuzhiyun memset(&a->data.resident.reserved, 0,
1815*4882a593Smuzhiyun sizeof(a->data.resident.reserved));
1816*4882a593Smuzhiyun /* Copy the data from the page back to the attribute value. */
1817*4882a593Smuzhiyun if (page) {
1818*4882a593Smuzhiyun kaddr = kmap_atomic(page);
1819*4882a593Smuzhiyun memcpy((u8*)a + mp_ofs, kaddr, attr_size);
1820*4882a593Smuzhiyun kunmap_atomic(kaddr);
1821*4882a593Smuzhiyun }
1822*4882a593Smuzhiyun /* Setup the allocated size in the ntfs inode in case it changed. */
1823*4882a593Smuzhiyun write_lock_irqsave(&ni->size_lock, flags);
1824*4882a593Smuzhiyun ni->allocated_size = arec_size - mp_ofs;
1825*4882a593Smuzhiyun write_unlock_irqrestore(&ni->size_lock, flags);
1826*4882a593Smuzhiyun /* Mark the mft record dirty, so it gets written back. */
1827*4882a593Smuzhiyun flush_dcache_mft_record_page(ctx->ntfs_ino);
1828*4882a593Smuzhiyun mark_mft_record_dirty(ctx->ntfs_ino);
1829*4882a593Smuzhiyun err_out:
1830*4882a593Smuzhiyun if (ctx)
1831*4882a593Smuzhiyun ntfs_attr_put_search_ctx(ctx);
1832*4882a593Smuzhiyun if (m)
1833*4882a593Smuzhiyun unmap_mft_record(base_ni);
1834*4882a593Smuzhiyun ni->runlist.rl = NULL;
1835*4882a593Smuzhiyun up_write(&ni->runlist.lock);
1836*4882a593Smuzhiyun rl_err_out:
1837*4882a593Smuzhiyun if (rl) {
1838*4882a593Smuzhiyun if (ntfs_cluster_free_from_rl(vol, rl) < 0) {
1839*4882a593Smuzhiyun ntfs_error(vol->sb, "Failed to release allocated "
1840*4882a593Smuzhiyun "cluster(s) in error code path. Run "
1841*4882a593Smuzhiyun "chkdsk to recover the lost "
1842*4882a593Smuzhiyun "cluster(s).");
1843*4882a593Smuzhiyun NVolSetErrors(vol);
1844*4882a593Smuzhiyun }
1845*4882a593Smuzhiyun ntfs_free(rl);
1846*4882a593Smuzhiyun page_err_out:
1847*4882a593Smuzhiyun unlock_page(page);
1848*4882a593Smuzhiyun put_page(page);
1849*4882a593Smuzhiyun }
1850*4882a593Smuzhiyun if (err == -EINVAL)
1851*4882a593Smuzhiyun err = -EIO;
1852*4882a593Smuzhiyun return err;
1853*4882a593Smuzhiyun }
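
/*
 * Editor's note: a sketch (not from the original source) of the typical call
 * site pattern for ntfs_attr_make_non_resident().  It is attempted when a
 * resident attribute no longer fits in its mft record and, on success, the
 * caller simply restarts the resize with the attribute now non-resident
 * (i_mutex must be held throughout), as ntfs_attr_extend_allocation() below
 * does with its retry_extend label.
 *
 *	err = ntfs_attr_make_non_resident(ni, attr_len);
 *	if (!err)
 *		goto retry_extend;
 *	if (err != -ENOSPC)
 *		return err;
 */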
1854*4882a593Smuzhiyun
1855*4882a593Smuzhiyun /**
1856*4882a593Smuzhiyun * ntfs_attr_extend_allocation - extend the allocated space of an attribute
1857*4882a593Smuzhiyun * @ni: ntfs inode of the attribute whose allocation to extend
1858*4882a593Smuzhiyun * @new_alloc_size: new size in bytes to which to extend the allocation to
1859*4882a593Smuzhiyun * @new_data_size: new size in bytes to which to extend the data to
1860*4882a593Smuzhiyun * @data_start: beginning of region which is required to be non-sparse
1861*4882a593Smuzhiyun *
1862*4882a593Smuzhiyun * Extend the allocated space of an attribute described by the ntfs inode @ni
1863*4882a593Smuzhiyun * to @new_alloc_size bytes. If @data_start is -1, the whole extension may be
1864*4882a593Smuzhiyun * implemented as a hole in the file (as long as both the volume and the ntfs
1865*4882a593Smuzhiyun * inode @ni have sparse support enabled). If @data_start is >= 0, then the
1866*4882a593Smuzhiyun * region between the old allocated size and @data_start - 1 may be made sparse
1867*4882a593Smuzhiyun * but the regions between @data_start and @new_alloc_size must be backed by
1868*4882a593Smuzhiyun * actual clusters.
1869*4882a593Smuzhiyun *
1870*4882a593Smuzhiyun * If @new_data_size is -1, it is ignored. If it is >= 0, then the data size
1871*4882a593Smuzhiyun * of the attribute is extended to @new_data_size. Note that the i_size of the
1872*4882a593Smuzhiyun * vfs inode is not updated. Only the data size in the base attribute record
1873*4882a593Smuzhiyun * is updated. The caller has to update i_size separately if this is required.
1874*4882a593Smuzhiyun * WARNING: It is a BUG() for @new_data_size to be smaller than the old data
1875*4882a593Smuzhiyun * size as well as for @new_data_size to be greater than @new_alloc_size.
1876*4882a593Smuzhiyun *
1877*4882a593Smuzhiyun * For resident attributes this involves resizing the attribute record and if
1878*4882a593Smuzhiyun * necessary moving it and/or other attributes into extent mft records and/or
1879*4882a593Smuzhiyun * converting the attribute to a non-resident attribute which in turn involves
1880*4882a593Smuzhiyun * extending the allocation of a non-resident attribute as described below.
1881*4882a593Smuzhiyun *
1882*4882a593Smuzhiyun * For non-resident attributes this involves allocating clusters in the data
1883*4882a593Smuzhiyun * zone on the volume (except for regions that are being made sparse) and
1884*4882a593Smuzhiyun * extending the run list to describe the allocated clusters as well as
1885*4882a593Smuzhiyun * updating the mapping pairs array of the attribute. This in turn involves
1886*4882a593Smuzhiyun * resizing the attribute record and if necessary moving it and/or other
1887*4882a593Smuzhiyun * attributes into extent mft records and/or splitting the attribute record
1888*4882a593Smuzhiyun * into multiple extent attribute records.
1889*4882a593Smuzhiyun *
1890*4882a593Smuzhiyun * Also, the attribute list attribute is updated if present and in some of the
1891*4882a593Smuzhiyun * above cases (the ones where extent mft records/attributes come into play),
1892*4882a593Smuzhiyun * an attribute list attribute is created if not already present.
1893*4882a593Smuzhiyun *
1894*4882a593Smuzhiyun * Return the new allocated size on success and -errno on error. In the case
1895*4882a593Smuzhiyun * that an error is encountered but a partial extension at least up to
1896*4882a593Smuzhiyun * @data_start (if present) is possible, the allocation is partially extended
1897*4882a593Smuzhiyun * and this is returned. This means the caller must check the returned size to
1898*4882a593Smuzhiyun * determine if the extension was partial. If @data_start is -1 then partial
1899*4882a593Smuzhiyun * allocations are not performed.
1900*4882a593Smuzhiyun *
1901*4882a593Smuzhiyun * WARNING: Do not call ntfs_attr_extend_allocation() for $MFT/$DATA.
1902*4882a593Smuzhiyun *
1903*4882a593Smuzhiyun * Locking: This function takes the runlist lock of @ni for writing as well as
1904*4882a593Smuzhiyun * locking the mft record of the base ntfs inode. These locks are maintained
1905*4882a593Smuzhiyun * throughout execution of the function. These locks are required so that the
1906*4882a593Smuzhiyun * attribute can be resized safely and so that it can for example be converted
1907*4882a593Smuzhiyun * from resident to non-resident safely.
1908*4882a593Smuzhiyun *
1909*4882a593Smuzhiyun * TODO: At present attribute list attribute handling is not implemented.
1910*4882a593Smuzhiyun *
1911*4882a593Smuzhiyun * TODO: At present it is not safe to call this function for anything other
1912*4882a593Smuzhiyun * than the $DATA attribute(s) of an uncompressed and unencrypted file.
1913*4882a593Smuzhiyun */
1914*4882a593Smuzhiyun s64 ntfs_attr_extend_allocation(ntfs_inode *ni, s64 new_alloc_size,
1915*4882a593Smuzhiyun const s64 new_data_size, const s64 data_start)
1916*4882a593Smuzhiyun {
1917*4882a593Smuzhiyun VCN vcn;
1918*4882a593Smuzhiyun s64 ll, allocated_size, start = data_start;
1919*4882a593Smuzhiyun struct inode *vi = VFS_I(ni);
1920*4882a593Smuzhiyun ntfs_volume *vol = ni->vol;
1921*4882a593Smuzhiyun ntfs_inode *base_ni;
1922*4882a593Smuzhiyun MFT_RECORD *m;
1923*4882a593Smuzhiyun ATTR_RECORD *a;
1924*4882a593Smuzhiyun ntfs_attr_search_ctx *ctx;
1925*4882a593Smuzhiyun runlist_element *rl, *rl2;
1926*4882a593Smuzhiyun unsigned long flags;
1927*4882a593Smuzhiyun int err, mp_size;
1928*4882a593Smuzhiyun u32 attr_len = 0; /* Silence stupid gcc warning. */
1929*4882a593Smuzhiyun bool mp_rebuilt;
1930*4882a593Smuzhiyun
1931*4882a593Smuzhiyun #ifdef DEBUG
1932*4882a593Smuzhiyun read_lock_irqsave(&ni->size_lock, flags);
1933*4882a593Smuzhiyun allocated_size = ni->allocated_size;
1934*4882a593Smuzhiyun read_unlock_irqrestore(&ni->size_lock, flags);
1935*4882a593Smuzhiyun ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
1936*4882a593Smuzhiyun "old_allocated_size 0x%llx, "
1937*4882a593Smuzhiyun "new_allocated_size 0x%llx, new_data_size 0x%llx, "
1938*4882a593Smuzhiyun "data_start 0x%llx.", vi->i_ino,
1939*4882a593Smuzhiyun (unsigned)le32_to_cpu(ni->type),
1940*4882a593Smuzhiyun (unsigned long long)allocated_size,
1941*4882a593Smuzhiyun (unsigned long long)new_alloc_size,
1942*4882a593Smuzhiyun (unsigned long long)new_data_size,
1943*4882a593Smuzhiyun (unsigned long long)start);
1944*4882a593Smuzhiyun #endif
1945*4882a593Smuzhiyun retry_extend:
1946*4882a593Smuzhiyun /*
1947*4882a593Smuzhiyun * For non-resident attributes, @start and @new_size need to be aligned
1948*4882a593Smuzhiyun * to cluster boundaries for allocation purposes.
1949*4882a593Smuzhiyun */
1950*4882a593Smuzhiyun if (NInoNonResident(ni)) {
1951*4882a593Smuzhiyun if (start > 0)
1952*4882a593Smuzhiyun start &= ~(s64)vol->cluster_size_mask;
1953*4882a593Smuzhiyun new_alloc_size = (new_alloc_size + vol->cluster_size - 1) &
1954*4882a593Smuzhiyun ~(s64)vol->cluster_size_mask;
1955*4882a593Smuzhiyun }
1956*4882a593Smuzhiyun BUG_ON(new_data_size >= 0 && new_data_size > new_alloc_size);
1957*4882a593Smuzhiyun /* Check if new size is allowed in $AttrDef. */
1958*4882a593Smuzhiyun err = ntfs_attr_size_bounds_check(vol, ni->type, new_alloc_size);
1959*4882a593Smuzhiyun if (unlikely(err)) {
1960*4882a593Smuzhiyun /* Only emit errors when the write will fail completely. */
1961*4882a593Smuzhiyun read_lock_irqsave(&ni->size_lock, flags);
1962*4882a593Smuzhiyun allocated_size = ni->allocated_size;
1963*4882a593Smuzhiyun read_unlock_irqrestore(&ni->size_lock, flags);
1964*4882a593Smuzhiyun if (start < 0 || start >= allocated_size) {
1965*4882a593Smuzhiyun if (err == -ERANGE) {
1966*4882a593Smuzhiyun ntfs_error(vol->sb, "Cannot extend allocation "
1967*4882a593Smuzhiyun "of inode 0x%lx, attribute "
1968*4882a593Smuzhiyun "type 0x%x, because the new "
1969*4882a593Smuzhiyun "allocation would exceed the "
1970*4882a593Smuzhiyun "maximum allowed size for "
1971*4882a593Smuzhiyun "this attribute type.",
1972*4882a593Smuzhiyun vi->i_ino, (unsigned)
1973*4882a593Smuzhiyun le32_to_cpu(ni->type));
1974*4882a593Smuzhiyun } else {
1975*4882a593Smuzhiyun ntfs_error(vol->sb, "Cannot extend allocation "
1976*4882a593Smuzhiyun "of inode 0x%lx, attribute "
1977*4882a593Smuzhiyun "type 0x%x, because this "
1978*4882a593Smuzhiyun "attribute type is not "
1979*4882a593Smuzhiyun "defined on the NTFS volume. "
1980*4882a593Smuzhiyun "Possible corruption! You "
1981*4882a593Smuzhiyun "should run chkdsk!",
1982*4882a593Smuzhiyun vi->i_ino, (unsigned)
1983*4882a593Smuzhiyun le32_to_cpu(ni->type));
1984*4882a593Smuzhiyun }
1985*4882a593Smuzhiyun }
1986*4882a593Smuzhiyun /* Translate error code to be POSIX conformant for write(2). */
1987*4882a593Smuzhiyun if (err == -ERANGE)
1988*4882a593Smuzhiyun err = -EFBIG;
1989*4882a593Smuzhiyun else
1990*4882a593Smuzhiyun err = -EIO;
1991*4882a593Smuzhiyun return err;
1992*4882a593Smuzhiyun }
1993*4882a593Smuzhiyun if (!NInoAttr(ni))
1994*4882a593Smuzhiyun base_ni = ni;
1995*4882a593Smuzhiyun else
1996*4882a593Smuzhiyun base_ni = ni->ext.base_ntfs_ino;
1997*4882a593Smuzhiyun /*
1998*4882a593Smuzhiyun * We will be modifying both the runlist (if non-resident) and the mft
1999*4882a593Smuzhiyun * record so lock them both down.
2000*4882a593Smuzhiyun */
2001*4882a593Smuzhiyun down_write(&ni->runlist.lock);
2002*4882a593Smuzhiyun m = map_mft_record(base_ni);
2003*4882a593Smuzhiyun if (IS_ERR(m)) {
2004*4882a593Smuzhiyun err = PTR_ERR(m);
2005*4882a593Smuzhiyun m = NULL;
2006*4882a593Smuzhiyun ctx = NULL;
2007*4882a593Smuzhiyun goto err_out;
2008*4882a593Smuzhiyun }
2009*4882a593Smuzhiyun ctx = ntfs_attr_get_search_ctx(base_ni, m);
2010*4882a593Smuzhiyun if (unlikely(!ctx)) {
2011*4882a593Smuzhiyun err = -ENOMEM;
2012*4882a593Smuzhiyun goto err_out;
2013*4882a593Smuzhiyun }
2014*4882a593Smuzhiyun read_lock_irqsave(&ni->size_lock, flags);
2015*4882a593Smuzhiyun allocated_size = ni->allocated_size;
2016*4882a593Smuzhiyun read_unlock_irqrestore(&ni->size_lock, flags);
2017*4882a593Smuzhiyun /*
2018*4882a593Smuzhiyun * If non-resident, seek to the last extent. If resident, there is
2019*4882a593Smuzhiyun * only one extent, so seek to that.
2020*4882a593Smuzhiyun */
2021*4882a593Smuzhiyun vcn = NInoNonResident(ni) ? allocated_size >> vol->cluster_size_bits :
2022*4882a593Smuzhiyun 0;
2023*4882a593Smuzhiyun /*
2024*4882a593Smuzhiyun * Abort if someone did the work whilst we waited for the locks. If we
2025*4882a593Smuzhiyun * just converted the attribute from resident to non-resident it is
2026*4882a593Smuzhiyun * likely that exactly this has happened already. We cannot quite
2027*4882a593Smuzhiyun * abort if we need to update the data size.
2028*4882a593Smuzhiyun */
2029*4882a593Smuzhiyun if (unlikely(new_alloc_size <= allocated_size)) {
2030*4882a593Smuzhiyun ntfs_debug("Allocated size already exceeds requested size.");
2031*4882a593Smuzhiyun new_alloc_size = allocated_size;
2032*4882a593Smuzhiyun if (new_data_size < 0)
2033*4882a593Smuzhiyun goto done;
2034*4882a593Smuzhiyun /*
2035*4882a593Smuzhiyun * We want the first attribute extent so that we can update the
2036*4882a593Smuzhiyun * data size.
2037*4882a593Smuzhiyun */
2038*4882a593Smuzhiyun vcn = 0;
2039*4882a593Smuzhiyun }
2040*4882a593Smuzhiyun err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
2041*4882a593Smuzhiyun CASE_SENSITIVE, vcn, NULL, 0, ctx);
2042*4882a593Smuzhiyun if (unlikely(err)) {
2043*4882a593Smuzhiyun if (err == -ENOENT)
2044*4882a593Smuzhiyun err = -EIO;
2045*4882a593Smuzhiyun goto err_out;
2046*4882a593Smuzhiyun }
2047*4882a593Smuzhiyun m = ctx->mrec;
2048*4882a593Smuzhiyun a = ctx->attr;
2049*4882a593Smuzhiyun /* Use goto to reduce indentation. */
2050*4882a593Smuzhiyun if (a->non_resident)
2051*4882a593Smuzhiyun goto do_non_resident_extend;
2052*4882a593Smuzhiyun BUG_ON(NInoNonResident(ni));
2053*4882a593Smuzhiyun /* The total length of the attribute value. */
2054*4882a593Smuzhiyun attr_len = le32_to_cpu(a->data.resident.value_length);
2055*4882a593Smuzhiyun /*
2056*4882a593Smuzhiyun * Extend the attribute record to be able to store the new attribute
2057*4882a593Smuzhiyun * size. ntfs_attr_record_resize() will not do anything if the size is
2058*4882a593Smuzhiyun * not changing.
2059*4882a593Smuzhiyun */
2060*4882a593Smuzhiyun if (new_alloc_size < vol->mft_record_size &&
2061*4882a593Smuzhiyun !ntfs_attr_record_resize(m, a,
2062*4882a593Smuzhiyun le16_to_cpu(a->data.resident.value_offset) +
2063*4882a593Smuzhiyun new_alloc_size)) {
2064*4882a593Smuzhiyun /* The resize succeeded! */
2065*4882a593Smuzhiyun write_lock_irqsave(&ni->size_lock, flags);
2066*4882a593Smuzhiyun ni->allocated_size = le32_to_cpu(a->length) -
2067*4882a593Smuzhiyun le16_to_cpu(a->data.resident.value_offset);
2068*4882a593Smuzhiyun write_unlock_irqrestore(&ni->size_lock, flags);
2069*4882a593Smuzhiyun if (new_data_size >= 0) {
2070*4882a593Smuzhiyun BUG_ON(new_data_size < attr_len);
2071*4882a593Smuzhiyun a->data.resident.value_length =
2072*4882a593Smuzhiyun cpu_to_le32((u32)new_data_size);
2073*4882a593Smuzhiyun }
2074*4882a593Smuzhiyun goto flush_done;
2075*4882a593Smuzhiyun }
2076*4882a593Smuzhiyun /*
2077*4882a593Smuzhiyun * We have to drop all the locks so we can call
2078*4882a593Smuzhiyun * ntfs_attr_make_non_resident(). This could be optimised by try-
2079*4882a593Smuzhiyun * locking the first page cache page and only if that fails dropping
2080*4882a593Smuzhiyun * the locks, locking the page, and redoing all the locking and
2081*4882a593Smuzhiyun * lookups. While this would be a huge optimisation, it is not worth
2082*4882a593Smuzhiyun * it as this is definitely a slow code path.
2083*4882a593Smuzhiyun */
2084*4882a593Smuzhiyun ntfs_attr_put_search_ctx(ctx);
2085*4882a593Smuzhiyun unmap_mft_record(base_ni);
2086*4882a593Smuzhiyun up_write(&ni->runlist.lock);
2087*4882a593Smuzhiyun /*
2088*4882a593Smuzhiyun * Not enough space in the mft record, try to make the attribute
2089*4882a593Smuzhiyun * non-resident and if successful restart the extension process.
2090*4882a593Smuzhiyun */
2091*4882a593Smuzhiyun err = ntfs_attr_make_non_resident(ni, attr_len);
2092*4882a593Smuzhiyun if (likely(!err))
2093*4882a593Smuzhiyun goto retry_extend;
2094*4882a593Smuzhiyun /*
2095*4882a593Smuzhiyun * Could not make non-resident. If this is due to this not being
2096*4882a593Smuzhiyun * permitted for this attribute type or there not being enough space,
2097*4882a593Smuzhiyun * try to make other attributes non-resident. Otherwise fail.
2098*4882a593Smuzhiyun */
2099*4882a593Smuzhiyun if (unlikely(err != -EPERM && err != -ENOSPC)) {
2100*4882a593Smuzhiyun /* Only emit errors when the write will fail completely. */
2101*4882a593Smuzhiyun read_lock_irqsave(&ni->size_lock, flags);
2102*4882a593Smuzhiyun allocated_size = ni->allocated_size;
2103*4882a593Smuzhiyun read_unlock_irqrestore(&ni->size_lock, flags);
2104*4882a593Smuzhiyun if (start < 0 || start >= allocated_size)
2105*4882a593Smuzhiyun ntfs_error(vol->sb, "Cannot extend allocation of "
2106*4882a593Smuzhiyun "inode 0x%lx, attribute type 0x%x, "
2107*4882a593Smuzhiyun "because the conversion from resident "
2108*4882a593Smuzhiyun "to non-resident attribute failed "
2109*4882a593Smuzhiyun "with error code %i.", vi->i_ino,
2110*4882a593Smuzhiyun (unsigned)le32_to_cpu(ni->type), err);
2111*4882a593Smuzhiyun if (err != -ENOMEM)
2112*4882a593Smuzhiyun err = -EIO;
2113*4882a593Smuzhiyun goto conv_err_out;
2114*4882a593Smuzhiyun }
2115*4882a593Smuzhiyun /* TODO: Not implemented from here, abort. */
2116*4882a593Smuzhiyun read_lock_irqsave(&ni->size_lock, flags);
2117*4882a593Smuzhiyun allocated_size = ni->allocated_size;
2118*4882a593Smuzhiyun read_unlock_irqrestore(&ni->size_lock, flags);
2119*4882a593Smuzhiyun if (start < 0 || start >= allocated_size) {
2120*4882a593Smuzhiyun if (err == -ENOSPC)
2121*4882a593Smuzhiyun ntfs_error(vol->sb, "Not enough space in the mft "
2122*4882a593Smuzhiyun "record/on disk for the non-resident "
2123*4882a593Smuzhiyun "attribute value. This case is not "
2124*4882a593Smuzhiyun "implemented yet.");
2125*4882a593Smuzhiyun else /* if (err == -EPERM) */
2126*4882a593Smuzhiyun ntfs_error(vol->sb, "This attribute type may not be "
2127*4882a593Smuzhiyun "non-resident. This case is not "
2128*4882a593Smuzhiyun "implemented yet.");
2129*4882a593Smuzhiyun }
2130*4882a593Smuzhiyun err = -EOPNOTSUPP;
2131*4882a593Smuzhiyun goto conv_err_out;
2132*4882a593Smuzhiyun #if 0
2133*4882a593Smuzhiyun // TODO: Attempt to make other attributes non-resident.
2134*4882a593Smuzhiyun if (!err)
2135*4882a593Smuzhiyun goto do_resident_extend;
2136*4882a593Smuzhiyun /*
2137*4882a593Smuzhiyun * Both the attribute list attribute and the standard information
2138*4882a593Smuzhiyun * attribute must remain in the base inode. Thus, if this is one of
2139*4882a593Smuzhiyun * these attributes, we have to try to move other attributes out into
2140*4882a593Smuzhiyun * extent mft records instead.
2141*4882a593Smuzhiyun */
2142*4882a593Smuzhiyun if (ni->type == AT_ATTRIBUTE_LIST ||
2143*4882a593Smuzhiyun ni->type == AT_STANDARD_INFORMATION) {
2144*4882a593Smuzhiyun // TODO: Attempt to move other attributes into extent mft
2145*4882a593Smuzhiyun // records.
2146*4882a593Smuzhiyun err = -EOPNOTSUPP;
2147*4882a593Smuzhiyun if (!err)
2148*4882a593Smuzhiyun goto do_resident_extend;
2149*4882a593Smuzhiyun goto err_out;
2150*4882a593Smuzhiyun }
2151*4882a593Smuzhiyun // TODO: Attempt to move this attribute to an extent mft record, but
2152*4882a593Smuzhiyun // only if it is not already the only attribute in an mft record in
2153*4882a593Smuzhiyun // which case there would be nothing to gain.
2154*4882a593Smuzhiyun err = -EOPNOTSUPP;
2155*4882a593Smuzhiyun if (!err)
2156*4882a593Smuzhiyun goto do_resident_extend;
2157*4882a593Smuzhiyun /* There is nothing we can do to make enough space. )-: */
2158*4882a593Smuzhiyun goto err_out;
2159*4882a593Smuzhiyun #endif
2160*4882a593Smuzhiyun do_non_resident_extend:
2161*4882a593Smuzhiyun BUG_ON(!NInoNonResident(ni));
2162*4882a593Smuzhiyun if (new_alloc_size == allocated_size) {
2163*4882a593Smuzhiyun BUG_ON(vcn);
2164*4882a593Smuzhiyun goto alloc_done;
2165*4882a593Smuzhiyun }
2166*4882a593Smuzhiyun /*
2167*4882a593Smuzhiyun * If the data starts after the end of the old allocation, this is a
2168*4882a593Smuzhiyun * $DATA attribute and sparse attributes are enabled on the volume and
2169*4882a593Smuzhiyun * for this inode, then create a sparse region between the old
2170*4882a593Smuzhiyun * allocated size and the start of the data. Otherwise simply proceed
2171*4882a593Smuzhiyun * with filling the whole space between the old allocated size and the
2172*4882a593Smuzhiyun * new allocated size with clusters.
2173*4882a593Smuzhiyun */
2174*4882a593Smuzhiyun if ((start >= 0 && start <= allocated_size) || ni->type != AT_DATA ||
2175*4882a593Smuzhiyun !NVolSparseEnabled(vol) || NInoSparseDisabled(ni))
2176*4882a593Smuzhiyun goto skip_sparse;
2177*4882a593Smuzhiyun // TODO: This is not implemented yet. We just fill in with real
2178*4882a593Smuzhiyun // clusters for now...
2179*4882a593Smuzhiyun ntfs_debug("Inserting holes is not implemented yet. Falling back to "
2180*4882a593Smuzhiyun "allocating real clusters instead.");
2181*4882a593Smuzhiyun skip_sparse:
2182*4882a593Smuzhiyun rl = ni->runlist.rl;
2183*4882a593Smuzhiyun if (likely(rl)) {
2184*4882a593Smuzhiyun /* Seek to the end of the runlist. */
2185*4882a593Smuzhiyun while (rl->length)
2186*4882a593Smuzhiyun rl++;
2187*4882a593Smuzhiyun }
2188*4882a593Smuzhiyun /* If this attribute extent is not mapped, map it now. */
2189*4882a593Smuzhiyun if (unlikely(!rl || rl->lcn == LCN_RL_NOT_MAPPED ||
2190*4882a593Smuzhiyun (rl->lcn == LCN_ENOENT && rl > ni->runlist.rl &&
2191*4882a593Smuzhiyun (rl-1)->lcn == LCN_RL_NOT_MAPPED))) {
2192*4882a593Smuzhiyun if (!rl && !allocated_size)
2193*4882a593Smuzhiyun goto first_alloc;
2194*4882a593Smuzhiyun rl = ntfs_mapping_pairs_decompress(vol, a, ni->runlist.rl);
2195*4882a593Smuzhiyun if (IS_ERR(rl)) {
2196*4882a593Smuzhiyun err = PTR_ERR(rl);
2197*4882a593Smuzhiyun if (start < 0 || start >= allocated_size)
2198*4882a593Smuzhiyun ntfs_error(vol->sb, "Cannot extend allocation "
2199*4882a593Smuzhiyun "of inode 0x%lx, attribute "
2200*4882a593Smuzhiyun "type 0x%x, because the "
2201*4882a593Smuzhiyun "mapping of a runlist "
2202*4882a593Smuzhiyun "fragment failed with error "
2203*4882a593Smuzhiyun "code %i.", vi->i_ino,
2204*4882a593Smuzhiyun (unsigned)le32_to_cpu(ni->type),
2205*4882a593Smuzhiyun err);
2206*4882a593Smuzhiyun if (err != -ENOMEM)
2207*4882a593Smuzhiyun err = -EIO;
2208*4882a593Smuzhiyun goto err_out;
2209*4882a593Smuzhiyun }
2210*4882a593Smuzhiyun ni->runlist.rl = rl;
2211*4882a593Smuzhiyun /* Seek to the end of the runlist. */
2212*4882a593Smuzhiyun while (rl->length)
2213*4882a593Smuzhiyun rl++;
2214*4882a593Smuzhiyun }
2215*4882a593Smuzhiyun /*
2216*4882a593Smuzhiyun * We now know the runlist of the last extent is mapped and @rl is at
2217*4882a593Smuzhiyun * the end of the runlist. We want to begin allocating clusters
2218*4882a593Smuzhiyun * starting at the last allocated cluster to reduce fragmentation. If
2219*4882a593Smuzhiyun * there are no valid LCNs in the attribute we let the cluster
2220*4882a593Smuzhiyun * allocator choose the starting cluster.
2221*4882a593Smuzhiyun */
2222*4882a593Smuzhiyun /* If the last LCN is a hole or similar, seek back to the last real LCN. */
2223*4882a593Smuzhiyun while (rl->lcn < 0 && rl > ni->runlist.rl)
2224*4882a593Smuzhiyun rl--;
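/*
 * Illustrative example (hypothetical numbers, not taken from this code): if
 * the last mapped run is (vcn 0x20, lcn 0x5000, length 0x10), the cluster
 * allocation below is hinted to start at lcn 0x5010, i.e. immediately after
 * the last allocated cluster, so that the new run can often be merged with
 * the existing tail of the runlist and fragmentation is kept low.
 */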
2225*4882a593Smuzhiyun first_alloc:
2226*4882a593Smuzhiyun // FIXME: Need to implement partial allocations so at least part of the
2227*4882a593Smuzhiyun // write can be performed when start >= 0. (Needed for POSIX write(2)
2228*4882a593Smuzhiyun // conformance.)
2229*4882a593Smuzhiyun rl2 = ntfs_cluster_alloc(vol, allocated_size >> vol->cluster_size_bits,
2230*4882a593Smuzhiyun (new_alloc_size - allocated_size) >>
2231*4882a593Smuzhiyun vol->cluster_size_bits, (rl && (rl->lcn >= 0)) ?
2232*4882a593Smuzhiyun rl->lcn + rl->length : -1, DATA_ZONE, true);
2233*4882a593Smuzhiyun if (IS_ERR(rl2)) {
2234*4882a593Smuzhiyun err = PTR_ERR(rl2);
2235*4882a593Smuzhiyun if (start < 0 || start >= allocated_size)
2236*4882a593Smuzhiyun ntfs_error(vol->sb, "Cannot extend allocation of "
2237*4882a593Smuzhiyun "inode 0x%lx, attribute type 0x%x, "
2238*4882a593Smuzhiyun "because the allocation of clusters "
2239*4882a593Smuzhiyun "failed with error code %i.", vi->i_ino,
2240*4882a593Smuzhiyun (unsigned)le32_to_cpu(ni->type), err);
2241*4882a593Smuzhiyun if (err != -ENOMEM && err != -ENOSPC)
2242*4882a593Smuzhiyun err = -EIO;
2243*4882a593Smuzhiyun goto err_out;
2244*4882a593Smuzhiyun }
2245*4882a593Smuzhiyun rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
2246*4882a593Smuzhiyun if (IS_ERR(rl)) {
2247*4882a593Smuzhiyun err = PTR_ERR(rl);
2248*4882a593Smuzhiyun if (start < 0 || start >= allocated_size)
2249*4882a593Smuzhiyun ntfs_error(vol->sb, "Cannot extend allocation of "
2250*4882a593Smuzhiyun "inode 0x%lx, attribute type 0x%x, "
2251*4882a593Smuzhiyun "because the runlist merge failed "
2252*4882a593Smuzhiyun "with error code %i.", vi->i_ino,
2253*4882a593Smuzhiyun (unsigned)le32_to_cpu(ni->type), err);
2254*4882a593Smuzhiyun if (err != -ENOMEM)
2255*4882a593Smuzhiyun err = -EIO;
2256*4882a593Smuzhiyun if (ntfs_cluster_free_from_rl(vol, rl2)) {
2257*4882a593Smuzhiyun ntfs_error(vol->sb, "Failed to release allocated "
2258*4882a593Smuzhiyun "cluster(s) in error code path. Run "
2259*4882a593Smuzhiyun "chkdsk to recover the lost "
2260*4882a593Smuzhiyun "cluster(s).");
2261*4882a593Smuzhiyun NVolSetErrors(vol);
2262*4882a593Smuzhiyun }
2263*4882a593Smuzhiyun ntfs_free(rl2);
2264*4882a593Smuzhiyun goto err_out;
2265*4882a593Smuzhiyun }
2266*4882a593Smuzhiyun ni->runlist.rl = rl;
2267*4882a593Smuzhiyun ntfs_debug("Allocated 0x%llx clusters.", (long long)(new_alloc_size -
2268*4882a593Smuzhiyun allocated_size) >> vol->cluster_size_bits);
2269*4882a593Smuzhiyun /* Find the runlist element with which the attribute extent starts. */
2270*4882a593Smuzhiyun ll = sle64_to_cpu(a->data.non_resident.lowest_vcn);
2271*4882a593Smuzhiyun rl2 = ntfs_rl_find_vcn_nolock(rl, ll);
2272*4882a593Smuzhiyun BUG_ON(!rl2);
2273*4882a593Smuzhiyun BUG_ON(!rl2->length);
2274*4882a593Smuzhiyun BUG_ON(rl2->lcn < LCN_HOLE);
2275*4882a593Smuzhiyun mp_rebuilt = false;
2276*4882a593Smuzhiyun /* Get the size for the new mapping pairs array for this extent. */
2277*4882a593Smuzhiyun mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, ll, -1);
2278*4882a593Smuzhiyun if (unlikely(mp_size <= 0)) {
2279*4882a593Smuzhiyun err = mp_size;
2280*4882a593Smuzhiyun if (start < 0 || start >= allocated_size)
2281*4882a593Smuzhiyun ntfs_error(vol->sb, "Cannot extend allocation of "
2282*4882a593Smuzhiyun "inode 0x%lx, attribute type 0x%x, "
2283*4882a593Smuzhiyun "because determining the size for the "
2284*4882a593Smuzhiyun "mapping pairs failed with error code "
2285*4882a593Smuzhiyun "%i.", vi->i_ino,
2286*4882a593Smuzhiyun (unsigned)le32_to_cpu(ni->type), err);
2287*4882a593Smuzhiyun err = -EIO;
2288*4882a593Smuzhiyun goto undo_alloc;
2289*4882a593Smuzhiyun }
2290*4882a593Smuzhiyun /* Extend the attribute record to fit the bigger mapping pairs array. */
2291*4882a593Smuzhiyun attr_len = le32_to_cpu(a->length);
2292*4882a593Smuzhiyun err = ntfs_attr_record_resize(m, a, mp_size +
2293*4882a593Smuzhiyun le16_to_cpu(a->data.non_resident.mapping_pairs_offset));
2294*4882a593Smuzhiyun if (unlikely(err)) {
2295*4882a593Smuzhiyun BUG_ON(err != -ENOSPC);
2296*4882a593Smuzhiyun // TODO: Deal with this by moving this extent to a new mft
2297*4882a593Smuzhiyun // record or by starting a new extent in a new mft record,
2298*4882a593Smuzhiyun // possibly by extending this extent partially and filling it
2299*4882a593Smuzhiyun // and creating a new extent for the remainder, or by making
2300*4882a593Smuzhiyun // other attributes non-resident and/or by moving other
2301*4882a593Smuzhiyun // attributes out of this mft record.
2302*4882a593Smuzhiyun if (start < 0 || start >= allocated_size)
2303*4882a593Smuzhiyun ntfs_error(vol->sb, "Not enough space in the mft "
2304*4882a593Smuzhiyun "record for the extended attribute "
2305*4882a593Smuzhiyun "record. This case is not "
2306*4882a593Smuzhiyun "implemented yet.");
2307*4882a593Smuzhiyun err = -EOPNOTSUPP;
2308*4882a593Smuzhiyun goto undo_alloc;
2309*4882a593Smuzhiyun }
2310*4882a593Smuzhiyun mp_rebuilt = true;
2311*4882a593Smuzhiyun /* Generate the mapping pairs array directly into the attr record. */
2312*4882a593Smuzhiyun err = ntfs_mapping_pairs_build(vol, (u8*)a +
2313*4882a593Smuzhiyun le16_to_cpu(a->data.non_resident.mapping_pairs_offset),
2314*4882a593Smuzhiyun mp_size, rl2, ll, -1, NULL);
2315*4882a593Smuzhiyun if (unlikely(err)) {
2316*4882a593Smuzhiyun if (start < 0 || start >= allocated_size)
2317*4882a593Smuzhiyun ntfs_error(vol->sb, "Cannot extend allocation of "
2318*4882a593Smuzhiyun "inode 0x%lx, attribute type 0x%x, "
2319*4882a593Smuzhiyun "because building the mapping pairs "
2320*4882a593Smuzhiyun "failed with error code %i.", vi->i_ino,
2321*4882a593Smuzhiyun (unsigned)le32_to_cpu(ni->type), err);
2322*4882a593Smuzhiyun err = -EIO;
2323*4882a593Smuzhiyun goto undo_alloc;
2324*4882a593Smuzhiyun }
2325*4882a593Smuzhiyun /* Update the highest_vcn. */
2326*4882a593Smuzhiyun a->data.non_resident.highest_vcn = cpu_to_sle64((new_alloc_size >>
2327*4882a593Smuzhiyun vol->cluster_size_bits) - 1);
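/*
 * For example (a sketch, assuming 4096 byte clusters): extending the
 * allocation to a new_alloc_size of 0x10000 gives 0x10 clusters and hence a
 * highest_vcn of 0xf, i.e. the index of the last allocated cluster.
 */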
2328*4882a593Smuzhiyun /*
2329*4882a593Smuzhiyun * We now have extended the allocated size of the attribute. Reflect
2330*4882a593Smuzhiyun * this in the ntfs_inode structure and the attribute record.
2331*4882a593Smuzhiyun */
2332*4882a593Smuzhiyun if (a->data.non_resident.lowest_vcn) {
2333*4882a593Smuzhiyun /*
2334*4882a593Smuzhiyun * We are not in the first attribute extent, switch to it, but
2335*4882a593Smuzhiyun * first ensure the changes will make it to disk later.
2336*4882a593Smuzhiyun */
2337*4882a593Smuzhiyun flush_dcache_mft_record_page(ctx->ntfs_ino);
2338*4882a593Smuzhiyun mark_mft_record_dirty(ctx->ntfs_ino);
2339*4882a593Smuzhiyun ntfs_attr_reinit_search_ctx(ctx);
2340*4882a593Smuzhiyun err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
2341*4882a593Smuzhiyun CASE_SENSITIVE, 0, NULL, 0, ctx);
2342*4882a593Smuzhiyun if (unlikely(err))
2343*4882a593Smuzhiyun goto restore_undo_alloc;
2344*4882a593Smuzhiyun /* @m is not used any more so no need to set it. */
2345*4882a593Smuzhiyun a = ctx->attr;
2346*4882a593Smuzhiyun }
2347*4882a593Smuzhiyun write_lock_irqsave(&ni->size_lock, flags);
2348*4882a593Smuzhiyun ni->allocated_size = new_alloc_size;
2349*4882a593Smuzhiyun a->data.non_resident.allocated_size = cpu_to_sle64(new_alloc_size);
2350*4882a593Smuzhiyun /*
2351*4882a593Smuzhiyun * FIXME: This would fail if @ni is a directory, $MFT, or an index,
2352*4882a593Smuzhiyun * since those can have the sparse/compressed bits set. For example, a
2353*4882a593Smuzhiyun * directory can be marked compressed even though it is not compressed
2354*4882a593Smuzhiyun * itself; in that case the bit means that files created in the
2355*4882a593Smuzhiyun * directory are to be compressed... At present this is ok as this code
2356*4882a593Smuzhiyun * is only called for regular files, and only for their $DATA attribute(s).
2357*4882a593Smuzhiyun * FIXME: The calculation is wrong if we created a hole above. For now
2358*4882a593Smuzhiyun * it does not matter as we never create holes.
2359*4882a593Smuzhiyun */
2360*4882a593Smuzhiyun if (NInoSparse(ni) || NInoCompressed(ni)) {
2361*4882a593Smuzhiyun ni->itype.compressed.size += new_alloc_size - allocated_size;
2362*4882a593Smuzhiyun a->data.non_resident.compressed_size =
2363*4882a593Smuzhiyun cpu_to_sle64(ni->itype.compressed.size);
2364*4882a593Smuzhiyun vi->i_blocks = ni->itype.compressed.size >> 9;
2365*4882a593Smuzhiyun } else
2366*4882a593Smuzhiyun vi->i_blocks = new_alloc_size >> 9;
2367*4882a593Smuzhiyun write_unlock_irqrestore(&ni->size_lock, flags);
2368*4882a593Smuzhiyun alloc_done:
2369*4882a593Smuzhiyun if (new_data_size >= 0) {
2370*4882a593Smuzhiyun BUG_ON(new_data_size <
2371*4882a593Smuzhiyun sle64_to_cpu(a->data.non_resident.data_size));
2372*4882a593Smuzhiyun a->data.non_resident.data_size = cpu_to_sle64(new_data_size);
2373*4882a593Smuzhiyun }
2374*4882a593Smuzhiyun flush_done:
2375*4882a593Smuzhiyun /* Ensure the changes make it to disk. */
2376*4882a593Smuzhiyun flush_dcache_mft_record_page(ctx->ntfs_ino);
2377*4882a593Smuzhiyun mark_mft_record_dirty(ctx->ntfs_ino);
2378*4882a593Smuzhiyun done:
2379*4882a593Smuzhiyun ntfs_attr_put_search_ctx(ctx);
2380*4882a593Smuzhiyun unmap_mft_record(base_ni);
2381*4882a593Smuzhiyun up_write(&ni->runlist.lock);
2382*4882a593Smuzhiyun ntfs_debug("Done, new_allocated_size 0x%llx.",
2383*4882a593Smuzhiyun (unsigned long long)new_alloc_size);
2384*4882a593Smuzhiyun return new_alloc_size;
2385*4882a593Smuzhiyun restore_undo_alloc:
2386*4882a593Smuzhiyun if (start < 0 || start >= allocated_size)
2387*4882a593Smuzhiyun ntfs_error(vol->sb, "Cannot complete extension of allocation "
2388*4882a593Smuzhiyun "of inode 0x%lx, attribute type 0x%x, because "
2389*4882a593Smuzhiyun "lookup of first attribute extent failed with "
2390*4882a593Smuzhiyun "error code %i.", vi->i_ino,
2391*4882a593Smuzhiyun (unsigned)le32_to_cpu(ni->type), err);
2392*4882a593Smuzhiyun if (err == -ENOENT)
2393*4882a593Smuzhiyun err = -EIO;
2394*4882a593Smuzhiyun ntfs_attr_reinit_search_ctx(ctx);
2395*4882a593Smuzhiyun if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len, CASE_SENSITIVE,
2396*4882a593Smuzhiyun allocated_size >> vol->cluster_size_bits, NULL, 0,
2397*4882a593Smuzhiyun ctx)) {
2398*4882a593Smuzhiyun ntfs_error(vol->sb, "Failed to find last attribute extent of "
2399*4882a593Smuzhiyun "attribute in error code path. Run chkdsk to "
2400*4882a593Smuzhiyun "recover.");
2401*4882a593Smuzhiyun write_lock_irqsave(&ni->size_lock, flags);
2402*4882a593Smuzhiyun ni->allocated_size = new_alloc_size;
2403*4882a593Smuzhiyun /*
2404*4882a593Smuzhiyun * FIXME: This would fail if @ni is a directory... See above.
2405*4882a593Smuzhiyun * FIXME: The calculation is wrong if we created a hole above.
2406*4882a593Smuzhiyun * For now it does not matter as we never create holes.
2407*4882a593Smuzhiyun */
2408*4882a593Smuzhiyun if (NInoSparse(ni) || NInoCompressed(ni)) {
2409*4882a593Smuzhiyun ni->itype.compressed.size += new_alloc_size -
2410*4882a593Smuzhiyun allocated_size;
2411*4882a593Smuzhiyun vi->i_blocks = ni->itype.compressed.size >> 9;
2412*4882a593Smuzhiyun } else
2413*4882a593Smuzhiyun vi->i_blocks = new_alloc_size >> 9;
2414*4882a593Smuzhiyun write_unlock_irqrestore(&ni->size_lock, flags);
2415*4882a593Smuzhiyun ntfs_attr_put_search_ctx(ctx);
2416*4882a593Smuzhiyun unmap_mft_record(base_ni);
2417*4882a593Smuzhiyun up_write(&ni->runlist.lock);
2418*4882a593Smuzhiyun /*
2419*4882a593Smuzhiyun * The only thing that is now wrong is the allocated size of the
2420*4882a593Smuzhiyun * base attribute extent which chkdsk should be able to fix.
2421*4882a593Smuzhiyun */
2422*4882a593Smuzhiyun NVolSetErrors(vol);
2423*4882a593Smuzhiyun return err;
2424*4882a593Smuzhiyun }
2425*4882a593Smuzhiyun ctx->attr->data.non_resident.highest_vcn = cpu_to_sle64(
2426*4882a593Smuzhiyun (allocated_size >> vol->cluster_size_bits) - 1);
2427*4882a593Smuzhiyun undo_alloc:
2428*4882a593Smuzhiyun ll = allocated_size >> vol->cluster_size_bits;
2429*4882a593Smuzhiyun if (ntfs_cluster_free(ni, ll, -1, ctx) < 0) {
2430*4882a593Smuzhiyun ntfs_error(vol->sb, "Failed to release allocated cluster(s) "
2431*4882a593Smuzhiyun "in error code path. Run chkdsk to recover "
2432*4882a593Smuzhiyun "the lost cluster(s).");
2433*4882a593Smuzhiyun NVolSetErrors(vol);
2434*4882a593Smuzhiyun }
2435*4882a593Smuzhiyun m = ctx->mrec;
2436*4882a593Smuzhiyun a = ctx->attr;
2437*4882a593Smuzhiyun /*
2438*4882a593Smuzhiyun * If the runlist truncation fails and/or the search context is no
2439*4882a593Smuzhiyun * longer valid, we cannot resize the attribute record or build the
2440*4882a593Smuzhiyun * mapping pairs array thus we mark the inode bad so that no access to
2441*4882a593Smuzhiyun * the freed clusters can happen.
2442*4882a593Smuzhiyun */
2443*4882a593Smuzhiyun if (ntfs_rl_truncate_nolock(vol, &ni->runlist, ll) || IS_ERR(m)) {
2444*4882a593Smuzhiyun ntfs_error(vol->sb, "Failed to %s in error code path. Run "
2445*4882a593Smuzhiyun "chkdsk to recover.", IS_ERR(m) ?
2446*4882a593Smuzhiyun "restore attribute search context" :
2447*4882a593Smuzhiyun "truncate attribute runlist");
2448*4882a593Smuzhiyun NVolSetErrors(vol);
2449*4882a593Smuzhiyun } else if (mp_rebuilt) {
2450*4882a593Smuzhiyun if (ntfs_attr_record_resize(m, a, attr_len)) {
2451*4882a593Smuzhiyun ntfs_error(vol->sb, "Failed to restore attribute "
2452*4882a593Smuzhiyun "record in error code path. Run "
2453*4882a593Smuzhiyun "chkdsk to recover.");
2454*4882a593Smuzhiyun NVolSetErrors(vol);
2455*4882a593Smuzhiyun } else /* if (success) */ {
2456*4882a593Smuzhiyun if (ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
2457*4882a593Smuzhiyun a->data.non_resident.
2458*4882a593Smuzhiyun mapping_pairs_offset), attr_len -
2459*4882a593Smuzhiyun le16_to_cpu(a->data.non_resident.
2460*4882a593Smuzhiyun mapping_pairs_offset), rl2, ll, -1,
2461*4882a593Smuzhiyun NULL)) {
2462*4882a593Smuzhiyun ntfs_error(vol->sb, "Failed to restore "
2463*4882a593Smuzhiyun "mapping pairs array in error "
2464*4882a593Smuzhiyun "code path. Run chkdsk to "
2465*4882a593Smuzhiyun "recover.");
2466*4882a593Smuzhiyun NVolSetErrors(vol);
2467*4882a593Smuzhiyun }
2468*4882a593Smuzhiyun flush_dcache_mft_record_page(ctx->ntfs_ino);
2469*4882a593Smuzhiyun mark_mft_record_dirty(ctx->ntfs_ino);
2470*4882a593Smuzhiyun }
2471*4882a593Smuzhiyun }
2472*4882a593Smuzhiyun err_out:
2473*4882a593Smuzhiyun if (ctx)
2474*4882a593Smuzhiyun ntfs_attr_put_search_ctx(ctx);
2475*4882a593Smuzhiyun if (m)
2476*4882a593Smuzhiyun unmap_mft_record(base_ni);
2477*4882a593Smuzhiyun up_write(&ni->runlist.lock);
2478*4882a593Smuzhiyun conv_err_out:
2479*4882a593Smuzhiyun ntfs_debug("Failed. Returning error code %i.", err);
2480*4882a593Smuzhiyun return err;
2481*4882a593Smuzhiyun }
2482*4882a593Smuzhiyun
2483*4882a593Smuzhiyun /**
2484*4882a593Smuzhiyun * ntfs_attr_set - fill (a part of) an attribute with a byte
2485*4882a593Smuzhiyun * @ni: ntfs inode describing the attribute to fill
2486*4882a593Smuzhiyun * @ofs: offset inside the attribute at which to start to fill
2487*4882a593Smuzhiyun * @cnt: number of bytes to fill
2488*4882a593Smuzhiyun * @val: the unsigned 8-bit value with which to fill the attribute
2489*4882a593Smuzhiyun *
2490*4882a593Smuzhiyun * Fill @cnt bytes of the attribute described by the ntfs inode @ni starting at
2491*4882a593Smuzhiyun * byte offset @ofs inside the attribute with the constant byte @val.
2492*4882a593Smuzhiyun *
2493*4882a593Smuzhiyun * This function is effectively like memset() applied to an ntfs attribute.
2494*4882a593Smuzhiyun * Note this function actually only operates on the page cache pages belonging
2495*4882a593Smuzhiyun * to the ntfs attribute and it marks them dirty after doing the memset().
2496*4882a593Smuzhiyun * Thus it relies on the vm dirty page write code paths to cause the modified
2497*4882a593Smuzhiyun * pages to be written to the mft record/disk.
2498*4882a593Smuzhiyun *
2499*4882a593Smuzhiyun * Return 0 on success and -errno on error. An error code of -ESPIPE means
2500*4882a593Smuzhiyun * that @ofs + @cnt were outside the end of the attribute and no write was
2501*4882a593Smuzhiyun * performed.
2502*4882a593Smuzhiyun */
2503*4882a593Smuzhiyun int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
2504*4882a593Smuzhiyun {
2505*4882a593Smuzhiyun ntfs_volume *vol = ni->vol;
2506*4882a593Smuzhiyun struct address_space *mapping;
2507*4882a593Smuzhiyun struct page *page;
2508*4882a593Smuzhiyun u8 *kaddr;
2509*4882a593Smuzhiyun pgoff_t idx, end;
2510*4882a593Smuzhiyun unsigned start_ofs, end_ofs, size;
2511*4882a593Smuzhiyun
2512*4882a593Smuzhiyun ntfs_debug("Entering for ofs 0x%llx, cnt 0x%llx, val 0x%hx.",
2513*4882a593Smuzhiyun (long long)ofs, (long long)cnt, val);
2514*4882a593Smuzhiyun BUG_ON(ofs < 0);
2515*4882a593Smuzhiyun BUG_ON(cnt < 0);
2516*4882a593Smuzhiyun if (!cnt)
2517*4882a593Smuzhiyun goto done;
2518*4882a593Smuzhiyun /*
2519*4882a593Smuzhiyun * FIXME: Compressed and encrypted attributes are not supported when
2520*4882a593Smuzhiyun * writing and we should never have gotten here for them.
2521*4882a593Smuzhiyun */
2522*4882a593Smuzhiyun BUG_ON(NInoCompressed(ni));
2523*4882a593Smuzhiyun BUG_ON(NInoEncrypted(ni));
2524*4882a593Smuzhiyun mapping = VFS_I(ni)->i_mapping;
2525*4882a593Smuzhiyun /* Work out the starting index and page offset. */
2526*4882a593Smuzhiyun idx = ofs >> PAGE_SHIFT;
2527*4882a593Smuzhiyun start_ofs = ofs & ~PAGE_MASK;
2528*4882a593Smuzhiyun /* Work out the ending index and page offset. */
2529*4882a593Smuzhiyun end = ofs + cnt;
2530*4882a593Smuzhiyun end_ofs = end & ~PAGE_MASK;
2531*4882a593Smuzhiyun /* If the end is outside the inode size return -ESPIPE. */
2532*4882a593Smuzhiyun if (unlikely(end > i_size_read(VFS_I(ni)))) {
2533*4882a593Smuzhiyun ntfs_error(vol->sb, "Request exceeds end of attribute.");
2534*4882a593Smuzhiyun return -ESPIPE;
2535*4882a593Smuzhiyun }
2536*4882a593Smuzhiyun end >>= PAGE_SHIFT;
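/*
 * Worked example (a sketch, assuming a 4096 byte PAGE_SIZE): for ofs 0x1800
 * and cnt 0x3000 the code above yields idx 1 and start_ofs 0x800, and
 * end 0x4800 yields end_ofs 0x800 and, after the shift, an end index of 4.
 * Page 1 is then the first partial page (filled from byte 0x800 to the end
 * of the page), pages 2 and 3 are filled whole, and page 4 is the last
 * partial page (bytes 0x0 to 0x7ff).
 */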
2537*4882a593Smuzhiyun /* If there is a first partial page, need to do it the slow way. */
2538*4882a593Smuzhiyun if (start_ofs) {
2539*4882a593Smuzhiyun page = read_mapping_page(mapping, idx, NULL);
2540*4882a593Smuzhiyun if (IS_ERR(page)) {
2541*4882a593Smuzhiyun ntfs_error(vol->sb, "Failed to read first partial "
2542*4882a593Smuzhiyun "page (error, index 0x%lx).", idx);
2543*4882a593Smuzhiyun return PTR_ERR(page);
2544*4882a593Smuzhiyun }
2545*4882a593Smuzhiyun /*
2546*4882a593Smuzhiyun * If the last page is the same as the first page, need to
2547*4882a593Smuzhiyun * limit the write to the end offset.
2548*4882a593Smuzhiyun */
2549*4882a593Smuzhiyun size = PAGE_SIZE;
2550*4882a593Smuzhiyun if (idx == end)
2551*4882a593Smuzhiyun size = end_ofs;
2552*4882a593Smuzhiyun kaddr = kmap_atomic(page);
2553*4882a593Smuzhiyun memset(kaddr + start_ofs, val, size - start_ofs);
2554*4882a593Smuzhiyun flush_dcache_page(page);
2555*4882a593Smuzhiyun kunmap_atomic(kaddr);
2556*4882a593Smuzhiyun set_page_dirty(page);
2557*4882a593Smuzhiyun put_page(page);
2558*4882a593Smuzhiyun balance_dirty_pages_ratelimited(mapping);
2559*4882a593Smuzhiyun cond_resched();
2560*4882a593Smuzhiyun if (idx == end)
2561*4882a593Smuzhiyun goto done;
2562*4882a593Smuzhiyun idx++;
2563*4882a593Smuzhiyun }
2564*4882a593Smuzhiyun /* Do the whole pages the fast way. */
2565*4882a593Smuzhiyun for (; idx < end; idx++) {
2566*4882a593Smuzhiyun /* Find or create the current page. (The page is locked.) */
2567*4882a593Smuzhiyun page = grab_cache_page(mapping, idx);
2568*4882a593Smuzhiyun if (unlikely(!page)) {
2569*4882a593Smuzhiyun ntfs_error(vol->sb, "Insufficient memory to grab "
2570*4882a593Smuzhiyun "page (index 0x%lx).", idx);
2571*4882a593Smuzhiyun return -ENOMEM;
2572*4882a593Smuzhiyun }
2573*4882a593Smuzhiyun kaddr = kmap_atomic(page);
2574*4882a593Smuzhiyun memset(kaddr, val, PAGE_SIZE);
2575*4882a593Smuzhiyun flush_dcache_page(page);
2576*4882a593Smuzhiyun kunmap_atomic(kaddr);
2577*4882a593Smuzhiyun /*
2578*4882a593Smuzhiyun * If the page has buffers, mark them uptodate since buffer
2579*4882a593Smuzhiyun * state and not page state is definitive in 2.6 kernels.
2580*4882a593Smuzhiyun */
2581*4882a593Smuzhiyun if (page_has_buffers(page)) {
2582*4882a593Smuzhiyun struct buffer_head *bh, *head;
2583*4882a593Smuzhiyun
2584*4882a593Smuzhiyun bh = head = page_buffers(page);
2585*4882a593Smuzhiyun do {
2586*4882a593Smuzhiyun set_buffer_uptodate(bh);
2587*4882a593Smuzhiyun } while ((bh = bh->b_this_page) != head);
2588*4882a593Smuzhiyun }
2589*4882a593Smuzhiyun /* Now that buffers are uptodate, set the page uptodate, too. */
2590*4882a593Smuzhiyun SetPageUptodate(page);
2591*4882a593Smuzhiyun /*
2592*4882a593Smuzhiyun * Set the page and all its buffers dirty and mark the inode
2593*4882a593Smuzhiyun * dirty, too. The VM will write the page later on.
2594*4882a593Smuzhiyun */
2595*4882a593Smuzhiyun set_page_dirty(page);
2596*4882a593Smuzhiyun /* Finally unlock and release the page. */
2597*4882a593Smuzhiyun unlock_page(page);
2598*4882a593Smuzhiyun put_page(page);
2599*4882a593Smuzhiyun balance_dirty_pages_ratelimited(mapping);
2600*4882a593Smuzhiyun cond_resched();
2601*4882a593Smuzhiyun }
2602*4882a593Smuzhiyun /* If there is a last partial page, need to do it the slow way. */
2603*4882a593Smuzhiyun if (end_ofs) {
2604*4882a593Smuzhiyun page = read_mapping_page(mapping, idx, NULL);
2605*4882a593Smuzhiyun if (IS_ERR(page)) {
2606*4882a593Smuzhiyun ntfs_error(vol->sb, "Failed to read last partial page "
2607*4882a593Smuzhiyun "(error, index 0x%lx).", idx);
2608*4882a593Smuzhiyun return PTR_ERR(page);
2609*4882a593Smuzhiyun }
2610*4882a593Smuzhiyun kaddr = kmap_atomic(page);
2611*4882a593Smuzhiyun memset(kaddr, val, end_ofs);
2612*4882a593Smuzhiyun flush_dcache_page(page);
2613*4882a593Smuzhiyun kunmap_atomic(kaddr);
2614*4882a593Smuzhiyun set_page_dirty(page);
2615*4882a593Smuzhiyun put_page(page);
2616*4882a593Smuzhiyun balance_dirty_pages_ratelimited(mapping);
2617*4882a593Smuzhiyun cond_resched();
2618*4882a593Smuzhiyun }
2619*4882a593Smuzhiyun done:
2620*4882a593Smuzhiyun ntfs_debug("Done.");
2621*4882a593Smuzhiyun return 0;
2622*4882a593Smuzhiyun }
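/*
 * Usage sketch (hypothetical caller, not part of this file): a caller that
 * already holds a reference to the ntfs inode could zero a byte range of the
 * attribute, e.g. when filling a region in preparation for a partial write,
 * along these lines:
 *
 *	err = ntfs_attr_set(ni, zero_start, zero_len, 0);
 *	if (err)
 *		ntfs_error(vol->sb, "Failed to zero region (error %i).", err);
 *
 * Here @zero_start and @zero_len are illustrative values; they must describe
 * a range ending at or before i_size of the vfs inode, since ntfs_attr_set()
 * returns -ESPIPE when @ofs + @cnt exceeds it.
 */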
2623*4882a593Smuzhiyun
2624*4882a593Smuzhiyun #endif /* NTFS_RW */
2625