1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * Author: Lasse Collin <lasse.collin@tukaani.org>
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * This file has been put into the public domain.
7*4882a593Smuzhiyun * You can do whatever you want with this file.
8*4882a593Smuzhiyun */
9*4882a593Smuzhiyun
10*4882a593Smuzhiyun /*
11*4882a593Smuzhiyun * Important notes about in-place decompression
12*4882a593Smuzhiyun *
13*4882a593Smuzhiyun * At least on x86, the kernel is decompressed in place: the compressed data
14*4882a593Smuzhiyun * is placed to the end of the output buffer, and the decompressor overwrites
15*4882a593Smuzhiyun * most of the compressed data. There must be enough safety margin to
16*4882a593Smuzhiyun * guarantee that the write position is always behind the read position.
17*4882a593Smuzhiyun *
18*4882a593Smuzhiyun * The safety margin for XZ with LZMA2 or BCJ+LZMA2 is calculated below.
19*4882a593Smuzhiyun * Note that the margin with XZ is bigger than with Deflate (gzip)!
20*4882a593Smuzhiyun *
21*4882a593Smuzhiyun * The worst case for in-place decompression is that the beginning of
22*4882a593Smuzhiyun * the file is compressed extremely well, and the rest of the file is
23*4882a593Smuzhiyun * uncompressible. Thus, we must look for worst-case expansion when the
24*4882a593Smuzhiyun * compressor is encoding uncompressible data.
25*4882a593Smuzhiyun *
 * The structure of the .xz file in case of a compressed kernel is as follows.
 * Sizes (as bytes) of the fields are in parentheses.
28*4882a593Smuzhiyun *
29*4882a593Smuzhiyun * Stream Header (12)
30*4882a593Smuzhiyun * Block Header:
31*4882a593Smuzhiyun * Block Header (8-12)
32*4882a593Smuzhiyun * Compressed Data (N)
33*4882a593Smuzhiyun * Block Padding (0-3)
34*4882a593Smuzhiyun * CRC32 (4)
35*4882a593Smuzhiyun * Index (8-20)
36*4882a593Smuzhiyun * Stream Footer (12)
37*4882a593Smuzhiyun *
38*4882a593Smuzhiyun * Normally there is exactly one Block, but let's assume that there are
39*4882a593Smuzhiyun * 2-4 Blocks just in case. Because Stream Header and also Block Header
40*4882a593Smuzhiyun * of the first Block don't make the decompressor produce any uncompressed
41*4882a593Smuzhiyun * data, we can ignore them from our calculations. Block Headers of possible
42*4882a593Smuzhiyun * additional Blocks have to be taken into account still. With these
43*4882a593Smuzhiyun * assumptions, it is safe to assume that the total header overhead is
44*4882a593Smuzhiyun * less than 128 bytes.
45*4882a593Smuzhiyun *
46*4882a593Smuzhiyun * Compressed Data contains LZMA2 or BCJ+LZMA2 encoded data. Since BCJ
47*4882a593Smuzhiyun * doesn't change the size of the data, it is enough to calculate the
48*4882a593Smuzhiyun * safety margin for LZMA2.
49*4882a593Smuzhiyun *
50*4882a593Smuzhiyun * LZMA2 stores the data in chunks. Each chunk has a header whose size is
51*4882a593Smuzhiyun * a maximum of 6 bytes, but to get round 2^n numbers, let's assume that
52*4882a593Smuzhiyun * the maximum chunk header size is 8 bytes. After the chunk header, there
53*4882a593Smuzhiyun * may be up to 64 KiB of actual payload in the chunk. Often the payload is
54*4882a593Smuzhiyun * quite a bit smaller though; to be safe, let's assume that an average
55*4882a593Smuzhiyun * chunk has only 32 KiB of payload.
56*4882a593Smuzhiyun *
57*4882a593Smuzhiyun * The maximum uncompressed size of the payload is 2 MiB. The minimum
58*4882a593Smuzhiyun * uncompressed size of the payload is in practice never less than the
59*4882a593Smuzhiyun * payload size itself. The LZMA2 format would allow uncompressed size
60*4882a593Smuzhiyun * to be less than the payload size, but no sane compressor creates such
61*4882a593Smuzhiyun * files. LZMA2 supports storing uncompressible data in uncompressed form,
62*4882a593Smuzhiyun * so there's never a need to create payloads whose uncompressed size is
63*4882a593Smuzhiyun * smaller than the compressed size.
64*4882a593Smuzhiyun *
65*4882a593Smuzhiyun * The assumption, that the uncompressed size of the payload is never
66*4882a593Smuzhiyun * smaller than the payload itself, is valid only when talking about
67*4882a593Smuzhiyun * the payload as a whole. It is possible that the payload has parts where
68*4882a593Smuzhiyun * the decompressor consumes more input than it produces output. Calculating
69*4882a593Smuzhiyun * the worst case for this would be tricky. Instead of trying to do that,
70*4882a593Smuzhiyun * let's simply make sure that the decompressor never overwrites any bytes
71*4882a593Smuzhiyun * of the payload which it is currently reading.
72*4882a593Smuzhiyun *
73*4882a593Smuzhiyun * Now we have enough information to calculate the safety margin. We need
74*4882a593Smuzhiyun * - 128 bytes for the .xz file format headers;
75*4882a593Smuzhiyun * - 8 bytes per every 32 KiB of uncompressed size (one LZMA2 chunk header
76*4882a593Smuzhiyun * per chunk, each chunk having average payload size of 32 KiB); and
77*4882a593Smuzhiyun * - 64 KiB (biggest possible LZMA2 chunk payload size) to make sure that
78*4882a593Smuzhiyun * the decompressor never overwrites anything from the LZMA2 chunk
79*4882a593Smuzhiyun * payload it is currently reading.
80*4882a593Smuzhiyun *
81*4882a593Smuzhiyun * We get the following formula:
82*4882a593Smuzhiyun *
83*4882a593Smuzhiyun * safety_margin = 128 + uncompressed_size * 8 / 32768 + 65536
84*4882a593Smuzhiyun * = 128 + (uncompressed_size >> 12) + 65536
85*4882a593Smuzhiyun *
86*4882a593Smuzhiyun * For comparison, according to arch/x86/boot/compressed/misc.c, the
87*4882a593Smuzhiyun * equivalent formula for Deflate is this:
88*4882a593Smuzhiyun *
89*4882a593Smuzhiyun * safety_margin = 18 + (uncompressed_size >> 12) + 32768
90*4882a593Smuzhiyun *
91*4882a593Smuzhiyun * Thus, when updating Deflate-only in-place kernel decompressor to
92*4882a593Smuzhiyun * support XZ, the fixed overhead has to be increased from 18+32768 bytes
93*4882a593Smuzhiyun * to 128+65536 bytes.
94*4882a593Smuzhiyun */
95*4882a593Smuzhiyun
96*4882a593Smuzhiyun /*
97*4882a593Smuzhiyun * STATIC is defined to "static" if we are being built for kernel
98*4882a593Smuzhiyun * decompression (pre-boot code). <linux/decompress/mm.h> will define
99*4882a593Smuzhiyun * STATIC to empty if it wasn't already defined. Since we will need to
100*4882a593Smuzhiyun * know later if we are being used for kernel decompression, we define
101*4882a593Smuzhiyun * XZ_PREBOOT here.
102*4882a593Smuzhiyun */
103*4882a593Smuzhiyun #ifdef STATIC
104*4882a593Smuzhiyun # define XZ_PREBOOT
105*4882a593Smuzhiyun #endif
106*4882a593Smuzhiyun #ifdef __KERNEL__
107*4882a593Smuzhiyun # include <linux/decompress/mm.h>
108*4882a593Smuzhiyun #endif
109*4882a593Smuzhiyun #define XZ_EXTERN STATIC
110*4882a593Smuzhiyun
111*4882a593Smuzhiyun #ifndef XZ_PREBOOT
112*4882a593Smuzhiyun # include <linux/slab.h>
113*4882a593Smuzhiyun # include <linux/xz.h>
114*4882a593Smuzhiyun #else
115*4882a593Smuzhiyun /*
116*4882a593Smuzhiyun * Use the internal CRC32 code instead of kernel's CRC32 module, which
117*4882a593Smuzhiyun * is not available in early phase of booting.
118*4882a593Smuzhiyun */
119*4882a593Smuzhiyun #define XZ_INTERNAL_CRC32 1
120*4882a593Smuzhiyun
121*4882a593Smuzhiyun /*
122*4882a593Smuzhiyun * For boot time use, we enable only the BCJ filter of the current
123*4882a593Smuzhiyun * architecture or none if no BCJ filter is available for the architecture.
124*4882a593Smuzhiyun */
125*4882a593Smuzhiyun #ifdef CONFIG_X86
126*4882a593Smuzhiyun # define XZ_DEC_X86
127*4882a593Smuzhiyun #endif
128*4882a593Smuzhiyun #ifdef CONFIG_PPC
129*4882a593Smuzhiyun # define XZ_DEC_POWERPC
130*4882a593Smuzhiyun #endif
131*4882a593Smuzhiyun #ifdef CONFIG_ARM
132*4882a593Smuzhiyun # ifdef CONFIG_THUMB2_KERNEL
133*4882a593Smuzhiyun # define XZ_DEC_ARMTHUMB
134*4882a593Smuzhiyun # else
135*4882a593Smuzhiyun # define XZ_DEC_ARM
136*4882a593Smuzhiyun # endif
137*4882a593Smuzhiyun #endif
138*4882a593Smuzhiyun #ifdef CONFIG_IA64
139*4882a593Smuzhiyun # define XZ_DEC_IA64
140*4882a593Smuzhiyun #endif
141*4882a593Smuzhiyun #ifdef CONFIG_SPARC
142*4882a593Smuzhiyun # define XZ_DEC_SPARC
143*4882a593Smuzhiyun #endif
144*4882a593Smuzhiyun
145*4882a593Smuzhiyun /*
146*4882a593Smuzhiyun * This will get the basic headers so that memeq() and others
147*4882a593Smuzhiyun * can be defined.
148*4882a593Smuzhiyun */
149*4882a593Smuzhiyun #include "xz/xz_private.h"
150*4882a593Smuzhiyun
151*4882a593Smuzhiyun /*
152*4882a593Smuzhiyun * Replace the normal allocation functions with the versions from
153*4882a593Smuzhiyun * <linux/decompress/mm.h>. vfree() needs to support vfree(NULL)
154*4882a593Smuzhiyun * when XZ_DYNALLOC is used, but the pre-boot free() doesn't support it.
155*4882a593Smuzhiyun * Workaround it here because the other decompressors don't need it.
156*4882a593Smuzhiyun */
#undef kmalloc
#undef kfree
#undef vmalloc
#undef vfree
#define kmalloc(size, flags) malloc(size)
#define kfree(ptr) free(ptr)
#define vmalloc(size) malloc(size)
/*
 * Parenthesize the macro argument: without the parentheses an argument
 * containing a low-precedence operator (e.g. vfree(p = q)) would expand
 * to "if (p = (q != NULL))" and silently misbehave.
 */
#define vfree(ptr) do { if ((ptr) != NULL) free(ptr); } while (0)
165*4882a593Smuzhiyun
166*4882a593Smuzhiyun /*
167*4882a593Smuzhiyun * FIXME: Not all basic memory functions are provided in architecture-specific
168*4882a593Smuzhiyun * files (yet). We define our own versions here for now, but this should be
169*4882a593Smuzhiyun * only a temporary solution.
170*4882a593Smuzhiyun *
171*4882a593Smuzhiyun * memeq and memzero are not used much and any remotely sane implementation
172*4882a593Smuzhiyun * is fast enough. memcpy/memmove speed matters in multi-call mode, but
173*4882a593Smuzhiyun * the kernel image is decompressed in single-call mode, in which only
174*4882a593Smuzhiyun * memmove speed can matter and only if there is a lot of uncompressible data
175*4882a593Smuzhiyun * (LZMA2 stores uncompressible chunks in uncompressed form). Thus, the
176*4882a593Smuzhiyun * functions below should just be kept small; it's probably not worth
177*4882a593Smuzhiyun * optimizing for speed.
178*4882a593Smuzhiyun */
179*4882a593Smuzhiyun
#ifndef memeq
/*
 * Compare two memory areas for equality. Returns true when the first
 * "size" bytes of "a" and "b" are identical; trivially true for size 0.
 */
static bool memeq(const void *a, const void *b, size_t size)
{
	const uint8_t *p = a;
	const uint8_t *q = b;
	const uint8_t *end = p + size;

	while (p != end) {
		if (*p++ != *q++)
			return false;
	}

	return true;
}
#endif
194*4882a593Smuzhiyun
#ifndef memzero
/* Fill the given buffer with "size" zero bytes. */
static void memzero(void *buf, size_t size)
{
	uint8_t *p = buf;
	size_t i;

	for (i = 0; i < size; ++i)
		p[i] = 0;
}
#endif
205*4882a593Smuzhiyun
#ifndef memmove
/*
 * Copy "size" bytes from "src" to "dest", handling overlapping regions
 * correctly by picking the copy direction from the pointer order.
 * Not static to avoid a conflict with the prototype in the Linux headers.
 */
void *memmove(void *dest, const void *src, size_t size)
{
	uint8_t *d = dest;
	const uint8_t *s = src;

	if (d < s) {
		/* dest precedes src: a forward copy cannot clobber src. */
		while (size-- > 0)
			*d++ = *s++;
	} else if (d > s) {
		/* dest follows src: copy backwards to preserve src bytes. */
		d += size;
		s += size;
		while (size-- > 0)
			*--d = *--s;
	}

	return dest;
}
#endif
226*4882a593Smuzhiyun
/*
 * Since we need memmove anyway, we could use it as memcpy too.
 * Commented out for now to avoid breaking things.
 */
/*
#ifndef memcpy
# define memcpy memmove
#endif
*/
236*4882a593Smuzhiyun
237*4882a593Smuzhiyun #include "xz/xz_crc32.c"
238*4882a593Smuzhiyun #include "xz/xz_dec_stream.c"
239*4882a593Smuzhiyun #include "xz/xz_dec_lzma2.c"
240*4882a593Smuzhiyun #include "xz/xz_dec_bcj.c"
241*4882a593Smuzhiyun
242*4882a593Smuzhiyun #endif /* XZ_PREBOOT */
243*4882a593Smuzhiyun
244*4882a593Smuzhiyun /* Size of the input and output buffers in multi-call mode */
245*4882a593Smuzhiyun #define XZ_IOBUF_SIZE 4096
246*4882a593Smuzhiyun
247*4882a593Smuzhiyun /*
248*4882a593Smuzhiyun * This function implements the API defined in <linux/decompress/generic.h>.
249*4882a593Smuzhiyun *
250*4882a593Smuzhiyun * This wrapper will automatically choose single-call or multi-call mode
251*4882a593Smuzhiyun * of the native XZ decoder API. The single-call mode can be used only when
252*4882a593Smuzhiyun * both input and output buffers are available as a single chunk, i.e. when
253*4882a593Smuzhiyun * fill() and flush() won't be used.
254*4882a593Smuzhiyun */
STATIC int INIT unxz(unsigned char *in, long in_size,
		     long (*fill)(void *dest, unsigned long size),
		     long (*flush)(void *src, unsigned long size),
		     unsigned char *out, long *in_used,
		     void (*error)(char *x))
{
	struct xz_buf b;
	struct xz_dec *s;
	enum xz_ret ret;
	bool must_free_in = false;	/* true if we malloc()ed "in" ourselves */

#if XZ_INTERNAL_CRC32
	/* Build the internal CRC32 table; the kernel CRC32 module
	 * isn't available this early in boot. */
	xz_crc32_init();
#endif

	if (in_used != NULL)
		*in_used = 0;

	/*
	 * Single-call mode (XZ_SINGLE) needs the whole input and output
	 * present in memory at once; multi-call mode (XZ_DYNALLOC) streams
	 * data through fill()/flush() with small bounce buffers.
	 */
	if (fill == NULL && flush == NULL)
		s = xz_dec_init(XZ_SINGLE, 0);
	else
		s = xz_dec_init(XZ_DYNALLOC, (uint32_t)-1);

	if (s == NULL)
		goto error_alloc_state;

	if (flush == NULL) {
		/* Caller supplied the full output buffer; no size limit
		 * is enforced here (in-place margin guarantees safety). */
		b.out = out;
		b.out_size = (size_t)-1;
	} else {
		/* Output is streamed out via flush() from a bounce buffer. */
		b.out_size = XZ_IOBUF_SIZE;
		b.out = malloc(XZ_IOBUF_SIZE);
		if (b.out == NULL)
			goto error_alloc_out;
	}

	if (in == NULL) {
		/* No input buffer given; allocate one for fill() to refill. */
		must_free_in = true;
		in = malloc(XZ_IOBUF_SIZE);
		if (in == NULL)
			goto error_alloc_in;
	}

	b.in = in;
	b.in_pos = 0;
	b.in_size = in_size;
	b.out_pos = 0;

	if (fill == NULL && flush == NULL) {
		/* Single-call mode: one pass decodes the whole stream. */
		ret = xz_dec_run(s, &b);
	} else {
		/* Multi-call loop: refill input and drain output as needed. */
		do {
			if (b.in_pos == b.in_size && fill != NULL) {
				/* Account for the input consumed so far
				 * before resetting the read position. */
				if (in_used != NULL)
					*in_used += b.in_pos;

				b.in_pos = 0;

				in_size = fill(in, XZ_IOBUF_SIZE);
				if (in_size < 0) {
					/*
					 * This isn't an optimal error code
					 * but it probably isn't worth making
					 * a new one either.
					 */
					ret = XZ_BUF_ERROR;
					break;
				}

				b.in_size = in_size;
			}

			ret = xz_dec_run(s, &b);

			/* Drain the output buffer when it fills up, or on
			 * the final iteration if anything is pending. */
			if (flush != NULL && (b.out_pos == b.out_size
					|| (ret != XZ_OK && b.out_pos > 0))) {
				/*
				 * Setting ret here may hide an error
				 * returned by xz_dec_run(), but probably
				 * it's not too bad.
				 */
				if (flush(b.out, b.out_pos) != (long)b.out_pos)
					ret = XZ_BUF_ERROR;

				b.out_pos = 0;
			}
		} while (ret == XZ_OK);

		if (must_free_in)
			free(in);

		if (flush != NULL)
			free(b.out);
	}

	/* Add whatever remained unaccounted from the final buffer. */
	if (in_used != NULL)
		*in_used += b.in_pos;

	xz_dec_end(s);

	/* Map the decoder status to the generic 0 / -1 convention,
	 * reporting failures through the caller's error() callback. */
	switch (ret) {
	case XZ_STREAM_END:
		return 0;

	case XZ_MEM_ERROR:
		/* This can occur only in multi-call mode. */
		error("XZ decompressor ran out of memory");
		break;

	case XZ_FORMAT_ERROR:
		error("Input is not in the XZ format (wrong magic bytes)");
		break;

	case XZ_OPTIONS_ERROR:
		error("Input was encoded with settings that are not "
				"supported by this XZ decoder");
		break;

	case XZ_DATA_ERROR:
	case XZ_BUF_ERROR:
		error("XZ-compressed data is corrupt");
		break;

	default:
		error("Bug in the XZ decompressor");
		break;
	}

	return -1;

	/* Allocation-failure unwind: free only what was acquired. */
error_alloc_in:
	if (flush != NULL)
		free(b.out);

error_alloc_out:
	xz_dec_end(s);

error_alloc_state:
	error("XZ decompressor ran out of memory");
	return -1;
}
396*4882a593Smuzhiyun
397*4882a593Smuzhiyun /*
398*4882a593Smuzhiyun * This macro is used by architecture-specific files to decompress
399*4882a593Smuzhiyun * the kernel image.
400*4882a593Smuzhiyun */
401*4882a593Smuzhiyun #ifdef XZ_PREBOOT
STATIC int INIT __decompress(unsigned char *buf, long len,
			     long (*fill)(void*, unsigned long),
			     long (*flush)(void*, unsigned long),
			     unsigned char *out_buf, long olen,
			     long *pos,
			     void (*error)(char *x))
{
	/*
	 * Thin adapter to the generic pre-boot decompressor interface.
	 * olen is intentionally unused: in single-call mode unxz() treats
	 * the output buffer as unbounded (out_size = (size_t)-1) and
	 * safety is provided by the in-place margin described at the top
	 * of this file.
	 */
	return unxz(buf, len, fill, flush, out_buf, pos, error);
}
411*4882a593Smuzhiyun #endif
412