// SPDX-License-Identifier: GPL-2.0-only
/*
 * Partial Parity Log for closing the RAID5 write hole
 * Copyright (c) 2017, Intel Corporation.
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/crc32c.h>
#include <linux/async_tx.h>
#include <linux/raid/md_p.h>
#include "md.h"
#include "raid5.h"
#include "raid5-log.h"

/*
 * PPL consists of a 4KB header (struct ppl_header) and at least 128KB for
 * partial parity data. The header contains an array of entries
 * (struct ppl_header_entry) which describe the logged write requests.
 * Partial parity for the entries comes after the header, written in the same
 * sequence as the entries:
 *
 * Header
 *   entry0
 *   ...
 *   entryN
 * PP data
 *   PP for entry0
 *   ...
 *   PP for entryN
 *
 * An entry describes one or more consecutive stripe_heads, up to a full
 * stripe. The modified raid data chunks form an m-by-n matrix, where m is the
 * number of stripe_heads in the entry and n is the number of modified data
 * disks. Every stripe_head in the entry must write to the same data disks.
 * An example of a valid case described by a single entry (writes to the first
 * stripe of a 4 disk array, 16k chunk size):
 *
 * sh->sector   dd0   dd1   dd2    ppl
 *            +-----+-----+-----+
 *          0 | --- | --- | --- | +----+
 *          8 | -W- | -W- | --- | | pp |   data_sector = 8
 *         16 | -W- | -W- | --- | | pp |   data_size = 3 * 2 * 4k
 *         24 | -W- | -W- | --- | | pp |   pp_size = 3 * 4k
 *            +-----+-----+-----+ +----+
 *
 * data_sector is the first raid sector of the modified data, data_size is the
 * total size of modified data and pp_size is the size of partial parity for
 * this entry. Entries for full stripe writes contain no partial parity
 * (pp_size = 0); they only mark the stripes for which parity should be
 * recalculated after an unclean shutdown. Every entry holds a checksum of its
 * partial parity, and the header also has a checksum of the header itself.
 *
 * A write request is always logged to the PPL instance stored on the parity
 * disk of the corresponding stripe. For each member disk there is one ppl_log
 * used to handle logging for this disk, independently from others. They are
 * grouped in the child_logs array in struct ppl_conf, which is assigned to
 * r5conf->log_private.
 *
 * ppl_io_unit represents a full PPL write; header_page contains the ppl_header.
 * PPL entries for logged stripes are added in ppl_log_stripe(). A stripe_head
 * can be appended to the last entry if it meets the conditions for a valid
 * entry described above, otherwise a new entry is added. Checksums of entries
 * are calculated incrementally as stripes containing partial parity are being
 * added. ppl_submit_iounit() calculates the checksum of the header and submits
 * a bio containing the header page and partial parity pages (sh->ppl_page) for
 * all stripes of the io_unit. When the PPL write completes, the stripes
 * associated with the io_unit are released and raid5d starts writing their data
 * and parity. When all stripes are written, the io_unit is freed and the next
 * can be submitted.
 *
 * An io_unit is used to gather stripes until it is submitted or becomes full
 * (if the maximum number of entries or size of PPL is reached). Another io_unit
 * can't be submitted until the previous has completed (PPL and stripe
 * data+parity are written). The log->io_list tracks all io_units of a log
 * (for a single member disk). New io_units are added to the end of the list
 * and the first io_unit is submitted, if it is not submitted already.
 * The current io_unit accepting new stripes is always at the end of the list.
 *
 * If write-back cache is enabled for any of the disks in the array, its data
 * must be flushed before the next io_unit is submitted.
 */
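
/*
 * Illustrative arithmetic for the example above (not used by the code):
 * three stripe_heads each writing two data disks give
 * data_size = 3 * 2 * 4k = 24k of logged data, while partial parity is
 * one 4k page per stripe_head, so pp_size = 3 * 4k = 12k.
 */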

#define PPL_SPACE_SIZE	(128 * 1024)

struct ppl_conf {
	struct mddev *mddev;

	/* array of child logs, one for each raid disk */
	struct ppl_log *child_logs;
	int count;

	int block_size;		/* the logical block size used for data_sector
				 * in ppl_header_entry */
	u32 signature;		/* raid array identifier */
	atomic64_t seq;		/* current log write sequence number */

	struct kmem_cache *io_kc;
	mempool_t io_pool;
	struct bio_set bs;
	struct bio_set flush_bs;

	/* used only for recovery */
	int recovered_entries;
	int mismatch_count;

	/* stripes to retry if failed to allocate io_unit */
	struct list_head no_mem_stripes;
	spinlock_t no_mem_stripes_lock;

	unsigned short write_hint;
};

struct ppl_log {
	struct ppl_conf *ppl_conf;	/* shared between all log instances */

	struct md_rdev *rdev;		/* array member disk associated with
					 * this log instance */
	struct mutex io_mutex;
	struct ppl_io_unit *current_io;	/* current io_unit accepting new data
					 * always at the end of io_list */
	spinlock_t io_list_lock;
	struct list_head io_list;	/* all io_units of this log */

	sector_t next_io_sector;
	unsigned int entry_space;
	bool use_multippl;
	bool wb_cache_on;
	unsigned long disk_flush_bitmap;
};

#define PPL_IO_INLINE_BVECS 32

struct ppl_io_unit {
	struct ppl_log *log;

	struct page *header_page;	/* for ppl_header */

	unsigned int entries_count;	/* number of entries in ppl_header */
	unsigned int pp_size;		/* current total size of partial parity */

	u64 seq;			/* sequence number of this log write */
	struct list_head log_sibling;	/* log->io_list */

	struct list_head stripe_list;	/* stripes added to the io_unit */
	atomic_t pending_stripes;	/* how many stripes not written to raid */
	atomic_t pending_flushes;	/* how many disk flushes are in progress */

	bool submitted;			/* true if write to log started */

	/* inline bio and its biovec for submitting the iounit */
	struct bio bio;
	struct bio_vec biovec[PPL_IO_INLINE_BVECS];
};

struct dma_async_tx_descriptor *
ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
		       struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **srcs = percpu->scribble;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct async_submit_ctl submit;

	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

	/*
	 * Partial parity is the XOR of stripe data chunks that are not changed
	 * during the write request. Depending on available data
	 * (read-modify-write vs. reconstruct-write case) we calculate it
	 * differently.
	 */
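	/*
	 * Why this works (a derivation, not extra behavior): parity is the
	 * XOR of all data chunks, so XORing the old parity with the old
	 * data of the disks being rewritten (the prexor result used below)
	 * leaves exactly the XOR of the untouched chunks. For rcw, the
	 * untouched chunks are simply XORed directly.
	 */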
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
		/*
		 * rmw: xor old data and parity from updated disks
		 * This is calculated earlier by ops_run_prexor5() so just copy
		 * the parity dev page.
		 */
		srcs[count++] = sh->dev[pd_idx].page;
	} else if (sh->reconstruct_state == reconstruct_state_drain_run) {
		/* rcw: xor data from all not updated disks */
		for (i = disks; i--;) {
			struct r5dev *dev = &sh->dev[i];
			if (test_bit(R5_UPTODATE, &dev->flags))
				srcs[count++] = dev->page;
		}
	} else {
		return tx;
	}

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, tx,
			  NULL, sh, (void *) (srcs + sh->disks + 2));

	if (count == 1)
		tx = async_memcpy(sh->ppl_page, srcs[0], 0, 0, PAGE_SIZE,
				  &submit);
	else
		tx = async_xor(sh->ppl_page, srcs, 0, count, PAGE_SIZE,
			       &submit);

	return tx;
}

static void *ppl_io_pool_alloc(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *kc = pool_data;
	struct ppl_io_unit *io;

	io = kmem_cache_alloc(kc, gfp_mask);
	if (!io)
		return NULL;

	io->header_page = alloc_page(gfp_mask);
	if (!io->header_page) {
		kmem_cache_free(kc, io);
		return NULL;
	}

	return io;
}

static void ppl_io_pool_free(void *element, void *pool_data)
{
	struct kmem_cache *kc = pool_data;
	struct ppl_io_unit *io = element;

	__free_page(io->header_page);
	kmem_cache_free(kc, io);
}

static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
					  struct stripe_head *sh)
{
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct ppl_io_unit *io;
	struct ppl_header *pplhdr;
	struct page *header_page;

	io = mempool_alloc(&ppl_conf->io_pool, GFP_NOWAIT);
	if (!io)
		return NULL;

	header_page = io->header_page;
	memset(io, 0, sizeof(*io));
	io->header_page = header_page;

	io->log = log;
	INIT_LIST_HEAD(&io->log_sibling);
	INIT_LIST_HEAD(&io->stripe_list);
	atomic_set(&io->pending_stripes, 0);
	atomic_set(&io->pending_flushes, 0);
	bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS);

	pplhdr = page_address(io->header_page);
	clear_page(pplhdr);
	memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
	pplhdr->signature = cpu_to_le32(ppl_conf->signature);

	io->seq = atomic64_add_return(1, &ppl_conf->seq);
	pplhdr->generation = cpu_to_le64(io->seq);

	return io;
}

static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh)
{
	struct ppl_io_unit *io = log->current_io;
	struct ppl_header_entry *e = NULL;
	struct ppl_header *pplhdr;
	int i;
	sector_t data_sector = 0;
	int data_disks = 0;
	struct r5conf *conf = sh->raid_conf;

	pr_debug("%s: stripe: %llu\n", __func__, (unsigned long long)sh->sector);

	/* check if current io_unit is full */
	if (io && (io->pp_size == log->entry_space ||
		   io->entries_count == PPL_HDR_MAX_ENTRIES)) {
		pr_debug("%s: add io_unit blocked by seq: %llu\n",
			 __func__, io->seq);
		io = NULL;
	}

	/* add a new unit if there is none or the current is full */
	if (!io) {
		io = ppl_new_iounit(log, sh);
		if (!io)
			return -ENOMEM;
		spin_lock_irq(&log->io_list_lock);
		list_add_tail(&io->log_sibling, &log->io_list);
		spin_unlock_irq(&log->io_list_lock);

		log->current_io = io;
	}

	for (i = 0; i < sh->disks; i++) {
		struct r5dev *dev = &sh->dev[i];

		if (i != sh->pd_idx && test_bit(R5_Wantwrite, &dev->flags)) {
			if (!data_disks || dev->sector < data_sector)
				data_sector = dev->sector;
			data_disks++;
		}
	}
	BUG_ON(!data_disks);

	pr_debug("%s: seq: %llu data_sector: %llu data_disks: %d\n", __func__,
		 io->seq, (unsigned long long)data_sector, data_disks);

	pplhdr = page_address(io->header_page);

	if (io->entries_count > 0) {
		struct ppl_header_entry *last =
				&pplhdr->entries[io->entries_count - 1];
		struct stripe_head *sh_last = list_last_entry(
				&io->stripe_list, struct stripe_head, log_list);
		u64 data_sector_last = le64_to_cpu(last->data_sector);
		u32 data_size_last = le32_to_cpu(last->data_size);

		/*
		 * Check if we can append the stripe to the last entry. It must
		 * be just after the last logged stripe and write to the same
		 * disks. Use bit shift and logarithm to avoid 64-bit division.
		 */
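		/*
		 * Illustrative check (assuming 4k pages and 16k chunks, so
		 * 32-sector chunks): if the last entry holds two stripe_heads
		 * on two data disks (data_sector_last = 8, data_size_last =
		 * 16k), the next stripe_head at data_sector 24 in the same
		 * chunk appends, because (24 - 8) * 2 == 16k >> 9.
		 */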
		if ((sh->sector == sh_last->sector + RAID5_STRIPE_SECTORS(conf)) &&
		    (data_sector >> ilog2(conf->chunk_sectors) ==
		     data_sector_last >> ilog2(conf->chunk_sectors)) &&
		    ((data_sector - data_sector_last) * data_disks ==
		     data_size_last >> 9))
			e = last;
	}

	if (!e) {
		e = &pplhdr->entries[io->entries_count++];
		e->data_sector = cpu_to_le64(data_sector);
		e->parity_disk = cpu_to_le32(sh->pd_idx);
		e->checksum = cpu_to_le32(~0);
	}

	le32_add_cpu(&e->data_size, data_disks << PAGE_SHIFT);

	/* don't write any PP if full stripe write */
	if (!test_bit(STRIPE_FULL_WRITE, &sh->state)) {
		le32_add_cpu(&e->pp_size, PAGE_SIZE);
		io->pp_size += PAGE_SIZE;
		e->checksum = cpu_to_le32(crc32c_le(le32_to_cpu(e->checksum),
						    page_address(sh->ppl_page),
						    PAGE_SIZE));
	}

	list_add_tail(&sh->log_list, &io->stripe_list);
	atomic_inc(&io->pending_stripes);
	sh->ppl_io = io;

	return 0;
}

int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh)
{
	struct ppl_conf *ppl_conf = conf->log_private;
	struct ppl_io_unit *io = sh->ppl_io;
	struct ppl_log *log;

	if (io || test_bit(STRIPE_SYNCING, &sh->state) || !sh->ppl_page ||
	    !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
	    !test_bit(R5_Insync, &sh->dev[sh->pd_idx].flags)) {
		clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
		return -EAGAIN;
	}

	log = &ppl_conf->child_logs[sh->pd_idx];

	mutex_lock(&log->io_mutex);

	if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
		mutex_unlock(&log->io_mutex);
		return -EAGAIN;
	}

	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	clear_bit(STRIPE_DELAYED, &sh->state);
	atomic_inc(&sh->count);

	if (ppl_log_stripe(log, sh)) {
		spin_lock_irq(&ppl_conf->no_mem_stripes_lock);
		list_add_tail(&sh->log_list, &ppl_conf->no_mem_stripes);
		spin_unlock_irq(&ppl_conf->no_mem_stripes_lock);
	}

	mutex_unlock(&log->io_mutex);

	return 0;
}

static void ppl_log_endio(struct bio *bio)
{
	struct ppl_io_unit *io = bio->bi_private;
	struct ppl_log *log = io->log;
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct stripe_head *sh, *next;

	pr_debug("%s: seq: %llu\n", __func__, io->seq);

	if (bio->bi_status)
		md_error(ppl_conf->mddev, log->rdev);

	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
		list_del_init(&sh->log_list);

		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}

static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio)
{
	char b[BDEVNAME_SIZE];

	pr_debug("%s: seq: %llu size: %u sector: %llu dev: %s\n",
		 __func__, io->seq, bio->bi_iter.bi_size,
		 (unsigned long long)bio->bi_iter.bi_sector,
		 bio_devname(bio, b));

	submit_bio(bio);
}

static void ppl_submit_iounit(struct ppl_io_unit *io)
{
	struct ppl_log *log = io->log;
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct ppl_header *pplhdr = page_address(io->header_page);
	struct bio *bio = &io->bio;
	struct stripe_head *sh;
	int i;

	bio->bi_private = io;

	if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
		ppl_log_endio(bio);
		return;
	}

	for (i = 0; i < io->entries_count; i++) {
		struct ppl_header_entry *e = &pplhdr->entries[i];

		pr_debug("%s: seq: %llu entry: %d data_sector: %llu pp_size: %u data_size: %u\n",
			 __func__, io->seq, i, le64_to_cpu(e->data_sector),
			 le32_to_cpu(e->pp_size), le32_to_cpu(e->data_size));

		e->data_sector = cpu_to_le64(le64_to_cpu(e->data_sector) >>
					     ilog2(ppl_conf->block_size >> 9));
		e->checksum = cpu_to_le32(~le32_to_cpu(e->checksum));
	}

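	/*
	 * Checksum convention, for reference: running CRCs are kept inverted
	 * while accumulating - an entry's checksum starts at ~0 in
	 * ppl_log_stripe() and is finalized by inversion above; the header
	 * checksum below is likewise ~crc32c(~0, header).
	 */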
	pplhdr->entries_count = cpu_to_le32(io->entries_count);
	pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PPL_HEADER_SIZE));

	/* Rewind the buffer if current PPL is larger than remaining space */
	if (log->use_multippl &&
	    log->rdev->ppl.sector + log->rdev->ppl.size - log->next_io_sector <
	    (PPL_HEADER_SIZE + io->pp_size) >> 9)
		log->next_io_sector = log->rdev->ppl.sector;
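
	/*
	 * Illustrative numbers: with a 1 MiB PPL area and the 4 KiB header,
	 * an io_unit carrying 60 KiB of partial parity occupies
	 * (4096 + 61440) >> 9 = 128 sectors; once fewer than 128 sectors
	 * remain before the end of the area, the write wraps back to
	 * rdev->ppl.sector.
	 */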

	bio->bi_end_io = ppl_log_endio;
	bio->bi_opf = REQ_OP_WRITE | REQ_FUA;
	bio_set_dev(bio, log->rdev->bdev);
	bio->bi_iter.bi_sector = log->next_io_sector;
	bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
	bio->bi_write_hint = ppl_conf->write_hint;

	pr_debug("%s: log->current_io_sector: %llu\n", __func__,
	    (unsigned long long)log->next_io_sector);

	if (log->use_multippl)
		log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9;

	WARN_ON(log->disk_flush_bitmap != 0);

	list_for_each_entry(sh, &io->stripe_list, log_list) {
		for (i = 0; i < sh->disks; i++) {
			struct r5dev *dev = &sh->dev[i];

			if ((ppl_conf->child_logs[i].wb_cache_on) &&
			    (test_bit(R5_Wantwrite, &dev->flags))) {
				set_bit(i, &log->disk_flush_bitmap);
			}
		}

		/* entries for full stripe writes have no partial parity */
		if (test_bit(STRIPE_FULL_WRITE, &sh->state))
			continue;

		if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) {
			struct bio *prev = bio;

			bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES,
					       &ppl_conf->bs);
			bio->bi_opf = prev->bi_opf;
			bio->bi_write_hint = prev->bi_write_hint;
			bio_copy_dev(bio, prev);
			bio->bi_iter.bi_sector = bio_end_sector(prev);
			bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);

			bio_chain(bio, prev);
			ppl_submit_iounit_bio(io, prev);
		}
	}

	ppl_submit_iounit_bio(io, bio);
}

static void ppl_submit_current_io(struct ppl_log *log)
{
	struct ppl_io_unit *io;

	spin_lock_irq(&log->io_list_lock);

	io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
				      log_sibling);
	if (io && io->submitted)
		io = NULL;

	spin_unlock_irq(&log->io_list_lock);

	if (io) {
		io->submitted = true;

		if (io == log->current_io)
			log->current_io = NULL;

		ppl_submit_iounit(io);
	}
}

void ppl_write_stripe_run(struct r5conf *conf)
{
	struct ppl_conf *ppl_conf = conf->log_private;
	struct ppl_log *log;
	int i;

	for (i = 0; i < ppl_conf->count; i++) {
		log = &ppl_conf->child_logs[i];

		mutex_lock(&log->io_mutex);
		ppl_submit_current_io(log);
		mutex_unlock(&log->io_mutex);
	}
}

static void ppl_io_unit_finished(struct ppl_io_unit *io)
{
	struct ppl_log *log = io->log;
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct r5conf *conf = ppl_conf->mddev->private;
	unsigned long flags;

	pr_debug("%s: seq: %llu\n", __func__, io->seq);

	local_irq_save(flags);

	spin_lock(&log->io_list_lock);
	list_del(&io->log_sibling);
	spin_unlock(&log->io_list_lock);

	mempool_free(io, &ppl_conf->io_pool);

	spin_lock(&ppl_conf->no_mem_stripes_lock);
	if (!list_empty(&ppl_conf->no_mem_stripes)) {
		struct stripe_head *sh;

		sh = list_first_entry(&ppl_conf->no_mem_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
	spin_unlock(&ppl_conf->no_mem_stripes_lock);

	local_irq_restore(flags);

	wake_up(&conf->wait_for_quiescent);
}

static void ppl_flush_endio(struct bio *bio)
{
	struct ppl_io_unit *io = bio->bi_private;
	struct ppl_log *log = io->log;
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct r5conf *conf = ppl_conf->mddev->private;
	char b[BDEVNAME_SIZE];

	pr_debug("%s: dev: %s\n", __func__, bio_devname(bio, b));

	if (bio->bi_status) {
		struct md_rdev *rdev;

		rcu_read_lock();
		rdev = md_find_rdev_rcu(conf->mddev, bio_dev(bio));
		if (rdev)
			md_error(rdev->mddev, rdev);
		rcu_read_unlock();
	}

	bio_put(bio);

	if (atomic_dec_and_test(&io->pending_flushes)) {
		ppl_io_unit_finished(io);
		md_wakeup_thread(conf->mddev->thread);
	}
}

static void ppl_do_flush(struct ppl_io_unit *io)
{
	struct ppl_log *log = io->log;
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct r5conf *conf = ppl_conf->mddev->private;
	int raid_disks = conf->raid_disks;
	int flushed_disks = 0;
	int i;

	atomic_set(&io->pending_flushes, raid_disks);

	for_each_set_bit(i, &log->disk_flush_bitmap, raid_disks) {
		struct md_rdev *rdev;
		struct block_device *bdev = NULL;

		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags))
			bdev = rdev->bdev;
		rcu_read_unlock();

		if (bdev) {
			struct bio *bio;
			char b[BDEVNAME_SIZE];

			bio = bio_alloc_bioset(GFP_NOIO, 0, &ppl_conf->flush_bs);
			bio_set_dev(bio, bdev);
			bio->bi_private = io;
			bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
			bio->bi_end_io = ppl_flush_endio;

			pr_debug("%s: dev: %s\n", __func__,
				 bio_devname(bio, b));

			submit_bio(bio);
			flushed_disks++;
		}
	}

	log->disk_flush_bitmap = 0;

	for (i = flushed_disks; i < raid_disks; i++) {
		if (atomic_dec_and_test(&io->pending_flushes))
			ppl_io_unit_finished(io);
	}
}

static inline bool ppl_no_io_unit_submitted(struct r5conf *conf,
					    struct ppl_log *log)
{
	struct ppl_io_unit *io;

	io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
				      log_sibling);

	return !io || !io->submitted;
}

void ppl_quiesce(struct r5conf *conf, int quiesce)
{
	struct ppl_conf *ppl_conf = conf->log_private;
	int i;

	if (quiesce) {
		for (i = 0; i < ppl_conf->count; i++) {
			struct ppl_log *log = &ppl_conf->child_logs[i];

			spin_lock_irq(&log->io_list_lock);
			wait_event_lock_irq(conf->wait_for_quiescent,
					    ppl_no_io_unit_submitted(conf, log),
					    log->io_list_lock);
			spin_unlock_irq(&log->io_list_lock);
		}
	}
}

int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
	if (bio->bi_iter.bi_size == 0) {
		bio_endio(bio);
		return 0;
	}
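	/*
	 * PPL log writes are submitted with REQ_FUA (see
	 * ppl_submit_iounit()), so an explicit preflush is not needed for
	 * the log; drop the flag and return -EAGAIN so the caller handles
	 * the bio as a regular write.
	 */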
	bio->bi_opf &= ~REQ_PREFLUSH;
	return -EAGAIN;
}

void ppl_stripe_write_finished(struct stripe_head *sh)
{
	struct ppl_io_unit *io;

	io = sh->ppl_io;
	sh->ppl_io = NULL;

	if (io && atomic_dec_and_test(&io->pending_stripes)) {
		if (io->log->disk_flush_bitmap)
			ppl_do_flush(io);
		else
			ppl_io_unit_finished(io);
	}
}

static void ppl_xor(int size, struct page *page1, struct page *page2)
{
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;
	struct page *xor_srcs[] = { page1, page2 };

	init_async_submit(&submit, ASYNC_TX_ACK|ASYNC_TX_XOR_DROP_DST,
			  NULL, NULL, NULL, NULL);
	tx = async_xor(page1, xor_srcs, 0, 2, size, &submit);

	async_tx_quiesce(&tx);
}

/*
 * PPL recovery strategy: xor partial parity and data from all modified data
 * disks within a stripe and write the result as the new stripe parity. If all
 * stripe data disks are modified (full stripe write), no partial parity is
 * available, so just xor the data disks.
 *
 * Recovery of a PPL entry shall occur only if all modified data disks are
 * available and read from all of them succeeds.
 *
 * A PPL entry applies to a stripe; partial parity size for an entry is at most
 * the size of the chunk. Examples of possible cases for a single entry:
 *
 * case 0: single data disk write:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+           +--------------------+
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * | ------ | -data- | ------ | | pp | -> | data1 ^ pp         |
 * | ------ | -data- | ------ | | pp | -> | data1 ^ pp         |
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * +--------+--------+--------+           +--------------------+
 * pp_size = data_size
 *
 * case 1: more than one data disk write:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+           +--------------------+
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
 * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * +--------+--------+--------+           +--------------------+
 * pp_size = data_size / modified_data_disks
 *
 * case 2: write to all data disks (also full stripe write):
 *   data0    data1    data2                parity
 * +--------+--------+--------+           +--------------------+
 * | ------ | ------ | ------ |           | (no change)        |
 * | -data- | -data- | -data- | --------> | xor all data       |
 * | ------ | ------ | ------ | --------> | (no change)        |
 * | ------ | ------ | ------ |           | (no change)        |
 * +--------+--------+--------+           +--------------------+
 * pp_size = 0
 *
 * The following cases are possible only in other implementations. The recovery
 * code can handle them, but they are not generated at runtime because they can
 * be reduced to cases 0, 1 and 2:
 *
 * case 3:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+ +----+    +--------------------+
 * | ------ | -data- | -data- | | pp |    | data1 ^ data2 ^ pp |
 * | ------ | -data- | -data- | | pp | -> | data1 ^ data2 ^ pp |
 * | -data- | -data- | -data- | | -- | -> | xor all data       |
 * | -data- | -data- | ------ | | pp |    | data0 ^ data1 ^ pp |
 * +--------+--------+--------+ +----+    +--------------------+
 * pp_size = chunk_size
 *
 * case 4:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+ +----+    +--------------------+
 * | ------ | -data- | ------ | | pp |    | data1 ^ pp         |
 * | ------ | ------ | ------ | | -- | -> | (no change)        |
 * | ------ | ------ | ------ | | -- | -> | (no change)        |
 * | -data- | ------ | ------ | | pp |    | data0 ^ pp         |
 * +--------+--------+--------+ +----+    +--------------------+
 * pp_size = chunk_size
 */
static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
			     sector_t ppl_sector)
{
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct mddev *mddev = ppl_conf->mddev;
	struct r5conf *conf = mddev->private;
	int block_size = ppl_conf->block_size;
	struct page *page1;
	struct page *page2;
	sector_t r_sector_first;
	sector_t r_sector_last;
	int strip_sectors;
	int data_disks;
	int i;
	int ret = 0;
	char b[BDEVNAME_SIZE];
	unsigned int pp_size = le32_to_cpu(e->pp_size);
	unsigned int data_size = le32_to_cpu(e->data_size);

	page1 = alloc_page(GFP_KERNEL);
	page2 = alloc_page(GFP_KERNEL);

	if (!page1 || !page2) {
		ret = -ENOMEM;
		goto out;
	}

	r_sector_first = le64_to_cpu(e->data_sector) * (block_size >> 9);

	if ((pp_size >> 9) < conf->chunk_sectors) {
		if (pp_size > 0) {
			data_disks = data_size / pp_size;
			strip_sectors = pp_size >> 9;
		} else {
			data_disks = conf->raid_disks - conf->max_degraded;
			strip_sectors = (data_size >> 9) / data_disks;
		}
		r_sector_last = r_sector_first +
				(data_disks - 1) * conf->chunk_sectors +
				strip_sectors;
	} else {
		data_disks = conf->raid_disks - conf->max_degraded;
		strip_sectors = conf->chunk_sectors;
		r_sector_last = r_sector_first + (data_size >> 9);
	}
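
	/*
	 * Worked example (illustrative): with 512-byte blocks, 64k chunks
	 * (128 sectors), pp_size = 8k and data_size = 16k, the entry spans
	 * data_disks = 16k / 8k = 2 disks with strip_sectors = 16, so
	 * r_sector_last = r_sector_first + 1 * 128 + 16.
	 */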
840*4882a593Smuzhiyun
841*4882a593Smuzhiyun pr_debug("%s: array sector first: %llu last: %llu\n", __func__,
842*4882a593Smuzhiyun (unsigned long long)r_sector_first,
843*4882a593Smuzhiyun (unsigned long long)r_sector_last);
844*4882a593Smuzhiyun
845*4882a593Smuzhiyun /* if start and end is 4k aligned, use a 4k block */
846*4882a593Smuzhiyun if (block_size == 512 &&
847*4882a593Smuzhiyun (r_sector_first & (RAID5_STRIPE_SECTORS(conf) - 1)) == 0 &&
848*4882a593Smuzhiyun (r_sector_last & (RAID5_STRIPE_SECTORS(conf) - 1)) == 0)
849*4882a593Smuzhiyun block_size = RAID5_STRIPE_SIZE(conf);
850*4882a593Smuzhiyun
851*4882a593Smuzhiyun /* iterate through blocks in strip */
852*4882a593Smuzhiyun for (i = 0; i < strip_sectors; i += (block_size >> 9)) {
853*4882a593Smuzhiyun bool update_parity = false;
854*4882a593Smuzhiyun sector_t parity_sector;
855*4882a593Smuzhiyun struct md_rdev *parity_rdev;
856*4882a593Smuzhiyun struct stripe_head sh;
857*4882a593Smuzhiyun int disk;
858*4882a593Smuzhiyun int indent = 0;
859*4882a593Smuzhiyun
860*4882a593Smuzhiyun pr_debug("%s:%*s iter %d start\n", __func__, indent, "", i);
861*4882a593Smuzhiyun indent += 2;
862*4882a593Smuzhiyun
863*4882a593Smuzhiyun memset(page_address(page1), 0, PAGE_SIZE);
864*4882a593Smuzhiyun
865*4882a593Smuzhiyun /* iterate through data member disks */
866*4882a593Smuzhiyun for (disk = 0; disk < data_disks; disk++) {
867*4882a593Smuzhiyun int dd_idx;
868*4882a593Smuzhiyun struct md_rdev *rdev;
869*4882a593Smuzhiyun sector_t sector;
870*4882a593Smuzhiyun sector_t r_sector = r_sector_first + i +
871*4882a593Smuzhiyun (disk * conf->chunk_sectors);
872*4882a593Smuzhiyun
873*4882a593Smuzhiyun pr_debug("%s:%*s data member disk %d start\n",
874*4882a593Smuzhiyun __func__, indent, "", disk);
875*4882a593Smuzhiyun indent += 2;
876*4882a593Smuzhiyun
877*4882a593Smuzhiyun if (r_sector >= r_sector_last) {
878*4882a593Smuzhiyun pr_debug("%s:%*s array sector %llu doesn't need parity update\n",
879*4882a593Smuzhiyun __func__, indent, "",
880*4882a593Smuzhiyun (unsigned long long)r_sector);
881*4882a593Smuzhiyun indent -= 2;
882*4882a593Smuzhiyun continue;
883*4882a593Smuzhiyun }
884*4882a593Smuzhiyun
885*4882a593Smuzhiyun update_parity = true;
886*4882a593Smuzhiyun
887*4882a593Smuzhiyun /* map raid sector to member disk */
888*4882a593Smuzhiyun sector = raid5_compute_sector(conf, r_sector, 0,
889*4882a593Smuzhiyun &dd_idx, NULL);
890*4882a593Smuzhiyun pr_debug("%s:%*s processing array sector %llu => data member disk %d, sector %llu\n",
891*4882a593Smuzhiyun __func__, indent, "",
892*4882a593Smuzhiyun (unsigned long long)r_sector, dd_idx,
893*4882a593Smuzhiyun (unsigned long long)sector);
894*4882a593Smuzhiyun
895*4882a593Smuzhiyun rdev = conf->disks[dd_idx].rdev;
896*4882a593Smuzhiyun if (!rdev || (!test_bit(In_sync, &rdev->flags) &&
897*4882a593Smuzhiyun sector >= rdev->recovery_offset)) {
898*4882a593Smuzhiyun pr_debug("%s:%*s data member disk %d missing\n",
899*4882a593Smuzhiyun __func__, indent, "", dd_idx);
900*4882a593Smuzhiyun update_parity = false;
901*4882a593Smuzhiyun break;
902*4882a593Smuzhiyun }
903*4882a593Smuzhiyun
904*4882a593Smuzhiyun pr_debug("%s:%*s reading data member disk %s sector %llu\n",
905*4882a593Smuzhiyun __func__, indent, "", bdevname(rdev->bdev, b),
906*4882a593Smuzhiyun (unsigned long long)sector);
907*4882a593Smuzhiyun if (!sync_page_io(rdev, sector, block_size, page2,
908*4882a593Smuzhiyun REQ_OP_READ, 0, false)) {
909*4882a593Smuzhiyun md_error(mddev, rdev);
910*4882a593Smuzhiyun pr_debug("%s:%*s read failed!\n", __func__,
911*4882a593Smuzhiyun indent, "");
912*4882a593Smuzhiyun ret = -EIO;
913*4882a593Smuzhiyun goto out;
914*4882a593Smuzhiyun }
915*4882a593Smuzhiyun
916*4882a593Smuzhiyun ppl_xor(block_size, page1, page2);
917*4882a593Smuzhiyun
918*4882a593Smuzhiyun indent -= 2;
919*4882a593Smuzhiyun }
920*4882a593Smuzhiyun
921*4882a593Smuzhiyun if (!update_parity)
922*4882a593Smuzhiyun continue;
923*4882a593Smuzhiyun
924*4882a593Smuzhiyun if (pp_size > 0) {
925*4882a593Smuzhiyun pr_debug("%s:%*s reading pp disk sector %llu\n",
926*4882a593Smuzhiyun __func__, indent, "",
927*4882a593Smuzhiyun (unsigned long long)(ppl_sector + i));
928*4882a593Smuzhiyun if (!sync_page_io(log->rdev,
929*4882a593Smuzhiyun ppl_sector - log->rdev->data_offset + i,
930*4882a593Smuzhiyun block_size, page2, REQ_OP_READ, 0,
931*4882a593Smuzhiyun false)) {
932*4882a593Smuzhiyun pr_debug("%s:%*s read failed!\n", __func__,
933*4882a593Smuzhiyun indent, "");
934*4882a593Smuzhiyun md_error(mddev, log->rdev);
935*4882a593Smuzhiyun ret = -EIO;
936*4882a593Smuzhiyun goto out;
937*4882a593Smuzhiyun }
938*4882a593Smuzhiyun
939*4882a593Smuzhiyun ppl_xor(block_size, page1, page2);
940*4882a593Smuzhiyun }
941*4882a593Smuzhiyun
942*4882a593Smuzhiyun /* map raid sector to parity disk */
943*4882a593Smuzhiyun parity_sector = raid5_compute_sector(conf, r_sector_first + i,
944*4882a593Smuzhiyun 0, &disk, &sh);
945*4882a593Smuzhiyun BUG_ON(sh.pd_idx != le32_to_cpu(e->parity_disk));
946*4882a593Smuzhiyun parity_rdev = conf->disks[sh.pd_idx].rdev;
947*4882a593Smuzhiyun
948*4882a593Smuzhiyun BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev);
949*4882a593Smuzhiyun pr_debug("%s:%*s write parity at sector %llu, disk %s\n",
950*4882a593Smuzhiyun __func__, indent, "",
951*4882a593Smuzhiyun (unsigned long long)parity_sector,
952*4882a593Smuzhiyun bdevname(parity_rdev->bdev, b));
953*4882a593Smuzhiyun if (!sync_page_io(parity_rdev, parity_sector, block_size,
954*4882a593Smuzhiyun page1, REQ_OP_WRITE, 0, false)) {
955*4882a593Smuzhiyun pr_debug("%s:%*s parity write error!\n", __func__,
956*4882a593Smuzhiyun indent, "");
957*4882a593Smuzhiyun md_error(mddev, parity_rdev);
958*4882a593Smuzhiyun ret = -EIO;
959*4882a593Smuzhiyun goto out;
960*4882a593Smuzhiyun }
961*4882a593Smuzhiyun }
962*4882a593Smuzhiyun out:
963*4882a593Smuzhiyun if (page1)
964*4882a593Smuzhiyun __free_page(page1);
965*4882a593Smuzhiyun if (page2)
966*4882a593Smuzhiyun __free_page(page2);
967*4882a593Smuzhiyun return ret;
968*4882a593Smuzhiyun }
969*4882a593Smuzhiyun
ppl_recover(struct ppl_log * log,struct ppl_header * pplhdr,sector_t offset)970*4882a593Smuzhiyun static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
971*4882a593Smuzhiyun sector_t offset)
972*4882a593Smuzhiyun {
973*4882a593Smuzhiyun struct ppl_conf *ppl_conf = log->ppl_conf;
974*4882a593Smuzhiyun struct md_rdev *rdev = log->rdev;
975*4882a593Smuzhiyun struct mddev *mddev = rdev->mddev;
976*4882a593Smuzhiyun sector_t ppl_sector = rdev->ppl.sector + offset +
977*4882a593Smuzhiyun (PPL_HEADER_SIZE >> 9);
978*4882a593Smuzhiyun struct page *page;
979*4882a593Smuzhiyun int i;
980*4882a593Smuzhiyun int ret = 0;
981*4882a593Smuzhiyun
982*4882a593Smuzhiyun page = alloc_page(GFP_KERNEL);
983*4882a593Smuzhiyun if (!page)
984*4882a593Smuzhiyun return -ENOMEM;
985*4882a593Smuzhiyun
986*4882a593Smuzhiyun /* iterate through all PPL entries saved */
987*4882a593Smuzhiyun for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++) {
988*4882a593Smuzhiyun struct ppl_header_entry *e = &pplhdr->entries[i];
989*4882a593Smuzhiyun u32 pp_size = le32_to_cpu(e->pp_size);
990*4882a593Smuzhiyun sector_t sector = ppl_sector;
991*4882a593Smuzhiyun int ppl_entry_sectors = pp_size >> 9;
992*4882a593Smuzhiyun u32 crc, crc_stored;
993*4882a593Smuzhiyun
994*4882a593Smuzhiyun pr_debug("%s: disk: %d entry: %d ppl_sector: %llu pp_size: %u\n",
995*4882a593Smuzhiyun __func__, rdev->raid_disk, i,
996*4882a593Smuzhiyun (unsigned long long)ppl_sector, pp_size);
997*4882a593Smuzhiyun
998*4882a593Smuzhiyun crc = ~0;
999*4882a593Smuzhiyun crc_stored = le32_to_cpu(e->checksum);
1000*4882a593Smuzhiyun
1001*4882a593Smuzhiyun /* read parial parity for this entry and calculate its checksum */
1002*4882a593Smuzhiyun while (pp_size) {
1003*4882a593Smuzhiyun int s = pp_size > PAGE_SIZE ? PAGE_SIZE : pp_size;
1004*4882a593Smuzhiyun
1005*4882a593Smuzhiyun if (!sync_page_io(rdev, sector - rdev->data_offset,
1006*4882a593Smuzhiyun s, page, REQ_OP_READ, 0, false)) {
1007*4882a593Smuzhiyun md_error(mddev, rdev);
1008*4882a593Smuzhiyun ret = -EIO;
1009*4882a593Smuzhiyun goto out;
1010*4882a593Smuzhiyun }
1011*4882a593Smuzhiyun
1012*4882a593Smuzhiyun crc = crc32c_le(crc, page_address(page), s);
1013*4882a593Smuzhiyun
1014*4882a593Smuzhiyun pp_size -= s;
1015*4882a593Smuzhiyun sector += s >> 9;
1016*4882a593Smuzhiyun }
1017*4882a593Smuzhiyun
1018*4882a593Smuzhiyun crc = ~crc;
1019*4882a593Smuzhiyun
1020*4882a593Smuzhiyun if (crc != crc_stored) {
1021*4882a593Smuzhiyun /*
1022*4882a593Smuzhiyun * Don't recover this entry if the checksum does not
1023*4882a593Smuzhiyun * match, but keep going and try to recover other
1024*4882a593Smuzhiyun * entries.
1025*4882a593Smuzhiyun */
1026*4882a593Smuzhiyun pr_debug("%s: ppl entry crc does not match: stored: 0x%x calculated: 0x%x\n",
1027*4882a593Smuzhiyun __func__, crc_stored, crc);
1028*4882a593Smuzhiyun ppl_conf->mismatch_count++;
1029*4882a593Smuzhiyun } else {
1030*4882a593Smuzhiyun ret = ppl_recover_entry(log, e, ppl_sector);
1031*4882a593Smuzhiyun if (ret)
1032*4882a593Smuzhiyun goto out;
1033*4882a593Smuzhiyun ppl_conf->recovered_entries++;
1034*4882a593Smuzhiyun }
1035*4882a593Smuzhiyun
1036*4882a593Smuzhiyun ppl_sector += ppl_entry_sectors;
1037*4882a593Smuzhiyun }
1038*4882a593Smuzhiyun
1039*4882a593Smuzhiyun /* flush the disk cache after recovery if necessary */
1040*4882a593Smuzhiyun ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL);
1041*4882a593Smuzhiyun out:
1042*4882a593Smuzhiyun __free_page(page);
1043*4882a593Smuzhiyun return ret;
1044*4882a593Smuzhiyun }
1045*4882a593Smuzhiyun
ppl_write_empty_header(struct ppl_log * log)1046*4882a593Smuzhiyun static int ppl_write_empty_header(struct ppl_log *log)
1047*4882a593Smuzhiyun {
1048*4882a593Smuzhiyun struct page *page;
1049*4882a593Smuzhiyun struct ppl_header *pplhdr;
1050*4882a593Smuzhiyun struct md_rdev *rdev = log->rdev;
1051*4882a593Smuzhiyun int ret = 0;
1052*4882a593Smuzhiyun
1053*4882a593Smuzhiyun pr_debug("%s: disk: %d ppl_sector: %llu\n", __func__,
1054*4882a593Smuzhiyun rdev->raid_disk, (unsigned long long)rdev->ppl.sector);
1055*4882a593Smuzhiyun
1056*4882a593Smuzhiyun page = alloc_page(GFP_NOIO | __GFP_ZERO);
1057*4882a593Smuzhiyun if (!page)
1058*4882a593Smuzhiyun return -ENOMEM;
1059*4882a593Smuzhiyun
1060*4882a593Smuzhiyun pplhdr = page_address(page);
1061*4882a593Smuzhiyun /* zero out PPL space to avoid collision with old PPLs */
1062*4882a593Smuzhiyun blkdev_issue_zeroout(rdev->bdev, rdev->ppl.sector,
1063*4882a593Smuzhiyun log->rdev->ppl.size, GFP_NOIO, 0);
1064*4882a593Smuzhiyun memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
1065*4882a593Smuzhiyun pplhdr->signature = cpu_to_le32(log->ppl_conf->signature);
1066*4882a593Smuzhiyun pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE));
1067*4882a593Smuzhiyun
1068*4882a593Smuzhiyun if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
1069*4882a593Smuzhiyun PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
1070*4882a593Smuzhiyun REQ_FUA, 0, false)) {
1071*4882a593Smuzhiyun md_error(rdev->mddev, rdev);
1072*4882a593Smuzhiyun ret = -EIO;
1073*4882a593Smuzhiyun }
1074*4882a593Smuzhiyun
1075*4882a593Smuzhiyun __free_page(page);
1076*4882a593Smuzhiyun return ret;
1077*4882a593Smuzhiyun }
1078*4882a593Smuzhiyun
static int ppl_load_distributed(struct ppl_log *log)
{
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct md_rdev *rdev = log->rdev;
	struct mddev *mddev = rdev->mddev;
	struct page *page, *page2, *tmp;
	struct ppl_header *pplhdr = NULL, *prev_pplhdr = NULL;
	u32 crc, crc_stored;
	u32 signature;
	int ret = 0, i;
	sector_t pplhdr_offset = 0, prev_pplhdr_offset = 0;

	pr_debug("%s: disk: %d\n", __func__, rdev->raid_disk);
	/* read PPL headers, find the most recent one */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	page2 = alloc_page(GFP_KERNEL);
	if (!page2) {
		__free_page(page);
		return -ENOMEM;
	}

	/* search the PPL area for the latest header */
	while (pplhdr_offset < rdev->ppl.size - (PPL_HEADER_SIZE >> 9)) {
		if (!sync_page_io(rdev,
				  rdev->ppl.sector - rdev->data_offset +
				  pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ,
				  0, false)) {
			md_error(mddev, rdev);
			ret = -EIO;
			/* if a header cannot be read, don't recover any PPL */
			pplhdr = NULL;
			break;
		}
		pplhdr = page_address(page);

		/* check header validity */
		crc_stored = le32_to_cpu(pplhdr->checksum);
		pplhdr->checksum = 0;
		crc = ~crc32c_le(~0, pplhdr, PAGE_SIZE);

		if (crc_stored != crc) {
			pr_debug("%s: ppl header crc does not match: stored: 0x%x calculated: 0x%x (offset: %llu)\n",
				 __func__, crc_stored, crc,
				 (unsigned long long)pplhdr_offset);
			pplhdr = prev_pplhdr;
			pplhdr_offset = prev_pplhdr_offset;
			break;
		}

		signature = le32_to_cpu(pplhdr->signature);

		if (mddev->external) {
			/*
			 * For external metadata the header signature is set and
			 * validated in userspace.
			 */
			ppl_conf->signature = signature;
		} else if (ppl_conf->signature != signature) {
			pr_debug("%s: ppl header signature does not match: stored: 0x%x configured: 0x%x (offset: %llu)\n",
				 __func__, signature, ppl_conf->signature,
				 (unsigned long long)pplhdr_offset);
			pplhdr = prev_pplhdr;
			pplhdr_offset = prev_pplhdr_offset;
			break;
		}

		if (prev_pplhdr && le64_to_cpu(prev_pplhdr->generation) >
		    le64_to_cpu(pplhdr->generation)) {
			/* the previous header was the newest */
			pplhdr = prev_pplhdr;
			pplhdr_offset = prev_pplhdr_offset;
			break;
		}

		prev_pplhdr_offset = pplhdr_offset;
		prev_pplhdr = pplhdr;

		tmp = page;
		page = page2;
		page2 = tmp;

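		/*
		 * Illustrative arithmetic (hypothetical entry sizes): a header
		 * with two entries of pp_size 12k and 4k advances the scan by
		 * (12k >> 9) + (4k >> 9) = 24 + 8 sectors of partial parity,
		 * plus PPL_HEADER_SIZE >> 9 = 8 sectors for the header itself,
		 * i.e. 40 sectors to the next potential header.
		 */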
		/* calculate the next potential ppl offset */
		for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++)
			pplhdr_offset +=
			    le32_to_cpu(pplhdr->entries[i].pp_size) >> 9;
		pplhdr_offset += PPL_HEADER_SIZE >> 9;
	}

	/* no valid ppl found */
	if (!pplhdr)
		ppl_conf->mismatch_count++;
	else
		pr_debug("%s: latest PPL found at offset: %llu, with generation: %llu\n",
			 __func__, (unsigned long long)pplhdr_offset,
			 le64_to_cpu(pplhdr->generation));

	/* attempt to recover from the log if we are starting a dirty array */
	if (pplhdr && !mddev->pers && mddev->recovery_cp != MaxSector)
		ret = ppl_recover(log, pplhdr, pplhdr_offset);

	/* write an empty header if we are starting the array */
	if (!ret && !mddev->pers)
		ret = ppl_write_empty_header(log);

	__free_page(page);
	__free_page(page2);

	pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
		 __func__, ret, ppl_conf->mismatch_count,
		 ppl_conf->recovered_entries);
	return ret;
}

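/*
 * Load the PPL from every member disk and, for external metadata, verify
 * that all disks carry the same header signature.
 */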
static int ppl_load(struct ppl_conf *ppl_conf)
{
	int ret = 0;
	u32 signature = 0;
	bool signature_set = false;
	int i;

	for (i = 0; i < ppl_conf->count; i++) {
		struct ppl_log *log = &ppl_conf->child_logs[i];

		/* skip missing drive */
		if (!log->rdev)
			continue;

		ret = ppl_load_distributed(log);
		if (ret)
			break;

		/*
		 * For external metadata we can't check if the signature is
		 * correct on a single drive, but we can check if it is the same
		 * on all drives.
		 */
		if (ppl_conf->mddev->external) {
			if (!signature_set) {
				signature = ppl_conf->signature;
				signature_set = true;
			} else if (signature != ppl_conf->signature) {
				pr_warn("md/raid:%s: PPL header signature does not match on all member drives\n",
					mdname(ppl_conf->mddev));
				ret = -EINVAL;
				break;
			}
		}
	}

	pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
		 __func__, ret, ppl_conf->mismatch_count,
		 ppl_conf->recovered_entries);
	return ret;
}

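/* Tear down a ppl_conf, releasing resources in reverse order of allocation. */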
static void __ppl_exit_log(struct ppl_conf *ppl_conf)
{
	clear_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
	clear_bit(MD_HAS_MULTIPLE_PPLS, &ppl_conf->mddev->flags);

	kfree(ppl_conf->child_logs);

	bioset_exit(&ppl_conf->bs);
	bioset_exit(&ppl_conf->flush_bs);
	mempool_exit(&ppl_conf->io_pool);
	kmem_cache_destroy(ppl_conf->io_kc);

	kfree(ppl_conf);
}

void ppl_exit_log(struct r5conf *conf)
{
	struct ppl_conf *ppl_conf = conf->log_private;

	if (ppl_conf) {
		__ppl_exit_log(ppl_conf);
		conf->log_private = NULL;
	}
}

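/*
 * Check that the PPL area on @rdev is large enough and does not overlap the
 * array data or the superblock. On success, rdev->ppl.size is trimmed so the
 * data portion is a multiple of the stripe size.
 */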
static int ppl_validate_rdev(struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];
	int ppl_data_sectors;
	int ppl_size_new;

	/*
	 * The configured PPL size must be enough to store
	 * the header and (at the very least) partial parity
	 * for one stripe. Round it down to ensure the data
	 * space is cleanly divisible by stripe size.
	 */
	ppl_data_sectors = rdev->ppl.size - (PPL_HEADER_SIZE >> 9);

	if (ppl_data_sectors > 0)
		ppl_data_sectors = rounddown(ppl_data_sectors,
				RAID5_STRIPE_SECTORS((struct r5conf *)rdev->mddev->private));

	if (ppl_data_sectors <= 0) {
		pr_warn("md/raid:%s: PPL space too small on %s\n",
			mdname(rdev->mddev), bdevname(rdev->bdev, b));
		return -ENOSPC;
	}

	ppl_size_new = ppl_data_sectors + (PPL_HEADER_SIZE >> 9);

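	/*
	 * Two layouts are possible: the PPL area either precedes the data
	 * (ppl.sector < data_offset) or follows it. In both cases it must not
	 * intersect the [data_offset, data_offset + sectors) range.
	 */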
	if ((rdev->ppl.sector < rdev->data_offset &&
	     rdev->ppl.sector + ppl_size_new > rdev->data_offset) ||
	    (rdev->ppl.sector >= rdev->data_offset &&
	     rdev->data_offset + rdev->sectors > rdev->ppl.sector)) {
		pr_warn("md/raid:%s: PPL space overlaps with data on %s\n",
			mdname(rdev->mddev), bdevname(rdev->bdev, b));
		return -EINVAL;
	}

	if (!rdev->mddev->external &&
	    ((rdev->ppl.offset > 0 && rdev->ppl.offset < (rdev->sb_size >> 9)) ||
	     (rdev->ppl.offset <= 0 && rdev->ppl.offset + ppl_size_new > 0))) {
		pr_warn("md/raid:%s: PPL space overlaps with superblock on %s\n",
			mdname(rdev->mddev), bdevname(rdev->bdev, b));
		return -EINVAL;
	}

	rdev->ppl.size = ppl_size_new;

	return 0;
}

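/*
 * Set up per-disk log state: use multiple fixed-size PPL slots when the
 * configured area can hold at least two of them, otherwise fall back to a
 * single PPL occupying the whole area.
 */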
static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
{
	struct request_queue *q;

	if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE +
				      PPL_HEADER_SIZE) * 2) {
		log->use_multippl = true;
		set_bit(MD_HAS_MULTIPLE_PPLS,
			&log->ppl_conf->mddev->flags);
		log->entry_space = PPL_SPACE_SIZE;
	} else {
		log->use_multippl = false;
		log->entry_space = (log->rdev->ppl.size << 9) -
				   PPL_HEADER_SIZE;
	}
	log->next_io_sector = rdev->ppl.sector;

	q = bdev_get_queue(rdev->bdev);
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		log->wb_cache_on = true;
}

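/*
 * Enable PPL for the array: check compatibility constraints, allocate the
 * ppl_conf and per-disk child logs, then load and possibly recover the logs
 * from the member disks.
 */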
int ppl_init_log(struct r5conf *conf)
{
	struct ppl_conf *ppl_conf;
	struct mddev *mddev = conf->mddev;
	int ret = 0;
	int max_disks;
	int i;

	pr_debug("md/raid:%s: enabling distributed Partial Parity Log\n",
		 mdname(conf->mddev));

	if (PAGE_SIZE != 4096)
		return -EINVAL;

	if (mddev->level != 5) {
		pr_warn("md/raid:%s PPL is not compatible with raid level %d\n",
			mdname(mddev), mddev->level);
		return -EINVAL;
	}

	if (mddev->bitmap_info.file || mddev->bitmap_info.offset) {
		pr_warn("md/raid:%s PPL is not compatible with bitmap\n",
			mdname(mddev));
		return -EINVAL;
	}

	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
		pr_warn("md/raid:%s PPL is not compatible with journal\n",
			mdname(mddev));
		return -EINVAL;
	}

	max_disks = sizeof_field(struct ppl_log, disk_flush_bitmap) *
		BITS_PER_BYTE;
	if (conf->raid_disks > max_disks) {
		pr_warn("md/raid:%s PPL doesn't support over %d disks in the array\n",
			mdname(mddev), max_disks);
		return -EINVAL;
	}

	ppl_conf = kzalloc(sizeof(struct ppl_conf), GFP_KERNEL);
	if (!ppl_conf)
		return -ENOMEM;

	ppl_conf->mddev = mddev;

	ppl_conf->io_kc = KMEM_CACHE(ppl_io_unit, 0);
	if (!ppl_conf->io_kc) {
		ret = -ENOMEM;
		goto err;
	}

	ret = mempool_init(&ppl_conf->io_pool, conf->raid_disks, ppl_io_pool_alloc,
			   ppl_io_pool_free, ppl_conf->io_kc);
	if (ret)
		goto err;

	ret = bioset_init(&ppl_conf->bs, conf->raid_disks, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto err;

	ret = bioset_init(&ppl_conf->flush_bs, conf->raid_disks, 0, 0);
	if (ret)
		goto err;

	ppl_conf->count = conf->raid_disks;
	ppl_conf->child_logs = kcalloc(ppl_conf->count, sizeof(struct ppl_log),
				       GFP_KERNEL);
	if (!ppl_conf->child_logs) {
		ret = -ENOMEM;
		goto err;
	}

	atomic64_set(&ppl_conf->seq, 0);
	INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
	spin_lock_init(&ppl_conf->no_mem_stripes_lock);
	ppl_conf->write_hint = RWH_WRITE_LIFE_NOT_SET;
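
	/*
	 * For native metadata the signature is derived from the array UUID;
	 * for external metadata it is read back from the on-disk headers in
	 * ppl_load_distributed(), so only the block size is set here.
	 */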
	if (!mddev->external) {
		ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
		ppl_conf->block_size = 512;
	} else {
		ppl_conf->block_size = queue_logical_block_size(mddev->queue);
	}

	for (i = 0; i < ppl_conf->count; i++) {
		struct ppl_log *log = &ppl_conf->child_logs[i];
		struct md_rdev *rdev = conf->disks[i].rdev;

		mutex_init(&log->io_mutex);
		spin_lock_init(&log->io_list_lock);
		INIT_LIST_HEAD(&log->io_list);

		log->ppl_conf = ppl_conf;
		log->rdev = rdev;

		if (rdev) {
			ret = ppl_validate_rdev(rdev);
			if (ret)
				goto err;

			ppl_init_child_log(log, rdev);
		}
	}

	/* load and possibly recover the logs from the member disks */
	ret = ppl_load(ppl_conf);

	if (ret) {
		goto err;
	} else if (!mddev->pers && mddev->recovery_cp == 0 &&
		   ppl_conf->recovered_entries > 0 &&
		   ppl_conf->mismatch_count == 0) {
		/*
		 * If we are starting a dirty array and the recovery succeeds
		 * without any issues, set the array as clean.
		 */
		mddev->recovery_cp = MaxSector;
		set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
	} else if (mddev->pers && ppl_conf->mismatch_count > 0) {
		/* no mismatch allowed when enabling PPL for a running array */
		ret = -EINVAL;
		goto err;
	}

	conf->log_private = ppl_conf;
	set_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);

	return 0;
err:
	__ppl_exit_log(ppl_conf);
	return ret;
}

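/*
 * Add or remove the PPL for a single member disk, e.g. on hot-add or
 * hot-remove. Adding validates the PPL area and writes a fresh empty
 * header before the disk's log is enabled.
 */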
int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add)
{
	struct ppl_conf *ppl_conf = conf->log_private;
	struct ppl_log *log;
	int ret = 0;
	char b[BDEVNAME_SIZE];

	if (!rdev)
		return -EINVAL;

	pr_debug("%s: disk: %d operation: %s dev: %s\n",
		 __func__, rdev->raid_disk, add ? "add" : "remove",
		 bdevname(rdev->bdev, b));

	if (rdev->raid_disk < 0)
		return 0;

	if (rdev->raid_disk >= ppl_conf->count)
		return -ENODEV;

	log = &ppl_conf->child_logs[rdev->raid_disk];

	mutex_lock(&log->io_mutex);
	if (add) {
		ret = ppl_validate_rdev(rdev);
		if (!ret) {
			log->rdev = rdev;
			ret = ppl_write_empty_header(log);
			ppl_init_child_log(log, rdev);
		}
	} else {
		log->rdev = NULL;
	}
	mutex_unlock(&log->io_mutex);

	return ret;
}

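/*
 * sysfs handlers for the ppl_write_hint attribute, which exposes the write
 * lifetime hint applied to PPL writes.
 */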
static ssize_t
ppl_write_hint_show(struct mddev *mddev, char *buf)
{
	size_t ret = 0;
	struct r5conf *conf;
	struct ppl_conf *ppl_conf = NULL;

	spin_lock(&mddev->lock);
	conf = mddev->private;
	if (conf && raid5_has_ppl(conf))
		ppl_conf = conf->log_private;
	ret = sprintf(buf, "%d\n", ppl_conf ? ppl_conf->write_hint : 0);
	spin_unlock(&mddev->lock);

	return ret;
}

static ssize_t
ppl_write_hint_store(struct mddev *mddev, const char *page, size_t len)
{
	struct r5conf *conf;
	struct ppl_conf *ppl_conf;
	int err = 0;
	unsigned short new;

	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (kstrtou16(page, 10, &new))
		return -EINVAL;

	err = mddev_lock(mddev);
	if (err)
		return err;

	conf = mddev->private;
	if (!conf) {
		err = -ENODEV;
	} else if (raid5_has_ppl(conf)) {
		ppl_conf = conf->log_private;
		if (!ppl_conf)
			err = -EINVAL;
		else
			ppl_conf->write_hint = new;
	} else {
		err = -EINVAL;
	}

	mddev_unlock(mddev);

	return err ?: len;
}

struct md_sysfs_entry
ppl_write_hint = __ATTR(ppl_write_hint, S_IRUGO | S_IWUSR,
			ppl_write_hint_show,
			ppl_write_hint_store);