// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 */
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"

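/*
 * badrange_init() - initialize an empty badrange list
 * @badrange: badrange instance to initialize (e.g. the one embedded in a
 *	      struct nvdimm_bus)
 *
 * The list and its spinlock must be initialized before any entries are
 * added with badrange_add().
 */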
void badrange_init(struct badrange *badrange)
{
	INIT_LIST_HEAD(&badrange->list);
	spin_lock_init(&badrange->lock);
}
EXPORT_SYMBOL_GPL(badrange_init);
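
/*
 * Illustrative usage (a sketch, not taken from this file): a bus provider
 * initializes the list once and then records media error ranges reported
 * by the platform, e.g.:
 *
 *	badrange_init(&nvdimm_bus->badrange);
 *	...
 *	badrange_add(&nvdimm_bus->badrange, spa_addr, spa_len);
 *
 * where spa_addr/spa_len are placeholders for a platform-reported error
 * range. The accumulated entries are later converted into per-namespace
 * badblocks by nvdimm_badblocks_populate() below.
 */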

static void append_badrange_entry(struct badrange *badrange,
		struct badrange_entry *bre, u64 addr, u64 length)
{
	lockdep_assert_held(&badrange->lock);
	bre->start = addr;
	bre->length = length;
	list_add_tail(&bre->list, &badrange->list);
}

static int alloc_and_append_badrange_entry(struct badrange *badrange,
		u64 addr, u64 length, gfp_t flags)
{
	struct badrange_entry *bre;

	bre = kzalloc(sizeof(*bre), flags);
	if (!bre)
		return -ENOMEM;

	append_badrange_entry(badrange, bre, addr, length);
	return 0;
}

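/*
 * add_badrange() is entered with badrange->lock held (see badrange_add()).
 * The lock is dropped around the GFP_KERNEL allocation, which may sleep,
 * and the list is re-checked under the lock afterwards: if a matching
 * entry already exists (or was added while the lock was dropped), the
 * pre-allocated entry is simply freed.
 */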
static int add_badrange(struct badrange *badrange, u64 addr, u64 length)
{
	struct badrange_entry *bre, *bre_new;

	spin_unlock(&badrange->lock);
	bre_new = kzalloc(sizeof(*bre_new), GFP_KERNEL);
	spin_lock(&badrange->lock);

	if (list_empty(&badrange->list)) {
		if (!bre_new)
			return -ENOMEM;
		append_badrange_entry(badrange, bre_new, addr, length);
		return 0;
	}

	/*
	 * There is a chance this is a duplicate, check for those first.
	 * This will be the common case as ARS_STATUS returns all known
	 * errors in the SPA space, and we can't query it per region.
	 */
	list_for_each_entry(bre, &badrange->list, list)
		if (bre->start == addr) {
			/* If length has changed, update this list entry */
			if (bre->length != length)
				bre->length = length;
			kfree(bre_new);
			return 0;
		}

	/*
	 * If not a duplicate or a simple length update, add the entry as is,
	 * as any overlapping ranges will get resolved when the list is
	 * consumed and converted to badblocks.
	 */
	if (!bre_new)
		return -ENOMEM;
	append_badrange_entry(badrange, bre_new, addr, length);

	return 0;
}

int badrange_add(struct badrange *badrange, u64 addr, u64 length)
{
	int rc;

	spin_lock(&badrange->lock);
	rc = add_badrange(badrange, addr, length);
	spin_unlock(&badrange->lock);

	return rc;
}
EXPORT_SYMBOL_GPL(badrange_add);

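/*
 * badrange_forget() - clear an address range from the badrange list
 * @badrange: badrange instance to prune
 * @start: start of the physical address range being cleared
 * @len: number of bytes to clear
 *
 * Entries fully covered by [start, start + len - 1] are deleted; entries
 * that partially overlap are trimmed or split, as described below.
 */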
void badrange_forget(struct badrange *badrange, phys_addr_t start,
		unsigned int len)
{
	struct list_head *badrange_list = &badrange->list;
	u64 clr_end = start + len - 1;
	struct badrange_entry *bre, *next;

	spin_lock(&badrange->lock);

	/*
	 * [start, clr_end] is the badrange interval being cleared.
	 * [bre->start, bre_end] is the badrange_list entry we're comparing
	 * the above interval against. The badrange list entry may need
	 * to be modified (update either start or length), deleted, or
	 * split into two based on the overlap characteristics.
	 */

	list_for_each_entry_safe(bre, next, badrange_list, list) {
		u64 bre_end = bre->start + bre->length - 1;

		/* Skip intervals with no intersection */
		if (bre_end < start)
			continue;
		if (bre->start > clr_end)
			continue;
		/* Delete completely overlapped badrange entries */
		if ((bre->start >= start) && (bre_end <= clr_end)) {
			list_del(&bre->list);
			kfree(bre);
			continue;
		}
		/* Adjust start point of partially cleared entries */
		if ((start <= bre->start) && (clr_end > bre->start)) {
			bre->length -= clr_end - bre->start + 1;
			bre->start = clr_end + 1;
			continue;
		}
		/* Adjust bre->length for partial clearing at the tail end */
		if ((bre->start < start) && (bre_end <= clr_end)) {
			/* bre->start remains the same */
			bre->length = start - bre->start;
			continue;
		}
		/*
		 * If clearing in the middle of an entry, we split it into
		 * two by modifying the current entry to represent one half
		 * of the split, and adding a new entry for the second half.
		 */
		if ((bre->start < start) && (bre_end > clr_end)) {
			u64 new_start = clr_end + 1;
			u64 new_len = bre_end - new_start + 1;

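			/*
			 * Note: this GFP_NOWAIT allocation can fail, in
			 * which case the right-hand half of the split is
			 * silently dropped (the return value of
			 * alloc_and_append_badrange_entry() is not checked
			 * here).
			 */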
			/* Add new entry covering the right half */
			alloc_and_append_badrange_entry(badrange, new_start,
					new_len, GFP_NOWAIT);
			/* Adjust this entry to cover the left half */
			bre->length = start - bre->start;
			continue;
		}
	}
	spin_unlock(&badrange->lock);
}
EXPORT_SYMBOL_GPL(badrange_forget);

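/*
 * set_badblock() - mark @num 512-byte sectors starting at @s bad in @bb.
 * A failure to record the range is logged but not treated as fatal; the
 * hardware will still raise an exception when the poisoned media is
 * accessed.
 */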
static void set_badblock(struct badblocks *bb, sector_t s, int num)
{
	dev_dbg(bb->dev, "Found a bad range (0x%llx, 0x%llx)\n",
			(u64) s * 512, (u64) num * 512);
	/* this isn't an error as the hardware will still throw an exception */
	if (badblocks_set(bb, s, num, 1))
		dev_info_once(bb->dev, "%s: failed for sector %llx\n",
				__func__, (u64) s);
}

/**
 * __add_badblock_range() - Convert a physical address range to bad sectors
 * @bb: badblocks instance to populate
 * @ns_offset: namespace offset where the error range begins (in bytes)
 * @len: number of bytes of badrange to be added
 *
 * This assumes that the range provided with (ns_offset, len) is within
 * the bounds of physical addresses for this namespace, i.e. lies in the
 * interval [ns_start, ns_start + ns_size).
 */
static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
{
	const unsigned int sector_size = 512;
	sector_t start_sector, end_sector;
	u64 num_sectors;
	u32 rem;

	start_sector = div_u64(ns_offset, sector_size);
	end_sector = div_u64_rem(ns_offset + len, sector_size, &rem);
	if (rem)
		end_sector++;
	num_sectors = end_sector - start_sector;

	if (unlikely(num_sectors > (u64)INT_MAX)) {
		u64 remaining = num_sectors;
		sector_t s = start_sector;

		while (remaining) {
			int done = min_t(u64, remaining, INT_MAX);

			set_badblock(bb, s, done);
			remaining -= done;
			s += done;
		}
	} else
		set_badblock(bb, start_sector, num_sectors);
}

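/*
 * Worked example for __add_badblock_range(): an error of 100 bytes at
 * namespace offset 500 covers bytes 500..599, which straddles the first
 * sector boundary, so it is recorded as start_sector = 0 with
 * num_sectors = 2.
 */

/*
 * badblocks_populate() - walk the badrange list and add every entry that
 * intersects @range to @bb, translating absolute addresses into offsets
 * relative to range->start.
 */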
static void badblocks_populate(struct badrange *badrange,
		struct badblocks *bb, const struct range *range)
{
	struct badrange_entry *bre;

	if (list_empty(&badrange->list))
		return;

	list_for_each_entry(bre, &badrange->list, list) {
		u64 bre_end = bre->start + bre->length - 1;

		/* Discard intervals with no intersection */
		if (bre_end < range->start)
			continue;
		if (bre->start > range->end)
			continue;
		/* Deal with any overlap after start of the namespace */
		if (bre->start >= range->start) {
			u64 start = bre->start;
			u64 len;

			if (bre_end <= range->end)
				len = bre->length;
			else
				len = range->start + range_len(range)
					- bre->start;
			__add_badblock_range(bb, start - range->start, len);
			continue;
		}
		/*
		 * Deal with overlap for badrange starting before
		 * the namespace.
		 */
		if (bre->start < range->start) {
			u64 len;

			if (bre_end < range->end)
				len = bre->start + bre->length - range->start;
			else
				len = range_len(range);
			__add_badblock_range(bb, 0, len);
		}
	}
}

/**
 * nvdimm_badblocks_populate() - Convert a list of badranges to badblocks
 * @nd_region: parent region of the range to interrogate
 * @bb: badblocks instance to populate
 * @range: resource range to consider
 *
 * The badrange list generated during bus initialization may contain
 * multiple, possibly overlapping physical address ranges. Compare each
 * of these ranges to the resource range currently being initialized,
 * and add badblocks entries for all matching sub-ranges.
 */
void nvdimm_badblocks_populate(struct nd_region *nd_region,
		struct badblocks *bb, const struct range *range)
{
	struct nvdimm_bus *nvdimm_bus;

	if (!is_memory(&nd_region->dev)) {
		dev_WARN_ONCE(&nd_region->dev, 1,
				"%s only valid for pmem regions\n", __func__);
		return;
	}
	nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);

	nvdimm_bus_lock(&nvdimm_bus->dev);
	badblocks_populate(&nvdimm_bus->badrange, bb, range);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
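
/*
 * Illustrative caller sketch (an assumption, not taken from this file): a
 * namespace driver that has already initialized @bb would translate its
 * resource into a struct range and populate badblocks for it, e.g.:
 *
 *	struct range range = {
 *		.start = res->start,
 *		.end = res->end,
 *	};
 *
 *	nvdimm_badblocks_populate(to_nd_region(dev->parent), bb, &range);
 */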