// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/scatterlist.h>
#include <linux/memregion.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static DEFINE_PER_CPU(int, flush_idx);

static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < (1 << ndrd->hints_shift); i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd_get_flush_wpq(ndrd, dimm, j)
					& PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
				+ (res->start & ~PAGE_MASK));
	}

	return 0;
}

int nd_region_activate(struct nd_region *nd_region)
{
	int i, j, num_flush = 0;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
			nvdimm_bus_unlock(&nd_region->dev);
			return -EBUSY;
		}

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		num_flush = min_not_zero(num_flush, nvdimm->num_flush);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	if (!num_flush)
		return 0;

	ndrd->hints_shift = ilog2(num_flush);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	/*
	 * Clear out entries that are duplicates so that redundant
	 * flushes are avoided.
	 */
	for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
		/* ignore if NULL already */
		if (!ndrd_get_flush_wpq(ndrd, i, 0))
			continue;

		for (j = i + 1; j < nd_region->ndr_mappings; j++)
			if (ndrd_get_flush_wpq(ndrd, i, 0) ==
			    ndrd_get_flush_wpq(ndrd, j, 0))
				ndrd_set_flush_wpq(ndrd, j, 0, NULL);
	}

	return 0;
}

static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	memregion_free(nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct device *nd_region_dev(struct nd_region *nd_region)
{
	if (!nd_region)
		return NULL;
	return &nd_region->dev;
}
EXPORT_SYMBOL_GPL(nd_region_dev);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well, an input to the
 * MODALIAS for namespace devices, and the bit number for an nvdimm_bus to
 * match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_memory(&nd_region->dev)) {
		u16 i, label;

		for (i = 0, label = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (test_bit(NDD_LABELING, &nvdimm->flags))
				label++;
		}
		if (label)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

static unsigned long long region_size(struct nd_region *nd_region)
{
	if (is_memory(&nd_region->dev)) {
		return nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		return nd_mapping->size;
	}

	return 0;
}

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%llu\n", region_size(nd_region));
}
static DEVICE_ATTR_RO(size);

static ssize_t deep_flush_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	/*
	 * NOTE: in the nvdimm_has_flush() error case this attribute is
	 * not visible.
	 */
	return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
}

static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	bool flush;
	int rc = strtobool(buf, &flush);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;
	if (!flush)
		return -EINVAL;
	rc = nvdimm_flush(nd_region, NULL);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(deep_flush);
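
/*
 * Illustrative usage sketch (not part of the driver): the deep_flush
 * attribute is exercised from userspace through the region's sysfs file.
 * The region instance below is hypothetical; the actual regionN name
 * depends on the platform.
 *
 *	# cat /sys/bus/nd/devices/region0/deep_flush
 *	1
 *	# echo 1 > /sys/bus/nd/devices/region0/deep_flush
 *
 * A read reports nvdimm_has_flush(); a write of a true value invokes
 * nvdimm_flush() for the region.
 */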

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	ssize_t rc = 0;

	if (is_memory(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	/*
	 * The cookie to show depends on which specification of the
	 * labels we are using. If there are no labels then default to
	 * the v1.1 namespace label cookie definition. To read all this
	 * data we need to wait for probing to settle.
	 */
	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (nd_region->ndr_mappings) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		if (ndd) {
			struct nd_namespace_index *nsindex;

			nsindex = to_namespace_index(ndd, ndd->ns_current);
			rc = sprintf(buf, "%#llx\n",
					nd_region_interleave_set_cookie(nd_region,
							nsindex));
		}
	}
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	if (rc)
		return rc;
	return sprintf(buf, "%#llx\n", nd_set->cookie1);
}
static DEVICE_ATTR_RO(set_cookie);

resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_memory(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}

	return available;
}

resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
{
	resource_size_t available = 0;
	int i;

	if (is_memory(&nd_region->dev))
		available = PHYS_ADDR_MAX;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		if (is_memory(&nd_region->dev))
			available = min(available,
					nd_pmem_max_contiguous_dpa(nd_region,
							nd_mapping));
		else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}
	if (is_memory(&nd_region->dev))
		return available * nd_region->ndr_mappings;
	return available;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size. Of course, this value is potentially invalidated the
	 * moment the nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t max_available_extent_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_allocatable_dpa(nd_region);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(max_available_extent);

static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);

static ssize_t align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%#lx\n", nd_region->align);
}

static ssize_t align_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long val, dpa;
	u32 remainder;
	int rc;

	rc = kstrtoul(buf, 0, &val);
	if (rc)
		return rc;

	if (!nd_region->ndr_mappings)
		return -ENXIO;

	/*
	 * Ensure space-align is evenly divisible by the region
	 * interleave-width, because the kernel typically has no facility
	 * to determine which DIMMs' dimm-physical-addresses would
	 * contribute to the tail capacity in system-physical-address
	 * space for the namespace.
	 */
	dpa = div_u64_rem(val, nd_region->ndr_mappings, &remainder);
	if (!is_power_of_2(dpa) || dpa < PAGE_SIZE
			|| val > region_size(nd_region) || remainder)
		return -EINVAL;

	/*
	 * Given that space allocation consults this value multiple
	 * times ensure it does not change for the duration of the
	 * allocation.
	 */
	nvdimm_bus_lock(dev);
	nd_region->align = val;
	nvdimm_bus_unlock(dev);

	return len;
}
static DEVICE_ATTR_RW(align);
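
/*
 * Illustrative arithmetic for the align_store() check above (hypothetical
 * values, assuming a 4K PAGE_SIZE and a region of at least 16M): with a
 * 4-way interleaved region, writing 16M gives dpa = 16M / 4 = 4M, which is
 * a power of 2 and at least PAGE_SIZE, so the store is accepted. Writing
 * 12M gives dpa = 3M, which is not a power of 2, so the store fails with
 * -EINVAL.
 */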

static ssize_t region_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nd_device_lock(dev);
	if (dev->driver)
		rc = badblocks_show(&nd_region->bb, buf, 0);
	else
		rc = -ENXIO;
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR_ADMIN_RO(resource);

static ssize_t persistence_domain_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags))
		return sprintf(buf, "cpu_cache\n");
	else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags))
		return sprintf(buf, "memory_controller\n");
	else
		return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(persistence_domain);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_align.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_deep_flush.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_max_available_extent.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	&dev_attr_badblocks.attr,
	&dev_attr_resource.attr,
	&dev_attr_persistence_domain.attr,
	NULL,
};

static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (!is_memory(dev) && a == &dev_attr_badblocks.attr)
		return 0;

	if (a == &dev_attr_resource.attr && !is_memory(dev))
		return 0;

	if (a == &dev_attr_deep_flush.attr) {
		int has_flush = nvdimm_has_flush(nd_region);

		if (has_flush == 1)
			return a->mode;
		else if (has_flush == 0)
			return 0444;
		else
			return 0;
	}

	if (a == &dev_attr_persistence_domain.attr) {
		if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE)
					| BIT(ND_REGION_PERSIST_MEMCTRL))) == 0)
			return 0;
		return a->mode;
	}

	if (a == &dev_attr_align.attr)
		return a->mode;

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_memory(dev) && nd_set)
		return a->mode;

	return 0;
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu,%d\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size,
			nd_mapping->position);
}
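
/*
 * Illustrative sketch (hypothetical values): each mappingN attribute reports
 * "<dimm-device>,<dpa-start>,<size>,<position>", so a first mapping might
 * read back in sysfs as:
 *
 *	# cat /sys/bus/nd/devices/region0/mapping0
 *	nmem0,268435456,4294967296,0
 */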

#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while, even in the presence of socket
 * interleave a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

static const struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};

static const struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};

static const struct attribute_group *nd_region_attribute_groups[] = {
	&nd_device_attribute_group,
	&nd_region_attribute_group,
	&nd_numa_attribute_group,
	&nd_mapping_attribute_group,
	NULL,
};

static const struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
	.groups = nd_region_attribute_groups,
};

static const struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
	.groups = nd_region_attribute_groups,
};

static const struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
	.groups = nd_region_attribute_groups,
};

bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

bool is_nd_volatile(struct device *dev)
{
	return dev ? dev->type == &nd_volatile_device_type : false;
}

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (!nd_set)
		return 0;

	if (nsindex && __le16_to_cpu(nsindex->major) == 1
			&& __le16_to_cpu(nsindex->minor) == 1)
		return nd_set->cookie1;
	return nd_set->cookie2;
}

u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->altcookie;
	return 0;
}

void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
	struct nd_label_ent *label_ent, *e;

	lockdep_assert_held(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		list_del(&label_ent->list);
		kfree(label_ent);
	}
}

/*
 * When a namespace is activated create new seeds for the next
 * namespace, or namespace-personality to be configured.
 */
void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
{
	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed == dev) {
		nd_region_create_ns_seed(nd_region);
	} else if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev)
			nd_region_create_ns_seed(nd_region);
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
			nd_region_create_ns_seed(nd_region);
	} else if (is_nd_dax(dev)) {
		struct nd_dax *nd_dax = to_nd_dax(dev);

		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
			nd_region_create_ns_seed(nd_region);
	}
	nvdimm_bus_unlock(dev);
}

int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_dbg(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu. For larger systems we need to lock to share lanes. For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively. We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
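
/*
 * Illustrative usage sketch (not part of this file): a consumer such as the
 * BTT brackets its per-lane work with an acquire/release pair, e.g.:
 *
 *	unsigned int lane = nd_region_acquire_lane(nd_region);
 *
 *	... issue the I/O or BTT log update for this lane ...
 *
 *	nd_region_release_lane(nd_region, lane);
 *
 * The release must be passed the same lane value that was acquired.
 */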

/*
 * PowerPC requires this alignment for memremap_pages(). All other archs
 * should be ok with SUBSECTION_SIZE (see memremap_compat_align()).
 */
#define MEMREMAP_COMPAT_ALIGN_MAX SZ_16M

static unsigned long default_align(struct nd_region *nd_region)
{
	unsigned long align;
	int i, mappings;
	u32 remainder;

	if (is_nd_blk(&nd_region->dev))
		align = PAGE_SIZE;
	else
		align = MEMREMAP_COMPAT_ALIGN_MAX;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if (test_bit(NDD_ALIASING, &nvdimm->flags)) {
			align = MEMREMAP_COMPAT_ALIGN_MAX;
			break;
		}
	}

	if (nd_region->ndr_size < MEMREMAP_COMPAT_ALIGN_MAX)
		align = PAGE_SIZE;

	mappings = max_t(u16, 1, nd_region->ndr_mappings);
	div_u64_rem(align, mappings, &remainder);
	if (remainder)
		align *= mappings;

	return align;
}
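
/*
 * Worked example for default_align() (hypothetical region): a 2-way
 * interleaved pmem region larger than 16M starts at
 * MEMREMAP_COMPAT_ALIGN_MAX (16M); 16M divides evenly by 2 mappings, so
 * the default align stays 16M. A 3-way interleaved region leaves a
 * remainder, so the align is scaled to 3 * 16M = 48M to keep it an even
 * multiple of the interleave width.
 */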

static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc,
		const struct device_type *dev_type, const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		if ((mapping->start | mapping->size) % PAGE_SIZE) {
			dev_err(&nvdimm_bus->dev,
				"%s: %s mapping%d is not %ld aligned\n",
				caller, dev_name(&nvdimm->dev), i, PAGE_SIZE);
			return NULL;
		}

		if (test_bit(NDD_UNARMED, &nvdimm->flags))
			ro = 1;

		if (test_bit(NDD_NOBLK, &nvdimm->flags)
				&& dev_type == &nd_blk_device_type) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n",
					caller, dev_name(&nvdimm->dev), i);
			return NULL;
		}
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(struct_size(nd_region, mapping,
						ndr_desc->num_mappings),
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = memregion_alloc(GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		nd_region->mapping[i].nvdimm = nvdimm;
		nd_region->mapping[i].start = mapping->start;
		nd_region->mapping[i].size = mapping->size;
		nd_region->mapping[i].position = mapping->position;
		INIT_LIST_HEAD(&nd_region->mapping[i].labels);
		mutex_init(&nd_region->mapping[i].lock);

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	nd_region->target_node = ndr_desc->target_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	dev->of_node = ndr_desc->of_node;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_region->align = default_align(nd_region);
	if (ndr_desc->flush)
		nd_region->flush = ndr_desc->flush;
	else
		nd_region->flush = NULL;

	nd_device_register(dev);

	return nd_region;

 err_percpu:
	memregion_free(nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}
1152*4882a593Smuzhiyun
nvdimm_pmem_region_create(struct nvdimm_bus * nvdimm_bus,struct nd_region_desc * ndr_desc)1153*4882a593Smuzhiyun struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
1154*4882a593Smuzhiyun struct nd_region_desc *ndr_desc)
1155*4882a593Smuzhiyun {
1156*4882a593Smuzhiyun ndr_desc->num_lanes = ND_MAX_LANES;
1157*4882a593Smuzhiyun return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
1158*4882a593Smuzhiyun __func__);
1159*4882a593Smuzhiyun }
1160*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);
1161*4882a593Smuzhiyun
1162*4882a593Smuzhiyun struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
1163*4882a593Smuzhiyun struct nd_region_desc *ndr_desc)
1164*4882a593Smuzhiyun {
1165*4882a593Smuzhiyun if (ndr_desc->num_mappings > 1)
1166*4882a593Smuzhiyun return NULL;
1167*4882a593Smuzhiyun ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
1168*4882a593Smuzhiyun return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
1169*4882a593Smuzhiyun __func__);
1170*4882a593Smuzhiyun }
1171*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);
1172*4882a593Smuzhiyun
1173*4882a593Smuzhiyun struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
1174*4882a593Smuzhiyun struct nd_region_desc *ndr_desc)
1175*4882a593Smuzhiyun {
1176*4882a593Smuzhiyun ndr_desc->num_lanes = ND_MAX_LANES;
1177*4882a593Smuzhiyun return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
1178*4882a593Smuzhiyun __func__);
1179*4882a593Smuzhiyun }
1180*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
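/*
 * Illustrative sketch of how a bus provider might use the region creation
 * API above. This is an assumption-laden example, not copied from an
 * in-tree driver: the example_* name, resource, mapping values, and error
 * handling are hypothetical placeholders.
 *
 *	static struct nd_region *example_register_pmem(struct nvdimm_bus *bus,
 *			struct nvdimm *nvdimm, struct resource *spa_res)
 *	{
 *		struct nd_mapping_desc mapping = {
 *			.nvdimm = nvdimm,
 *			.start = spa_res->start,
 *			.size = resource_size(spa_res),
 *			.position = 0,
 *		};
 *		struct nd_region_desc ndr_desc = {
 *			.res = spa_res,
 *			.mapping = &mapping,
 *			.num_mappings = 1,
 *			.numa_node = NUMA_NO_NODE,
 *		};
 *
 *		return nvdimm_pmem_region_create(bus, &ndr_desc);
 *	}
 */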
1181*4882a593Smuzhiyun
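/*
 * Dispatch a flush: prefer the provider-specific callback when the region
 * registered one (ndr_desc->flush at region creation time), otherwise fall
 * back to writing the generic flush hints via generic_nvdimm_flush().
 */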
1182*4882a593Smuzhiyun int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
1183*4882a593Smuzhiyun {
1184*4882a593Smuzhiyun int rc = 0;
1185*4882a593Smuzhiyun
1186*4882a593Smuzhiyun if (!nd_region->flush)
1187*4882a593Smuzhiyun rc = generic_nvdimm_flush(nd_region);
1188*4882a593Smuzhiyun else {
1189*4882a593Smuzhiyun if (nd_region->flush(nd_region, bio))
1190*4882a593Smuzhiyun rc = -EIO;
1191*4882a593Smuzhiyun }
1192*4882a593Smuzhiyun
1193*4882a593Smuzhiyun return rc;
1194*4882a593Smuzhiyun }
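/*
 * Illustrative caller sketch (assumed, loosely modeled on a pmem-style block
 * driver honoring REQ_PREFLUSH; the example_* names are hypothetical):
 *
 *	static void example_submit_bio(struct bio *bio)
 *	{
 *		struct nd_region *nd_region = example_bio_to_region(bio);
 *
 *		if (bio->bi_opf & REQ_PREFLUSH)
 *			if (nvdimm_flush(nd_region, bio) < 0)
 *				bio->bi_status = BLK_STS_IOERR;
 *		// ... submit the data payload ...
 *	}
 */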
1195*4882a593Smuzhiyun /**
1196*4882a593Smuzhiyun  * generic_nvdimm_flush - flush any posted write queues between the cpu and pmem media
1197*4882a593Smuzhiyun  * @nd_region: blk or interleaved pmem region
1198*4882a593Smuzhiyun  */
1199*4882a593Smuzhiyun int generic_nvdimm_flush(struct nd_region *nd_region)
1200*4882a593Smuzhiyun {
1201*4882a593Smuzhiyun struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
1202*4882a593Smuzhiyun int i, idx;
1203*4882a593Smuzhiyun
1204*4882a593Smuzhiyun /*
1205*4882a593Smuzhiyun * Try to encourage some diversity in flush hint addresses
1206*4882a593Smuzhiyun * across cpus assuming a limited number of flush hints.
1207*4882a593Smuzhiyun */
1208*4882a593Smuzhiyun idx = this_cpu_read(flush_idx);
1209*4882a593Smuzhiyun idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));
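	/*
	 * idx is a free-running value, not a bounded hint index; the
	 * ndrd_get_flush_wpq() accessor is expected to mask it down to the
	 * per-dimm hint count, so it only needs to differ between cpus.
	 */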
1210*4882a593Smuzhiyun
1211*4882a593Smuzhiyun /*
1212*4882a593Smuzhiyun * The pmem_wmb() is needed to 'sfence' all
1213*4882a593Smuzhiyun * previous writes such that they are architecturally visible for
1214*4882a593Smuzhiyun * the platform buffer flush. Note that we've already arranged for pmem
1215*4882a593Smuzhiyun * writes to avoid the cache via memcpy_flushcache(). The final
1216*4882a593Smuzhiyun * wmb() ensures ordering for the NVDIMM flush write.
1217*4882a593Smuzhiyun */
1218*4882a593Smuzhiyun pmem_wmb();
1219*4882a593Smuzhiyun for (i = 0; i < nd_region->ndr_mappings; i++)
1220*4882a593Smuzhiyun if (ndrd_get_flush_wpq(ndrd, i, 0))
1221*4882a593Smuzhiyun writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
1222*4882a593Smuzhiyun wmb();
1223*4882a593Smuzhiyun
1224*4882a593Smuzhiyun return 0;
1225*4882a593Smuzhiyun }
1226*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvdimm_flush);
1227*4882a593Smuzhiyun
1228*4882a593Smuzhiyun /**
1229*4882a593Smuzhiyun * nvdimm_has_flush - determine write flushing requirements
1230*4882a593Smuzhiyun * @nd_region: blk or interleaved pmem region
1231*4882a593Smuzhiyun *
1232*4882a593Smuzhiyun * Returns 1 if writes require flushing
1233*4882a593Smuzhiyun * Returns 0 if writes do not require flushing
1234*4882a593Smuzhiyun * Returns -ENXIO if flushing capability can not be determined
1235*4882a593Smuzhiyun */
1236*4882a593Smuzhiyun int nvdimm_has_flush(struct nd_region *nd_region)
1237*4882a593Smuzhiyun {
1238*4882a593Smuzhiyun int i;
1239*4882a593Smuzhiyun
1240*4882a593Smuzhiyun /* no nvdimm or pmem api == flushing capability unknown */
1241*4882a593Smuzhiyun if (nd_region->ndr_mappings == 0
1242*4882a593Smuzhiyun || !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
1243*4882a593Smuzhiyun return -ENXIO;
1244*4882a593Smuzhiyun
1245*4882a593Smuzhiyun /* Test if an explicit flush function is defined */
1246*4882a593Smuzhiyun if (test_bit(ND_REGION_ASYNC, &nd_region->flags) && nd_region->flush)
1247*4882a593Smuzhiyun return 1;
1248*4882a593Smuzhiyun
1249*4882a593Smuzhiyun /* Test if any flush hints for the region are available */
1250*4882a593Smuzhiyun for (i = 0; i < nd_region->ndr_mappings; i++) {
1251*4882a593Smuzhiyun struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1252*4882a593Smuzhiyun struct nvdimm *nvdimm = nd_mapping->nvdimm;
1253*4882a593Smuzhiyun
1254*4882a593Smuzhiyun /* flush hints present / available */
1255*4882a593Smuzhiyun if (nvdimm->num_flush)
1256*4882a593Smuzhiyun return 1;
1257*4882a593Smuzhiyun }
1258*4882a593Smuzhiyun
1259*4882a593Smuzhiyun 	/*
1260*4882a593Smuzhiyun 	 * The platform described these dimms without flush hints and without an
1261*4882a593Smuzhiyun 	 * explicit flush callback: assume platform persistence (e.g. ADR).
1262*4882a593Smuzhiyun 	 */
1263*4882a593Smuzhiyun return 0;
1264*4882a593Smuzhiyun }
1265*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvdimm_has_flush);
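/*
 * Illustrative interpretation of the tri-state return above (a sketch, not
 * copied from an in-tree caller):
 *
 *	int rc = nvdimm_has_flush(nd_region);
 *
 *	if (rc < 0)
 *		dev_warn(dev, "unable to determine persistence domain\n");
 *	else if (rc > 0)
 *		; // writes must be followed by nvdimm_flush() to reach media
 *	else
 *		; // platform persistence (e.g. ADR) covers posted writes
 */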
1266*4882a593Smuzhiyun
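/**
 * nvdimm_has_cache - check whether the cpu cache stands between writes and media
 * @nd_region: interleaved pmem region
 *
 * Returns true for pmem regions that do not advertise a persistent cpu cache
 * (ND_REGION_PERSIST_CACHE), i.e. where data must be written back out of the
 * cache before it can be considered durable.
 */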
1267*4882a593Smuzhiyun int nvdimm_has_cache(struct nd_region *nd_region)
1268*4882a593Smuzhiyun {
1269*4882a593Smuzhiyun return is_nd_pmem(&nd_region->dev) &&
1270*4882a593Smuzhiyun !test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
1271*4882a593Smuzhiyun }
1272*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvdimm_has_cache);
1273*4882a593Smuzhiyun
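/**
 * is_nvdimm_sync - can writes to this region be made durable synchronously
 * @nd_region: region to interrogate
 *
 * Volatile regions trivially qualify; pmem regions qualify only when no
 * asynchronous flush mechanism (ND_REGION_ASYNC) is required, which is what
 * callers typically check before honoring MAP_SYNC mappings.
 */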
1274*4882a593Smuzhiyun bool is_nvdimm_sync(struct nd_region *nd_region)
1275*4882a593Smuzhiyun {
1276*4882a593Smuzhiyun if (is_nd_volatile(&nd_region->dev))
1277*4882a593Smuzhiyun return true;
1278*4882a593Smuzhiyun
1279*4882a593Smuzhiyun return is_nd_pmem(&nd_region->dev) &&
1280*4882a593Smuzhiyun !test_bit(ND_REGION_ASYNC, &nd_region->flags);
1281*4882a593Smuzhiyun }
1282*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(is_nvdimm_sync);
1283*4882a593Smuzhiyun
1284*4882a593Smuzhiyun struct conflict_context {
1285*4882a593Smuzhiyun struct nd_region *nd_region;
1286*4882a593Smuzhiyun resource_size_t start, size;
1287*4882a593Smuzhiyun };
1288*4882a593Smuzhiyun
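/*
 * Report -EBUSY when either end point of the candidate range lands inside
 * another memory region on the same bus; the region that initiated the check
 * is skipped via ctx->nd_region.
 */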
1289*4882a593Smuzhiyun static int region_conflict(struct device *dev, void *data)
1290*4882a593Smuzhiyun {
1291*4882a593Smuzhiyun struct nd_region *nd_region;
1292*4882a593Smuzhiyun struct conflict_context *ctx = data;
1293*4882a593Smuzhiyun resource_size_t res_end, region_end, region_start;
1294*4882a593Smuzhiyun
1295*4882a593Smuzhiyun if (!is_memory(dev))
1296*4882a593Smuzhiyun return 0;
1297*4882a593Smuzhiyun
1298*4882a593Smuzhiyun nd_region = to_nd_region(dev);
1299*4882a593Smuzhiyun if (nd_region == ctx->nd_region)
1300*4882a593Smuzhiyun return 0;
1301*4882a593Smuzhiyun
1302*4882a593Smuzhiyun res_end = ctx->start + ctx->size;
1303*4882a593Smuzhiyun region_start = nd_region->ndr_start;
1304*4882a593Smuzhiyun region_end = region_start + nd_region->ndr_size;
1305*4882a593Smuzhiyun if (ctx->start >= region_start && ctx->start < region_end)
1306*4882a593Smuzhiyun return -EBUSY;
1307*4882a593Smuzhiyun if (res_end > region_start && res_end <= region_end)
1308*4882a593Smuzhiyun return -EBUSY;
1309*4882a593Smuzhiyun return 0;
1310*4882a593Smuzhiyun }
1311*4882a593Smuzhiyun
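/**
 * nd_region_conflict - check a physical address range against sibling regions
 * @nd_region: region on whose behalf the range is being claimed
 * @start: start of the candidate physical address range
 * @size: size of the candidate range in bytes
 *
 * Walks the parent bus and returns -EBUSY if the range overlaps any other
 * memory region, 0 otherwise.
 */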
1312*4882a593Smuzhiyun int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
1313*4882a593Smuzhiyun resource_size_t size)
1314*4882a593Smuzhiyun {
1315*4882a593Smuzhiyun struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
1316*4882a593Smuzhiyun struct conflict_context ctx = {
1317*4882a593Smuzhiyun .nd_region = nd_region,
1318*4882a593Smuzhiyun .start = start,
1319*4882a593Smuzhiyun .size = size,
1320*4882a593Smuzhiyun };
1321*4882a593Smuzhiyun
1322*4882a593Smuzhiyun return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
1323*4882a593Smuzhiyun }
1324