// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

static bool noblk;
module_param(noblk, bool, 0444);
MODULE_PARM_DESC(noblk, "force disable BLK / local alias support");

/*
 * Retrieve the bus and dimm handle and return whether the bus supports
 * get_config_data commands
 */
int nvdimm_check_config_data(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        if (!nvdimm->cmd_mask ||
            !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
                if (test_bit(NDD_LABELING, &nvdimm->flags))
                        return -ENXIO;
                else
                        return -ENOTTY;
        }

        return 0;
}

static int validate_dimm(struct nvdimm_drvdata *ndd)
{
        int rc;

        if (!ndd)
                return -EINVAL;

        rc = nvdimm_check_config_data(ndd->dev);
        if (rc)
                dev_dbg(ndd->dev, "%ps: %s error: %d\n",
                                __builtin_return_address(0), __func__, rc);
        return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm driver-data to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
        struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nvdimm_bus_descriptor *nd_desc;
        int rc = validate_dimm(ndd);
        int cmd_rc = 0;

        if (rc)
                return rc;

        if (cmd->config_size)
                return 0; /* already valid */

        memset(cmd, 0, sizeof(*cmd));
        nd_desc = nvdimm_bus->nd_desc;
        rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                        ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
        if (rc < 0)
                return rc;
        return cmd_rc;
}

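/*
 * Read @len bytes of the dimm's config / label area, starting at @offset,
 * into @buf. The transfer is split into chunks no larger than
 * nsarea.max_xfer, each issued as an ND_CMD_GET_CONFIG_DATA call through
 * the bus descriptor's ->ndctl() handler.
 */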
int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
                size_t offset, size_t len)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
        int rc = validate_dimm(ndd), cmd_rc = 0;
        struct nd_cmd_get_config_data_hdr *cmd;
        size_t max_cmd_size, buf_offset;

        if (rc)
                return rc;

        if (offset + len > ndd->nsarea.config_size)
                return -ENXIO;

        max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
        cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        for (buf_offset = 0; len;
                        len -= cmd->in_length, buf_offset += cmd->in_length) {
                size_t cmd_size;

                cmd->in_offset = offset + buf_offset;
                cmd->in_length = min(max_cmd_size, len);

                cmd_size = sizeof(*cmd) + cmd->in_length;

                rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                                ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
                if (rc < 0)
                        break;
                if (cmd_rc < 0) {
                        rc = cmd_rc;
                        break;
                }

                /* out_buf should be valid, copy it into our output buffer */
                memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
        }
        kvfree(cmd);

        return rc;
}

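/*
 * Write @len bytes from @buf into the dimm's config / label area at @offset.
 * As with the read path, the transfer is chunked by nsarea.max_xfer; each
 * ND_CMD_SET_CONFIG_DATA payload is followed by a 4-byte status word, which
 * is why sizeof(u32) is added to the command allocation and size below.
 */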
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
                void *buf, size_t len)
{
        size_t max_cmd_size, buf_offset;
        struct nd_cmd_set_config_hdr *cmd;
        int rc = validate_dimm(ndd), cmd_rc = 0;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

        if (rc)
                return rc;

        if (offset + len > ndd->nsarea.config_size)
                return -ENXIO;

        max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
        cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        for (buf_offset = 0; len; len -= cmd->in_length,
                        buf_offset += cmd->in_length) {
                size_t cmd_size;

                cmd->in_offset = offset + buf_offset;
                cmd->in_length = min(max_cmd_size, len);
                memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

                /* status is output in the last 4 bytes of the command buffer */
                cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

                rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                                ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
                if (rc < 0)
                        break;
                if (cmd_rc < 0) {
                        rc = cmd_rc;
                        break;
                }
        }
        kvfree(cmd);

        return rc;
}

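/*
 * Per-dimm flag helpers: NDD_LABELING marks a dimm as supporting namespace
 * labels, while NDD_LOCKED tracks whether the dimm is currently in the
 * locked state.
 */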
void nvdimm_set_labeling(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        set_bit(NDD_LABELING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        clear_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        ida_simple_remove(&dimm_ida, nvdimm->id);
        kfree(nvdimm);
}

struct nvdimm *to_nvdimm(struct device *dev)
{
        struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

        WARN_ON(!is_nvdimm(dev));
        return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
        struct nd_region *nd_region = &ndbr->nd_region;
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];

        return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);

unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
{
        /* pmem mapping properties are private to libnvdimm */
        return ARCH_MEMREMAP_PMEM;
}
EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
        struct nvdimm *nvdimm = nd_mapping->nvdimm;

        WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

        return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

void nvdimm_drvdata_release(struct kref *kref)
{
        struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
        struct device *dev = ndd->dev;
        struct resource *res, *_r;

        dev_dbg(dev, "trace\n");
        nvdimm_bus_lock(dev);
        for_each_dpa_resource_safe(ndd, res, _r)
                nvdimm_free_dpa(ndd, res);
        nvdimm_bus_unlock(dev);

        kvfree(ndd->data);
        kfree(ndd);
        put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
        kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
        if (ndd)
                kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
        return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
        return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
        return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
        if (nvdimm)
                return nvdimm->provider_data;
        return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);
        int cmd, len = 0;

        if (!nvdimm->cmd_mask)
                return sprintf(buf, "\n");

        for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
                len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
        len += sprintf(buf + len, "\n");
        return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t flags_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        return sprintf(buf, "%s%s%s\n",
                        test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
                        test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "",
                        test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
                char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        /*
         * The state may be in the process of changing, userspace should
         * quiesce probing if it wants a static answer
         */
        nvdimm_bus_lock(dev);
        nvdimm_bus_unlock(dev);
        return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
                        ? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf)
{
        struct device *dev;
        ssize_t rc;
        u32 nfree;

        if (!ndd)
                return -ENXIO;

        dev = ndd->dev;
        nvdimm_bus_lock(dev);
        nfree = nd_label_nfree(ndd);
        if (nfree - 1 > nfree) {
                dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
                nfree = 0;
        } else
                nfree--;
        rc = sprintf(buf, "%d\n", nfree);
        nvdimm_bus_unlock(dev);
        return rc;
}

static ssize_t available_slots_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        ssize_t rc;

        nd_device_lock(dev);
        rc = __available_slots_show(dev_get_drvdata(dev), buf);
        nd_device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(available_slots);

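/*
 * security_show() is marked __weak so that another definition (for example,
 * a test harness) can override it; the default below reports the dimm's
 * current security state as a single keyword.
 */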
__weak ssize_t security_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
                return sprintf(buf, "overwrite\n");
        if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
                return sprintf(buf, "disabled\n");
        if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
                return sprintf(buf, "unlocked\n");
        if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
                return sprintf(buf, "locked\n");
        return -ENOTTY;
}

static ssize_t frozen_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        return sprintf(buf, "%d\n", test_bit(NVDIMM_SECURITY_FROZEN,
                                &nvdimm->sec.flags));
}
static DEVICE_ATTR_RO(frozen);

static ssize_t security_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        ssize_t rc;

        /*
         * Require all userspace triggered security management to be
         * done while probing is idle and the DIMM is not in active use
         * in any region.
         */
        nd_device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        rc = nvdimm_security_store(dev, buf, len);
        nvdimm_bus_unlock(dev);
        nd_device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RW(security);

static struct attribute *nvdimm_attributes[] = {
        &dev_attr_state.attr,
        &dev_attr_flags.attr,
        &dev_attr_commands.attr,
        &dev_attr_available_slots.attr,
        &dev_attr_security.attr,
        &dev_attr_frozen.attr,
        NULL,
};

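/*
 * Attribute visibility: the security and frozen attributes are only exposed
 * once security flags have been populated; "security" is demoted to
 * read-only when the provider implements no state-mutation ops, and
 * "frozen" is hidden when the freeze op is absent.
 */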
static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, typeof(*dev), kobj);
        struct nvdimm *nvdimm = to_nvdimm(dev);

        if (a != &dev_attr_security.attr && a != &dev_attr_frozen.attr)
                return a->mode;
        if (!nvdimm->sec.flags)
                return 0;

        if (a == &dev_attr_security.attr) {
                /* Are there any state mutation ops (make writable)? */
                if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
                                || nvdimm->sec.ops->change_key
                                || nvdimm->sec.ops->erase
                                || nvdimm->sec.ops->overwrite)
                        return a->mode;
                return 0444;
        }

        if (nvdimm->sec.ops->freeze)
                return a->mode;
        return 0;
}

static const struct attribute_group nvdimm_attribute_group = {
        .attrs = nvdimm_attributes,
        .is_visible = nvdimm_visible,
};

static ssize_t result_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);
        enum nvdimm_fwa_result result;

        if (!nvdimm->fw_ops)
                return -EOPNOTSUPP;

        nvdimm_bus_lock(dev);
        result = nvdimm->fw_ops->activate_result(nvdimm);
        nvdimm_bus_unlock(dev);

        switch (result) {
        case NVDIMM_FWA_RESULT_NONE:
                return sprintf(buf, "none\n");
        case NVDIMM_FWA_RESULT_SUCCESS:
                return sprintf(buf, "success\n");
        case NVDIMM_FWA_RESULT_FAIL:
                return sprintf(buf, "fail\n");
        case NVDIMM_FWA_RESULT_NOTSTAGED:
                return sprintf(buf, "not_staged\n");
        case NVDIMM_FWA_RESULT_NEEDRESET:
                return sprintf(buf, "need_reset\n");
        default:
                return -ENXIO;
        }
}
static DEVICE_ATTR_ADMIN_RO(result);

static ssize_t activate_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);
        enum nvdimm_fwa_state state;

        if (!nvdimm->fw_ops)
                return -EOPNOTSUPP;

        nvdimm_bus_lock(dev);
        state = nvdimm->fw_ops->activate_state(nvdimm);
        nvdimm_bus_unlock(dev);

        switch (state) {
        case NVDIMM_FWA_IDLE:
                return sprintf(buf, "idle\n");
        case NVDIMM_FWA_BUSY:
                return sprintf(buf, "busy\n");
        case NVDIMM_FWA_ARMED:
                return sprintf(buf, "armed\n");
        default:
                return -ENXIO;
        }
}

static ssize_t activate_store(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t len)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);
        enum nvdimm_fwa_trigger arg;
        int rc;

        if (!nvdimm->fw_ops)
                return -EOPNOTSUPP;

        if (sysfs_streq(buf, "arm"))
                arg = NVDIMM_FWA_ARM;
        else if (sysfs_streq(buf, "disarm"))
                arg = NVDIMM_FWA_DISARM;
        else
                return -EINVAL;

        nvdimm_bus_lock(dev);
        rc = nvdimm->fw_ops->arm(nvdimm, arg);
        nvdimm_bus_unlock(dev);

        if (rc < 0)
                return rc;
        return len;
}
static DEVICE_ATTR_ADMIN_RW(activate);

static struct attribute *nvdimm_firmware_attributes[] = {
        &dev_attr_activate.attr,
        &dev_attr_result.attr,
        NULL,
};

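/*
 * The firmware/ attribute group is only exposed when both the bus and the
 * dimm implement firmware-activate ops and the bus reports at least
 * NVDIMM_FWA_CAP_QUIESCE capability.
 */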
static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, typeof(*dev), kobj);
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
        struct nvdimm *nvdimm = to_nvdimm(dev);
        enum nvdimm_fwa_capability cap;

        if (!nd_desc->fw_ops)
                return 0;
        if (!nvdimm->fw_ops)
                return 0;

        nvdimm_bus_lock(dev);
        cap = nd_desc->fw_ops->capability(nd_desc);
        nvdimm_bus_unlock(dev);

        if (cap < NVDIMM_FWA_CAP_QUIESCE)
                return 0;

        return a->mode;
}

static const struct attribute_group nvdimm_firmware_attribute_group = {
        .name = "firmware",
        .attrs = nvdimm_firmware_attributes,
        .is_visible = nvdimm_firmware_visible,
};

static const struct attribute_group *nvdimm_attribute_groups[] = {
        &nd_device_attribute_group,
        &nvdimm_attribute_group,
        &nvdimm_firmware_attribute_group,
        NULL,
};

static const struct device_type nvdimm_device_type = {
        .name = "nvdimm",
        .release = nvdimm_release,
        .groups = nvdimm_attribute_groups,
};

bool is_nvdimm(struct device *dev)
{
        return dev->type == &nvdimm_device_type;
}

struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
                void *provider_data, const struct attribute_group **groups,
                unsigned long flags, unsigned long cmd_mask, int num_flush,
                struct resource *flush_wpq, const char *dimm_id,
                const struct nvdimm_security_ops *sec_ops,
                const struct nvdimm_fw_ops *fw_ops)
{
        struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
        struct device *dev;

        if (!nvdimm)
                return NULL;

        nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
        if (nvdimm->id < 0) {
                kfree(nvdimm);
                return NULL;
        }

        nvdimm->dimm_id = dimm_id;
        nvdimm->provider_data = provider_data;
        if (noblk)
                flags |= 1 << NDD_NOBLK;
        nvdimm->flags = flags;
        nvdimm->cmd_mask = cmd_mask;
        nvdimm->num_flush = num_flush;
        nvdimm->flush_wpq = flush_wpq;
        atomic_set(&nvdimm->busy, 0);
        dev = &nvdimm->dev;
        dev_set_name(dev, "nmem%d", nvdimm->id);
        dev->parent = &nvdimm_bus->dev;
        dev->type = &nvdimm_device_type;
        dev->devt = MKDEV(nvdimm_major, nvdimm->id);
        dev->groups = groups;
        nvdimm->sec.ops = sec_ops;
        nvdimm->fw_ops = fw_ops;
        nvdimm->sec.overwrite_tmo = 0;
        INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query);
        /*
         * Security state must be initialized before device_add() for
         * attribute visibility.
         */
        /* get security state and extended (master) state */
        nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
        nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
        nd_device_register(dev);

        return nvdimm;
}
EXPORT_SYMBOL_GPL(__nvdimm_create);

static void shutdown_security_notify(void *data)
{
        struct nvdimm *nvdimm = data;

        sysfs_put(nvdimm->sec.overwrite_state);
}

int nvdimm_security_setup_events(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        if (!nvdimm->sec.flags || !nvdimm->sec.ops
                        || !nvdimm->sec.ops->overwrite)
                return 0;
        nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
        if (!nvdimm->sec.overwrite_state)
                return -ENOMEM;

        return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm);
}
EXPORT_SYMBOL_GPL(nvdimm_security_setup_events);

int nvdimm_in_overwrite(struct nvdimm *nvdimm)
{
        return test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_in_overwrite);

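/*
 * Ask the provider to freeze the dimm's security state. The call requires
 * the bus lock, is rejected while an overwrite is in flight, and refreshes
 * the cached user security flags afterwards.
 */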
int nvdimm_security_freeze(struct nvdimm *nvdimm)
{
        int rc;

        WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

        if (!nvdimm->sec.ops || !nvdimm->sec.ops->freeze)
                return -EOPNOTSUPP;

        if (!nvdimm->sec.flags)
                return -EIO;

        if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
                dev_warn(&nvdimm->dev, "Overwrite operation in progress.\n");
                return -EBUSY;
        }

        rc = nvdimm->sec.ops->freeze(nvdimm);
        nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);

        return rc;
}

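/*
 * Per-dimm allocation granularity: the region's alignment divided evenly
 * across its mappings. Returns 0 (and warns) if the bus lock is not held
 * or the region's alignment / mapping count is inconsistent.
 */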
static unsigned long dpa_align(struct nd_region *nd_region)
{
        struct device *dev = &nd_region->dev;

        if (dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev),
                                "bus lock required for capacity provision\n"))
                return 0;
        if (dev_WARN_ONCE(dev, !nd_region->ndr_mappings || nd_region->align
                                % nd_region->ndr_mappings,
                                "invalid region align %#lx mappings: %d\n",
                                nd_region->align, nd_region->ndr_mappings))
                return 0;
        return nd_region->align / nd_region->ndr_mappings;
}

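/*
 * device_for_each_child() callback: for each PMEM region that shares the
 * dimm referenced by @data, advance the candidate BLK start past any
 * aliased PMEM allocations and account the consumed capacity, so that BLK
 * space is only handed out above the highest PMEM allocation.
 */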
int alias_dpa_busy(struct device *dev, void *data)
{
        resource_size_t map_end, blk_start, new;
        struct blk_alloc_info *info = data;
        struct nd_mapping *nd_mapping;
        struct nd_region *nd_region;
        struct nvdimm_drvdata *ndd;
        struct resource *res;
        unsigned long align;
        int i;

        if (!is_memory(dev))
                return 0;

        nd_region = to_nd_region(dev);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                nd_mapping = &nd_region->mapping[i];
                if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
                        break;
        }

        if (i >= nd_region->ndr_mappings)
                return 0;

        ndd = to_ndd(nd_mapping);
        map_end = nd_mapping->start + nd_mapping->size - 1;
        blk_start = nd_mapping->start;

        /*
         * In the allocation case ->res is set to free space that we are
         * looking to validate against PMEM aliasing collision rules
         * (i.e. BLK is allocated after all aliased PMEM).
         */
        if (info->res) {
                if (info->res->start >= nd_mapping->start
                                && info->res->start < map_end)
                        /* pass */;
                else
                        return 0;
        }

 retry:
        /*
         * Find the free dpa from the end of the last pmem allocation to
         * the end of the interleave-set mapping.
         */
        align = dpa_align(nd_region);
        if (!align)
                return 0;

        for_each_dpa_resource(ndd, res) {
                resource_size_t start, end;

                if (strncmp(res->name, "pmem", 4) != 0)
                        continue;

                start = ALIGN_DOWN(res->start, align);
                end = ALIGN(res->end + 1, align) - 1;
                if ((start >= blk_start && start < map_end)
                                || (end >= blk_start && end <= map_end)) {
                        new = max(blk_start, min(map_end, end) + 1);
                        if (new != blk_start) {
                                blk_start = new;
                                goto retry;
                        }
                }
        }

        /* update the free space range with the probed blk_start */
        if (info->res && blk_start > info->res->start) {
                info->res->start = max(info->res->start, blk_start);
                if (info->res->start > info->res->end)
                        info->res->end = info->res->start - 1;
                return 1;
        }

        info->available -= blk_start - nd_mapping->start;

        return 0;
}

/**
 * nd_blk_available_dpa - account the unused dpa of BLK region
 * @nd_region: BLK region whose dimm mapping is being accounted
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct blk_alloc_info info = {
                .nd_mapping = nd_mapping,
                .available = nd_mapping->size,
                .res = NULL,
        };
        struct resource *res;
        unsigned long align;

        if (!ndd)
                return 0;

        device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

        /* now account for busy blk allocations in unaliased dpa */
        align = dpa_align(nd_region);
        if (!align)
                return 0;
        for_each_dpa_resource(ndd, res) {
                resource_size_t start, end, size;

                if (strncmp(res->name, "blk", 3) != 0)
                        continue;
                start = ALIGN_DOWN(res->start, align);
                end = ALIGN(res->end + 1, align) - 1;
                size = end - start + 1;
                if (size >= info.available)
                        return 0;
                info.available -= size;
        }

        return info.available;
}

/**
 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
 * contiguous unallocated dpa range.
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 */
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping)
{
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct nvdimm_bus *nvdimm_bus;
        resource_size_t max = 0;
        struct resource *res;
        unsigned long align;

        /* if a dimm is disabled the available capacity is zero */
        if (!ndd)
                return 0;

        align = dpa_align(nd_region);
        if (!align)
                return 0;

        nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
                return 0;
        for_each_dpa_resource(ndd, res) {
                resource_size_t start, end;

                if (strcmp(res->name, "pmem-reserve") != 0)
                        continue;
                /* trim free space relative to current alignment setting */
                start = ALIGN(res->start, align);
                end = ALIGN_DOWN(res->end + 1, align) - 1;
                if (end < start)
                        continue;
                if (end - start + 1 > max)
                        max = end - start + 1;
        }
        release_free_pmem(nvdimm_bus, nd_mapping);
        return max;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set. Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
        resource_size_t map_start, map_end, busy = 0, available, blk_start;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res;
        const char *reason;
        unsigned long align;

        if (!ndd)
                return 0;

        align = dpa_align(nd_region);
        if (!align)
                return 0;

        map_start = nd_mapping->start;
        map_end = map_start + nd_mapping->size - 1;
        blk_start = max(map_start, map_end + 1 - *overlap);
        for_each_dpa_resource(ndd, res) {
                resource_size_t start, end;

                start = ALIGN_DOWN(res->start, align);
                end = ALIGN(res->end + 1, align) - 1;
                if (start >= map_start && start < map_end) {
                        if (strncmp(res->name, "blk", 3) == 0)
                                blk_start = min(blk_start,
                                                max(map_start, start));
                        else if (end > map_end) {
                                reason = "misaligned to iset";
                                goto err;
                        } else
                                busy += end - start + 1;
                } else if (end >= map_start && end <= map_end) {
                        if (strncmp(res->name, "blk", 3) == 0) {
                                /*
                                 * If a BLK allocation overlaps the start of
                                 * PMEM the entire interleave set may now only
                                 * be used for BLK.
                                 */
                                blk_start = map_start;
                        } else
                                busy += end - start + 1;
                } else if (map_start > start && map_start < end) {
                        /* total eclipse of the mapping */
                        busy += nd_mapping->size;
                        blk_start = map_start;
                }
        }

        *overlap = map_end + 1 - blk_start;
        available = blk_start - map_start;
        if (busy < available)
                return ALIGN_DOWN(available - busy, align);
        return 0;

 err:
        nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
        return 0;
}

void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
        WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
        kfree(res->name);
        __release_region(&ndd->dpa, res->start, resource_size(res));
}

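/*
 * Reserve @n bytes of dimm-physical-address space at @start under the
 * dimm's dpa resource tree. The resource is named after the label_id so
 * nvdimm_allocated_dpa() can later sum allocations by id; the duplicated
 * name is freed again in nvdimm_free_dpa().
 */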
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id, resource_size_t start,
                resource_size_t n)
{
        char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
        struct resource *res;

        if (!name)
                return NULL;

        WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
        res = __request_region(&ndd->dpa, start, n, name, 0);
        if (!res)
                kfree(name);
        return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id)
{
        resource_size_t allocated = 0;
        struct resource *res;

        for_each_dpa_resource(ndd, res)
                if (strcmp(res->name, label_id->id) == 0)
                        allocated += resource_size(res);

        return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
        int *count = c;

        if (is_nvdimm(dev))
                (*count)++;
        return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
        int count = 0;

        /* Flush any possible dimm registration failures */
        nd_synchronize();

        device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
        dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
        if (count != dimm_count)
                return -ENXIO;
        return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
        ida_destroy(&dimm_ida);
}