xref: /OK3568_Linux_fs/kernel/drivers/nvdimm/region.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
4*4882a593Smuzhiyun  */
5*4882a593Smuzhiyun #include <linux/cpumask.h>
6*4882a593Smuzhiyun #include <linux/module.h>
7*4882a593Smuzhiyun #include <linux/device.h>
8*4882a593Smuzhiyun #include <linux/nd.h>
9*4882a593Smuzhiyun #include "nd-core.h"
10*4882a593Smuzhiyun #include "nd.h"
11*4882a593Smuzhiyun 
/*
 * Driver probe for an nd_region device: activate the region, initialize
 * BLK-region state, set up badblocks tracking for memory (pmem) regions,
 * and register the namespaces plus the BTT/PFN/DAX seed devices.
 *
 * Returns 0 on success (including partial namespace registration, see the
 * comment near the end), or a negative errno on hard failure.
 */
static int nd_region_probe(struct device *dev)
{
	int err, rc;
	/* one-shot latch so the lane/cpu advisory below is logged only once */
	static unsigned long once;
	struct nd_region_data *ndrd;
	struct nd_region *nd_region = to_nd_region(dev);

	/*
	 * Advisory only: with fewer online cpus than i/o lanes, lane access
	 * is serialized by a lock; suggest a bigger nr_cpus setting.
	 */
	if (nd_region->num_lanes > num_online_cpus()
			&& nd_region->num_lanes < num_possible_cpus()
			&& !test_and_set_bit(0, &once)) {
		dev_dbg(dev, "online cpus (%d) < concurrent i/o lanes (%d) < possible cpus (%d)\n",
				num_online_cpus(), nd_region->num_lanes,
				num_possible_cpus());
		dev_dbg(dev, "setting nr_cpus=%d may yield better libnvdimm device performance\n",
				nd_region->num_lanes);
	}

	rc = nd_region_activate(nd_region);
	if (rc)
		return rc;

	rc = nd_blk_region_init(nd_region);
	if (rc)
		return rc;

	if (is_memory(&nd_region->dev)) {
		/* span of the whole region for the initial badblocks scan */
		struct range range = {
			.start = nd_region->ndr_start,
			.end = nd_region->ndr_start + nd_region->ndr_size - 1,
		};

		if (devm_init_badblocks(dev, &nd_region->bb))
			return -ENODEV;
		/*
		 * Cache the sysfs dirent so poison-list updates can poke
		 * 'badblocks' readers; non-fatal if the lookup fails.
		 */
		nd_region->bb_state = sysfs_get_dirent(nd_region->dev.kobj.sd,
						       "badblocks");
		if (!nd_region->bb_state)
			dev_warn(&nd_region->dev,
					"'badblocks' notification disabled\n");
		nvdimm_badblocks_populate(nd_region, &nd_region->bb, &range);
	}

	/* rc = number of namespaces registered, err = number that failed */
	rc = nd_region_register_namespaces(nd_region, &err);
	if (rc < 0)
		return rc;

	ndrd = dev_get_drvdata(dev);
	ndrd->ns_active = rc;
	ndrd->ns_count = rc + err;

	/* rc == err means every attempted namespace failed: give up */
	if (rc && err && rc == err)
		return -ENODEV;

	nd_region->btt_seed = nd_btt_create(nd_region);
	nd_region->pfn_seed = nd_pfn_create(nd_region);
	nd_region->dax_seed = nd_dax_create(nd_region);
	if (err == 0)
		return 0;

	/*
	 * Given multiple namespaces per region, we do not want to
	 * disable all the successfully registered peer namespaces upon
	 * a single registration failure.  If userspace is missing a
	 * namespace that it expects it can disable/re-enable the region
	 * to retry discovery after correcting the failure.
	 * <regionX>/namespaces returns the current
	 * "<async-registered>/<total>" namespace count.
	 */
	dev_err(dev, "failed to register %d namespace%s, continuing...\n",
			err, err == 1 ? "" : "s");
	return 0;
}
83*4882a593Smuzhiyun 
child_unregister(struct device * dev,void * data)84*4882a593Smuzhiyun static int child_unregister(struct device *dev, void *data)
85*4882a593Smuzhiyun {
86*4882a593Smuzhiyun 	nd_device_unregister(dev, ND_SYNC);
87*4882a593Smuzhiyun 	return 0;
88*4882a593Smuzhiyun }
89*4882a593Smuzhiyun 
/*
 * Driver remove for an nd_region device: tear down all child devices,
 * then clear the seed-device pointers and driver data under the
 * nvdimm_bus lock so concurrent sysfs attribute readers observe a
 * consistent "disabled" state.
 */
static int nd_region_remove(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	/* children must go first, before the seeds they may reference */
	device_for_each_child(dev, NULL, child_unregister);

	/* flush attribute readers and disable */
	nvdimm_bus_lock(dev);
	nd_region->ns_seed = NULL;
	nd_region->btt_seed = NULL;
	nd_region->pfn_seed = NULL;
	nd_region->dax_seed = NULL;
	dev_set_drvdata(dev, NULL);
	nvdimm_bus_unlock(dev);

	/*
	 * Note, this assumes nd_device_lock() context to not race
	 * nd_region_notify()
	 */
	sysfs_put(nd_region->bb_state);
	nd_region->bb_state = NULL;

	return 0;
}
114*4882a593Smuzhiyun 
child_notify(struct device * dev,void * data)115*4882a593Smuzhiyun static int child_notify(struct device *dev, void *data)
116*4882a593Smuzhiyun {
117*4882a593Smuzhiyun 	nd_device_notify(dev, *(enum nvdimm_event *) data);
118*4882a593Smuzhiyun 	return 0;
119*4882a593Smuzhiyun }
120*4882a593Smuzhiyun 
nd_region_notify(struct device * dev,enum nvdimm_event event)121*4882a593Smuzhiyun static void nd_region_notify(struct device *dev, enum nvdimm_event event)
122*4882a593Smuzhiyun {
123*4882a593Smuzhiyun 	if (event == NVDIMM_REVALIDATE_POISON) {
124*4882a593Smuzhiyun 		struct nd_region *nd_region = to_nd_region(dev);
125*4882a593Smuzhiyun 
126*4882a593Smuzhiyun 		if (is_memory(&nd_region->dev)) {
127*4882a593Smuzhiyun 			struct range range = {
128*4882a593Smuzhiyun 				.start = nd_region->ndr_start,
129*4882a593Smuzhiyun 				.end = nd_region->ndr_start +
130*4882a593Smuzhiyun 					nd_region->ndr_size - 1,
131*4882a593Smuzhiyun 			};
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun 			nvdimm_badblocks_populate(nd_region,
134*4882a593Smuzhiyun 					&nd_region->bb, &range);
135*4882a593Smuzhiyun 			if (nd_region->bb_state)
136*4882a593Smuzhiyun 				sysfs_notify_dirent(nd_region->bb_state);
137*4882a593Smuzhiyun 		}
138*4882a593Smuzhiyun 	}
139*4882a593Smuzhiyun 	device_for_each_child(dev, &event, child_notify);
140*4882a593Smuzhiyun }
141*4882a593Smuzhiyun 
142*4882a593Smuzhiyun static struct nd_device_driver nd_region_driver = {
143*4882a593Smuzhiyun 	.probe = nd_region_probe,
144*4882a593Smuzhiyun 	.remove = nd_region_remove,
145*4882a593Smuzhiyun 	.notify = nd_region_notify,
146*4882a593Smuzhiyun 	.drv = {
147*4882a593Smuzhiyun 		.name = "nd_region",
148*4882a593Smuzhiyun 	},
149*4882a593Smuzhiyun 	.type = ND_DRIVER_REGION_BLK | ND_DRIVER_REGION_PMEM,
150*4882a593Smuzhiyun };
151*4882a593Smuzhiyun 
/* Register the region driver with the nvdimm bus at module init. */
int __init nd_region_init(void)
{
	return nd_driver_register(&nd_region_driver);
}
156*4882a593Smuzhiyun 
/* Unregister the region driver at module exit. */
void nd_region_exit(void)
{
	driver_unregister(&nd_region_driver.drv);
}
161*4882a593Smuzhiyun 
162*4882a593Smuzhiyun MODULE_ALIAS_ND_DEVICE(ND_DEVICE_REGION_PMEM);
163*4882a593Smuzhiyun MODULE_ALIAS_ND_DEVICE(ND_DEVICE_REGION_BLK);
164