// SPDX-License-Identifier: GPL-2.0-only
/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 */

#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/set_memory.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}

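/*
 * Clear the HWPoison page flag, and let clear_mce_nospec() restore the
 * kernel mapping, for pfns whose media errors have just been repaired.
 * Only pmem in the linear map participates; vmalloc-mapped pmem is
 * skipped.
 */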
static void hwpoison_clear(struct pmem_device *pmem,
		phys_addr_t phys, unsigned int len)
{
	unsigned long pfn_start, pfn_end, pfn;

	/* only pmem in the linear map supports HWPoison */
	if (is_vmalloc_addr(pmem->virt_addr))
		return;

	pfn_start = PHYS_PFN(phys);
	pfn_end = pfn_start + PHYS_PFN(len);
	for (pfn = pfn_start; pfn < pfn_end; pfn++) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * Note, no need to hold a get_dev_pagemap() reference
		 * here since we're in the driver I/O path and
		 * outstanding I/O requests pin the dev_pagemap.
		 */
		if (test_and_clear_pmem_poison(page))
			clear_mce_nospec(pfn);
	}
}

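/*
 * Ask the nvdimm bus to clear poison over @len bytes at @offset. Any
 * whole sectors that were cleared are dropped from the badblocks list
 * and the cached view of the range is invalidated; anything short of a
 * full clear is reported as an I/O error.
 */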
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	struct device *dev = to_dev(pmem);
	sector_t sector;
	long cleared;
	blk_status_t rc = BLK_STS_OK;

	sector = (offset - pmem->data_offset) / 512;

	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
	if (cleared < len)
		rc = BLK_STS_IOERR;
	/* only update badblocks accounting if at least one full sector cleared */
	if (cleared > 0 && cleared / 512) {
		hwpoison_clear(pmem, pmem->phys_addr + offset, cleared);
		cleared /= 512;
		dev_dbg(dev, "%#llx clear %ld sector%s\n",
				(unsigned long long) sector, cleared,
				cleared > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared);
		if (pmem->bb_state)
			sysfs_notify_dirent(pmem->bb_state);
	}

	arch_invalidate_pmem(pmem->virt_addr + offset, len);

	return rc;
}

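/*
 * Copy from a (possibly highmem) page into pmem with cache-bypassing
 * stores so the data is pushed toward the persistence domain, mapping
 * and copying one page-sized chunk at a time.
 */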
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	unsigned int chunk;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		memcpy_flushcache(pmem_addr, mem + off, chunk);
		kunmap_atomic(mem);
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
}

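/*
 * Machine-check-safe copy from pmem into a page: copy_mc_to_kernel()
 * survives a poison consumption and returns the uncopied residue,
 * which is translated into BLK_STS_IOERR rather than taking the
 * machine down.
 */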
static blk_status_t read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	unsigned int chunk;
	unsigned long rem;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		rem = copy_mc_to_kernel(mem + off, pmem_addr, chunk);
		kunmap_atomic(mem);
		if (rem)
			return BLK_STS_IOERR;
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
	return BLK_STS_OK;
}

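/*
 * Sector-addressed read/write helpers shared by the bio, rw_page, and
 * dax-zeroing paths. Reads fail fast on known bad blocks; writes go
 * through the poison-clearing sequence documented in pmem_do_write().
 */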
static blk_status_t pmem_do_read(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	blk_status_t rc;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		return BLK_STS_IOERR;

	rc = read_pmem(page, page_off, pmem_addr, len);
	flush_dcache_page(page);
	return rc;
}

static blk_status_t pmem_do_write(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	blk_status_t rc = BLK_STS_OK;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	/*
	 * Note that we write the data both before and after
	 * clearing poison. The write before clearing poison
	 * handles situations where the latest written data is
	 * preserved and the clear poison operation simply marks
	 * the address range as valid without changing the data.
	 * In this case application software can assume that an
	 * interrupted write will either return the new good
	 * data or an error.
	 *
	 * However, if pmem_clear_poison() leaves the data in an
	 * indeterminate state we need to perform the write
	 * again after clearing poison.
	 */
	flush_dcache_page(page);
	write_pmem(pmem_addr, page, page_off, len);
	if (unlikely(bad_pmem)) {
		rc = pmem_clear_poison(pmem, pmem_off, len);
		write_pmem(pmem_addr, page, page_off, len);
	}

	return rc;
}

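/*
 * Bio front end. REQ_PREFLUSH is honored by flushing the nvdimm region
 * before any data is touched, REQ_FUA by flushing again after the copy
 * loop; a per-segment error terminates the loop and is reported through
 * bio->bi_status.
 */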
static blk_qc_t pmem_submit_bio(struct bio *bio)
{
	int ret = 0;
	blk_status_t rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = bio->bi_disk->private_data;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_PREFLUSH)
		ret = nvdimm_flush(nd_region, bio);

	do_acct = blk_queue_io_stat(bio->bi_disk->queue);
	if (do_acct)
		start = bio_start_io_acct(bio);
	bio_for_each_segment(bvec, bio, iter) {
		if (op_is_write(bio_op(bio)))
			rc = pmem_do_write(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		else
			rc = pmem_do_read(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		if (rc) {
			bio->bi_status = rc;
			break;
		}
	}
	if (do_acct)
		bio_end_io_acct(bio, start);

	if (bio->bi_opf & REQ_FUA)
		ret = nvdimm_flush(nd_region, bio);

	if (ret)
		bio->bi_status = errno_to_blk_status(ret);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, unsigned int op)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	blk_status_t rc;

	if (op_is_write(op))
		rc = pmem_do_write(pmem, page, 0, sector, thp_size(page));
	else
		rc = pmem_do_read(pmem, page, 0, sector, thp_size(page));
	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, op_is_write(op), 0);

	return blk_status_to_errno(rc);
}

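/*
 * Translate a page offset into the device to a kernel address and pfn
 * for DAX. The return value is the number of contiguous pages callers
 * may access at *kaddr: the full remaining device range when no
 * badblocks are registered, otherwise only the requested length, so
 * callers re-probe across potentially bad media.
 */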
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;

	if (kaddr)
		*kaddr = pmem->virt_addr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.submit_bio =		pmem_submit_bio,
	.rw_page =		pmem_rw_page,
};

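/*
 * Zero a page-aligned range by writing ZERO_PAGE(0) through
 * pmem_do_write(), which also clears any poison recorded for those
 * sectors as a side effect.
 */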
static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				    size_t nr_pages)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return blk_status_to_errno(pmem_do_write(pmem, ZERO_PAGE(0), 0,
				   PFN_PHYS(pgoff) >> SECTOR_SHIFT,
				   PAGE_SIZE));
}

static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}

/*
 * Use the 'no check' versions of copy_from_iter_flushcache() and
 * copy_mc_to_iter() to bypass HARDENED_USERCOPY overhead. Bounds
 * checking, both file offset and device offset, is handled by
 * dax_iomap_actor()
 */
static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return _copy_from_iter_flushcache(addr, bytes, i);
}

static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return _copy_mc_to_iter(addr, bytes, i);
}

static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
	.dax_supported = generic_fsdax_supported,
	.copy_from_iter = pmem_copy_from_iter,
	.copy_to_iter = pmem_copy_to_iter,
	.zero_page_range = pmem_dax_zero_page_range,
};

static const struct attribute_group *pmem_attribute_groups[] = {
	&dax_attribute_group,
	NULL,
};

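/*
 * The dev_pagemap's reference count is the request_queue's usage
 * counter, so pagemap teardown maps onto queue teardown: ->kill starts
 * a queue freeze to drain in-flight I/O, and ->cleanup tears the queue
 * down once all page references are gone.
 */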
static void pmem_pagemap_cleanup(struct dev_pagemap *pgmap)
{
	struct request_queue *q =
		container_of(pgmap->ref, struct request_queue, q_usage_counter);

	blk_cleanup_queue(q);
}

static void pmem_release_queue(void *pgmap)
{
	pmem_pagemap_cleanup(pgmap);
}

static void pmem_pagemap_kill(struct dev_pagemap *pgmap)
{
	struct request_queue *q =
		container_of(pgmap->ref, struct request_queue, q_usage_counter);

	blk_freeze_queue_start(q);
}

static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);
	put_disk(pmem->disk);
}

static const struct dev_pagemap_ops fsdax_pagemap_ops = {
	.kill = pmem_pagemap_kill,
	.cleanup = pmem_pagemap_cleanup,
};

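/*
 * Set up the block device and dax device for a namespace. Three
 * mapping modes are handled: a pfn-info-block namespace and a
 * map-pages namespace both get struct pages via devm_memremap_pages()
 * (enabling fsdax), while anything else falls back to a plain
 * devm_memremap() without a page map.
 */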
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int nid = dev_to_node(dev), fua;
	struct resource *res = &nsio->res;
	struct range bb_range;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct request_queue *q;
	struct device *gendev;
	struct gendisk *disk;
	void *addr;
	int rc;
	unsigned long flags = 0UL;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (rc)
		return rc;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
		if (rc)
			return rc;
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_namespace_disable(dev, ndns);

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	fua = nvdimm_has_flush(nd_region);
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes\n");
		fua = 0;
	}

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	q = blk_alloc_queue(dev_to_node(dev));
	if (!q)
		return -ENOMEM;

	pmem->pfn_flags = PFN_DEV;
	pmem->pgmap.ref = &q->q_usage_counter;
	if (is_nd_pfn(dev)) {
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) -
			range_len(&pmem->pgmap.range);
		pmem->pfn_flags |= PFN_MAP;
		bb_range = pmem->pgmap.range;
		bb_range.start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		pmem->pgmap.range.start = res->start;
		pmem->pgmap.range.end = res->end;
		pmem->pgmap.nr_range = 1;
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pmem->pfn_flags |= PFN_MAP;
		bb_range = pmem->pgmap.range;
	} else {
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);
		if (devm_add_action_or_reset(dev, pmem_release_queue,
					&pmem->pgmap))
			return -ENOMEM;
		bb_range.start = res->start;
		bb_range.end = res->end;
	}

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = addr;

	blk_queue_write_cache(q, true, fua);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	if (pmem->pfn_flags & PFN_MAP)
		blk_queue_flag_set(QUEUE_FLAG_DAX, q);

	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;
	pmem->disk = disk;

	disk->fops = &pmem_fops;
	disk->queue = q;
	disk->flags = GENHD_FL_EXT_DEVT;
	disk->private_data = pmem;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_range);
	disk->bb = &pmem->bb;

	if (is_nvdimm_sync(nd_region))
		flags = DAXDEV_F_SYNC;
	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops, flags);
	if (IS_ERR(dax_dev)) {
		put_disk(disk);
		return PTR_ERR(dax_dev);
	}
	dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
	pmem->dax_dev = dax_dev;
	gendev = disk_to_dev(disk);
	gendev->groups = pmem_attribute_groups;

	device_add_disk(dev, disk, NULL);
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	nvdimm_check_and_set_ro(disk);

	pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
					  "badblocks");
	if (!pmem->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");

	return 0;
}

static int nd_pmem_probe(struct device *dev)
{
	int ret;
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	ret = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (ret)
		return ret;

	ret = nd_btt_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;

	/*
	 * We have two failure conditions here: either there is no info
	 * reserve block, or we found a valid info reserve block but
	 * failed to initialize the pfn superblock.
	 *
	 * For the first case consider the namespace a raw pmem namespace
	 * and attach a disk.
	 *
	 * For the latter, consider this a success and advance the
	 * namespace seed.
	 */
	ret = nd_pfn_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	ret = nd_dax_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	/* probe complete, attach handles namespace enabling */
	devm_namespace_disable(dev, ndns);

	return pmem_attach_disk(dev, ndns);
}

static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else {
		/*
		 * Note, this assumes nd_device_lock() is held so that
		 * we do not race nd_pmem_notify()
		 */
		sysfs_put(pmem->bb_state);
		pmem->bb_state = NULL;
	}
	nvdimm_flush(to_nd_region(dev->parent), NULL);

	return 0;
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent), NULL);
}

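/*
 * NVDIMM_REVALIDATE_POISON handler: recompute the range backing this
 * device (accounting for the btt/pfn personalities and any start_pad /
 * end_trunc recorded in the pfn superblock), repopulate the badblocks
 * list, and poke the 'badblocks' sysfs attribute so userspace can
 * re-read it.
 */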
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct nd_region *nd_region;
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct badblocks *bb;
	struct range range;
	struct kernfs_node *bb_state;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
		nd_region = to_nd_region(ndns->dev.parent);
		nsio = to_nd_namespace_io(&ndns->dev);
		bb = &nsio->bb;
		bb_state = NULL;
	} else {
		struct pmem_device *pmem = dev_get_drvdata(dev);

		nd_region = to_region(pmem);
		bb = &pmem->bb;
		bb_state = pmem->bb_state;

		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

			ndns = nd_pfn->ndns;
			offset = pmem->data_offset +
					__le32_to_cpu(pfn_sb->start_pad);
			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		} else {
			ndns = to_ndns(dev);
		}

		nsio = to_nd_namespace_io(&ndns->dev);
	}

	range.start = nsio->res.start + offset;
	range.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, bb, &range);
	if (bb_state)
		sysfs_notify_dirent(bb_state);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

module_nd_driver(nd_pmem_driver);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");