// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2016, Intel Corporation.
 */
#include "test/nfit_test.h"
#include <linux/blkdev.h>
#include <pmem.h>
#include <nd.h>
9*4882a593Smuzhiyun
/*
 * Resolve @pgoff within @pmem to a kernel virtual address (*@kaddr) and
 * page frame (*@pfn) for DAX access.  Returns the number of pages that
 * may be safely accessed from the translated address, or -EIO when the
 * request overlaps known-bad pmem.  Either output pointer may be NULL
 * when the caller does not need that translation.
 */
long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	/* Reject the request outright if it touches poisoned blocks. */
	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;

	/* The virtual address answer is the same on both paths below. */
	if (kaddr)
		*kaddr = pmem->virt_addr + offset;

	/*
	 * nfit_test ranges are vmalloc()-backed and therefore not
	 * physically contiguous: the pfn must come from
	 * vmalloc_to_page(), and dax is limited to one page at a time.
	 */
	if (get_nfit_res(pmem->phys_addr + offset)) {
		struct page *page = vmalloc_to_page(pmem->virt_addr + offset);

		if (pfn)
			*pfn = page_to_pfn_t(page);
		pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
				__func__, pmem, pgoff, page_to_pfn(page));
		return 1;
	}

	if (pfn)
		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}
50