Lines in fs/dax.c matching refs:iomap. Each line carries its source line number; the trailing "in function()" annotation names the enclosing function, and "argument"/"local" mark the lines where iomap is declared as a parameter or a local variable:
1014 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos) in dax_iomap_sector() argument
1016 return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9; in dax_iomap_sector()
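The fragment above is the helper's entire body; a commented reconstruction, assuming the mainline fs/dax.c of this era:

/*
 * Translate a file position into a 512-byte sector of the extent described
 * by the iomap: iomap->addr is the byte address of the extent on disk,
 * iomap->offset is the file offset it maps, and ">> 9" converts bytes to
 * sectors.
 */
static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}

Note that pos is truncated to a page boundary first, so callers always get the sector of the containing page.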
1019 static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size, in dax_iomap_pfn() argument
1022 const sector_t sector = dax_iomap_sector(iomap, pos); in dax_iomap_pfn()
1027 rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff); in dax_iomap_pfn()
1031 length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size), in dax_iomap_pfn()
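The quoted calls show dax_iomap_pfn()'s pipeline: file position -> sector -> DAX device page offset -> pfn. A reconstruction with comments, assuming the mainline error handling between the quoted lines:

static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
			 pfn_t *pfnp)
{
	const sector_t sector = dax_iomap_sector(iomap, pos);
	pgoff_t pgoff;
	int id, rc;
	long length;

	/* Map the block-device sector to a page offset in the DAX device. */
	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
	if (rc)
		return rc;
	id = dax_read_lock();
	/* kaddr is NULL: only the pfn is wanted, for a page-table entry. */
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   NULL, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	rc = -ENXIO;
	/* The device must back the whole request contiguously ... */
	if (PFN_PHYS(length) < size)
		goto out;
	/* ... and the pfn must be naturally aligned for the mapping size. */
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size) - 1))
		goto out;
	/* Multi-page (huge) mappings additionally require a devmap pfn. */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;
out:
	dax_read_unlock(id);
	return rc;
}

size is PAGE_SIZE for PTE faults and PMD_SIZE for PMD faults, which is why the same helper serves both fault paths below.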
1075 s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap) in dax_iomap_zero() argument
1077 sector_t sector = iomap_sector(iomap, pos & PAGE_MASK); in dax_iomap_zero()
1089 rc = bdev_dax_pgoff(iomap->bdev, sector, PAGE_SIZE, &pgoff); in dax_iomap_zero()
1096 rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1); in dax_iomap_zero()
1098 rc = dax_direct_access(iomap->dax_dev, pgoff, 1, &kaddr, NULL); in dax_iomap_zero()
1106 dax_flush(iomap->dax_dev, kaddr + offset, size); in dax_iomap_zero()
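dax_iomap_zero() zeroes a sub-range within one page, and the quoted lines show its two strategies. A reconstruction, again assuming the mainline of this era:

s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
{
	sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
	pgoff_t pgoff;
	long rc, id;
	void *kaddr;
	bool page_aligned = false;
	unsigned offset = offset_in_page(pos);
	unsigned size = min_t(u64, PAGE_SIZE - offset, length);

	if (IS_ALIGNED(sector << SECTOR_SHIFT, PAGE_SIZE) &&
	    size == PAGE_SIZE)
		page_aligned = true;

	rc = bdev_dax_pgoff(iomap->bdev, sector, PAGE_SIZE, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();

	if (page_aligned)
		/* Whole page: let the driver zero it directly. */
		rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
	else
		rc = dax_direct_access(iomap->dax_dev, pgoff, 1, &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}

	if (!page_aligned) {
		/* Partial page: zero by hand, then write back the CPU
		 * cache lines so the stores reach persistent media. */
		memset(kaddr + offset, 0, size);
		dax_flush(iomap->dax_dev, kaddr + offset, size);
	}
	dax_read_unlock(id);
	return size;
}

The split matters because dax_zero_page_range() can also clear hardware poison in the page, which a plain memset() through dax_direct_access() cannot.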
1114 struct iomap *iomap, struct iomap *srcmap) in dax_iomap_actor() argument
1116 struct block_device *bdev = iomap->bdev; in dax_iomap_actor()
1117 struct dax_device *dax_dev = iomap->dax_dev; in dax_iomap_actor()
1129 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN) in dax_iomap_actor()
1133 if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED)) in dax_iomap_actor()
1141 if (iomap->flags & IOMAP_F_NEW) { in dax_iomap_actor()
1151 const sector_t sector = dax_iomap_sector(iomap, pos); in dax_iomap_actor()
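dax_iomap_actor() is the per-extent read/write engine behind dax_iomap_rw(), and the quoted lines are its key decision points. A condensed sketch (the fatal-signal check and some bookkeeping are dropped), assuming the mainline of this era:

static loff_t dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap, struct iomap *srcmap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;
		/* Reads of holes and unwritten extents just produce zeroes. */
		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/* A freshly allocated extent may still be shadowed by zero pages
	 * left in the page cache by earlier hole faults; shoot them down
	 * so the written data is visible through mmap. */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;
		size_t xfer;

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;
		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
					    &kaddr, NULL);
		if (map_len < 0) {
			ret = map_len;
			break;
		}
		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		/* The driver picks the memcpy flavour, e.g. non-temporal
		 * stores for persistent memory. */
		if (iov_iter_rw(iter) == WRITE)
			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
						  map_len, iter);
		else
			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
						map_len, iter);

		pos += xfer;
		length -= xfer;
		done += xfer;
		if (xfer == 0)
			ret = -EFAULT;
		if (xfer < map_len)
			break;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}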
1259 struct vm_area_struct *vma, struct iomap *iomap) in dax_fault_is_synchronous() argument
1262 && (iomap->flags & IOMAP_F_DIRTY); in dax_fault_is_synchronous()
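dax_fault_is_synchronous() condenses the MAP_SYNC contract into one predicate; the complete function, reconstructed around the quoted lines:

/*
 * A fault must be handled synchronously - the pfn is returned to the
 * caller, which flushes filesystem metadata before the PTE is installed -
 * only when all three hold: it is a write, the vma was mmap'ed with
 * MAP_SYNC, and the filesystem reports dirty metadata for this extent.
 */
static bool dax_fault_is_synchronous(unsigned long flags,
		struct vm_area_struct *vma, struct iomap *iomap)
{
	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
		&& (iomap->flags & IOMAP_F_DIRTY);
}

If IOMAP_F_DIRTY is clear, the extent's metadata is already durable, so even a MAP_SYNC write fault can take the fast path.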
1274 struct iomap iomap = { .type = IOMAP_HOLE }; in dax_iomap_pte_fault() local
1275 struct iomap srcmap = { .type = IOMAP_HOLE }; in dax_iomap_pte_fault()
1320 error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap, &srcmap); in dax_iomap_pte_fault()
1327 if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) { in dax_iomap_pte_fault()
1333 sector_t sector = dax_iomap_sector(&iomap, pos); in dax_iomap_pte_fault()
1335 switch (iomap.type) { in dax_iomap_pte_fault()
1341 error = copy_cow_page_dax(iomap.bdev, iomap.dax_dev, in dax_iomap_pte_fault()
1360 sync = dax_fault_is_synchronous(flags, vma, &iomap); in dax_iomap_pte_fault()
1362 switch (iomap.type) { in dax_iomap_pte_fault()
1364 if (iomap.flags & IOMAP_F_NEW) { in dax_iomap_pte_fault()
1369 error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn); in dax_iomap_pte_fault()
1425 ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap); in dax_iomap_pte_fault()
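Taken together, the dax_iomap_pte_fault() lines trace one PTE-sized fault: the locals start as IOMAP_HOLE so an unfilled mapping is detectable, the filesystem fills them via ->iomap_begin(), the result is sanity-checked, and iomap.type is switched on (once for the COW read source via copy_cow_page_dax(), once for the real mapping). A heavily condensed skeleton - xarray entry locking, dax_insert_entry() bookkeeping, the COW and hole arms, and most error paths are elided and flagged in comments - with locals assumed as in mainline:

static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
		int *iomap_errp, const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	struct iomap iomap = { .type = IOMAP_HOLE };
	struct iomap srcmap = { .type = IOMAP_HOLE };
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	unsigned int flags = IOMAP_FAULT | (write ? IOMAP_WRITE : 0);
	vm_fault_t ret = 0, major = 0;
	int error;
	bool sync;
	pfn_t pfn;

	/* ... lock the mapping's xarray entry for vmf->pgoff ... */

	/* Ask the filesystem what backs this page of the file. */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap, &srcmap);
	if (error)
		goto out;
	/* The fs must return an extent covering the faulting page. */
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;
		goto finish_iomap;
	}

	/* ... COW read faults copy the page via copy_cow_page_dax() here ... */

	sync = dax_fault_is_synchronous(flags, vmf->vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			/* Blocks were allocated to serve this fault:
			 * account it as a major fault. */
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
		if (error < 0)
			goto finish_iomap;
		/* ... dax_insert_entry() records the pfn in the xarray ... */
		if (sync) {
			/* MAP_SYNC: hand the pfn back; the PTE goes in only
			 * after the caller has synced metadata. */
			*pfnp = pfn;
			ret = VM_FAULT_NEEDDSYNC | major;
			goto finish_iomap;
		}
		if (write)
			ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
		else
			ret = vmf_insert_mixed(vmf->vma, vmf->address, pfn);
		ret |= major;
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		/* ... zero page for reads; writes never see these types ... */
		break;
	default:
		error = -EIO;
		break;
	}

finish_iomap:
	if (ops->iomap_end)
		/* Mainline passes 0 for copied when nothing was mapped. */
		ops->iomap_end(inode, pos, PAGE_SIZE, PAGE_SIZE, flags, &iomap);
out:
	if (iomap_errp)
		*iomap_errp = error;
	return ret;
}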
1436 struct iomap *iomap, void **entry) in dax_pmd_load_hole() argument
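dax_pmd_load_hole() is the PMD analogue of the PTE hole handler: a read fault over a hole gets the shared huge zero page instead of allocated storage. A condensed sketch (the pgtable pre-deposit for architectures that need it, and the tracepoints, are omitted), assuming the mainline of this era:

static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		struct iomap *iomap, void **entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	struct page *zero_page;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vma->vm_mm);
	if (unlikely(!zero_page))
		return VM_FAULT_FALLBACK;

	/* Record a zero-page PMD entry in the mapping's xarray first. */
	pfn = page_to_pfn_t(zero_page);
	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
				  DAX_PMD | DAX_ZERO_PAGE, false);

	ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (!pmd_none(*vmf->pmd)) {
		/* Lost a race with another fault: fall back to PTEs. */
		spin_unlock(ptl);
		return VM_FAULT_FALLBACK;
	}
	/* Read-only huge mapping of the zero page; a later write fault
	 * will allocate real storage. */
	pmd_entry = pmd_mkhuge(mk_pmd(zero_page, vma->vm_page_prot));
	set_pmd_at(vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	return VM_FAULT_NOPAGE;
}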
1499 struct iomap iomap = { .type = IOMAP_HOLE }; in dax_iomap_pmd_fault() local
1500 struct iomap srcmap = { .type = IOMAP_HOLE }; in dax_iomap_pmd_fault()
1575 error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap, in dax_iomap_pmd_fault()
1580 if (iomap.offset + iomap.length < pos + PMD_SIZE) in dax_iomap_pmd_fault()
1583 sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap); in dax_iomap_pmd_fault()
1585 switch (iomap.type) { in dax_iomap_pmd_fault()
1587 error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn); in dax_iomap_pmd_fault()
1615 result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry); in dax_iomap_pmd_fault()
1635 &iomap); in dax_iomap_pmd_fault()
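The dax_iomap_pmd_fault() lines mirror the PTE path at 2M granularity. The crucial extra check: if the filesystem cannot return one extent covering the entire PMD, the huge fault falls back rather than mapping a partial range. A condensed excerpt of the middle of the function, with locals (inode, vma, xas, entry, pfn, write, sync, error, result) assumed as in mainline:

	/* Ask the filesystem for a mapping covering the whole PMD. */
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap,
				 &srcmap);
	if (error)
		goto unlock_entry;

	/* No single extent spans the full 2M: fall back to PTE faults. */
	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
		if (error < 0)
			goto finish_iomap;
		/* ... dax_insert_entry() + vmf_insert_pfn_pmd(), or hand the
		 * pfn back for the MAP_SYNC case, as in the PTE path ... */
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/* Tell the fs how much of its reservation was consumed. */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
			       &iomap);
	}

In mainline, result is initialized to VM_FAULT_FALLBACK, so every early exit in this path naturally degrades to the PTE handler.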