Lines matching refs: nd_region

62 int nd_region_activate(struct nd_region *nd_region)  in nd_region_activate()  argument
66 struct device *dev = &nd_region->dev; in nd_region_activate()
69 nvdimm_bus_lock(&nd_region->dev); in nd_region_activate()
70 for (i = 0; i < nd_region->ndr_mappings; i++) { in nd_region_activate()
71 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nd_region_activate()
75 nvdimm_bus_unlock(&nd_region->dev); in nd_region_activate()
86 nvdimm_bus_unlock(&nd_region->dev); in nd_region_activate()
97 for (i = 0; i < nd_region->ndr_mappings; i++) { in nd_region_activate()
98 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nd_region_activate()
100 int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd); in nd_region_activate()
110 for (i = 0; i < nd_region->ndr_mappings - 1; i++) { in nd_region_activate()
115 for (j = i + 1; j < nd_region->ndr_mappings; j++) in nd_region_activate()
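The nested loop at source lines 110-115 is a dedup pass: DIMMs that share a flush-hint page must map it only once so a deep flush does not write the same hint register twice. A hedged reconstruction, assuming the ndrd_get_flush_wpq()/ndrd_set_flush_wpq() accessors this file defines:

	/* clear out duplicate flush-hint pages across mappings */
	for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
		/* skip mappings that mapped no flush page at all */
		if (!ndrd_get_flush_wpq(ndrd, i, 0))
			continue;
		for (j = i + 1; j < nd_region->ndr_mappings; j++)
			if (ndrd_get_flush_wpq(ndrd, i, 0) ==
			    ndrd_get_flush_wpq(ndrd, j, 0))
				ndrd_set_flush_wpq(ndrd, j, 0, NULL);
	}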
126 struct nd_region *nd_region = to_nd_region(dev); in nd_region_release() local
129 for (i = 0; i < nd_region->ndr_mappings; i++) { in nd_region_release()
130 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nd_region_release()
135 free_percpu(nd_region->lane); in nd_region_release()
136 memregion_free(nd_region->id); in nd_region_release()
140 kfree(nd_region); in nd_region_release()
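A hedged reconstruction of the release path: drop the reference each mapping holds on its DIMM, free the per-cpu lanes and the region id, then the region allocation itself (a blk region would free the enclosing struct nd_blk_region instead):

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		/* paired with the reference taken at region create */
		put_device(&nd_mapping->nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	memregion_free(nd_region->id);
	kfree(nd_region);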
143 struct nd_region *to_nd_region(struct device *dev) in to_nd_region()
145 struct nd_region *nd_region = container_of(dev, struct nd_region, dev); in to_nd_region() local
148 return nd_region; in to_nd_region()
152 struct device *nd_region_dev(struct nd_region *nd_region) in nd_region_dev() argument
154 if (!nd_region) in nd_region_dev()
156 return &nd_region->dev; in nd_region_dev()
162 struct nd_region *nd_region = to_nd_region(dev); in to_nd_blk_region() local
165 return container_of(nd_region, struct nd_blk_region, nd_region); in to_nd_blk_region()
169 void *nd_region_provider_data(struct nd_region *nd_region) in nd_region_provider_data() argument
171 return nd_region->provider_data; in nd_region_provider_data()
195 int nd_region_to_nstype(struct nd_region *nd_region) in nd_region_to_nstype() argument
197 if (is_memory(&nd_region->dev)) { in nd_region_to_nstype()
200 for (i = 0, label = 0; i < nd_region->ndr_mappings; i++) { in nd_region_to_nstype()
201 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nd_region_to_nstype()
211 } else if (is_nd_blk(&nd_region->dev)) { in nd_region_to_nstype()
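In summary: a memory (pmem or volatile) region with labeled DIMMs yields a PMEM namespace type, an unlabeled one falls back to a raw IO type, and a blk region yields a BLK type. A minimal sketch using the label count gathered at source lines 200-201; the ND_DEVICE_NAMESPACE_* constants come from the ndctl UAPI:

	if (is_memory(&nd_region->dev)) {
		/* 'label' counts mappings whose DIMM supports labels */
		if (label)
			return ND_DEVICE_NAMESPACE_PMEM;
		return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev))
		return ND_DEVICE_NAMESPACE_BLK;
	return 0;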
219 static unsigned long long region_size(struct nd_region *nd_region) in region_size() argument
221 if (is_memory(&nd_region->dev)) { in region_size()
222 return nd_region->ndr_size; in region_size()
223 } else if (nd_region->ndr_mappings == 1) { in region_size()
224 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; in region_size()
235 struct nd_region *nd_region = to_nd_region(dev); in size_show() local
237 return sprintf(buf, "%llu\n", region_size(nd_region)); in size_show()
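Filled out, this is the standard driver-core read-only attribute pattern used by every *_show() in the listing; only the DEVICE_ATTR_RO() wiring is added here:

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%llu\n", region_size(nd_region));
}
static DEVICE_ATTR_RO(size);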
244 struct nd_region *nd_region = to_nd_region(dev); in deep_flush_show() local
250 return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region)); in deep_flush_show()
258 struct nd_region *nd_region = to_nd_region(dev); in deep_flush_store() local
264 rc = nvdimm_flush(nd_region, NULL); in deep_flush_store()
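A hedged reconstruction of the store side: writing 1 to the attribute forces a synchronous nvdimm_flush(); anything else is rejected:

static ssize_t deep_flush_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool flush;
	int rc = strtobool(buf, &flush);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;
	if (!flush)
		return -EINVAL;
	rc = nvdimm_flush(nd_region, NULL);
	if (rc)
		return rc;
	return len;
}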
275 struct nd_region *nd_region = to_nd_region(dev); in mappings_show() local
277 return sprintf(buf, "%d\n", nd_region->ndr_mappings); in mappings_show()
284 struct nd_region *nd_region = to_nd_region(dev); in nstype_show() local
286 return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region)); in nstype_show()
293 struct nd_region *nd_region = to_nd_region(dev); in set_cookie_show() local
294 struct nd_interleave_set *nd_set = nd_region->nd_set; in set_cookie_show()
311 if (nd_region->ndr_mappings) { in set_cookie_show()
312 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; in set_cookie_show()
320 nd_region_interleave_set_cookie(nd_region, in set_cookie_show()
333 resource_size_t nd_region_available_dpa(struct nd_region *nd_region) in nd_region_available_dpa() argument
338 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); in nd_region_available_dpa()
343 for (i = 0; i < nd_region->ndr_mappings; i++) { in nd_region_available_dpa()
344 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nd_region_available_dpa()
351 if (is_memory(&nd_region->dev)) { in nd_region_available_dpa()
352 available += nd_pmem_available_dpa(nd_region, in nd_region_available_dpa()
358 } else if (is_nd_blk(&nd_region->dev)) in nd_region_available_dpa()
359 available += nd_blk_available_dpa(nd_region); in nd_region_available_dpa()
365 resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region) in nd_region_allocatable_dpa() argument
370 if (is_memory(&nd_region->dev)) in nd_region_allocatable_dpa()
373 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); in nd_region_allocatable_dpa()
374 for (i = 0; i < nd_region->ndr_mappings; i++) { in nd_region_allocatable_dpa()
375 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nd_region_allocatable_dpa()
377 if (is_memory(&nd_region->dev)) in nd_region_allocatable_dpa()
379 nd_pmem_max_contiguous_dpa(nd_region, in nd_region_allocatable_dpa()
381 else if (is_nd_blk(&nd_region->dev)) in nd_region_allocatable_dpa()
382 available += nd_blk_available_dpa(nd_region); in nd_region_allocatable_dpa()
384 if (is_memory(&nd_region->dev)) in nd_region_allocatable_dpa()
385 return available * nd_region->ndr_mappings; in nd_region_allocatable_dpa()
392 struct nd_region *nd_region = to_nd_region(dev); in available_size_show() local
404 available = nd_region_available_dpa(nd_region); in available_size_show()
415 struct nd_region *nd_region = to_nd_region(dev); in max_available_extent_show() local
421 available = nd_region_allocatable_dpa(nd_region); in max_available_extent_show()
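Both helpers assert that the caller holds the bus lock (the WARN_ON at source line 338), so the sysfs readers above bracket the call; the shape, slightly simplified:

	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);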
449 struct nd_region *nd_region = to_nd_region(dev); in namespace_seed_show() local
453 if (nd_region->ns_seed) in namespace_seed_show()
454 rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed)); in namespace_seed_show()
465 struct nd_region *nd_region = to_nd_region(dev); in btt_seed_show() local
469 if (nd_region->btt_seed) in btt_seed_show()
470 rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed)); in btt_seed_show()
482 struct nd_region *nd_region = to_nd_region(dev); in pfn_seed_show() local
486 if (nd_region->pfn_seed) in pfn_seed_show()
487 rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed)); in pfn_seed_show()
499 struct nd_region *nd_region = to_nd_region(dev); in dax_seed_show() local
503 if (nd_region->dax_seed) in dax_seed_show()
504 rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed)); in dax_seed_show()
516 struct nd_region *nd_region = to_nd_region(dev); in read_only_show() local
518 return sprintf(buf, "%d\n", nd_region->ro); in read_only_show()
526 struct nd_region *nd_region = to_nd_region(dev); in read_only_store() local
531 nd_region->ro = ro; in read_only_store()
539 struct nd_region *nd_region = to_nd_region(dev); in align_show() local
541 return sprintf(buf, "%#lx\n", nd_region->align); in align_show()
547 struct nd_region *nd_region = to_nd_region(dev); in align_store() local
556 if (!nd_region->ndr_mappings) in align_store()
566 dpa = div_u64_rem(val, nd_region->ndr_mappings, &remainder); in align_store()
568 || val > region_size(nd_region) || remainder) in align_store()
577 nd_region->align = val; in align_store()
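Source lines 566-568 carry the validation: the new alignment must divide evenly across the interleave, and each DIMM's share must be a power of two no smaller than PAGE_SIZE and no larger than the region. Pieced together:

	dpa = div_u64_rem(val, nd_region->ndr_mappings, &remainder);
	if (!is_power_of_2(dpa) || dpa < PAGE_SIZE
			|| val > region_size(nd_region) || remainder)
		return -EINVAL;
	nd_region->align = val;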
587 struct nd_region *nd_region = to_nd_region(dev); in region_badblocks_show() local
592 rc = badblocks_show(&nd_region->bb, buf, 0); in region_badblocks_show()
604 struct nd_region *nd_region = to_nd_region(dev); in resource_show() local
606 return sprintf(buf, "%#llx\n", nd_region->ndr_start); in resource_show()
613 struct nd_region *nd_region = to_nd_region(dev); in persistence_domain_show() local
615 if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags)) in persistence_domain_show()
617 else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags)) in persistence_domain_show()
648 struct nd_region *nd_region = to_nd_region(dev); in region_visible() local
649 struct nd_interleave_set *nd_set = nd_region->nd_set; in region_visible()
650 int type = nd_region_to_nstype(nd_region); in region_visible()
665 int has_flush = nvdimm_has_flush(nd_region); in region_visible()
676 if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE) in region_visible()
701 struct nd_region *nd_region = to_nd_region(dev); in mappingN() local
705 if (n >= nd_region->ndr_mappings) in mappingN()
707 nd_mapping = &nd_region->mapping[n]; in mappingN()
763 struct nd_region *nd_region = to_nd_region(dev); in mapping_visible() local
765 if (n < nd_region->ndr_mappings) in mapping_visible()
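A hedged reconstruction of mappingN(): each mappingN attribute reports the backing DIMM and that slice's start, size, and interleave position:

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu,%d\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size,
			nd_mapping->position);
}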
857 u64 nd_region_interleave_set_cookie(struct nd_region *nd_region, in nd_region_interleave_set_cookie() argument
860 struct nd_interleave_set *nd_set = nd_region->nd_set; in nd_region_interleave_set_cookie()
871 u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region) in nd_region_interleave_set_altcookie() argument
873 struct nd_interleave_set *nd_set = nd_region->nd_set; in nd_region_interleave_set_altcookie()
895 void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev) in nd_region_advance_seeds() argument
898 if (nd_region->ns_seed == dev) { in nd_region_advance_seeds()
899 nd_region_create_ns_seed(nd_region); in nd_region_advance_seeds()
903 if (nd_region->btt_seed == dev) in nd_region_advance_seeds()
904 nd_region_create_btt_seed(nd_region); in nd_region_advance_seeds()
905 if (nd_region->ns_seed == &nd_btt->ndns->dev) in nd_region_advance_seeds()
906 nd_region_create_ns_seed(nd_region); in nd_region_advance_seeds()
910 if (nd_region->pfn_seed == dev) in nd_region_advance_seeds()
911 nd_region_create_pfn_seed(nd_region); in nd_region_advance_seeds()
912 if (nd_region->ns_seed == &nd_pfn->ndns->dev) in nd_region_advance_seeds()
913 nd_region_create_ns_seed(nd_region); in nd_region_advance_seeds()
917 if (nd_region->dax_seed == dev) in nd_region_advance_seeds()
918 nd_region_create_dax_seed(nd_region); in nd_region_advance_seeds()
919 if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev) in nd_region_advance_seeds()
920 nd_region_create_ns_seed(nd_region); in nd_region_advance_seeds()
925 int nd_blk_region_init(struct nd_region *nd_region) in nd_blk_region_init() argument
927 struct device *dev = &nd_region->dev; in nd_blk_region_init()
933 if (nd_region->ndr_mappings < 1) { in nd_blk_region_init()
958 unsigned int nd_region_acquire_lane(struct nd_region *nd_region) in nd_region_acquire_lane() argument
963 if (nd_region->num_lanes < nr_cpu_ids) { in nd_region_acquire_lane()
966 lane = cpu % nd_region->num_lanes; in nd_region_acquire_lane()
967 ndl_count = per_cpu_ptr(nd_region->lane, cpu); in nd_region_acquire_lane()
968 ndl_lock = per_cpu_ptr(nd_region->lane, lane); in nd_region_acquire_lane()
978 void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane) in nd_region_release_lane() argument
980 if (nd_region->num_lanes < nr_cpu_ids) { in nd_region_release_lane()
984 ndl_count = per_cpu_ptr(nd_region->lane, cpu); in nd_region_release_lane()
985 ndl_lock = per_cpu_ptr(nd_region->lane, lane); in nd_region_release_lane()
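Consumers (the BTT driver is the canonical one) bracket each I/O with an acquire/release pair; when num_lanes >= nr_cpu_ids every CPU owns a lane outright and the per-lane spinlock is skipped:

	unsigned int lane;

	lane = nd_region_acquire_lane(nd_region);
	/* ... perform the I/O using this lane's resources ... */
	nd_region_release_lane(nd_region, lane);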
1000 static unsigned long default_align(struct nd_region *nd_region) in default_align() argument
1006 if (is_nd_blk(&nd_region->dev)) in default_align()
1011 for (i = 0; i < nd_region->ndr_mappings; i++) { in default_align()
1012 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in default_align()
1021 if (nd_region->ndr_size < MEMREMAP_COMPAT_ALIGN_MAX) in default_align()
1024 mappings = max_t(u16, 1, nd_region->ndr_mappings); in default_align()
1032 static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, in nd_region_create()
1036 struct nd_region *nd_region; in nd_region_create() local
1073 nd_region = &ndbr->nd_region; in nd_region_create()
1079 nd_region = kzalloc(struct_size(nd_region, mapping, in nd_region_create()
1082 region_buf = nd_region; in nd_region_create()
1087 nd_region->id = memregion_alloc(GFP_KERNEL); in nd_region_create()
1088 if (nd_region->id < 0) in nd_region_create()
1091 nd_region->lane = alloc_percpu(struct nd_percpu_lane); in nd_region_create()
1092 if (!nd_region->lane) in nd_region_create()
1098 ndl = per_cpu_ptr(nd_region->lane, i); in nd_region_create()
1107 nd_region->mapping[i].nvdimm = nvdimm; in nd_region_create()
1108 nd_region->mapping[i].start = mapping->start; in nd_region_create()
1109 nd_region->mapping[i].size = mapping->size; in nd_region_create()
1110 nd_region->mapping[i].position = mapping->position; in nd_region_create()
1111 INIT_LIST_HEAD(&nd_region->mapping[i].labels); in nd_region_create()
1112 mutex_init(&nd_region->mapping[i].lock); in nd_region_create()
1116 nd_region->ndr_mappings = ndr_desc->num_mappings; in nd_region_create()
1117 nd_region->provider_data = ndr_desc->provider_data; in nd_region_create()
1118 nd_region->nd_set = ndr_desc->nd_set; in nd_region_create()
1119 nd_region->num_lanes = ndr_desc->num_lanes; in nd_region_create()
1120 nd_region->flags = ndr_desc->flags; in nd_region_create()
1121 nd_region->ro = ro; in nd_region_create()
1122 nd_region->numa_node = ndr_desc->numa_node; in nd_region_create()
1123 nd_region->target_node = ndr_desc->target_node; in nd_region_create()
1124 ida_init(&nd_region->ns_ida); in nd_region_create()
1125 ida_init(&nd_region->btt_ida); in nd_region_create()
1126 ida_init(&nd_region->pfn_ida); in nd_region_create()
1127 ida_init(&nd_region->dax_ida); in nd_region_create()
1128 dev = &nd_region->dev; in nd_region_create()
1129 dev_set_name(dev, "region%d", nd_region->id); in nd_region_create()
1134 nd_region->ndr_size = resource_size(ndr_desc->res); in nd_region_create()
1135 nd_region->ndr_start = ndr_desc->res->start; in nd_region_create()
1136 nd_region->align = default_align(nd_region); in nd_region_create()
1138 nd_region->flush = ndr_desc->flush; in nd_region_create()
1140 nd_region->flush = NULL; in nd_region_create()
1144 return nd_region; in nd_region_create()
1147 memregion_free(nd_region->id); in nd_region_create()
1153 struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus, in nvdimm_pmem_region_create()
1162 struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus, in nvdimm_blk_region_create()
1173 struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus, in nvdimm_volatile_region_create()
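From the bus-provider side (ACPI NFIT, papr_scm, e820), registration fills a struct nd_region_desc and calls one of the three creators above. A minimal sketch for a single-DIMM pmem region; res, nvdimm, spa_start, and spa_size are placeholders for provider data:

	struct nd_mapping_desc mapping = {
		.nvdimm = nvdimm,
		.start = spa_start,	/* DIMM-relative offset */
		.size = spa_size,	/* span this DIMM contributes */
		.position = 0,
	};
	struct nd_region_desc ndr_desc = {
		.res = &res,		/* system-physical address range */
		.mapping = &mapping,
		.num_mappings = 1,
		.numa_node = NUMA_NO_NODE,
	};
	struct nd_region *nd_region;

	nd_region = nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc);
	if (!nd_region)
		return -ENXIO;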
1182 int nvdimm_flush(struct nd_region *nd_region, struct bio *bio) in nvdimm_flush() argument
1186 if (!nd_region->flush) in nvdimm_flush()
1187 rc = generic_nvdimm_flush(nd_region); in nvdimm_flush()
1189 if (nd_region->flush(nd_region, bio)) in nvdimm_flush()
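On the consumer side the pmem block driver invokes this for flush requests, falling back to the generic path when the provider registered no custom callback; roughly:

	if (bio->bi_opf & REQ_PREFLUSH)
		ret = nvdimm_flush(nd_region, bio);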
1199 int generic_nvdimm_flush(struct nd_region *nd_region) in generic_nvdimm_flush() argument
1201 struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev); in generic_nvdimm_flush()
1219 for (i = 0; i < nd_region->ndr_mappings; i++) in generic_nvdimm_flush()
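A hedged reconstruction of the generic path: fence, write 1 into one flush-hint (write-pending-queue) register per DIMM, then fence again so the hint writes are ordered. idx stands in for the per-cpu rotation the real code uses to spread load across hint registers:

	wmb();	/* make prior pmem writes visible to the platform */
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd_get_flush_wpq(ndrd, i, 0))
			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
	wmb();	/* order the flush-hint writes themselves */
	return 0;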
1236 int nvdimm_has_flush(struct nd_region *nd_region) in nvdimm_has_flush() argument
1241 if (nd_region->ndr_mappings == 0 in nvdimm_has_flush()
1246 if (test_bit(ND_REGION_ASYNC, &nd_region->flags) && nd_region->flush) in nvdimm_has_flush()
1250 for (i = 0; i < nd_region->ndr_mappings; i++) { in nvdimm_has_flush()
1251 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nvdimm_has_flush()
1267 int nvdimm_has_cache(struct nd_region *nd_region) in nvdimm_has_cache() argument
1269 return is_nd_pmem(&nd_region->dev) && in nvdimm_has_cache()
1270 !test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags); in nvdimm_has_cache()
1274 bool is_nvdimm_sync(struct nd_region *nd_region) in is_nvdimm_sync() argument
1276 if (is_nd_volatile(&nd_region->dev)) in is_nvdimm_sync()
1279 return is_nd_pmem(&nd_region->dev) && in is_nvdimm_sync()
1280 !test_bit(ND_REGION_ASYNC, &nd_region->flags); in is_nvdimm_sync()
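Both predicates feed the pmem driver's DAX setup: only a synchronous region may advertise DAXDEV_F_SYNC, and a region whose persistence domain already covers the CPU cache needs no write-back cache flushing. As used in drivers/nvdimm/pmem.c (sketch):

	if (is_nvdimm_sync(nd_region))
		flags = DAXDEV_F_SYNC;
	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops, flags);
	dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));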
1285 struct nd_region *nd_region; member
1291 struct nd_region *nd_region; in region_conflict() local
1298 nd_region = to_nd_region(dev); in region_conflict()
1299 if (nd_region == ctx->nd_region) in region_conflict()
1303 region_start = nd_region->ndr_start; in region_conflict()
1304 region_end = region_start + nd_region->ndr_size; in region_conflict()
1312 int nd_region_conflict(struct nd_region *nd_region, resource_size_t start, in nd_region_conflict() argument
1315 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); in nd_region_conflict()
1317 .nd_region = nd_region, in nd_region_conflict()
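The walk completes by visiting every region on the bus and letting region_conflict() compare [start, start + size) against each one; a hedged sketch of the tail, assuming the context struct is named conflict_context:

	struct conflict_context ctx = {
		.nd_region = nd_region,
		.start = start,
		.size = size,
	};

	return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);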