Lines Matching refs:nd_region (cross-reference listing of struct nd_region uses in the libnvdimm region code; each entry shows the source line number, the matched text, and the enclosing function)

62 static int nd_region_invalidate_memregion(struct nd_region *nd_region)  in nd_region_invalidate_memregion()  argument
66 for (i = 0; i < nd_region->ndr_mappings; i++) { in nd_region_invalidate_memregion()
67 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nd_region_invalidate_memregion()
82 &nd_region->dev, in nd_region_invalidate_memregion()
86 dev_err(&nd_region->dev, in nd_region_invalidate_memregion()
94 for (i = 0; i < nd_region->ndr_mappings; i++) { in nd_region_invalidate_memregion()
95 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nd_region_invalidate_memregion()
104 int nd_region_activate(struct nd_region *nd_region) in nd_region_activate() argument
108 struct device *dev = &nd_region->dev; in nd_region_activate()
111 nvdimm_bus_lock(&nd_region->dev); in nd_region_activate()
112 for (i = 0; i < nd_region->ndr_mappings; i++) { in nd_region_activate()
113 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nd_region_activate()
117 nvdimm_bus_unlock(&nd_region->dev); in nd_region_activate()
128 nvdimm_bus_unlock(&nd_region->dev); in nd_region_activate()
130 rc = nd_region_invalidate_memregion(nd_region); in nd_region_activate()
143 for (i = 0; i < nd_region->ndr_mappings; i++) { in nd_region_activate()
144 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nd_region_activate()
146 int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd); in nd_region_activate()
156 for (i = 0; i < nd_region->ndr_mappings - 1; i++) { in nd_region_activate()
161 for (j = i + 1; j < nd_region->ndr_mappings; j++) in nd_region_activate()
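
The nd_region_invalidate_memregion() and nd_region_activate() references above all use the same per-mapping walk over nd_region->mapping[0..ndr_mappings). A minimal sketch of that idiom, assuming the driver-internal nd.h definitions; walk_region_dimms() is a hypothetical name, not a function in the file:

/* Hypothetical: log each DIMM backing a region, using the walk shown above. */
#include <linux/device.h>
#include <linux/libnvdimm.h>
#include "nd.h"	/* driver-internal: struct nd_region, struct nd_mapping */

static void walk_region_dimms(struct nd_region *nd_region)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		dev_dbg(&nd_region->dev, "mapping%d: %s [%#llx-%#llx]\n", i,
			nvdimm_name(nvdimm), nd_mapping->start,
			nd_mapping->start + nd_mapping->size - 1);
	}
}
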
172 struct nd_region *nd_region = to_nd_region(dev); in nd_region_release() local
175 for (i = 0; i < nd_region->ndr_mappings; i++) { in nd_region_release()
176 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nd_region_release()
181 free_percpu(nd_region->lane); in nd_region_release()
182 if (!test_bit(ND_REGION_CXL, &nd_region->flags)) in nd_region_release()
183 memregion_free(nd_region->id); in nd_region_release()
184 kfree(nd_region); in nd_region_release()
187 struct nd_region *to_nd_region(struct device *dev) in to_nd_region()
189 struct nd_region *nd_region = container_of(dev, struct nd_region, dev); in to_nd_region() local
192 return nd_region; in to_nd_region()
196 struct device *nd_region_dev(struct nd_region *nd_region) in nd_region_dev() argument
198 if (!nd_region) in nd_region_dev()
200 return &nd_region->dev; in nd_region_dev()
204 void *nd_region_provider_data(struct nd_region *nd_region) in nd_region_provider_data() argument
206 return nd_region->provider_data; in nd_region_provider_data()
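
nd_region_dev() and nd_region_provider_data() above are small exported accessors: the first hands back &nd_region->dev, the second the opaque pointer the bus provider supplied at region-create time. A hedged sketch of a provider using them; struct example_provider_data and example_region_notify() are hypothetical:

/* Hypothetical: a bus provider retrieving its private data for a region. */
#include <linux/device.h>
#include <linux/libnvdimm.h>

struct example_provider_data {
	int spa_index;	/* whatever the provider stored at region-create time */
};

static void example_region_notify(struct nd_region *nd_region)
{
	/* Set by the provider through nd_region_desc.provider_data. */
	struct example_provider_data *priv = nd_region_provider_data(nd_region);

	dev_info(nd_region_dev(nd_region),
		 "notification for region backed by SPA %d\n", priv->spa_index);
}
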
218 int nd_region_to_nstype(struct nd_region *nd_region) in nd_region_to_nstype() argument
220 if (is_memory(&nd_region->dev)) { in nd_region_to_nstype()
223 for (i = 0, label = 0; i < nd_region->ndr_mappings; i++) { in nd_region_to_nstype()
224 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nd_region_to_nstype()
240 static unsigned long long region_size(struct nd_region *nd_region) in region_size() argument
242 if (is_memory(&nd_region->dev)) { in region_size()
243 return nd_region->ndr_size; in region_size()
244 } else if (nd_region->ndr_mappings == 1) { in region_size()
245 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; in region_size()
256 struct nd_region *nd_region = to_nd_region(dev); in size_show() local
258 return sprintf(buf, "%llu\n", region_size(nd_region)); in size_show()
265 struct nd_region *nd_region = to_nd_region(dev); in deep_flush_show() local
271 return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region)); in deep_flush_show()
279 struct nd_region *nd_region = to_nd_region(dev); in deep_flush_store() local
285 rc = nvdimm_flush(nd_region, NULL); in deep_flush_store()
296 struct nd_region *nd_region = to_nd_region(dev); in mappings_show() local
298 return sprintf(buf, "%d\n", nd_region->ndr_mappings); in mappings_show()
305 struct nd_region *nd_region = to_nd_region(dev); in nstype_show() local
307 return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region)); in nstype_show()
314 struct nd_region *nd_region = to_nd_region(dev); in set_cookie_show() local
315 struct nd_interleave_set *nd_set = nd_region->nd_set; in set_cookie_show()
332 if (nd_region->ndr_mappings) { in set_cookie_show()
333 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; in set_cookie_show()
341 nd_region_interleave_set_cookie(nd_region, in set_cookie_show()
354 resource_size_t nd_region_available_dpa(struct nd_region *nd_region) in nd_region_available_dpa() argument
359 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); in nd_region_available_dpa()
362 for (i = 0; i < nd_region->ndr_mappings; i++) { in nd_region_available_dpa()
363 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nd_region_available_dpa()
370 available += nd_pmem_available_dpa(nd_region, nd_mapping); in nd_region_available_dpa()
376 resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region) in nd_region_allocatable_dpa() argument
381 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); in nd_region_allocatable_dpa()
382 for (i = 0; i < nd_region->ndr_mappings; i++) { in nd_region_allocatable_dpa()
383 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nd_region_allocatable_dpa()
386 nd_region, nd_mapping)); in nd_region_allocatable_dpa()
388 return avail * nd_region->ndr_mappings; in nd_region_allocatable_dpa()
394 struct nd_region *nd_region = to_nd_region(dev); in available_size_show() local
406 available = nd_region_available_dpa(nd_region); in available_size_show()
417 struct nd_region *nd_region = to_nd_region(dev); in max_available_extent_show() local
423 available = nd_region_allocatable_dpa(nd_region); in max_available_extent_show()
451 struct nd_region *nd_region = to_nd_region(dev); in namespace_seed_show() local
455 if (nd_region->ns_seed) in namespace_seed_show()
456 rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed)); in namespace_seed_show()
467 struct nd_region *nd_region = to_nd_region(dev); in btt_seed_show() local
471 if (nd_region->btt_seed) in btt_seed_show()
472 rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed)); in btt_seed_show()
484 struct nd_region *nd_region = to_nd_region(dev); in pfn_seed_show() local
488 if (nd_region->pfn_seed) in pfn_seed_show()
489 rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed)); in pfn_seed_show()
501 struct nd_region *nd_region = to_nd_region(dev); in dax_seed_show() local
505 if (nd_region->dax_seed) in dax_seed_show()
506 rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed)); in dax_seed_show()
518 struct nd_region *nd_region = to_nd_region(dev); in read_only_show() local
520 return sprintf(buf, "%d\n", nd_region->ro); in read_only_show()
534 struct nd_region *nd_region = to_nd_region(dev); in read_only_store() local
539 nd_region->ro = ro; in read_only_store()
548 struct nd_region *nd_region = to_nd_region(dev); in align_show() local
550 return sprintf(buf, "%#lx\n", nd_region->align); in align_show()
556 struct nd_region *nd_region = to_nd_region(dev); in align_store() local
572 mappings = max_t(u32, 1, nd_region->ndr_mappings); in align_store()
575 || val > region_size(nd_region) || remainder) in align_store()
584 nd_region->align = val; in align_store()
594 struct nd_region *nd_region = to_nd_region(dev); in region_badblocks_show() local
599 rc = badblocks_show(&nd_region->bb, buf, 0); in region_badblocks_show()
611 struct nd_region *nd_region = to_nd_region(dev); in resource_show() local
613 return sprintf(buf, "%#llx\n", nd_region->ndr_start); in resource_show()
620 struct nd_region *nd_region = to_nd_region(dev); in persistence_domain_show() local
622 if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags)) in persistence_domain_show()
624 else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags)) in persistence_domain_show()
655 struct nd_region *nd_region = to_nd_region(dev); in region_visible() local
656 struct nd_interleave_set *nd_set = nd_region->nd_set; in region_visible()
657 int type = nd_region_to_nstype(nd_region); in region_visible()
672 int has_flush = nvdimm_has_flush(nd_region); in region_visible()
683 if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE) in region_visible()
707 struct nd_region *nd_region = to_nd_region(dev); in mappingN() local
711 if (n >= nd_region->ndr_mappings) in mappingN()
713 nd_mapping = &nd_region->mapping[n]; in mappingN()
769 struct nd_region *nd_region = to_nd_region(dev); in mapping_visible() local
771 if (n < nd_region->ndr_mappings) in mapping_visible()
852 u64 nd_region_interleave_set_cookie(struct nd_region *nd_region, in nd_region_interleave_set_cookie() argument
855 struct nd_interleave_set *nd_set = nd_region->nd_set; in nd_region_interleave_set_cookie()
866 u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region) in nd_region_interleave_set_altcookie() argument
868 struct nd_interleave_set *nd_set = nd_region->nd_set; in nd_region_interleave_set_altcookie()
890 void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev) in nd_region_advance_seeds() argument
893 if (nd_region->ns_seed == dev) { in nd_region_advance_seeds()
894 nd_region_create_ns_seed(nd_region); in nd_region_advance_seeds()
898 if (nd_region->btt_seed == dev) in nd_region_advance_seeds()
899 nd_region_create_btt_seed(nd_region); in nd_region_advance_seeds()
900 if (nd_region->ns_seed == &nd_btt->ndns->dev) in nd_region_advance_seeds()
901 nd_region_create_ns_seed(nd_region); in nd_region_advance_seeds()
905 if (nd_region->pfn_seed == dev) in nd_region_advance_seeds()
906 nd_region_create_pfn_seed(nd_region); in nd_region_advance_seeds()
907 if (nd_region->ns_seed == &nd_pfn->ndns->dev) in nd_region_advance_seeds()
908 nd_region_create_ns_seed(nd_region); in nd_region_advance_seeds()
912 if (nd_region->dax_seed == dev) in nd_region_advance_seeds()
913 nd_region_create_dax_seed(nd_region); in nd_region_advance_seeds()
914 if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev) in nd_region_advance_seeds()
915 nd_region_create_ns_seed(nd_region); in nd_region_advance_seeds()
937 unsigned int nd_region_acquire_lane(struct nd_region *nd_region) in nd_region_acquire_lane() argument
942 if (nd_region->num_lanes < nr_cpu_ids) { in nd_region_acquire_lane()
945 lane = cpu % nd_region->num_lanes; in nd_region_acquire_lane()
946 ndl_count = per_cpu_ptr(nd_region->lane, cpu); in nd_region_acquire_lane()
947 ndl_lock = per_cpu_ptr(nd_region->lane, lane); in nd_region_acquire_lane()
957 void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane) in nd_region_release_lane() argument
959 if (nd_region->num_lanes < nr_cpu_ids) { in nd_region_release_lane()
963 ndl_count = per_cpu_ptr(nd_region->lane, cpu); in nd_region_release_lane()
964 ndl_lock = per_cpu_ptr(nd_region->lane, lane); in nd_region_release_lane()
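
nd_region_acquire_lane() and nd_region_release_lane() above implement the per-CPU lane scheme: when a region has fewer lanes than CPUs, the lane index is cpu % num_lanes and a per-lane lock serializes the CPUs that share it. A usage sketch in the style of a BTT-like block driver; do_lane_io() and example_rw_sector() are hypothetical:

#include <linux/libnvdimm.h>
#include <linux/types.h>

/* Hypothetical stand-in for the real per-lane work (e.g. BTT block I/O). */
static int do_lane_io(struct nd_region *nd_region, unsigned int lane,
		sector_t sector)
{
	return 0;
}

static int example_rw_sector(struct nd_region *nd_region, sector_t sector)
{
	/* May take a per-lane spinlock when lanes are shared between CPUs. */
	unsigned int lane = nd_region_acquire_lane(nd_region);
	int rc = do_lane_io(nd_region, lane, sector);

	nd_region_release_lane(nd_region, lane);
	return rc;
}
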
979 static unsigned long default_align(struct nd_region *nd_region) in default_align() argument
986 if (nd_region->ndr_size < MEMREMAP_COMPAT_ALIGN_MAX) in default_align()
989 mappings = max_t(u16, 1, nd_region->ndr_mappings); in default_align()
999 static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, in nd_region_create()
1003 struct nd_region *nd_region; in nd_region_create() local
1024 nd_region = in nd_region_create()
1025 kzalloc(struct_size(nd_region, mapping, ndr_desc->num_mappings), in nd_region_create()
1028 if (!nd_region) in nd_region_create()
1032 nd_region->id = ndr_desc->memregion; in nd_region_create()
1034 nd_region->id = memregion_alloc(GFP_KERNEL); in nd_region_create()
1035 if (nd_region->id < 0) in nd_region_create()
1039 nd_region->lane = alloc_percpu(struct nd_percpu_lane); in nd_region_create()
1040 if (!nd_region->lane) in nd_region_create()
1046 ndl = per_cpu_ptr(nd_region->lane, i); in nd_region_create()
1055 nd_region->mapping[i].nvdimm = nvdimm; in nd_region_create()
1056 nd_region->mapping[i].start = mapping->start; in nd_region_create()
1057 nd_region->mapping[i].size = mapping->size; in nd_region_create()
1058 nd_region->mapping[i].position = mapping->position; in nd_region_create()
1059 INIT_LIST_HEAD(&nd_region->mapping[i].labels); in nd_region_create()
1060 mutex_init(&nd_region->mapping[i].lock); in nd_region_create()
1064 nd_region->ndr_mappings = ndr_desc->num_mappings; in nd_region_create()
1065 nd_region->provider_data = ndr_desc->provider_data; in nd_region_create()
1066 nd_region->nd_set = ndr_desc->nd_set; in nd_region_create()
1067 nd_region->num_lanes = ndr_desc->num_lanes; in nd_region_create()
1068 nd_region->flags = ndr_desc->flags; in nd_region_create()
1069 nd_region->ro = ro; in nd_region_create()
1070 nd_region->numa_node = ndr_desc->numa_node; in nd_region_create()
1071 nd_region->target_node = ndr_desc->target_node; in nd_region_create()
1072 ida_init(&nd_region->ns_ida); in nd_region_create()
1073 ida_init(&nd_region->btt_ida); in nd_region_create()
1074 ida_init(&nd_region->pfn_ida); in nd_region_create()
1075 ida_init(&nd_region->dax_ida); in nd_region_create()
1076 dev = &nd_region->dev; in nd_region_create()
1077 dev_set_name(dev, "region%d", nd_region->id); in nd_region_create()
1082 nd_region->ndr_size = resource_size(ndr_desc->res); in nd_region_create()
1083 nd_region->ndr_start = ndr_desc->res->start; in nd_region_create()
1084 nd_region->align = default_align(nd_region); in nd_region_create()
1086 nd_region->flush = ndr_desc->flush; in nd_region_create()
1088 nd_region->flush = NULL; in nd_region_create()
1094 return nd_region; in nd_region_create()
1098 memregion_free(nd_region->id); in nd_region_create()
1100 kfree(nd_region); in nd_region_create()
1104 struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus, in nvdimm_pmem_region_create()
1113 struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus, in nvdimm_volatile_region_create()
1122 void nvdimm_region_delete(struct nd_region *nd_region) in nvdimm_region_delete() argument
1124 if (nd_region) in nvdimm_region_delete()
1125 nd_device_unregister(&nd_region->dev, ND_SYNC); in nvdimm_region_delete()
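
nd_region_create() above is the common backend behind the exported constructors nvdimm_pmem_region_create() and nvdimm_volatile_region_create(): a bus provider fills a struct nd_region_desc (resource, mappings, provider data) and gets a registered region back, and later tears it down with nvdimm_region_delete(). A sketch under the assumption of an already-registered nvdimm_bus and nvdimm; example_register_region() is a hypothetical name:

#include <linux/ioport.h>
#include <linux/libnvdimm.h>
#include <linux/numa.h>

/* Hypothetical: register a single-DIMM pmem region on an existing bus. */
static struct nd_region *example_register_region(struct nvdimm_bus *bus,
		struct nvdimm *nvdimm, struct resource *res)
{
	struct nd_mapping_desc mapping = {
		.nvdimm = nvdimm,
		.start = res->start,
		.size = resource_size(res),
		.position = 0,
	};
	struct nd_region_desc ndr_desc = {
		.res = res,
		.mapping = &mapping,
		.num_mappings = 1,
		.numa_node = NUMA_NO_NODE,
	};

	return nvdimm_pmem_region_create(bus, &ndr_desc);	/* NULL on failure */
}

Teardown goes through nvdimm_region_delete(), which synchronously unregisters &nd_region->dev as the references above show.
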
1129 int nvdimm_flush(struct nd_region *nd_region, struct bio *bio) in nvdimm_flush() argument
1133 if (!nd_region->flush) in nvdimm_flush()
1134 rc = generic_nvdimm_flush(nd_region); in nvdimm_flush()
1136 if (nd_region->flush(nd_region, bio)) in nvdimm_flush()
1146 int generic_nvdimm_flush(struct nd_region *nd_region) in generic_nvdimm_flush() argument
1148 struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev); in generic_nvdimm_flush()
1166 for (i = 0; i < nd_region->ndr_mappings; i++) in generic_nvdimm_flush()
1183 int nvdimm_has_flush(struct nd_region *nd_region) in nvdimm_has_flush() argument
1188 if (nd_region->ndr_mappings == 0 in nvdimm_has_flush()
1193 if (test_bit(ND_REGION_ASYNC, &nd_region->flags) && nd_region->flush) in nvdimm_has_flush()
1197 for (i = 0; i < nd_region->ndr_mappings; i++) { in nvdimm_has_flush()
1198 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nvdimm_has_flush()
1214 int nvdimm_has_cache(struct nd_region *nd_region) in nvdimm_has_cache() argument
1216 return is_nd_pmem(&nd_region->dev) && in nvdimm_has_cache()
1217 !test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags); in nvdimm_has_cache()
1221 bool is_nvdimm_sync(struct nd_region *nd_region) in is_nvdimm_sync() argument
1223 if (is_nd_volatile(&nd_region->dev)) in is_nvdimm_sync()
1226 return is_nd_pmem(&nd_region->dev) && in is_nvdimm_sync()
1227 !test_bit(ND_REGION_ASYNC, &nd_region->flags); in is_nvdimm_sync()
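
nvdimm_flush(), generic_nvdimm_flush(), nvdimm_has_flush(), nvdimm_has_cache() and is_nvdimm_sync() above are the region-level persistence helpers a pmem-style consumer queries. A hedged sketch of typical flush handling; example_handle_flush() is hypothetical:

#include <linux/bio.h>
#include <linux/device.h>
#include <linux/libnvdimm.h>

/* Hypothetical: flush handling in a pmem-style region driver. */
static int example_handle_flush(struct nd_region *nd_region, struct bio *bio)
{
	/* A negative return means flush capability could not be determined. */
	if (nvdimm_has_flush(nd_region) < 0)
		dev_warn(nd_region_dev(nd_region),
			 "unable to guarantee persistence of writes\n");

	/*
	 * Dispatches to the provider's ->flush() callback when one is set,
	 * otherwise falls back to generic_nvdimm_flush().
	 */
	return nvdimm_flush(nd_region, bio);
}
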
1232 struct nd_region *nd_region; member
1238 struct nd_region *nd_region; in region_conflict() local
1245 nd_region = to_nd_region(dev); in region_conflict()
1246 if (nd_region == ctx->nd_region) in region_conflict()
1250 region_start = nd_region->ndr_start; in region_conflict()
1251 region_end = region_start + nd_region->ndr_size; in region_conflict()
1259 int nd_region_conflict(struct nd_region *nd_region, resource_size_t start, in nd_region_conflict() argument
1262 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); in nd_region_conflict()
1264 .nd_region = nd_region, in nd_region_conflict()
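
Finally, nd_region_conflict() walks the other devices on the parent nvdimm_bus and reports whether a candidate [start, start + size) range overlaps a region other than the one passed in. A purely illustrative call; example_claim_range() is hypothetical and the header location is assumed:

#include <linux/libnvdimm.h>
#include <linux/types.h>
#include "nd.h"	/* assumed driver-internal header declaring nd_region_conflict() */

/* Hypothetical: reject a candidate range that overlaps another region. */
static int example_claim_range(struct nd_region *nd_region,
		resource_size_t start, resource_size_t size)
{
	int rc = nd_region_conflict(nd_region, start, size);

	if (rc)		/* non-zero (typically -EBUSY) on overlap */
		return rc;

	/* ... range is free of other regions on this bus ... */
	return 0;
}
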