// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/scatterlist.h>
#include <linux/memregion.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static DEFINE_PER_CPU(int, flush_idx);

static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
                struct nd_region_data *ndrd)
{
        int i, j;

        dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
                        nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
        for (i = 0; i < (1 << ndrd->hints_shift); i++) {
                struct resource *res = &nvdimm->flush_wpq[i];
                unsigned long pfn = PHYS_PFN(res->start);
                void __iomem *flush_page;

                /* check if flush hints share a page */
                for (j = 0; j < i; j++) {
                        struct resource *res_j = &nvdimm->flush_wpq[j];
                        unsigned long pfn_j = PHYS_PFN(res_j->start);

                        if (pfn == pfn_j)
                                break;
                }

                if (j < i)
                        flush_page = (void __iomem *) ((unsigned long)
                                        ndrd_get_flush_wpq(ndrd, dimm, j)
                                        & PAGE_MASK);
                else
                        flush_page = devm_nvdimm_ioremap(dev,
                                        PFN_PHYS(pfn), PAGE_SIZE);
                if (!flush_page)
                        return -ENXIO;
                ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
                                + (res->start & ~PAGE_MASK));
        }

        return 0;
}

int nd_region_activate(struct nd_region *nd_region)
{
        int i, j, num_flush = 0;
        struct nd_region_data *ndrd;
        struct device *dev = &nd_region->dev;
        size_t flush_data_size = sizeof(void *);

        nvdimm_bus_lock(&nd_region->dev);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
                        nvdimm_bus_unlock(&nd_region->dev);
                        return -EBUSY;
                }

                /* at least one null hint slot per-dimm for the "no-hint" case */
                flush_data_size += sizeof(void *);
                num_flush = min_not_zero(num_flush, nvdimm->num_flush);
                if (!nvdimm->num_flush)
                        continue;
                flush_data_size += nvdimm->num_flush * sizeof(void *);
        }
        nvdimm_bus_unlock(&nd_region->dev);

        ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
        if (!ndrd)
                return -ENOMEM;
        dev_set_drvdata(dev, ndrd);

        if (!num_flush)
                return 0;

        ndrd->hints_shift = ilog2(num_flush);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;
                int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

                if (rc)
                        return rc;
        }

        /*
         * Clear out entries that are duplicates. This prevents issuing
         * redundant flushes to the same flush hint address.
         */
        for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
                /* ignore if NULL already */
                if (!ndrd_get_flush_wpq(ndrd, i, 0))
                        continue;

                for (j = i + 1; j < nd_region->ndr_mappings; j++)
                        if (ndrd_get_flush_wpq(ndrd, i, 0) ==
                            ndrd_get_flush_wpq(ndrd, j, 0))
                                ndrd_set_flush_wpq(ndrd, j, 0, NULL);
        }

        return 0;
}

static void nd_region_release(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev);
        u16 i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                put_device(&nvdimm->dev);
        }
        free_percpu(nd_region->lane);
        memregion_free(nd_region->id);
        if (is_nd_blk(dev))
                kfree(to_nd_blk_region(dev));
        else
                kfree(nd_region);
}

struct nd_region *to_nd_region(struct device *dev)
{
        struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

        WARN_ON(dev->type->release != nd_region_release);
        return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct device *nd_region_dev(struct nd_region *nd_region)
{
        if (!nd_region)
                return NULL;
        return &nd_region->dev;
}
EXPORT_SYMBOL_GPL(nd_region_dev);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev);

        WARN_ON(!is_nd_blk(dev));
        return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
        return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
        return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
        ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well as an input to the
 * MODALIAS for namespace devices, and the bit number for an nvdimm_bus to
 * match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
        if (is_memory(&nd_region->dev)) {
                u16 i, label;

                for (i = 0, label = 0; i < nd_region->ndr_mappings; i++) {
                        struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                        struct nvdimm *nvdimm = nd_mapping->nvdimm;

                        if (test_bit(NDD_LABELING, &nvdimm->flags))
                                label++;
                }
                if (label)
                        return ND_DEVICE_NAMESPACE_PMEM;
                else
                        return ND_DEVICE_NAMESPACE_IO;
        } else if (is_nd_blk(&nd_region->dev)) {
                return ND_DEVICE_NAMESPACE_BLK;
        }

        return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

static unsigned long long region_size(struct nd_region *nd_region)
{
        if (is_memory(&nd_region->dev)) {
                return nd_region->ndr_size;
        } else if (nd_region->ndr_mappings == 1) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[0];

                return nd_mapping->size;
        }

        return 0;
}

static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%llu\n", region_size(nd_region));
}
static DEVICE_ATTR_RO(size);

static ssize_t deep_flush_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        /*
         * NOTE: in the nvdimm_has_flush() error case this attribute is
         * not visible.
         */
        return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
}

static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t len)
{
        bool flush;
        int rc = strtobool(buf, &flush);
        struct nd_region *nd_region = to_nd_region(dev);

        if (rc)
                return rc;
        if (!flush)
                return -EINVAL;
        rc = nvdimm_flush(nd_region, NULL);
        if (rc)
                return rc;

        return len;
}
static DEVICE_ATTR_RW(deep_flush);

static ssize_t mappings_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        struct nd_interleave_set *nd_set = nd_region->nd_set;
        ssize_t rc = 0;

        if (is_memory(dev) && nd_set)
                /* pass, should be precluded by region_visible */;
        else
                return -ENXIO;

        /*
         * The cookie to show depends on which specification of the
         * labels we are using. If there are no labels then default to
         * the v1.1 namespace label cookie definition. To read all this
         * data we need to wait for probing to settle.
         */
        nd_device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        if (nd_region->ndr_mappings) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[0];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

                if (ndd) {
                        struct nd_namespace_index *nsindex;

                        nsindex = to_namespace_index(ndd, ndd->ns_current);
                        rc = sprintf(buf, "%#llx\n",
                                        nd_region_interleave_set_cookie(nd_region,
                                                        nsindex));
                }
        }
        nvdimm_bus_unlock(dev);
        nd_device_unlock(dev);

        if (rc)
                return rc;
        return sprintf(buf, "%#llx\n", nd_set->cookie1);
}
static DEVICE_ATTR_RO(set_cookie);

resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
        resource_size_t blk_max_overlap = 0, available, overlap;
        int i;

        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
        available = 0;
        overlap = blk_max_overlap;
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

                /* if a dimm is disabled the available capacity is zero */
                if (!ndd)
                        return 0;

                if (is_memory(&nd_region->dev)) {
                        available += nd_pmem_available_dpa(nd_region,
                                        nd_mapping, &overlap);
                        if (overlap > blk_max_overlap) {
                                blk_max_overlap = overlap;
                                goto retry;
                        }
                } else if (is_nd_blk(&nd_region->dev))
                        available += nd_blk_available_dpa(nd_region);
        }

        return available;
}

resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
{
        resource_size_t available = 0;
        int i;

        if (is_memory(&nd_region->dev))
                available = PHYS_ADDR_MAX;

        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];

                if (is_memory(&nd_region->dev))
                        available = min(available,
                                        nd_pmem_max_contiguous_dpa(nd_region,
                                                        nd_mapping));
                else if (is_nd_blk(&nd_region->dev))
                        available += nd_blk_available_dpa(nd_region);
        }
        if (is_memory(&nd_region->dev))
                return available * nd_region->ndr_mappings;
        return available;
}

static ssize_t available_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        unsigned long long available = 0;

        /*
         * Flush in-flight updates and grab a snapshot of the available
         * size. Of course, this value is potentially invalidated the
         * moment nvdimm_bus_lock() is dropped, but that's userspace's
         * problem to not race itself.
         */
        nd_device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        available = nd_region_available_dpa(nd_region);
        nvdimm_bus_unlock(dev);
        nd_device_unlock(dev);

        return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t max_available_extent_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        unsigned long long available = 0;

        nd_device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        available = nd_region_allocatable_dpa(nd_region);
        nvdimm_bus_unlock(dev);
        nd_device_unlock(dev);

        return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(max_available_extent);

static ssize_t init_namespaces_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region_data *ndrd = dev_get_drvdata(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (ndrd)
                rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
        else
                rc = -ENXIO;
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->ns_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);
        return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->btt_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->pfn_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->dax_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        bool ro;
        int rc = strtobool(buf, &ro);
        struct nd_region *nd_region = to_nd_region(dev);

        if (rc)
                return rc;

        nd_region->ro = ro;
        return len;
}
static DEVICE_ATTR_RW(read_only);

static ssize_t align_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%#lx\n", nd_region->align);
}

static ssize_t align_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_region *nd_region = to_nd_region(dev);
        unsigned long val, dpa;
        u32 remainder;
        int rc;

        rc = kstrtoul(buf, 0, &val);
        if (rc)
                return rc;

        if (!nd_region->ndr_mappings)
                return -ENXIO;

        /*
         * Ensure space-align is evenly divisible by the region
         * interleave-width because the kernel typically has no facility
         * to determine which DIMM(s), dimm-physical-addresses, would
         * contribute to the tail capacity in system-physical-address
         * space for the namespace.
         */
        dpa = div_u64_rem(val, nd_region->ndr_mappings, &remainder);
        if (!is_power_of_2(dpa) || dpa < PAGE_SIZE
                        || val > region_size(nd_region) || remainder)
                return -EINVAL;

        /*
         * Given that space allocation consults this value multiple
         * times ensure it does not change for the duration of the
         * allocation.
         */
        nvdimm_bus_lock(dev);
        nd_region->align = val;
        nvdimm_bus_unlock(dev);

        return len;
}
static DEVICE_ATTR_RW(align);
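
/*
 * Illustrative note (not part of the original source): the align attribute
 * above is exercised from userspace through sysfs, for example:
 *
 *        # cat /sys/bus/nd/devices/region0/align
 *        0x1000000
 *        # echo 0x2000000 > /sys/bus/nd/devices/region0/align
 *
 * "region0" is a placeholder name. Per align_store() above, a value is
 * rejected unless its per-mapping share (val / ndr_mappings) is a power of
 * two of at least PAGE_SIZE and the value does not exceed the region size.
 */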

static ssize_t region_badblocks_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nd_device_lock(dev);
        if (dev->driver)
                rc = badblocks_show(&nd_region->bb, buf, 0);
        else
                rc = -ENXIO;
        nd_device_unlock(dev);

        return rc;
}
static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);

static ssize_t resource_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR_ADMIN_RO(resource);

static ssize_t persistence_domain_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags))
                return sprintf(buf, "cpu_cache\n");
        else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags))
                return sprintf(buf, "memory_controller\n");
        else
                return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(persistence_domain);

static struct attribute *nd_region_attributes[] = {
        &dev_attr_size.attr,
        &dev_attr_align.attr,
        &dev_attr_nstype.attr,
        &dev_attr_mappings.attr,
        &dev_attr_btt_seed.attr,
        &dev_attr_pfn_seed.attr,
        &dev_attr_dax_seed.attr,
        &dev_attr_deep_flush.attr,
        &dev_attr_read_only.attr,
        &dev_attr_set_cookie.attr,
        &dev_attr_available_size.attr,
        &dev_attr_max_available_extent.attr,
        &dev_attr_namespace_seed.attr,
        &dev_attr_init_namespaces.attr,
        &dev_attr_badblocks.attr,
        &dev_attr_resource.attr,
        &dev_attr_persistence_domain.attr,
        NULL,
};

static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, typeof(*dev), kobj);
        struct nd_region *nd_region = to_nd_region(dev);
        struct nd_interleave_set *nd_set = nd_region->nd_set;
        int type = nd_region_to_nstype(nd_region);

        if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
                return 0;

        if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
                return 0;

        if (!is_memory(dev) && a == &dev_attr_badblocks.attr)
                return 0;

        if (a == &dev_attr_resource.attr && !is_memory(dev))
                return 0;

        if (a == &dev_attr_deep_flush.attr) {
                int has_flush = nvdimm_has_flush(nd_region);

                if (has_flush == 1)
                        return a->mode;
                else if (has_flush == 0)
                        return 0444;
                else
                        return 0;
        }

        if (a == &dev_attr_persistence_domain.attr) {
                if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE)
                                        | BIT(ND_REGION_PERSIST_MEMCTRL))) == 0)
                        return 0;
                return a->mode;
        }

        if (a == &dev_attr_align.attr)
                return a->mode;

        if (a != &dev_attr_set_cookie.attr
                        && a != &dev_attr_available_size.attr)
                return a->mode;

        if ((type == ND_DEVICE_NAMESPACE_PMEM
                                || type == ND_DEVICE_NAMESPACE_BLK)
                        && a == &dev_attr_available_size.attr)
                return a->mode;
        else if (is_memory(dev) && nd_set)
                return a->mode;

        return 0;
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
        struct nd_region *nd_region = to_nd_region(dev);
        struct nd_mapping *nd_mapping;
        struct nvdimm *nvdimm;

        if (n >= nd_region->ndr_mappings)
                return -ENXIO;
        nd_mapping = &nd_region->mapping[n];
        nvdimm = nd_mapping->nvdimm;

        return sprintf(buf, "%s,%llu,%llu,%d\n", dev_name(&nvdimm->dev),
                        nd_mapping->start, nd_mapping->size,
                        nd_mapping->position);
}

#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev, \
                struct device_attribute *attr, char *buf) \
{ \
        return mappingN(dev, buf, idx); \
} \
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave, a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct nd_region *nd_region = to_nd_region(dev);

        if (n < nd_region->ndr_mappings)
                return a->mode;
        return 0;
}

static struct attribute *mapping_attributes[] = {
        &dev_attr_mapping0.attr,
        &dev_attr_mapping1.attr,
        &dev_attr_mapping2.attr,
        &dev_attr_mapping3.attr,
        &dev_attr_mapping4.attr,
        &dev_attr_mapping5.attr,
        &dev_attr_mapping6.attr,
        &dev_attr_mapping7.attr,
        &dev_attr_mapping8.attr,
        &dev_attr_mapping9.attr,
        &dev_attr_mapping10.attr,
        &dev_attr_mapping11.attr,
        &dev_attr_mapping12.attr,
        &dev_attr_mapping13.attr,
        &dev_attr_mapping14.attr,
        &dev_attr_mapping15.attr,
        &dev_attr_mapping16.attr,
        &dev_attr_mapping17.attr,
        &dev_attr_mapping18.attr,
        &dev_attr_mapping19.attr,
        &dev_attr_mapping20.attr,
        &dev_attr_mapping21.attr,
        &dev_attr_mapping22.attr,
        &dev_attr_mapping23.attr,
        &dev_attr_mapping24.attr,
        &dev_attr_mapping25.attr,
        &dev_attr_mapping26.attr,
        &dev_attr_mapping27.attr,
        &dev_attr_mapping28.attr,
        &dev_attr_mapping29.attr,
        &dev_attr_mapping30.attr,
        &dev_attr_mapping31.attr,
        NULL,
};

static const struct attribute_group nd_mapping_attribute_group = {
        .is_visible = mapping_visible,
        .attrs = mapping_attributes,
};

static const struct attribute_group nd_region_attribute_group = {
        .attrs = nd_region_attributes,
        .is_visible = region_visible,
};

static const struct attribute_group *nd_region_attribute_groups[] = {
        &nd_device_attribute_group,
        &nd_region_attribute_group,
        &nd_numa_attribute_group,
        &nd_mapping_attribute_group,
        NULL,
};

static const struct device_type nd_blk_device_type = {
        .name = "nd_blk",
        .release = nd_region_release,
        .groups = nd_region_attribute_groups,
};

static const struct device_type nd_pmem_device_type = {
        .name = "nd_pmem",
        .release = nd_region_release,
        .groups = nd_region_attribute_groups,
};

static const struct device_type nd_volatile_device_type = {
        .name = "nd_volatile",
        .release = nd_region_release,
        .groups = nd_region_attribute_groups,
};

bool is_nd_pmem(struct device *dev)
{
        return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
        return dev ? dev->type == &nd_blk_device_type : false;
}

bool is_nd_volatile(struct device *dev)
{
        return dev ? dev->type == &nd_volatile_device_type : false;
}

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
                struct nd_namespace_index *nsindex)
{
        struct nd_interleave_set *nd_set = nd_region->nd_set;

        if (!nd_set)
                return 0;

        if (nsindex && __le16_to_cpu(nsindex->major) == 1
                        && __le16_to_cpu(nsindex->minor) == 1)
                return nd_set->cookie1;
        return nd_set->cookie2;
}

u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
        struct nd_interleave_set *nd_set = nd_region->nd_set;

        if (nd_set)
                return nd_set->altcookie;
        return 0;
}

void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
        struct nd_label_ent *label_ent, *e;

        lockdep_assert_held(&nd_mapping->lock);
        list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
                list_del(&label_ent->list);
                kfree(label_ent);
        }
}

/*
 * When a namespace is activated, create new seeds for the next
 * namespace, or namespace-personality, to be configured.
 */
void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
{
        nvdimm_bus_lock(dev);
        if (nd_region->ns_seed == dev) {
                nd_region_create_ns_seed(nd_region);
        } else if (is_nd_btt(dev)) {
                struct nd_btt *nd_btt = to_nd_btt(dev);

                if (nd_region->btt_seed == dev)
                        nd_region_create_btt_seed(nd_region);
                if (nd_region->ns_seed == &nd_btt->ndns->dev)
                        nd_region_create_ns_seed(nd_region);
        } else if (is_nd_pfn(dev)) {
                struct nd_pfn *nd_pfn = to_nd_pfn(dev);

                if (nd_region->pfn_seed == dev)
                        nd_region_create_pfn_seed(nd_region);
                if (nd_region->ns_seed == &nd_pfn->ndns->dev)
                        nd_region_create_ns_seed(nd_region);
        } else if (is_nd_dax(dev)) {
                struct nd_dax *nd_dax = to_nd_dax(dev);

                if (nd_region->dax_seed == dev)
                        nd_region_create_dax_seed(nd_region);
                if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
                        nd_region_create_ns_seed(nd_region);
        }
        nvdimm_bus_unlock(dev);
}

int nd_blk_region_init(struct nd_region *nd_region)
{
        struct device *dev = &nd_region->dev;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        if (!is_nd_blk(dev))
                return 0;

        if (nd_region->ndr_mappings < 1) {
                dev_dbg(dev, "invalid BLK region\n");
                return -ENXIO;
        }

        return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu. For larger systems we need to lock to share lanes. For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively. We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
        unsigned int cpu, lane;

        cpu = get_cpu();
        if (nd_region->num_lanes < nr_cpu_ids) {
                struct nd_percpu_lane *ndl_lock, *ndl_count;

                lane = cpu % nd_region->num_lanes;
                ndl_count = per_cpu_ptr(nd_region->lane, cpu);
                ndl_lock = per_cpu_ptr(nd_region->lane, lane);
                if (ndl_count->count++ == 0)
                        spin_lock(&ndl_lock->lock);
        } else
                lane = cpu;

        return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
        if (nd_region->num_lanes < nr_cpu_ids) {
                unsigned int cpu = get_cpu();
                struct nd_percpu_lane *ndl_lock, *ndl_count;

                ndl_count = per_cpu_ptr(nd_region->lane, cpu);
                ndl_lock = per_cpu_ptr(nd_region->lane, lane);
                if (--ndl_count->count == 0)
                        spin_unlock(&ndl_lock->lock);
                put_cpu();
        }
        put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
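
/*
 * Illustrative sketch (an assumption, not taken from this file): a consumer
 * such as the BTT driver is expected to bracket its per-lane work with the
 * helpers above, roughly:
 *
 *        unsigned int lane;
 *
 *        lane = nd_region_acquire_lane(nd_region);
 *        ... perform the I/O or BTT log update associated with this lane ...
 *        nd_region_release_lane(nd_region, lane);
 *
 * Preemption is disabled (and possibly a shared per-lane spinlock held)
 * between acquire and release, so the bracketed work must not sleep.
 */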

/*
 * PowerPC requires this alignment for memremap_pages(). All other archs
 * should be ok with SUBSECTION_SIZE (see memremap_compat_align()).
 */
#define MEMREMAP_COMPAT_ALIGN_MAX SZ_16M

static unsigned long default_align(struct nd_region *nd_region)
{
        unsigned long align;
        int i, mappings;
        u32 remainder;

        if (is_nd_blk(&nd_region->dev))
                align = PAGE_SIZE;
        else
                align = MEMREMAP_COMPAT_ALIGN_MAX;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                if (test_bit(NDD_ALIASING, &nvdimm->flags)) {
                        align = MEMREMAP_COMPAT_ALIGN_MAX;
                        break;
                }
        }

        mappings = max_t(u16, 1, nd_region->ndr_mappings);
        div_u64_rem(align, mappings, &remainder);
        if (remainder)
                align *= mappings;

        return align;
}

static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc,
                const struct device_type *dev_type, const char *caller)
{
        struct nd_region *nd_region;
        struct device *dev;
        void *region_buf;
        unsigned int i;
        int ro = 0;

        for (i = 0; i < ndr_desc->num_mappings; i++) {
                struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
                struct nvdimm *nvdimm = mapping->nvdimm;

                if ((mapping->start | mapping->size) % PAGE_SIZE) {
                        dev_err(&nvdimm_bus->dev,
                                "%s: %s mapping%d is not %ld aligned\n",
                                caller, dev_name(&nvdimm->dev), i, PAGE_SIZE);
                        return NULL;
                }

                if (test_bit(NDD_UNARMED, &nvdimm->flags))
                        ro = 1;

                if (test_bit(NDD_NOBLK, &nvdimm->flags)
                                && dev_type == &nd_blk_device_type) {
                        dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n",
                                        caller, dev_name(&nvdimm->dev), i);
                        return NULL;
                }
        }

        if (dev_type == &nd_blk_device_type) {
                struct nd_blk_region_desc *ndbr_desc;
                struct nd_blk_region *ndbr;

                ndbr_desc = to_blk_region_desc(ndr_desc);
                ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
                                * ndr_desc->num_mappings,
                                GFP_KERNEL);
                if (ndbr) {
                        nd_region = &ndbr->nd_region;
                        ndbr->enable = ndbr_desc->enable;
                        ndbr->do_io = ndbr_desc->do_io;
                }
                region_buf = ndbr;
        } else {
                nd_region = kzalloc(struct_size(nd_region, mapping,
                                                ndr_desc->num_mappings),
                                GFP_KERNEL);
                region_buf = nd_region;
        }

        if (!region_buf)
                return NULL;
        nd_region->id = memregion_alloc(GFP_KERNEL);
        if (nd_region->id < 0)
                goto err_id;

        nd_region->lane = alloc_percpu(struct nd_percpu_lane);
        if (!nd_region->lane)
                goto err_percpu;

        for (i = 0; i < nr_cpu_ids; i++) {
                struct nd_percpu_lane *ndl;

                ndl = per_cpu_ptr(nd_region->lane, i);
                spin_lock_init(&ndl->lock);
                ndl->count = 0;
        }

        for (i = 0; i < ndr_desc->num_mappings; i++) {
                struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
                struct nvdimm *nvdimm = mapping->nvdimm;

                nd_region->mapping[i].nvdimm = nvdimm;
                nd_region->mapping[i].start = mapping->start;
                nd_region->mapping[i].size = mapping->size;
                nd_region->mapping[i].position = mapping->position;
                INIT_LIST_HEAD(&nd_region->mapping[i].labels);
                mutex_init(&nd_region->mapping[i].lock);

                get_device(&nvdimm->dev);
        }
        nd_region->ndr_mappings = ndr_desc->num_mappings;
        nd_region->provider_data = ndr_desc->provider_data;
        nd_region->nd_set = ndr_desc->nd_set;
        nd_region->num_lanes = ndr_desc->num_lanes;
        nd_region->flags = ndr_desc->flags;
        nd_region->ro = ro;
        nd_region->numa_node = ndr_desc->numa_node;
        nd_region->target_node = ndr_desc->target_node;
        ida_init(&nd_region->ns_ida);
        ida_init(&nd_region->btt_ida);
        ida_init(&nd_region->pfn_ida);
        ida_init(&nd_region->dax_ida);
        dev = &nd_region->dev;
        dev_set_name(dev, "region%d", nd_region->id);
        dev->parent = &nvdimm_bus->dev;
        dev->type = dev_type;
        dev->groups = ndr_desc->attr_groups;
        dev->of_node = ndr_desc->of_node;
        nd_region->ndr_size = resource_size(ndr_desc->res);
        nd_region->ndr_start = ndr_desc->res->start;
        nd_region->align = default_align(nd_region);
        if (ndr_desc->flush)
                nd_region->flush = ndr_desc->flush;
        else
                nd_region->flush = NULL;

        nd_device_register(dev);

        return nd_region;

 err_percpu:
        memregion_free(nd_region->id);
 err_id:
        kfree(region_buf);
        return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc)
{
        ndr_desc->num_lanes = ND_MAX_LANES;
        return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
                        __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc)
{
        if (ndr_desc->num_mappings > 1)
                return NULL;
        ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
        return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
                        __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc)
{
        ndr_desc->num_lanes = ND_MAX_LANES;
        return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
                        __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
{
        int rc = 0;

        if (!nd_region->flush)
                rc = generic_nvdimm_flush(nd_region);
        else {
                if (nd_region->flush(nd_region, bio))
                        rc = -EIO;
        }

        return rc;
}
/**
 * nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
int generic_nvdimm_flush(struct nd_region *nd_region)
{
        struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
        int i, idx;

        /*
         * Try to encourage some diversity in flush hint addresses
         * across cpus assuming a limited number of flush hints.
         */
        idx = this_cpu_read(flush_idx);
        idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

        /*
         * The pmem_wmb() is needed to 'sfence' all previous writes such
         * that they are architecturally visible for the platform buffer
         * flush. Note that we've already arranged for pmem writes to avoid
         * the cache via memcpy_flushcache(). The final wmb() ensures
         * ordering for the NVDIMM flush write.
         */
        pmem_wmb();
        for (i = 0; i < nd_region->ndr_mappings; i++)
                if (ndrd_get_flush_wpq(ndrd, i, 0))
                        writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
        wmb();

        return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_flush);
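
/*
 * Illustrative sketch (an assumption, not taken from this file): a bus
 * provider that needs its own flush mechanism can populate the optional
 * flush callback in its region descriptor before creating the region,
 * for example:
 *
 *        static int example_flush(struct nd_region *nd_region, struct bio *bio)
 *        {
 *                // provider-specific flush, e.g. a host notification
 *                return 0;
 *        }
 *
 *        ndr_desc.flush = example_flush;
 *        set_bit(ND_REGION_ASYNC, &ndr_desc.flags);
 *        region = nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc);
 *
 * "example_flush" is a hypothetical callback. When no callback is supplied,
 * nvdimm_flush() falls back to generic_nvdimm_flush() and the flush-hint
 * write sequence above.
 */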

/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability can not be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
        int i;

        /* no nvdimm or pmem api == flushing capability unknown */
        if (nd_region->ndr_mappings == 0
                        || !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
                return -ENXIO;

        /* Test if an explicit flush function is defined */
        if (test_bit(ND_REGION_ASYNC, &nd_region->flags) && nd_region->flush)
                return 1;

        /* Test if any flush hints for the region are available */
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                /* flush hints present / available */
                if (nvdimm->num_flush)
                        return 1;
        }

        /*
         * The platform defines dimm devices without hints or an explicit
         * flush; assume a platform persistence mechanism like ADR.
         */
        return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);

int nvdimm_has_cache(struct nd_region *nd_region)
{
        return is_nd_pmem(&nd_region->dev) &&
                !test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_has_cache);

bool is_nvdimm_sync(struct nd_region *nd_region)
{
        if (is_nd_volatile(&nd_region->dev))
                return true;

        return is_nd_pmem(&nd_region->dev) &&
                !test_bit(ND_REGION_ASYNC, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(is_nvdimm_sync);

struct conflict_context {
        struct nd_region *nd_region;
        resource_size_t start, size;
};

static int region_conflict(struct device *dev, void *data)
{
        struct nd_region *nd_region;
        struct conflict_context *ctx = data;
        resource_size_t res_end, region_end, region_start;

        if (!is_memory(dev))
                return 0;

        nd_region = to_nd_region(dev);
        if (nd_region == ctx->nd_region)
                return 0;

        res_end = ctx->start + ctx->size;
        region_start = nd_region->ndr_start;
        region_end = region_start + nd_region->ndr_size;
        if (ctx->start >= region_start && ctx->start < region_end)
                return -EBUSY;
        if (res_end > region_start && res_end <= region_end)
                return -EBUSY;
        return 0;
}

int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
                resource_size_t size)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
        struct conflict_context ctx = {
                .nd_region = nd_region,
                .start = start,
                .size = size,
        };

        return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);