drivers/nvdimm/dimm_devs.c

DEFINITIONS

This source file includes the following definitions.
  1. nvdimm_check_config_data
  2. validate_dimm
  3. nvdimm_init_nsarea
  4. nvdimm_get_config_data
  5. nvdimm_set_config_data
  6. nvdimm_set_aliasing
  7. nvdimm_set_locked
  8. nvdimm_clear_locked
  9. nvdimm_release
  10. is_nvdimm
  11. to_nvdimm
  12. nd_blk_region_to_dimm
  13. nd_blk_memremap_flags
  14. to_ndd
  15. nvdimm_drvdata_release
  16. get_ndd
  17. put_ndd
  18. nvdimm_name
  19. nvdimm_kobj
  20. nvdimm_cmd_mask
  21. nvdimm_provider_data
  22. commands_show
  23. flags_show
  24. state_show
  25. available_slots_show
  26. security_show
  27. frozen_show
  28. security_store
  29. nvdimm_visible
  30. __nvdimm_create
  31. shutdown_security_notify
  32. nvdimm_security_setup_events
  33. nvdimm_in_overwrite
  34. nvdimm_security_freeze
  35. alias_dpa_busy
  36. nd_blk_available_dpa
  37. nd_pmem_max_contiguous_dpa
  38. nd_pmem_available_dpa
  39. nvdimm_free_dpa
  40. nvdimm_allocate_dpa
  41. nvdimm_allocated_dpa
  42. count_dimms
  43. nvdimm_bus_check_dimm_count
  44. nvdimm_devs_exit

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
   4  */
   5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   6 #include <linux/moduleparam.h>
   7 #include <linux/vmalloc.h>
   8 #include <linux/device.h>
   9 #include <linux/ndctl.h>
  10 #include <linux/slab.h>
  11 #include <linux/io.h>
  12 #include <linux/fs.h>
  13 #include <linux/mm.h>
  14 #include "nd-core.h"
  15 #include "label.h"
  16 #include "pmem.h"
  17 #include "nd.h"
  18 
  19 static DEFINE_IDA(dimm_ida);
  20 
  21 static bool noblk;
  22 module_param(noblk, bool, 0444);
  23 MODULE_PARM_DESC(noblk, "force disable BLK / local alias support");
  24 
  25 /*
   26  * Retrieve the bus and dimm handle and return whether this bus supports
   27  * get_config_data commands
  28  */
  29 int nvdimm_check_config_data(struct device *dev)
  30 {
  31         struct nvdimm *nvdimm = to_nvdimm(dev);
  32 
  33         if (!nvdimm->cmd_mask ||
  34             !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
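                      /*
                       * An aliasing DIMM cannot be used without access to its
                       * label area, so this is a hard error; otherwise label
                       * access is simply not supported.
                       */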
  35                 if (test_bit(NDD_ALIASING, &nvdimm->flags))
  36                         return -ENXIO;
  37                 else
  38                         return -ENOTTY;
  39         }
  40 
  41         return 0;
  42 }
  43 
  44 static int validate_dimm(struct nvdimm_drvdata *ndd)
  45 {
  46         int rc;
  47 
  48         if (!ndd)
  49                 return -EINVAL;
  50 
  51         rc = nvdimm_check_config_data(ndd->dev);
  52         if (rc)
  53                 dev_dbg(ndd->dev, "%ps: %s error: %d\n",
  54                                 __builtin_return_address(0), __func__, rc);
  55         return rc;
  56 }
  57 
  58 /**
  59  * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
   60  * @ndd: dimm driver data to initialize
  61  */
  62 int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
  63 {
  64         struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
  65         struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
  66         struct nvdimm_bus_descriptor *nd_desc;
  67         int rc = validate_dimm(ndd);
  68         int cmd_rc = 0;
  69 
  70         if (rc)
  71                 return rc;
  72 
  73         if (cmd->config_size)
  74                 return 0; /* already valid */
  75 
  76         memset(cmd, 0, sizeof(*cmd));
  77         nd_desc = nvdimm_bus->nd_desc;
  78         rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
  79                         ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
  80         if (rc < 0)
  81                 return rc;
  82         return cmd_rc;
  83 }
  84 
  85 int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
  86                            size_t offset, size_t len)
  87 {
  88         struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
  89         struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
  90         int rc = validate_dimm(ndd), cmd_rc = 0;
  91         struct nd_cmd_get_config_data_hdr *cmd;
  92         size_t max_cmd_size, buf_offset;
  93 
  94         if (rc)
  95                 return rc;
  96 
  97         if (offset + len > ndd->nsarea.config_size)
  98                 return -ENXIO;
  99 
 100         max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
 101         cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
 102         if (!cmd)
 103                 return -ENOMEM;
 104 
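              /*
               * Read the requested range in max_xfer-sized chunks: each pass
               * issues one ND_CMD_GET_CONFIG_DATA call and copies its payload
               * into the caller's buffer.
               */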
 105         for (buf_offset = 0; len;
 106              len -= cmd->in_length, buf_offset += cmd->in_length) {
 107                 size_t cmd_size;
 108 
 109                 cmd->in_offset = offset + buf_offset;
 110                 cmd->in_length = min(max_cmd_size, len);
 111 
 112                 cmd_size = sizeof(*cmd) + cmd->in_length;
 113 
 114                 rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
 115                                 ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
 116                 if (rc < 0)
 117                         break;
 118                 if (cmd_rc < 0) {
 119                         rc = cmd_rc;
 120                         break;
 121                 }
 122 
 123                 /* out_buf should be valid, copy it into our output buffer */
 124                 memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
 125         }
 126         kvfree(cmd);
 127 
 128         return rc;
 129 }
 130 
 131 int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
 132                 void *buf, size_t len)
 133 {
 134         size_t max_cmd_size, buf_offset;
 135         struct nd_cmd_set_config_hdr *cmd;
 136         int rc = validate_dimm(ndd), cmd_rc = 0;
 137         struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
 138         struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
 139 
 140         if (rc)
 141                 return rc;
 142 
 143         if (offset + len > ndd->nsarea.config_size)
 144                 return -ENXIO;
 145 
 146         max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
 147         cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
 148         if (!cmd)
 149                 return -ENOMEM;
 150 
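              /*
               * Mirror of the read path: write the range in max_xfer-sized
               * chunks, one ND_CMD_SET_CONFIG_DATA call per pass.
               */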
 151         for (buf_offset = 0; len; len -= cmd->in_length,
 152                         buf_offset += cmd->in_length) {
 153                 size_t cmd_size;
 154 
 155                 cmd->in_offset = offset + buf_offset;
 156                 cmd->in_length = min(max_cmd_size, len);
 157                 memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);
 158 
  159                 /* status is output in the last 4 bytes of the command buffer */
 160                 cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);
 161 
 162                 rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
 163                                 ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
 164                 if (rc < 0)
 165                         break;
 166                 if (cmd_rc < 0) {
 167                         rc = cmd_rc;
 168                         break;
 169                 }
 170         }
 171         kvfree(cmd);
 172 
 173         return rc;
 174 }
 175 
 176 void nvdimm_set_aliasing(struct device *dev)
 177 {
 178         struct nvdimm *nvdimm = to_nvdimm(dev);
 179 
 180         set_bit(NDD_ALIASING, &nvdimm->flags);
 181 }
 182 
 183 void nvdimm_set_locked(struct device *dev)
 184 {
 185         struct nvdimm *nvdimm = to_nvdimm(dev);
 186 
 187         set_bit(NDD_LOCKED, &nvdimm->flags);
 188 }
 189 
 190 void nvdimm_clear_locked(struct device *dev)
 191 {
 192         struct nvdimm *nvdimm = to_nvdimm(dev);
 193 
 194         clear_bit(NDD_LOCKED, &nvdimm->flags);
 195 }
 196 
 197 static void nvdimm_release(struct device *dev)
 198 {
 199         struct nvdimm *nvdimm = to_nvdimm(dev);
 200 
 201         ida_simple_remove(&dimm_ida, nvdimm->id);
 202         kfree(nvdimm);
 203 }
 204 
 205 static struct device_type nvdimm_device_type = {
 206         .name = "nvdimm",
 207         .release = nvdimm_release,
 208 };
 209 
 210 bool is_nvdimm(struct device *dev)
 211 {
 212         return dev->type == &nvdimm_device_type;
 213 }
 214 
 215 struct nvdimm *to_nvdimm(struct device *dev)
 216 {
 217         struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);
 218 
 219         WARN_ON(!is_nvdimm(dev));
 220         return nvdimm;
 221 }
 222 EXPORT_SYMBOL_GPL(to_nvdimm);
 223 
 224 struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
 225 {
 226         struct nd_region *nd_region = &ndbr->nd_region;
 227         struct nd_mapping *nd_mapping = &nd_region->mapping[0];
 228 
 229         return nd_mapping->nvdimm;
 230 }
 231 EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);
 232 
 233 unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
 234 {
 235         /* pmem mapping properties are private to libnvdimm */
 236         return ARCH_MEMREMAP_PMEM;
 237 }
 238 EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);
 239 
 240 struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
 241 {
 242         struct nvdimm *nvdimm = nd_mapping->nvdimm;
 243 
 244         WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));
 245 
 246         return dev_get_drvdata(&nvdimm->dev);
 247 }
 248 EXPORT_SYMBOL(to_ndd);
 249 
 250 void nvdimm_drvdata_release(struct kref *kref)
 251 {
 252         struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
 253         struct device *dev = ndd->dev;
 254         struct resource *res, *_r;
 255 
 256         dev_dbg(dev, "trace\n");
 257         nvdimm_bus_lock(dev);
 258         for_each_dpa_resource_safe(ndd, res, _r)
 259                 nvdimm_free_dpa(ndd, res);
 260         nvdimm_bus_unlock(dev);
 261 
 262         kvfree(ndd->data);
 263         kfree(ndd);
 264         put_device(dev);
 265 }
 266 
 267 void get_ndd(struct nvdimm_drvdata *ndd)
 268 {
 269         kref_get(&ndd->kref);
 270 }
 271 
 272 void put_ndd(struct nvdimm_drvdata *ndd)
 273 {
 274         if (ndd)
 275                 kref_put(&ndd->kref, nvdimm_drvdata_release);
 276 }
 277 
 278 const char *nvdimm_name(struct nvdimm *nvdimm)
 279 {
 280         return dev_name(&nvdimm->dev);
 281 }
 282 EXPORT_SYMBOL_GPL(nvdimm_name);
 283 
 284 struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
 285 {
 286         return &nvdimm->dev.kobj;
 287 }
 288 EXPORT_SYMBOL_GPL(nvdimm_kobj);
 289 
 290 unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
 291 {
 292         return nvdimm->cmd_mask;
 293 }
 294 EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);
 295 
 296 void *nvdimm_provider_data(struct nvdimm *nvdimm)
 297 {
 298         if (nvdimm)
 299                 return nvdimm->provider_data;
 300         return NULL;
 301 }
 302 EXPORT_SYMBOL_GPL(nvdimm_provider_data);
 303 
 304 static ssize_t commands_show(struct device *dev,
 305                 struct device_attribute *attr, char *buf)
 306 {
 307         struct nvdimm *nvdimm = to_nvdimm(dev);
 308         int cmd, len = 0;
 309 
 310         if (!nvdimm->cmd_mask)
 311                 return sprintf(buf, "\n");
 312 
 313         for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
 314                 len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
 315         len += sprintf(buf + len, "\n");
 316         return len;
 317 }
 318 static DEVICE_ATTR_RO(commands);
 319 
 320 static ssize_t flags_show(struct device *dev,
 321                 struct device_attribute *attr, char *buf)
 322 {
 323         struct nvdimm *nvdimm = to_nvdimm(dev);
 324 
 325         return sprintf(buf, "%s%s\n",
 326                         test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
 327                         test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
 328 }
 329 static DEVICE_ATTR_RO(flags);
 330 
 331 static ssize_t state_show(struct device *dev, struct device_attribute *attr,
 332                 char *buf)
 333 {
 334         struct nvdimm *nvdimm = to_nvdimm(dev);
 335 
 336         /*
  337          * The state may be in the process of changing; userspace should
 338          * quiesce probing if it wants a static answer
 339          */
 340         nvdimm_bus_lock(dev);
 341         nvdimm_bus_unlock(dev);
 342         return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
 343                         ? "active" : "idle");
 344 }
 345 static DEVICE_ATTR_RO(state);
 346 
 347 static ssize_t available_slots_show(struct device *dev,
 348                 struct device_attribute *attr, char *buf)
 349 {
 350         struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
 351         ssize_t rc;
 352         u32 nfree;
 353 
 354         if (!ndd)
 355                 return -ENXIO;
 356 
 357         nvdimm_bus_lock(dev);
 358         nfree = nd_label_nfree(ndd);
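              /*
               * One slot is subtracted (held in reserve for in-flight label
               * updates); the "nfree - 1 > nfree" test catches unsigned
               * wraparound in case nd_label_nfree() ever returns zero.
               */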
 359         if (nfree - 1 > nfree) {
 360                 dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
 361                 nfree = 0;
 362         } else
 363                 nfree--;
 364         rc = sprintf(buf, "%d\n", nfree);
 365         nvdimm_bus_unlock(dev);
 366         return rc;
 367 }
 368 static DEVICE_ATTR_RO(available_slots);
 369 
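      /*
       * __weak so that test infrastructure (e.g. tools/testing/nvdimm) can
       * provide an override of this attribute.
       */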
 370 __weak ssize_t security_show(struct device *dev,
 371                 struct device_attribute *attr, char *buf)
 372 {
 373         struct nvdimm *nvdimm = to_nvdimm(dev);
 374 
 375         if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
 376                 return sprintf(buf, "disabled\n");
 377         if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
 378                 return sprintf(buf, "unlocked\n");
 379         if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
 380                 return sprintf(buf, "locked\n");
 381         if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
 382                 return sprintf(buf, "overwrite\n");
 383         return -ENOTTY;
 384 }
 385 
 386 static ssize_t frozen_show(struct device *dev,
 387                 struct device_attribute *attr, char *buf)
 388 {
 389         struct nvdimm *nvdimm = to_nvdimm(dev);
 390 
 391         return sprintf(buf, "%d\n", test_bit(NVDIMM_SECURITY_FROZEN,
 392                                 &nvdimm->sec.flags));
 393 }
 394 static DEVICE_ATTR_RO(frozen);
 395 
 396 static ssize_t security_store(struct device *dev,
 397                 struct device_attribute *attr, const char *buf, size_t len)
 398 
 399 {
 400         ssize_t rc;
 401 
 402         /*
  403          * Require all userspace-triggered security management to be
 404          * done while probing is idle and the DIMM is not in active use
 405          * in any region.
 406          */
 407         nd_device_lock(dev);
 408         nvdimm_bus_lock(dev);
 409         wait_nvdimm_bus_probe_idle(dev);
 410         rc = nvdimm_security_store(dev, buf, len);
 411         nvdimm_bus_unlock(dev);
 412         nd_device_unlock(dev);
 413 
 414         return rc;
 415 }
 416 static DEVICE_ATTR_RW(security);
 417 
 418 static struct attribute *nvdimm_attributes[] = {
 419         &dev_attr_state.attr,
 420         &dev_attr_flags.attr,
 421         &dev_attr_commands.attr,
 422         &dev_attr_available_slots.attr,
 423         &dev_attr_security.attr,
 424         &dev_attr_frozen.attr,
 425         NULL,
 426 };
 427 
 428 static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
 429 {
 430         struct device *dev = container_of(kobj, typeof(*dev), kobj);
 431         struct nvdimm *nvdimm = to_nvdimm(dev);
 432 
 433         if (a != &dev_attr_security.attr && a != &dev_attr_frozen.attr)
 434                 return a->mode;
 435         if (!nvdimm->sec.flags)
 436                 return 0;
 437 
 438         if (a == &dev_attr_security.attr) {
 439                 /* Are there any state mutation ops (make writable)? */
 440                 if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
 441                                 || nvdimm->sec.ops->change_key
 442                                 || nvdimm->sec.ops->erase
 443                                 || nvdimm->sec.ops->overwrite)
 444                         return a->mode;
 445                 return 0444;
 446         }
 447 
 448         if (nvdimm->sec.ops->freeze)
 449                 return a->mode;
 450         return 0;
 451 }
 452 
 453 struct attribute_group nvdimm_attribute_group = {
 454         .attrs = nvdimm_attributes,
 455         .is_visible = nvdimm_visible,
 456 };
 457 EXPORT_SYMBOL_GPL(nvdimm_attribute_group);
 458 
 459 struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
 460                 void *provider_data, const struct attribute_group **groups,
 461                 unsigned long flags, unsigned long cmd_mask, int num_flush,
 462                 struct resource *flush_wpq, const char *dimm_id,
 463                 const struct nvdimm_security_ops *sec_ops)
 464 {
 465         struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
 466         struct device *dev;
 467 
 468         if (!nvdimm)
 469                 return NULL;
 470 
 471         nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
 472         if (nvdimm->id < 0) {
 473                 kfree(nvdimm);
 474                 return NULL;
 475         }
 476 
 477         nvdimm->dimm_id = dimm_id;
 478         nvdimm->provider_data = provider_data;
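              /* honor the module-wide "noblk" override: force-disable BLK / local alias support */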
 479         if (noblk)
 480                 flags |= 1 << NDD_NOBLK;
 481         nvdimm->flags = flags;
 482         nvdimm->cmd_mask = cmd_mask;
 483         nvdimm->num_flush = num_flush;
 484         nvdimm->flush_wpq = flush_wpq;
 485         atomic_set(&nvdimm->busy, 0);
 486         dev = &nvdimm->dev;
 487         dev_set_name(dev, "nmem%d", nvdimm->id);
 488         dev->parent = &nvdimm_bus->dev;
 489         dev->type = &nvdimm_device_type;
 490         dev->devt = MKDEV(nvdimm_major, nvdimm->id);
 491         dev->groups = groups;
 492         nvdimm->sec.ops = sec_ops;
 493         nvdimm->sec.overwrite_tmo = 0;
 494         INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query);
 495         /*
 496          * Security state must be initialized before device_add() for
 497          * attribute visibility.
 498          */
 499         /* get security state and extended (master) state */
 500         nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
 501         nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
 502         nd_device_register(dev);
 503 
 504         return nvdimm;
 505 }
 506 EXPORT_SYMBOL_GPL(__nvdimm_create);
 507 
 508 static void shutdown_security_notify(void *data)
 509 {
 510         struct nvdimm *nvdimm = data;
 511 
 512         sysfs_put(nvdimm->sec.overwrite_state);
 513 }
 514 
 515 int nvdimm_security_setup_events(struct device *dev)
 516 {
 517         struct nvdimm *nvdimm = to_nvdimm(dev);
 518 
 519         if (!nvdimm->sec.flags || !nvdimm->sec.ops
 520                         || !nvdimm->sec.ops->overwrite)
 521                 return 0;
 522         nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
 523         if (!nvdimm->sec.overwrite_state)
 524                 return -ENOMEM;
 525 
 526         return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm);
 527 }
 528 EXPORT_SYMBOL_GPL(nvdimm_security_setup_events);
 529 
 530 int nvdimm_in_overwrite(struct nvdimm *nvdimm)
 531 {
 532         return test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
 533 }
 534 EXPORT_SYMBOL_GPL(nvdimm_in_overwrite);
 535 
 536 int nvdimm_security_freeze(struct nvdimm *nvdimm)
 537 {
 538         int rc;
 539 
 540         WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));
 541 
 542         if (!nvdimm->sec.ops || !nvdimm->sec.ops->freeze)
 543                 return -EOPNOTSUPP;
 544 
 545         if (!nvdimm->sec.flags)
 546                 return -EIO;
 547 
 548         if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
 549                 dev_warn(&nvdimm->dev, "Overwrite operation in progress.\n");
 550                 return -EBUSY;
 551         }
 552 
 553         rc = nvdimm->sec.ops->freeze(nvdimm);
 554         nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
 555 
 556         return rc;
 557 }
 558 
 559 int alias_dpa_busy(struct device *dev, void *data)
 560 {
 561         resource_size_t map_end, blk_start, new;
 562         struct blk_alloc_info *info = data;
 563         struct nd_mapping *nd_mapping;
 564         struct nd_region *nd_region;
 565         struct nvdimm_drvdata *ndd;
 566         struct resource *res;
 567         int i;
 568 
 569         if (!is_memory(dev))
 570                 return 0;
 571 
 572         nd_region = to_nd_region(dev);
 573         for (i = 0; i < nd_region->ndr_mappings; i++) {
 574                 nd_mapping  = &nd_region->mapping[i];
 575                 if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
 576                         break;
 577         }
 578 
 579         if (i >= nd_region->ndr_mappings)
 580                 return 0;
 581 
 582         ndd = to_ndd(nd_mapping);
 583         map_end = nd_mapping->start + nd_mapping->size - 1;
 584         blk_start = nd_mapping->start;
 585 
 586         /*
 587          * In the allocation case ->res is set to free space that we are
 588          * looking to validate against PMEM aliasing collision rules
 589          * (i.e. BLK is allocated after all aliased PMEM).
 590          */
 591         if (info->res) {
 592                 if (info->res->start >= nd_mapping->start
 593                                 && info->res->start < map_end)
 594                         /* pass */;
 595                 else
 596                         return 0;
 597         }
 598 
 599  retry:
 600         /*
 601          * Find the free dpa from the end of the last pmem allocation to
 602          * the end of the interleave-set mapping.
 603          */
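              /*
               * Whenever blk_start advances, jump back to retry so that every
               * pmem allocation is re-checked against the new starting point.
               */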
 604         for_each_dpa_resource(ndd, res) {
 605                 if (strncmp(res->name, "pmem", 4) != 0)
 606                         continue;
 607                 if ((res->start >= blk_start && res->start < map_end)
 608                                 || (res->end >= blk_start
 609                                         && res->end <= map_end)) {
 610                         new = max(blk_start, min(map_end + 1, res->end + 1));
 611                         if (new != blk_start) {
 612                                 blk_start = new;
 613                                 goto retry;
 614                         }
 615                 }
 616         }
 617 
 618         /* update the free space range with the probed blk_start */
 619         if (info->res && blk_start > info->res->start) {
 620                 info->res->start = max(info->res->start, blk_start);
 621                 if (info->res->start > info->res->end)
 622                         info->res->end = info->res->start - 1;
 623                 return 1;
 624         }
 625 
 626         info->available -= blk_start - nd_mapping->start;
 627 
 628         return 0;
 629 }
 630 
 631 /**
 632  * nd_blk_available_dpa - account the unused dpa of BLK region
  633  * @nd_region: region whose first mapping carries the dpa-resource-root + labels
 634  *
 635  * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
  636  * we arrange for them to never start at a lower dpa than the last
 637  * PMEM allocation in an aliased region.
 638  */
 639 resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
 640 {
 641         struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
 642         struct nd_mapping *nd_mapping = &nd_region->mapping[0];
 643         struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
 644         struct blk_alloc_info info = {
 645                 .nd_mapping = nd_mapping,
 646                 .available = nd_mapping->size,
 647                 .res = NULL,
 648         };
 649         struct resource *res;
 650 
 651         if (!ndd)
 652                 return 0;
 653 
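              /*
               * First let alias_dpa_busy() subtract the capacity that PMEM
               * aliasing makes unavailable to BLK, then subtract existing
               * BLK allocations below.
               */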
 654         device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
 655 
 656         /* now account for busy blk allocations in unaliased dpa */
 657         for_each_dpa_resource(ndd, res) {
 658                 if (strncmp(res->name, "blk", 3) != 0)
 659                         continue;
 660                 info.available -= resource_size(res);
 661         }
 662 
 663         return info.available;
 664 }
 665 
 666 /**
 667  * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
 668  *                         contiguous unallocated dpa range.
 669  * @nd_region: constrain available space check to this reference region
 670  * @nd_mapping: container of dpa-resource-root + labels
 671  */
 672 resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
 673                                            struct nd_mapping *nd_mapping)
 674 {
 675         struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
 676         struct nvdimm_bus *nvdimm_bus;
 677         resource_size_t max = 0;
 678         struct resource *res;
 679 
 680         /* if a dimm is disabled the available capacity is zero */
 681         if (!ndd)
 682                 return 0;
 683 
 684         nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
 685         if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
 686                 return 0;
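              /*
               * __reserve_free_pmem() has claimed every free extent as a
               * "pmem-reserve" resource; the largest of those is the maximum
               * contiguous allocation this mapping can satisfy.
               */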
 687         for_each_dpa_resource(ndd, res) {
 688                 if (strcmp(res->name, "pmem-reserve") != 0)
 689                         continue;
 690                 if (resource_size(res) > max)
 691                         max = resource_size(res);
 692         }
 693         release_free_pmem(nvdimm_bus, nd_mapping);
 694         return max;
 695 }
 696 
 697 /**
 698  * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 699  * @nd_mapping: container of dpa-resource-root + labels
 700  * @nd_region: constrain available space check to this reference region
 701  * @overlap: calculate available space assuming this level of overlap
 702  *
 703  * Validate that a PMEM label, if present, aligns with the start of an
 704  * interleave set and truncate the available size at the lowest BLK
 705  * overlap point.
 706  *
 707  * The expectation is that this routine is called multiple times as it
 708  * probes for the largest BLK encroachment for any single member DIMM of
 709  * the interleave set.  Once that value is determined the PMEM-limit for
 710  * the set can be established.
 711  */
 712 resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
 713                 struct nd_mapping *nd_mapping, resource_size_t *overlap)
 714 {
 715         resource_size_t map_start, map_end, busy = 0, available, blk_start;
 716         struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
 717         struct resource *res;
 718         const char *reason;
 719 
 720         if (!ndd)
 721                 return 0;
 722 
 723         map_start = nd_mapping->start;
 724         map_end = map_start + nd_mapping->size - 1;
 725         blk_start = max(map_start, map_end + 1 - *overlap);
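              /*
               * Classify each allocation that touches this mapping: PMEM
               * allocations accumulate in "busy", while BLK allocations pull
               * blk_start down toward map_start and shrink the PMEM range.
               */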
 726         for_each_dpa_resource(ndd, res) {
 727                 if (res->start >= map_start && res->start < map_end) {
 728                         if (strncmp(res->name, "blk", 3) == 0)
 729                                 blk_start = min(blk_start,
 730                                                 max(map_start, res->start));
 731                         else if (res->end > map_end) {
 732                                 reason = "misaligned to iset";
 733                                 goto err;
 734                         } else
 735                                 busy += resource_size(res);
 736                 } else if (res->end >= map_start && res->end <= map_end) {
 737                         if (strncmp(res->name, "blk", 3) == 0) {
 738                                 /*
 739                                  * If a BLK allocation overlaps the start of
 740                                  * PMEM the entire interleave set may now only
 741                                  * be used for BLK.
 742                                  */
 743                                 blk_start = map_start;
 744                         } else
 745                                 busy += resource_size(res);
 746                 } else if (map_start > res->start && map_start < res->end) {
 747                         /* total eclipse of the mapping */
 748                         busy += nd_mapping->size;
 749                         blk_start = map_start;
 750                 }
 751         }
 752 
 753         *overlap = map_end + 1 - blk_start;
 754         available = blk_start - map_start;
 755         if (busy < available)
 756                 return available - busy;
 757         return 0;
 758 
 759  err:
 760         nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
 761         return 0;
 762 }
 763 
 764 void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
 765 {
 766         WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
 767         kfree(res->name);
 768         __release_region(&ndd->dpa, res->start, resource_size(res));
 769 }
 770 
 771 struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
 772                 struct nd_label_id *label_id, resource_size_t start,
 773                 resource_size_t n)
 774 {
 775         char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
 776         struct resource *res;
 777 
 778         if (!name)
 779                 return NULL;
 780 
 781         WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
 782         res = __request_region(&ndd->dpa, start, n, name, 0);
 783         if (!res)
 784                 kfree(name);
 785         return res;
 786 }
 787 
 788 /**
 789  * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
  790  * @ndd: container of dpa-resource-root + labels
 791  * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 792  */
 793 resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
 794                 struct nd_label_id *label_id)
 795 {
 796         resource_size_t allocated = 0;
 797         struct resource *res;
 798 
 799         for_each_dpa_resource(ndd, res)
 800                 if (strcmp(res->name, label_id->id) == 0)
 801                         allocated += resource_size(res);
 802 
 803         return allocated;
 804 }
 805 
 806 static int count_dimms(struct device *dev, void *c)
 807 {
 808         int *count = c;
 809 
 810         if (is_nvdimm(dev))
 811                 (*count)++;
 812         return 0;
 813 }
 814 
 815 int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
 816 {
 817         int count = 0;
 818         /* Flush any possible dimm registration failures */
 819         nd_synchronize();
 820 
 821         device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
 822         dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
 823         if (count != dimm_count)
 824                 return -ENXIO;
 825         return 0;
 826 }
 827 EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);
 828 
 829 void __exit nvdimm_devs_exit(void)
 830 {
 831         ida_destroy(&dimm_ida);
 832 }
