root/drivers/nvmem/core.c

DEFINITIONS

This source file includes the following definitions.
  1. nvmem_reg_read
  2. nvmem_reg_write
  3. nvmem_release
  4. of_nvmem_find
  5. nvmem_find
  6. nvmem_cell_drop
  7. nvmem_device_remove_all_cells
  8. nvmem_cell_add
  9. nvmem_cell_info_to_nvmem_cell
  10. nvmem_add_cells
  11. nvmem_register_notifier
  12. nvmem_unregister_notifier
  13. nvmem_add_cells_from_table
  14. nvmem_find_cell_by_name
  15. nvmem_add_cells_from_of
  16. nvmem_register
  17. nvmem_device_release
  18. nvmem_unregister
  19. devm_nvmem_release
  20. devm_nvmem_register
  21. devm_nvmem_match
  22. devm_nvmem_unregister
  23. __nvmem_device_get
  24. __nvmem_device_put
  25. of_nvmem_device_get
  26. nvmem_device_get
  27. devm_nvmem_device_match
  28. devm_nvmem_device_release
  29. devm_nvmem_device_put
  30. nvmem_device_put
  31. devm_nvmem_device_get
  32. nvmem_cell_get_from_lookup
  33. nvmem_find_cell_by_node
  34. of_nvmem_cell_get
  35. nvmem_cell_get
  36. devm_nvmem_cell_release
  37. devm_nvmem_cell_get
  38. devm_nvmem_cell_match
  39. devm_nvmem_cell_put
  40. nvmem_cell_put
  41. nvmem_shift_read_buffer_in_place
  42. __nvmem_cell_read
  43. nvmem_cell_read
  44. nvmem_cell_prepare_write_buffer
  45. nvmem_cell_write
  46. nvmem_cell_read_u16
  47. nvmem_cell_read_u32
  48. nvmem_device_cell_read
  49. nvmem_device_cell_write
  50. nvmem_device_read
  51. nvmem_device_write
  52. nvmem_add_cell_table
  53. nvmem_del_cell_table
  54. nvmem_add_cell_lookups
  55. nvmem_del_cell_lookups
  56. nvmem_dev_name
  57. nvmem_init
  58. nvmem_exit

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * nvmem framework core.
   4  *
   5  * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
   6  * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
   7  */
   8 
   9 #include <linux/device.h>
  10 #include <linux/export.h>
  11 #include <linux/fs.h>
  12 #include <linux/idr.h>
  13 #include <linux/init.h>
  14 #include <linux/kref.h>
  15 #include <linux/module.h>
  16 #include <linux/nvmem-consumer.h>
  17 #include <linux/nvmem-provider.h>
  18 #include <linux/of.h>
  19 #include <linux/slab.h>
  20 #include "nvmem.h"
  21 
  22 struct nvmem_cell {
  23         const char              *name;
  24         int                     offset;
  25         int                     bytes;
  26         int                     bit_offset;
  27         int                     nbits;
  28         struct device_node      *np;
  29         struct nvmem_device     *nvmem;
  30         struct list_head        node;
  31 };
  32 
  33 static DEFINE_MUTEX(nvmem_mutex);
  34 static DEFINE_IDA(nvmem_ida);
  35 
  36 static DEFINE_MUTEX(nvmem_cell_mutex);
  37 static LIST_HEAD(nvmem_cell_tables);
  38 
  39 static DEFINE_MUTEX(nvmem_lookup_mutex);
  40 static LIST_HEAD(nvmem_lookup_list);
  41 
  42 static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
  43 
  44 
  45 static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
  46                           void *val, size_t bytes)
  47 {
  48         if (nvmem->reg_read)
  49                 return nvmem->reg_read(nvmem->priv, offset, val, bytes);
  50 
  51         return -EINVAL;
  52 }
  53 
  54 static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
  55                            void *val, size_t bytes)
  56 {
  57         if (nvmem->reg_write)
  58                 return nvmem->reg_write(nvmem->priv, offset, val, bytes);
  59 
  60         return -EINVAL;
  61 }
  62 
  63 static void nvmem_release(struct device *dev)
  64 {
  65         struct nvmem_device *nvmem = to_nvmem_device(dev);
  66 
  67         ida_simple_remove(&nvmem_ida, nvmem->id);
  68         kfree(nvmem);
  69 }
  70 
  71 static const struct device_type nvmem_provider_type = {
  72         .release        = nvmem_release,
  73 };
  74 
  75 static struct bus_type nvmem_bus_type = {
  76         .name           = "nvmem",
  77 };
  78 
  79 static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
  80 {
  81         struct device *d;
  82 
  83         if (!nvmem_np)
  84                 return NULL;
  85 
  86         d = bus_find_device_by_of_node(&nvmem_bus_type, nvmem_np);
  87 
  88         if (!d)
  89                 return NULL;
  90 
  91         return to_nvmem_device(d);
  92 }
  93 
  94 static struct nvmem_device *nvmem_find(const char *name)
  95 {
  96         struct device *d;
  97 
  98         d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);
  99 
 100         if (!d)
 101                 return NULL;
 102 
 103         return to_nvmem_device(d);
 104 }
 105 
 106 static void nvmem_cell_drop(struct nvmem_cell *cell)
 107 {
 108         blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
 109         mutex_lock(&nvmem_mutex);
 110         list_del(&cell->node);
 111         mutex_unlock(&nvmem_mutex);
 112         of_node_put(cell->np);
 113         kfree_const(cell->name);
 114         kfree(cell);
 115 }
 116 
 117 static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
 118 {
 119         struct nvmem_cell *cell, *p;
 120 
 121         list_for_each_entry_safe(cell, p, &nvmem->cells, node)
 122                 nvmem_cell_drop(cell);
 123 }
 124 
 125 static void nvmem_cell_add(struct nvmem_cell *cell)
 126 {
 127         mutex_lock(&nvmem_mutex);
 128         list_add_tail(&cell->node, &cell->nvmem->cells);
 129         mutex_unlock(&nvmem_mutex);
 130         blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
 131 }
 132 
 133 static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
 134                                    const struct nvmem_cell_info *info,
 135                                    struct nvmem_cell *cell)
 136 {
 137         cell->nvmem = nvmem;
 138         cell->offset = info->offset;
 139         cell->bytes = info->bytes;
 140         cell->name = kstrdup_const(info->name, GFP_KERNEL);
 141         if (!cell->name)
 142                 return -ENOMEM;
 143 
 144         cell->bit_offset = info->bit_offset;
 145         cell->nbits = info->nbits;
 146 
 147         if (cell->nbits)
 148                 cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
 149                                            BITS_PER_BYTE);
 150 
 151         if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
 152                 dev_err(&nvmem->dev,
 153                         "cell %s unaligned to nvmem stride %d\n",
 154                         cell->name, nvmem->stride);
 155                 return -EINVAL;
 156         }
 157 
 158         return 0;
 159 }
 160 
 161 /**
 162  * nvmem_add_cells() - Add cell information to an nvmem device
 163  *
 164  * @nvmem: nvmem device to add cells to.
 165  * @info: nvmem cell info to add to the device
 166  * @ncells: number of cells in info
 167  *
 168  * Return: 0 on success or a negative error code on failure.
 169  */
 170 static int nvmem_add_cells(struct nvmem_device *nvmem,
 171                     const struct nvmem_cell_info *info,
 172                     int ncells)
 173 {
 174         struct nvmem_cell **cells;
 175         int i, rval;
 176 
 177         cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
 178         if (!cells)
 179                 return -ENOMEM;
 180 
 181         for (i = 0; i < ncells; i++) {
 182                 cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
 183                 if (!cells[i]) {
 184                         rval = -ENOMEM;
 185                         goto err;
 186                 }
 187 
 188                 rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
 189                 if (rval) {
 190                         kfree(cells[i]);
 191                         goto err;
 192                 }
 193 
 194                 nvmem_cell_add(cells[i]);
 195         }
 196 
 197         /* remove tmp array */
 198         kfree(cells);
 199 
 200         return 0;
 201 err:
 202         while (i--)
 203                 nvmem_cell_drop(cells[i]);
 204 
 205         kfree(cells);
 206 
 207         return rval;
 208 }
 209 
 210 /**
 211  * nvmem_register_notifier() - Register a notifier block for nvmem events.
 212  *
 213  * @nb: notifier block to be called on nvmem events.
 214  *
 215  * Return: 0 on success, negative error number on failure.
 216  */
 217 int nvmem_register_notifier(struct notifier_block *nb)
 218 {
 219         return blocking_notifier_chain_register(&nvmem_notifier, nb);
 220 }
 221 EXPORT_SYMBOL_GPL(nvmem_register_notifier);
 222 
 223 /**
 224  * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 225  *
 226  * @nb: notifier block to be unregistered.
 227  *
 228  * Return: 0 on success, negative error number on failure.
 229  */
 230 int nvmem_unregister_notifier(struct notifier_block *nb)
 231 {
 232         return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
 233 }
 234 EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
 235 
 236 static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
 237 {
 238         const struct nvmem_cell_info *info;
 239         struct nvmem_cell_table *table;
 240         struct nvmem_cell *cell;
 241         int rval = 0, i;
 242 
 243         mutex_lock(&nvmem_cell_mutex);
 244         list_for_each_entry(table, &nvmem_cell_tables, node) {
 245                 if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
 246                         for (i = 0; i < table->ncells; i++) {
 247                                 info = &table->cells[i];
 248 
 249                                 cell = kzalloc(sizeof(*cell), GFP_KERNEL);
 250                                 if (!cell) {
 251                                         rval = -ENOMEM;
 252                                         goto out;
 253                                 }
 254 
 255                                 rval = nvmem_cell_info_to_nvmem_cell(nvmem,
 256                                                                      info,
 257                                                                      cell);
 258                                 if (rval) {
 259                                         kfree(cell);
 260                                         goto out;
 261                                 }
 262 
 263                                 nvmem_cell_add(cell);
 264                         }
 265                 }
 266         }
 267 
 268 out:
 269         mutex_unlock(&nvmem_cell_mutex);
 270         return rval;
 271 }
 272 
 273 static struct nvmem_cell *
 274 nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
 275 {
 276         struct nvmem_cell *iter, *cell = NULL;
 277 
 278         mutex_lock(&nvmem_mutex);
 279         list_for_each_entry(iter, &nvmem->cells, node) {
 280                 if (strcmp(cell_id, iter->name) == 0) {
 281                         cell = iter;
 282                         break;
 283                 }
 284         }
 285         mutex_unlock(&nvmem_mutex);
 286 
 287         return cell;
 288 }
 289 
 290 static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
 291 {
 292         struct device_node *parent, *child;
 293         struct device *dev = &nvmem->dev;
 294         struct nvmem_cell *cell;
 295         const __be32 *addr;
 296         int len;
 297 
 298         parent = dev->of_node;
 299 
 300         for_each_child_of_node(parent, child) {
 301                 addr = of_get_property(child, "reg", &len);
 302                 if (!addr || (len < 2 * sizeof(u32))) {
 303                         dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
 304                         return -EINVAL;
 305                 }
 306 
 307                 cell = kzalloc(sizeof(*cell), GFP_KERNEL);
 308                 if (!cell)
 309                         return -ENOMEM;
 310 
 311                 cell->nvmem = nvmem;
 312                 cell->np = of_node_get(child);
 313                 cell->offset = be32_to_cpup(addr++);
 314                 cell->bytes = be32_to_cpup(addr);
 315                 cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);
 316 
 317                 addr = of_get_property(child, "bits", &len);
 318                 if (addr && len == (2 * sizeof(u32))) {
 319                         cell->bit_offset = be32_to_cpup(addr++);
 320                         cell->nbits = be32_to_cpup(addr);
 321                 }
 322 
 323                 if (cell->nbits)
 324                         cell->bytes = DIV_ROUND_UP(
 325                                         cell->nbits + cell->bit_offset,
 326                                         BITS_PER_BYTE);
 327 
 328                 if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
 329                         dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
 330                                 cell->name, nvmem->stride);
 331                         /* Cells already added will be freed later. */
 332                         kfree_const(cell->name);
 333                         kfree(cell);
 334                         return -EINVAL;
 335                 }
 336 
 337                 nvmem_cell_add(cell);
 338         }
 339 
 340         return 0;
 341 }
 342 
 343 /**
 344  * nvmem_register() - Register an nvmem device for a given nvmem_config.
 345  * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 346  *
 347  * @config: nvmem device configuration with which nvmem device is created.
 348  *
 349  * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 350  * on success.
 351  */
 352 
 353 struct nvmem_device *nvmem_register(const struct nvmem_config *config)
 354 {
 355         struct nvmem_device *nvmem;
 356         int rval;
 357 
 358         if (!config->dev)
 359                 return ERR_PTR(-EINVAL);
 360 
 361         nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
 362         if (!nvmem)
 363                 return ERR_PTR(-ENOMEM);
 364 
 365         rval  = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
 366         if (rval < 0) {
 367                 kfree(nvmem);
 368                 return ERR_PTR(rval);
 369         }
 370 
 371         kref_init(&nvmem->refcnt);
 372         INIT_LIST_HEAD(&nvmem->cells);
 373 
 374         nvmem->id = rval;
 375         nvmem->owner = config->owner;
 376         if (!nvmem->owner && config->dev->driver)
 377                 nvmem->owner = config->dev->driver->owner;
 378         nvmem->stride = config->stride ?: 1;
 379         nvmem->word_size = config->word_size ?: 1;
 380         nvmem->size = config->size;
 381         nvmem->dev.type = &nvmem_provider_type;
 382         nvmem->dev.bus = &nvmem_bus_type;
 383         nvmem->dev.parent = config->dev;
 384         nvmem->priv = config->priv;
 385         nvmem->type = config->type;
 386         nvmem->reg_read = config->reg_read;
 387         nvmem->reg_write = config->reg_write;
 388         if (!config->no_of_node)
 389                 nvmem->dev.of_node = config->dev->of_node;
 390 
 391         if (config->id == -1 && config->name) {
 392                 dev_set_name(&nvmem->dev, "%s", config->name);
 393         } else {
 394                 dev_set_name(&nvmem->dev, "%s%d",
 395                              config->name ? : "nvmem",
 396                              config->name ? config->id : nvmem->id);
 397         }
 398 
 399         nvmem->read_only = device_property_present(config->dev, "read-only") ||
 400                            config->read_only || !nvmem->reg_write;
 401 
 402         nvmem->dev.groups = nvmem_sysfs_get_groups(nvmem, config);
 403 
 404         device_initialize(&nvmem->dev);
 405 
 406         dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
 407 
 408         rval = device_add(&nvmem->dev);
 409         if (rval)
 410                 goto err_put_device;
 411 
 412         if (config->compat) {
 413                 rval = nvmem_sysfs_setup_compat(nvmem, config);
 414                 if (rval)
 415                         goto err_device_del;
 416         }
 417 
 418         if (config->cells) {
 419                 rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
 420                 if (rval)
 421                         goto err_teardown_compat;
 422         }
 423 
 424         rval = nvmem_add_cells_from_table(nvmem);
 425         if (rval)
 426                 goto err_remove_cells;
 427 
 428         rval = nvmem_add_cells_from_of(nvmem);
 429         if (rval)
 430                 goto err_remove_cells;
 431 
 432         blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
 433 
 434         return nvmem;
 435 
 436 err_remove_cells:
 437         nvmem_device_remove_all_cells(nvmem);
 438 err_teardown_compat:
 439         if (config->compat)
 440                 nvmem_sysfs_remove_compat(nvmem, config);
 441 err_device_del:
 442         device_del(&nvmem->dev);
 443 err_put_device:
 444         put_device(&nvmem->dev);
 445 
 446         return ERR_PTR(rval);
 447 }
 448 EXPORT_SYMBOL_GPL(nvmem_register);
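
     /*
      * Example (illustrative sketch, not part of this file): a minimal
      * provider might register itself from its probe() roughly as below.
      * Names such as foo_reg_read(), foo_read_block() and struct foo are
      * hypothetical; the nvmem_config fields shown are the ones consumed
      * by nvmem_register() above.
      *
      *	static int foo_reg_read(void *priv, unsigned int offset,
      *				void *val, size_t bytes)
      *	{
      *		struct foo *foo = priv;
      *
      *		return foo_read_block(foo, offset, val, bytes);
      *	}
      *
      *	struct nvmem_config config = {
      *		.dev		= dev,
      *		.name		= "foo-nvmem",
      *		.id		= -1,
      *		.read_only	= true,
      *		.size		= foo->size,
      *		.word_size	= 1,
      *		.stride		= 1,
      *		.priv		= foo,
      *		.reg_read	= foo_reg_read,
      *	};
      *
      *	nvmem = nvmem_register(&config);
      *	if (IS_ERR(nvmem))
      *		return PTR_ERR(nvmem);
      */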
 449 
 450 static void nvmem_device_release(struct kref *kref)
 451 {
 452         struct nvmem_device *nvmem;
 453 
 454         nvmem = container_of(kref, struct nvmem_device, refcnt);
 455 
 456         blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);
 457 
 458         if (nvmem->flags & FLAG_COMPAT)
 459                 device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
 460 
 461         nvmem_device_remove_all_cells(nvmem);
 462         device_del(&nvmem->dev);
 463         put_device(&nvmem->dev);
 464 }
 465 
 466 /**
 467  * nvmem_unregister() - Unregister previously registered nvmem device
 468  *
 469  * @nvmem: Pointer to previously registered nvmem device.
 470  */
 471 void nvmem_unregister(struct nvmem_device *nvmem)
 472 {
 473         kref_put(&nvmem->refcnt, nvmem_device_release);
 474 }
 475 EXPORT_SYMBOL_GPL(nvmem_unregister);
 476 
 477 static void devm_nvmem_release(struct device *dev, void *res)
 478 {
 479         nvmem_unregister(*(struct nvmem_device **)res);
 480 }
 481 
 482 /**
 483  * devm_nvmem_register() - Register a managed nvmem device for a given
 484  * nvmem_config.
 485  * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 486  *
 487  * @dev: Device that uses the nvmem device.
 488  * @config: nvmem device configuration with which nvmem device is created.
 489  *
 490  * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 491  * on success.
 492  */
 493 struct nvmem_device *devm_nvmem_register(struct device *dev,
 494                                          const struct nvmem_config *config)
 495 {
 496         struct nvmem_device **ptr, *nvmem;
 497 
 498         ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
 499         if (!ptr)
 500                 return ERR_PTR(-ENOMEM);
 501 
 502         nvmem = nvmem_register(config);
 503 
 504         if (!IS_ERR(nvmem)) {
 505                 *ptr = nvmem;
 506                 devres_add(dev, ptr);
 507         } else {
 508                 devres_free(ptr);
 509         }
 510 
 511         return nvmem;
 512 }
 513 EXPORT_SYMBOL_GPL(devm_nvmem_register);
 514 
 515 static int devm_nvmem_match(struct device *dev, void *res, void *data)
 516 {
 517         struct nvmem_device **r = res;
 518 
 519         return *r == data;
 520 }
 521 
 522 /**
 523  * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 524  * device.
 525  *
 526  * @dev: Device that uses the nvmem device.
 527  * @nvmem: Pointer to previously registered nvmem device.
 528  *
 529  * Return: 0 on success or a negative error code on failure.
 530  */
 531 int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
 532 {
 533         return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
 534 }
 535 EXPORT_SYMBOL(devm_nvmem_unregister);
 536 
 537 static struct nvmem_device *__nvmem_device_get(struct device_node *np,
 538                                                const char *nvmem_name)
 539 {
 540         struct nvmem_device *nvmem = NULL;
 541 
 542         mutex_lock(&nvmem_mutex);
 543         nvmem = np ? of_nvmem_find(np) : nvmem_find(nvmem_name);
 544         mutex_unlock(&nvmem_mutex);
 545         if (!nvmem)
 546                 return ERR_PTR(-EPROBE_DEFER);
 547 
 548         if (!try_module_get(nvmem->owner)) {
 549                 dev_err(&nvmem->dev,
 550                         "could not increase module refcount for cell %s\n",
 551                         nvmem_dev_name(nvmem));
 552 
 553                 put_device(&nvmem->dev);
 554                 return ERR_PTR(-EINVAL);
 555         }
 556 
 557         kref_get(&nvmem->refcnt);
 558 
 559         return nvmem;
 560 }
 561 
 562 static void __nvmem_device_put(struct nvmem_device *nvmem)
 563 {
 564         put_device(&nvmem->dev);
 565         module_put(nvmem->owner);
 566         kref_put(&nvmem->refcnt, nvmem_device_release);
 567 }
 568 
 569 #if IS_ENABLED(CONFIG_OF)
 570 /**
 571  * of_nvmem_device_get() - Get nvmem device from a given id
 572  *
 573  * @np: Device tree node that uses the nvmem device.
 574  * @id: nvmem name from nvmem-names property.
 575  *
 576  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 577  * on success.
 578  */
 579 struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
 580 {
 581 
 582         struct device_node *nvmem_np;
 583         int index = 0;
 584 
 585         if (id)
 586                 index = of_property_match_string(np, "nvmem-names", id);
 587 
 588         nvmem_np = of_parse_phandle(np, "nvmem", index);
 589         if (!nvmem_np)
 590                 return ERR_PTR(-ENOENT);
 591 
 592         return __nvmem_device_get(nvmem_np, NULL);
 593 }
 594 EXPORT_SYMBOL_GPL(of_nvmem_device_get);
 595 #endif
 596 
 597 /**
 598  * nvmem_device_get() - Get nvmem device from a given id
 599  *
 600  * @dev: Device that uses the nvmem device.
 601  * @dev_name: name of the requested nvmem device.
 602  *
 603  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 604  * on success.
 605  */
 606 struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
 607 {
 608         if (dev->of_node) { /* try dt first */
 609                 struct nvmem_device *nvmem;
 610 
 611                 nvmem = of_nvmem_device_get(dev->of_node, dev_name);
 612 
 613                 if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
 614                         return nvmem;
 615 
 616         }
 617 
 618         return __nvmem_device_get(NULL, dev_name);
 619 }
 620 EXPORT_SYMBOL_GPL(nvmem_device_get);
 621 
 622 static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
 623 {
 624         struct nvmem_device **nvmem = res;
 625 
 626         if (WARN_ON(!nvmem || !*nvmem))
 627                 return 0;
 628 
 629         return *nvmem == data;
 630 }
 631 
 632 static void devm_nvmem_device_release(struct device *dev, void *res)
 633 {
 634         nvmem_device_put(*(struct nvmem_device **)res);
 635 }
 636 
 637 /**
 638  * devm_nvmem_device_put() - put an already-obtained nvmem device
 639  *
 640  * @dev: Device that uses the nvmem device.
 641  * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 642  * that needs to be released.
 643  */
 644 void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
 645 {
 646         int ret;
 647 
 648         ret = devres_release(dev, devm_nvmem_device_release,
 649                              devm_nvmem_device_match, nvmem);
 650 
 651         WARN_ON(ret);
 652 }
 653 EXPORT_SYMBOL_GPL(devm_nvmem_device_put);
 654 
 655 /**
 656  * nvmem_device_put() - put an already-obtained nvmem device
 657  *
 658  * @nvmem: pointer to nvmem device that needs to be released.
 659  */
 660 void nvmem_device_put(struct nvmem_device *nvmem)
 661 {
 662         __nvmem_device_put(nvmem);
 663 }
 664 EXPORT_SYMBOL_GPL(nvmem_device_put);
 665 
 666 /**
 667  * devm_nvmem_device_get() - Get nvmem device from a given id
 668  *
 669  * @dev: Device that requests the nvmem device.
 670  * @id: name id for the requested nvmem device.
 671  *
 672  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 673  * on success.  The nvmem_device will be released automatically once the
 674  * device is freed.
 675  */
 676 struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
 677 {
 678         struct nvmem_device **ptr, *nvmem;
 679 
 680         ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
 681         if (!ptr)
 682                 return ERR_PTR(-ENOMEM);
 683 
 684         nvmem = nvmem_device_get(dev, id);
 685         if (!IS_ERR(nvmem)) {
 686                 *ptr = nvmem;
 687                 devres_add(dev, ptr);
 688         } else {
 689                 devres_free(ptr);
 690         }
 691 
 692         return nvmem;
 693 }
 694 EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
 695 
 696 static struct nvmem_cell *
 697 nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
 698 {
 699         struct nvmem_cell *cell = ERR_PTR(-ENOENT);
 700         struct nvmem_cell_lookup *lookup;
 701         struct nvmem_device *nvmem;
 702         const char *dev_id;
 703 
 704         if (!dev)
 705                 return ERR_PTR(-EINVAL);
 706 
 707         dev_id = dev_name(dev);
 708 
 709         mutex_lock(&nvmem_lookup_mutex);
 710 
 711         list_for_each_entry(lookup, &nvmem_lookup_list, node) {
 712                 if ((strcmp(lookup->dev_id, dev_id) == 0) &&
 713                     (strcmp(lookup->con_id, con_id) == 0)) {
 714                         /* This is the right entry. */
 715                         nvmem = __nvmem_device_get(NULL, lookup->nvmem_name);
 716                         if (IS_ERR(nvmem)) {
 717                                 /* Provider may not be registered yet. */
 718                                 cell = ERR_CAST(nvmem);
 719                                 break;
 720                         }
 721 
 722                         cell = nvmem_find_cell_by_name(nvmem,
 723                                                        lookup->cell_name);
 724                         if (!cell) {
 725                                 __nvmem_device_put(nvmem);
 726                                 cell = ERR_PTR(-ENOENT);
 727                         }
 728                         break;
 729                 }
 730         }
 731 
 732         mutex_unlock(&nvmem_lookup_mutex);
 733         return cell;
 734 }
 735 
 736 #if IS_ENABLED(CONFIG_OF)
 737 static struct nvmem_cell *
 738 nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
 739 {
 740         struct nvmem_cell *iter, *cell = NULL;
 741 
 742         mutex_lock(&nvmem_mutex);
 743         list_for_each_entry(iter, &nvmem->cells, node) {
 744                 if (np == iter->np) {
 745                         cell = iter;
 746                         break;
 747                 }
 748         }
 749         mutex_unlock(&nvmem_mutex);
 750 
 751         return cell;
 752 }
 753 
 754 /**
 755  * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 756  *
 757  * @np: Device tree node that uses the nvmem cell.
 758  * @id: nvmem cell name from nvmem-cell-names property, or NULL
 759  *      for the cell at index 0 (the lone cell with no accompanying
 760  *      nvmem-cell-names property).
 761  *
 762  * Return: Will be an ERR_PTR() on error or a valid pointer
 763  * to a struct nvmem_cell.  The nvmem_cell should be released with
 764  * nvmem_cell_put().
 765  */
 766 struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
 767 {
 768         struct device_node *cell_np, *nvmem_np;
 769         struct nvmem_device *nvmem;
 770         struct nvmem_cell *cell;
 771         int index = 0;
 772 
 773         /* if cell name exists, find index to the name */
 774         if (id)
 775                 index = of_property_match_string(np, "nvmem-cell-names", id);
 776 
 777         cell_np = of_parse_phandle(np, "nvmem-cells", index);
 778         if (!cell_np)
 779                 return ERR_PTR(-ENOENT);
 780 
 781         nvmem_np = of_get_next_parent(cell_np);
 782         if (!nvmem_np)
 783                 return ERR_PTR(-EINVAL);
 784 
 785         nvmem = __nvmem_device_get(nvmem_np, NULL);
 786         of_node_put(nvmem_np);
 787         if (IS_ERR(nvmem))
 788                 return ERR_CAST(nvmem);
 789 
 790         cell = nvmem_find_cell_by_node(nvmem, cell_np);
 791         if (!cell) {
 792                 __nvmem_device_put(nvmem);
 793                 return ERR_PTR(-ENOENT);
 794         }
 795 
 796         return cell;
 797 }
 798 EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
 799 #endif
 800 
 801 /**
 802  * nvmem_cell_get() - Get the nvmem cell of a device from a given cell name
 803  *
 804  * @dev: Device that requests the nvmem cell.
 805  * @id: nvmem cell name to get (this corresponds with the name from the
 806  *      nvmem-cell-names property for DT systems and with the con_id from
 807  *      the lookup entry for non-DT systems).
 808  *
 809  * Return: Will be an ERR_PTR() on error or a valid pointer
 810  * to a struct nvmem_cell.  The nvmem_cell should be released with
 811  * nvmem_cell_put().
 812  */
 813 struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
 814 {
 815         struct nvmem_cell *cell;
 816 
 817         if (dev->of_node) { /* try dt first */
 818                 cell = of_nvmem_cell_get(dev->of_node, id);
 819                 if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
 820                         return cell;
 821         }
 822 
 823         /* NULL cell id only allowed for device tree; invalid otherwise */
 824         if (!id)
 825                 return ERR_PTR(-EINVAL);
 826 
 827         return nvmem_cell_get_from_lookup(dev, id);
 828 }
 829 EXPORT_SYMBOL_GPL(nvmem_cell_get);
 830 
 831 static void devm_nvmem_cell_release(struct device *dev, void *res)
 832 {
 833         nvmem_cell_put(*(struct nvmem_cell **)res);
 834 }
 835 
 836 /**
 837  * devm_nvmem_cell_get() - Get the nvmem cell of a device from a given id
 838  *
 839  * @dev: Device that requests the nvmem cell.
 840  * @id: nvmem cell name id to get.
 841  *
 842  * Return: Will be an ERR_PTR() on error or a valid pointer
 843  * to a struct nvmem_cell.  The nvmem_cell will be freed
 844  * automatically once the device is freed.
 845  */
 846 struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
 847 {
 848         struct nvmem_cell **ptr, *cell;
 849 
 850         ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
 851         if (!ptr)
 852                 return ERR_PTR(-ENOMEM);
 853 
 854         cell = nvmem_cell_get(dev, id);
 855         if (!IS_ERR(cell)) {
 856                 *ptr = cell;
 857                 devres_add(dev, ptr);
 858         } else {
 859                 devres_free(ptr);
 860         }
 861 
 862         return cell;
 863 }
 864 EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);
 865 
 866 static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
 867 {
 868         struct nvmem_cell **c = res;
 869 
 870         if (WARN_ON(!c || !*c))
 871                 return 0;
 872 
 873         return *c == data;
 874 }
 875 
 876 /**
 877  * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 878  * from devm_nvmem_cell_get.
 879  *
 880  * @dev: Device that requests the nvmem cell.
 881  * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 882  */
 883 void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
 884 {
 885         int ret;
 886 
 887         ret = devres_release(dev, devm_nvmem_cell_release,
 888                                 devm_nvmem_cell_match, cell);
 889 
 890         WARN_ON(ret);
 891 }
 892 EXPORT_SYMBOL(devm_nvmem_cell_put);
 893 
 894 /**
 895  * nvmem_cell_put() - Release previously allocated nvmem cell.
 896  *
 897  * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 898  */
 899 void nvmem_cell_put(struct nvmem_cell *cell)
 900 {
 901         struct nvmem_device *nvmem = cell->nvmem;
 902 
 903         __nvmem_device_put(nvmem);
 904 }
 905 EXPORT_SYMBOL_GPL(nvmem_cell_put);
 906 
 907 static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
 908 {
 909         u8 *p, *b;
 910         int i, extra, bit_offset = cell->bit_offset;
 911 
 912         p = b = buf;
 913         if (bit_offset) {
 914                 /* First shift */
 915                 *b++ >>= bit_offset;
 916 
 917                 /* setup rest of the bytes if any */
 918                 for (i = 1; i < cell->bytes; i++) {
 919                         /* Get bits from next byte and shift them towards msb */
 920                         *p |= *b << (BITS_PER_BYTE - bit_offset);
 921 
 922                         p = b;
 923                         *b++ >>= bit_offset;
 924                 }
 925         } else {
 926                 /* point to the msb */
 927                 p += cell->bytes - 1;
 928         }
 929 
 930         /* result fits in fewer bytes */
 931         extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
 932         while (--extra >= 0)
 933                 *p-- = 0;
 934 
 935         /* clear msb bits if any leftover in the last byte */
 936         *p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0);
 937 }
 938 
 939 static int __nvmem_cell_read(struct nvmem_device *nvmem,
 940                       struct nvmem_cell *cell,
 941                       void *buf, size_t *len)
 942 {
 943         int rc;
 944 
 945         rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);
 946 
 947         if (rc)
 948                 return rc;
 949 
 950         /* shift bits in-place */
 951         if (cell->bit_offset || cell->nbits)
 952                 nvmem_shift_read_buffer_in_place(cell, buf);
 953 
 954         if (len)
 955                 *len = cell->bytes;
 956 
 957         return 0;
 958 }
 959 
 960 /**
 961  * nvmem_cell_read() - Read a given nvmem cell
 962  *
 963  * @cell: nvmem cell to be read.
 964  * @len: pointer to length of cell which will be populated on successful read;
 965  *       can be NULL.
 966  *
 967  * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 968  * buffer should be freed by the consumer with a kfree().
 969  */
 970 void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
 971 {
 972         struct nvmem_device *nvmem = cell->nvmem;
 973         u8 *buf;
 974         int rc;
 975 
 976         if (!nvmem)
 977                 return ERR_PTR(-EINVAL);
 978 
 979         buf = kzalloc(cell->bytes, GFP_KERNEL);
 980         if (!buf)
 981                 return ERR_PTR(-ENOMEM);
 982 
 983         rc = __nvmem_cell_read(nvmem, cell, buf, len);
 984         if (rc) {
 985                 kfree(buf);
 986                 return ERR_PTR(rc);
 987         }
 988 
 989         return buf;
 990 }
 991 EXPORT_SYMBOL_GPL(nvmem_cell_read);
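
     /*
      * Example (illustrative sketch, not part of this file): a consumer
      * might read a cell and release it roughly as below. The cell name
      * "mac-address" is hypothetical; it corresponds to an entry in the
      * DT nvmem-cell-names property or to a lookup con_id.
      *
      *	struct nvmem_cell *cell;
      *	size_t len;
      *	u8 *mac;
      *
      *	cell = nvmem_cell_get(dev, "mac-address");
      *	if (IS_ERR(cell))
      *		return PTR_ERR(cell);
      *
      *	mac = nvmem_cell_read(cell, &len);
      *	nvmem_cell_put(cell);
      *	if (IS_ERR(mac))
      *		return PTR_ERR(mac);
      *
      *	... use the len bytes at mac, then kfree(mac) ...
      */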
 992 
 993 static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
 994                                              u8 *_buf, int len)
 995 {
 996         struct nvmem_device *nvmem = cell->nvmem;
 997         int i, rc, nbits, bit_offset = cell->bit_offset;
 998         u8 v, *p, *buf, *b, pbyte, pbits;
 999 
1000         nbits = cell->nbits;
1001         buf = kzalloc(cell->bytes, GFP_KERNEL);
1002         if (!buf)
1003                 return ERR_PTR(-ENOMEM);
1004 
1005         memcpy(buf, _buf, len);
1006         p = b = buf;
1007 
1008         if (bit_offset) {
1009                 pbyte = *b;
1010                 *b <<= bit_offset;
1011 
1012                 /* setup the first byte with lsb bits from nvmem */
1013                 rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
1014                 if (rc)
1015                         goto err;
1016                 *b++ |= GENMASK(bit_offset - 1, 0) & v;
1017 
1018                 /* setup rest of the bytes if any */
1019                 for (i = 1; i < cell->bytes; i++) {
1020                         /* Get last byte bits and shift them towards lsb */
1021                         pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
1022                         pbyte = *b;
1023                         p = b;
1024                         *b <<= bit_offset;
1025                         *b++ |= pbits;
1026                 }
1027         }
1028 
1029         /* if it's not end on byte boundary */
1030         if ((nbits + bit_offset) % BITS_PER_BYTE) {
1031                 /* setup the last byte with msb bits from nvmem */
1032                 rc = nvmem_reg_read(nvmem,
1033                                     cell->offset + cell->bytes - 1, &v, 1);
1034                 if (rc)
1035                         goto err;
1036                 *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
1037 
1038         }
1039 
1040         return buf;
1041 err:
1042         kfree(buf);
1043         return ERR_PTR(rc);
1044 }
1045 
1046 /**
1047  * nvmem_cell_write() - Write to a given nvmem cell
1048  *
1049  * @cell: nvmem cell to be written.
1050  * @buf: Buffer to be written.
1051  * @len: length of buffer to be written to nvmem cell.
1052  *
1053  * Return: length of bytes written or negative on failure.
1054  */
1055 int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
1056 {
1057         struct nvmem_device *nvmem = cell->nvmem;
1058         int rc;
1059 
1060         if (!nvmem || nvmem->read_only ||
1061             (cell->bit_offset == 0 && len != cell->bytes))
1062                 return -EINVAL;
1063 
1064         if (cell->bit_offset || cell->nbits) {
1065                 buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
1066                 if (IS_ERR(buf))
1067                         return PTR_ERR(buf);
1068         }
1069 
1070         rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);
1071 
1072         /* free the tmp buffer */
1073         if (cell->bit_offset || cell->nbits)
1074                 kfree(buf);
1075 
1076         if (rc)
1077                 return rc;
1078 
1079         return len;
1080 }
1081 EXPORT_SYMBOL_GPL(nvmem_cell_write);
1082 
1083 /**
1084  * nvmem_cell_read_u16() - Read a cell value as a u16
1085  *
1086  * @dev: Device that requests the nvmem cell.
1087  * @cell_id: Name of nvmem cell to read.
1088  * @val: pointer to output value.
1089  *
1090  * Return: 0 on success or negative errno.
1091  */
1092 int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
1093 {
1094         struct nvmem_cell *cell;
1095         void *buf;
1096         size_t len;
1097 
1098         cell = nvmem_cell_get(dev, cell_id);
1099         if (IS_ERR(cell))
1100                 return PTR_ERR(cell);
1101 
1102         buf = nvmem_cell_read(cell, &len);
1103         if (IS_ERR(buf)) {
1104                 nvmem_cell_put(cell);
1105                 return PTR_ERR(buf);
1106         }
1107         if (len != sizeof(*val)) {
1108                 kfree(buf);
1109                 nvmem_cell_put(cell);
1110                 return -EINVAL;
1111         }
1112         memcpy(val, buf, sizeof(*val));
1113         kfree(buf);
1114         nvmem_cell_put(cell);
1115 
1116         return 0;
1117 }
1118 EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
1119 
1120 /**
1121  * nvmem_cell_read_u32() - Read a cell value as a u32
1122  *
1123  * @dev: Device that requests the nvmem cell.
1124  * @cell_id: Name of nvmem cell to read.
1125  * @val: pointer to output value.
1126  *
1127  * Return: 0 on success or negative errno.
1128  */
1129 int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
1130 {
1131         struct nvmem_cell *cell;
1132         void *buf;
1133         size_t len;
1134 
1135         cell = nvmem_cell_get(dev, cell_id);
1136         if (IS_ERR(cell))
1137                 return PTR_ERR(cell);
1138 
1139         buf = nvmem_cell_read(cell, &len);
1140         if (IS_ERR(buf)) {
1141                 nvmem_cell_put(cell);
1142                 return PTR_ERR(buf);
1143         }
1144         if (len != sizeof(*val)) {
1145                 kfree(buf);
1146                 nvmem_cell_put(cell);
1147                 return -EINVAL;
1148         }
1149         memcpy(val, buf, sizeof(*val));
1150 
1151         kfree(buf);
1152         nvmem_cell_put(cell);
1153         return 0;
1154 }
1155 EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
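
     /*
      * Example (illustrative sketch, not part of this file): reading a
      * 32-bit value with the helper above; the cell name "calibration"
      * is hypothetical.
      *
      *	u32 calib;
      *	int ret;
      *
      *	ret = nvmem_cell_read_u32(dev, "calibration", &calib);
      *	if (ret)
      *		return ret;
      */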
1156 
1157 /**
1158  * nvmem_device_cell_read() - Read a given nvmem device and cell
1159  *
1160  * @nvmem: nvmem device to read from.
1161  * @info: nvmem cell info to be read.
1162  * @buf: buffer pointer which will be populated on successful read.
1163  *
1164  * Return: number of bytes read on success or a negative error
1165  * code on failure.
1166  */
1167 ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
1168                            struct nvmem_cell_info *info, void *buf)
1169 {
1170         struct nvmem_cell cell;
1171         int rc;
1172         ssize_t len;
1173 
1174         if (!nvmem)
1175                 return -EINVAL;
1176 
1177         rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
1178         if (rc)
1179                 return rc;
1180 
1181         rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
1182         if (rc)
1183                 return rc;
1184 
1185         return len;
1186 }
1187 EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
1188 
1189 /**
1190  * nvmem_device_cell_write() - Write cell to a given nvmem device
1191  *
1192  * @nvmem: nvmem device to be written to.
1193  * @info: nvmem cell info to be written.
1194  * @buf: buffer to be written to cell.
1195  *
1196  * Return: length of bytes written or negative error code on failure.
1197  */
1198 int nvmem_device_cell_write(struct nvmem_device *nvmem,
1199                             struct nvmem_cell_info *info, void *buf)
1200 {
1201         struct nvmem_cell cell;
1202         int rc;
1203 
1204         if (!nvmem)
1205                 return -EINVAL;
1206 
1207         rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
1208         if (rc)
1209                 return rc;
1210 
1211         return nvmem_cell_write(&cell, buf, cell.bytes);
1212 }
1213 EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
1214 
1215 /**
1216  * nvmem_device_read() - Read from a given nvmem device
1217  *
1218  * @nvmem: nvmem device to read from.
1219  * @offset: offset in nvmem device.
1220  * @bytes: number of bytes to read.
1221  * @buf: buffer pointer which will be populated on successful read.
1222  *
1223  * Return: number of bytes read on success or a negative error
1224  * code on failure.
1225  */
1226 int nvmem_device_read(struct nvmem_device *nvmem,
1227                       unsigned int offset,
1228                       size_t bytes, void *buf)
1229 {
1230         int rc;
1231 
1232         if (!nvmem)
1233                 return -EINVAL;
1234 
1235         rc = nvmem_reg_read(nvmem, offset, buf, bytes);
1236 
1237         if (rc)
1238                 return rc;
1239 
1240         return bytes;
1241 }
1242 EXPORT_SYMBOL_GPL(nvmem_device_read);
1243 
1244 /**
1245  * nvmem_device_write() - Write to a given nvmem device
1246  *
1247  * @nvmem: nvmem device to be written to.
1248  * @offset: offset in nvmem device.
1249  * @bytes: number of bytes to write.
1250  * @buf: buffer to be written.
1251  *
1252  * Return: length of bytes written or negative error code on failure.
1253  */
1254 int nvmem_device_write(struct nvmem_device *nvmem,
1255                        unsigned int offset,
1256                        size_t bytes, void *buf)
1257 {
1258         int rc;
1259 
1260         if (!nvmem)
1261                 return -EINVAL;
1262 
1263         rc = nvmem_reg_write(nvmem, offset, buf, bytes);
1264 
1265         if (rc)
1266                 return rc;
1267 
1268 
1269         return bytes;
1270 }
1271 EXPORT_SYMBOL_GPL(nvmem_device_write);
1272 
1273 /**
1274  * nvmem_add_cell_table() - register a table of cell info entries
1275  *
1276  * @table: table of cell info entries
1277  */
1278 void nvmem_add_cell_table(struct nvmem_cell_table *table)
1279 {
1280         mutex_lock(&nvmem_cell_mutex);
1281         list_add_tail(&table->node, &nvmem_cell_tables);
1282         mutex_unlock(&nvmem_cell_mutex);
1283 }
1284 EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
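
     /*
      * Example (illustrative sketch, not part of this file): board code
      * can describe cells for a provider that has no device tree node by
      * registering a cell table. All names and offsets are hypothetical.
      *
      *	static struct nvmem_cell_info foo_cells[] = {
      *		{
      *			.name	= "mac-address",
      *			.offset	= 0x10,
      *			.bytes	= 6,
      *		},
      *	};
      *
      *	static struct nvmem_cell_table foo_cell_table = {
      *		.nvmem_name	= "foo-nvmem0",
      *		.cells		= foo_cells,
      *		.ncells		= ARRAY_SIZE(foo_cells),
      *	};
      *
      *	nvmem_add_cell_table(&foo_cell_table);
      */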
1285 
1286 /**
1287  * nvmem_del_cell_table() - remove a previously registered cell info table
1288  *
1289  * @table: table of cell info entries
1290  */
1291 void nvmem_del_cell_table(struct nvmem_cell_table *table)
1292 {
1293         mutex_lock(&nvmem_cell_mutex);
1294         list_del(&table->node);
1295         mutex_unlock(&nvmem_cell_mutex);
1296 }
1297 EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
1298 
1299 /**
1300  * nvmem_add_cell_lookups() - register a list of cell lookup entries
1301  *
1302  * @entries: array of cell lookup entries
1303  * @nentries: number of cell lookup entries in the array
1304  */
1305 void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
1306 {
1307         int i;
1308 
1309         mutex_lock(&nvmem_lookup_mutex);
1310         for (i = 0; i < nentries; i++)
1311                 list_add_tail(&entries[i].node, &nvmem_lookup_list);
1312         mutex_unlock(&nvmem_lookup_mutex);
1313 }
1314 EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
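
     /*
      * Example (illustrative sketch, not part of this file): a lookup
      * entry connects a named cell on a provider to a consumer device
      * and con_id, so that nvmem_cell_get() works without device tree.
      * All names below are hypothetical.
      *
      *	static struct nvmem_cell_lookup foo_lookup = {
      *		.nvmem_name	= "foo-nvmem0",
      *		.cell_name	= "mac-address",
      *		.dev_id		= "foo-eth.0",
      *		.con_id		= "mac-address",
      *	};
      *
      *	nvmem_add_cell_lookups(&foo_lookup, 1);
      */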
1315 
1316 /**
1317  * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
1318  *                            entries
1319  *
1320  * @entries: array of cell lookup entries
1321  * @nentries: number of cell lookup entries in the array
1322  */
1323 void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
1324 {
1325         int i;
1326 
1327         mutex_lock(&nvmem_lookup_mutex);
1328         for (i = 0; i < nentries; i++)
1329                 list_del(&entries[i].node);
1330         mutex_unlock(&nvmem_lookup_mutex);
1331 }
1332 EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
1333 
1334 /**
1335  * nvmem_dev_name() - Get the name of a given nvmem device.
1336  *
1337  * @nvmem: nvmem device.
1338  *
1339  * Return: name of the nvmem device.
1340  */
1341 const char *nvmem_dev_name(struct nvmem_device *nvmem)
1342 {
1343         return dev_name(&nvmem->dev);
1344 }
1345 EXPORT_SYMBOL_GPL(nvmem_dev_name);
1346 
1347 static int __init nvmem_init(void)
1348 {
1349         return bus_register(&nvmem_bus_type);
1350 }
1351 
1352 static void __exit nvmem_exit(void)
1353 {
1354         bus_unregister(&nvmem_bus_type);
1355 }
1356 
1357 subsys_initcall(nvmem_init);
1358 module_exit(nvmem_exit);
1359 
1360 MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org");
1361 MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
1362 MODULE_DESCRIPTION("nvmem Driver Core");
1363 MODULE_LICENSE("GPL v2");
