root/drivers/nvdimm/nd.h

DEFINITIONS

This source file includes the following definitions.
  1. ndrd_get_flush_wpq
  2. ndrd_set_flush_wpq
  3. to_namespace_index
  4. to_current_namespace_index
  5. to_next_namespace_index
  6. nd_inc_seq
  7. nvdimm_security_unlock
  8. nd_btt_probe
  9. is_nd_btt
  10. nd_btt_create
  11. nd_pfn_probe
  12. is_nd_pfn
  13. nd_pfn_create
  14. nd_pfn_validate
  15. nd_dax_probe
  16. is_nd_dax
  17. nd_dax_create
  18. nvdimm_setup_pfn
  19. devm_nsio_enable
  20. devm_nsio_disable
  21. nd_iostat_start
  22. nd_iostat_end
  23. is_bad_pmem

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#ifndef __ND_H__
#define __ND_H__
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/types.h>
#include <linux/nd.h>
#include "label.h"

enum {
        /*
         * Limits the maximum number of block apertures a dimm can
         * support and is an input to the geometry/on-disk-format of a
         * BTT instance
         */
        ND_MAX_LANES = 256,
        INT_LBASIZE_ALIGNMENT = 64,
        NVDIMM_IO_ATOMIC = 1,
};

struct nvdimm_drvdata {
        struct device *dev;
        int nslabel_size;
        struct nd_cmd_get_config_size nsarea;
        void *data;
        int ns_current, ns_next;
        struct resource dpa;
        struct kref kref;
};

struct nd_region_data {
        int ns_count;
        int ns_active;
        unsigned int hints_shift;
        void __iomem *flush_wpq[0];
};

static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd,
                int dimm, int hint)
{
        unsigned int num = 1 << ndrd->hints_shift;
        unsigned int mask = num - 1;

        return ndrd->flush_wpq[dimm * num + (hint & mask)];
}

static inline void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm,
                int hint, void __iomem *flush)
{
        unsigned int num = 1 << ndrd->hints_shift;
        unsigned int mask = num - 1;

        ndrd->flush_wpq[dimm * num + (hint & mask)] = flush;
}

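/*
 * Illustrative sketch (not kernel code): ndrd_get_flush_wpq() and
 * ndrd_set_flush_wpq() pack the per-dimm write-pending-queue flush
 * hints into one flat array, 1 << hints_shift slots per dimm, wrapping
 * out-of-range hints with a power-of-two mask. The indexing arithmetic,
 * extracted into a standalone userspace program:
 *
 *      #include <stdio.h>
 *
 *      int main(void)
 *      {
 *              unsigned int hints_shift = 2;   // 4 hint slots per dimm
 *              unsigned int num = 1u << hints_shift;
 *              unsigned int mask = num - 1;
 *              int dimm = 1;                   // dimm 1 owns slots 4..7
 *
 *              // hints 4 and 5 wrap to the same slots as hints 0 and 1
 *              for (int hint = 0; hint < 6; hint++)
 *                      printf("dimm %d hint %d -> slot %u\n",
 *                             dimm, hint, dimm * num + (hint & mask));
 *              return 0;
 *      }
 */
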
static inline struct nd_namespace_index *to_namespace_index(
                struct nvdimm_drvdata *ndd, int i)
{
        if (i < 0)
                return NULL;

        return ndd->data + sizeof_namespace_index(ndd) * i;
}

static inline struct nd_namespace_index *to_current_namespace_index(
                struct nvdimm_drvdata *ndd)
{
        return to_namespace_index(ndd, ndd->ns_current);
}

static inline struct nd_namespace_index *to_next_namespace_index(
                struct nvdimm_drvdata *ndd)
{
        return to_namespace_index(ndd, ndd->ns_next);
}

unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd);

#define namespace_label_has(ndd, field) \
        (offsetof(struct nd_namespace_label, field) \
                < sizeof_namespace_label(ndd))

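/*
 * Usage sketch: sizeof_namespace_label() reports the label size the
 * dimm actually stores, so namespace_label_has() gates access to fields
 * that only exist in newer label formats. Modeled on the label code
 * (type_guid is a v1.2 label field; nd_label and guid are assumed to be
 * in scope):
 *
 *      if (namespace_label_has(ndd, type_guid))
 *              guid_copy(&nd_label->type_guid, &guid);
 */
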
#define nd_dbg_dpa(r, d, res, fmt, arg...) \
        dev_dbg((r) ? &(r)->dev : (d)->dev, "%s: %.13s: %#llx @ %#llx " fmt, \
                (r) ? dev_name((d)->dev) : "", res ? res->name : "null", \
                (unsigned long long) (res ? resource_size(res) : 0), \
                (unsigned long long) (res ? res->start : 0), ##arg)

#define for_each_dpa_resource(ndd, res) \
        for (res = (ndd)->dpa.child; res; res = res->sibling)

#define for_each_dpa_resource_safe(ndd, res, next) \
        for (res = (ndd)->dpa.child, next = res ? res->sibling : NULL; \
                        res; res = next, next = next ? next->sibling : NULL)

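/*
 * Caller sketch (hypothetical): walking a dimm's device-physical-address
 * (DPA) resource tree, e.g. to total the space already assigned to a
 * given label_id, as the DPA accounting code does. Use the _safe variant
 * when resources may be released during the walk:
 *
 *      struct resource *res;
 *      resource_size_t allocated = 0;
 *
 *      for_each_dpa_resource(ndd, res)
 *              if (strcmp(res->name, label_id->id) == 0)
 *                      allocated += resource_size(res);
 */
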
struct nd_percpu_lane {
        int count;
        spinlock_t lock;
};

enum nd_label_flags {
        ND_LABEL_REAP,
};
struct nd_label_ent {
        struct list_head list;
        unsigned long flags;
        struct nd_namespace_label *label;
};

enum nd_mapping_lock_class {
        ND_MAPPING_CLASS0,
        ND_MAPPING_UUID_SCAN,
};

struct nd_mapping {
        struct nvdimm *nvdimm;
        u64 start;
        u64 size;
        int position;
        struct list_head labels;
        struct mutex lock;
        /*
         * @ndd is for private use at region enable / disable time via
         * get_ndd() + put_ndd(); all other nd_mapping to ndd
         * conversions use to_ndd(), which respects the enabled state of
         * the nvdimm.
         */
        struct nvdimm_drvdata *ndd;
};

struct nd_region {
        struct device dev;
        struct ida ns_ida;
        struct ida btt_ida;
        struct ida pfn_ida;
        struct ida dax_ida;
        unsigned long flags;
        struct device *ns_seed;
        struct device *btt_seed;
        struct device *pfn_seed;
        struct device *dax_seed;
        u16 ndr_mappings;
        u64 ndr_size;
        u64 ndr_start;
        int id, num_lanes, ro, numa_node, target_node;
        void *provider_data;
        struct kernfs_node *bb_state;
        struct badblocks bb;
        struct nd_interleave_set *nd_set;
        struct nd_percpu_lane __percpu *lane;
        int (*flush)(struct nd_region *nd_region, struct bio *bio);
        struct nd_mapping mapping[0];
};

struct nd_blk_region {
        int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
        int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
                        void *iobuf, u64 len, int rw);
        void *blk_provider_data;
        struct nd_region nd_region;
};

/*
 * Look up the next value in the repeating sequence of 01, 10, and 11.
 */
static inline unsigned nd_inc_seq(unsigned seq)
{
        static const unsigned next[] = { 0, 2, 3, 1 };

        return next[seq & 3];
}

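/*
 * nd_inc_seq() advances a two-bit sequence number through the cycle
 * 1 -> 2 -> 3 -> 1 (binary 01, 10, 11); 0 falls outside the cycle and
 * maps to itself. The label code uses these sequence numbers to decide
 * which of the two namespace index blocks (ns_current vs ns_next) is
 * newer. Userspace sketch, assuming nd_inc_seq() is available and
 * <stdio.h> is included:
 *
 *      unsigned seq = 1;
 *
 *      for (int i = 0; i < 4; i++) {
 *              printf("%u -> ", seq);
 *              seq = nd_inc_seq(seq);
 *      }
 *      printf("%u\n", seq);    // prints: 1 -> 2 -> 3 -> 1 -> 2
 */
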
struct btt;
struct nd_btt {
        struct device dev;
        struct nd_namespace_common *ndns;
        struct btt *btt;
        unsigned long lbasize;
        u64 size;
        u8 *uuid;
        int id;
        int initial_offset;
        u16 version_major;
        u16 version_minor;
};

enum nd_pfn_mode {
        PFN_MODE_NONE,
        PFN_MODE_RAM,
        PFN_MODE_PMEM,
};

struct nd_pfn {
        int id;
        u8 *uuid;
        struct device dev;
        unsigned long align;
        unsigned long npfns;
        enum nd_pfn_mode mode;
        struct nd_pfn_sb *pfn_sb;
        struct nd_namespace_common *ndns;
};

struct nd_dax {
        struct nd_pfn nd_pfn;
};

enum nd_async_mode {
        ND_SYNC,
        ND_ASYNC,
};

int nd_integrity_init(struct gendisk *disk, unsigned long meta_size);
void wait_nvdimm_bus_probe_idle(struct device *dev);
void nd_device_register(struct device *dev);
void nd_device_unregister(struct device *dev, enum nd_async_mode mode);
void nd_device_notify(struct device *dev, enum nvdimm_event event);
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
                size_t len);
ssize_t nd_size_select_show(unsigned long current_size,
                const unsigned long *supported, char *buf);
ssize_t nd_size_select_store(struct device *dev, const char *buf,
                unsigned long *current_size, const unsigned long *supported);
int __init nvdimm_init(void);
int __init nd_region_init(void);
int __init nd_label_init(void);
void nvdimm_exit(void);
void nd_region_exit(void);
struct nvdimm;
struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping);
int nvdimm_check_config_data(struct device *dev);
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd);
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd);
int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
                           size_t offset, size_t len);
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
                void *buf, size_t len);
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
                unsigned int len);
void nvdimm_set_aliasing(struct device *dev);
void nvdimm_set_locked(struct device *dev);
void nvdimm_clear_locked(struct device *dev);
int nvdimm_security_setup_events(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_KEYS)
int nvdimm_security_unlock(struct device *dev);
#else
static inline int nvdimm_security_unlock(struct device *dev)
{
        return 0;
}
#endif
struct nd_btt *to_nd_btt(struct device *dev);

struct nd_gen_sb {
        char reserved[SZ_4K - 8];
        __le64 checksum;
};

u64 nd_sb_checksum(struct nd_gen_sb *sb);
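
/*
 * nd_gen_sb overlays any of the 4K on-media superblocks so the trailing
 * 8-byte checksum can be handled generically; nd_sb_checksum() is
 * expected to exclude the checksum field itself from the sum. A
 * hypothetical verification helper built on it (sketch, not the
 * in-tree code):
 *
 *      static bool gen_sb_valid(struct nd_gen_sb *sb)
 *      {
 *              return nd_sb_checksum(sb) == le64_to_cpu(sb->checksum);
 *      }
 */
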
#if IS_ENABLED(CONFIG_BTT)
int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_btt(struct device *dev);
struct device *nd_btt_create(struct nd_region *nd_region);
#else
static inline int nd_btt_probe(struct device *dev,
                struct nd_namespace_common *ndns)
{
        return -ENODEV;
}

static inline bool is_nd_btt(struct device *dev)
{
        return false;
}

static inline struct device *nd_btt_create(struct nd_region *nd_region)
{
        return NULL;
}
#endif

struct nd_pfn *to_nd_pfn(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_PFN)

#define MAX_NVDIMM_ALIGN        4

int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_pfn(struct device *dev);
struct device *nd_pfn_create(struct nd_region *nd_region);
struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
                struct nd_namespace_common *ndns);
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig);
extern struct attribute_group nd_pfn_attribute_group;
#else
static inline int nd_pfn_probe(struct device *dev,
                struct nd_namespace_common *ndns)
{
        return -ENODEV;
}

static inline bool is_nd_pfn(struct device *dev)
{
        return false;
}

static inline struct device *nd_pfn_create(struct nd_region *nd_region)
{
        return NULL;
}

static inline int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
        return -ENODEV;
}
#endif

struct nd_dax *to_nd_dax(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_DAX)
int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_dax(struct device *dev);
struct device *nd_dax_create(struct nd_region *nd_region);
#else
static inline int nd_dax_probe(struct device *dev,
                struct nd_namespace_common *ndns)
{
        return -ENODEV;
}

static inline bool is_nd_dax(struct device *dev)
{
        return false;
}

static inline struct device *nd_dax_create(struct nd_region *nd_region)
{
        return NULL;
}
#endif

int nd_region_to_nstype(struct nd_region *nd_region);
int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
                struct nd_namespace_index *nsindex);
u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region);
void nvdimm_bus_lock(struct device *dev);
void nvdimm_bus_unlock(struct device *dev);
bool is_nvdimm_bus_locked(struct device *dev);
int nvdimm_revalidate_disk(struct gendisk *disk);
void nvdimm_drvdata_release(struct kref *kref);
void put_ndd(struct nvdimm_drvdata *ndd);
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd);
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res);
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id, resource_size_t start,
                resource_size_t n);
resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
bool nvdimm_namespace_locked(struct nd_namespace_common *ndns);
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev);
int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
                char *name);
unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
void nvdimm_badblocks_populate(struct nd_region *nd_region,
                struct badblocks *bb, const struct resource *res);
#if IS_ENABLED(CONFIG_ND_CLAIM)

/* max struct page size independent of kernel config */
#define MAX_STRUCT_PAGE_SIZE 64

int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
#else
static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
                                   struct dev_pagemap *pgmap)
{
        return -ENXIO;
}
static inline int devm_nsio_enable(struct device *dev,
                struct nd_namespace_io *nsio)
{
        return -ENXIO;
}
static inline void devm_nsio_disable(struct device *dev,
                struct nd_namespace_io *nsio)
{
}
#endif
int nd_blk_region_init(struct nd_region *nd_region);
int nd_region_activate(struct nd_region *nd_region);
void __nd_iostat_start(struct bio *bio, unsigned long *start);
static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
{
        struct gendisk *disk = bio->bi_disk;

        if (!blk_queue_io_stat(disk->queue))
                return false;

        *start = jiffies;
        generic_start_io_acct(disk->queue, bio_op(bio), bio_sectors(bio),
                              &disk->part0);
        return true;
}
static inline void nd_iostat_end(struct bio *bio, unsigned long start)
{
        struct gendisk *disk = bio->bi_disk;

        generic_end_io_acct(disk->queue, bio_op(bio), &disk->part0, start);
}
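
/*
 * Typical caller pattern for the pair above in a bio submission path
 * (sketch modeled on the pmem driver): account only when I/O stats are
 * enabled on the queue, and bracket the actual transfer:
 *
 *      unsigned long start;
 *      bool do_acct = nd_iostat_start(bio, &start);
 *
 *      // ... carry out the reads/writes described by the bio ...
 *
 *      if (do_acct)
 *              nd_iostat_end(bio, start);
 */
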
static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
                unsigned int len)
{
        if (bb->count) {
                sector_t first_bad;
                int num_bad;

                return !!badblocks_check(bb, sector, len / 512, &first_bad,
                                &num_bad);
        }

        return false;
}
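
/*
 * Caller sketch (hypothetical pmem context): @sector is in 512-byte
 * units while @len is in bytes, so callers convert byte offsets before
 * consulting the badblocks list, and fail the I/O rather than touch
 * known-poisoned media:
 *
 *      sector_t sector = pmem_off >> SECTOR_SHIFT;     // bytes -> sectors
 *
 *      if (is_bad_pmem(&pmem->bb, sector, len))
 *              return BLK_STS_IOERR;
 */
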
resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk);
const u8 *nd_dev_to_uuid(struct device *dev);
bool pmem_should_map_pages(struct device *dev);
#endif /* __ND_H__ */
