root/drivers/mtd/mtd_blkdevs.c

DEFINITIONS

This source file includes the following definitions:
  1. blktrans_dev_release
  2. blktrans_dev_get
  3. blktrans_dev_put
  4. do_blktrans_request
  5. mtd_blktrans_cease_background
  6. mtd_next_request
  7. mtd_blktrans_work
  8. mtd_queue_rq
  9. blktrans_open
  10. blktrans_release
  11. blktrans_getgeo
  12. blktrans_ioctl
  13. add_mtd_blktrans_dev
  14. del_mtd_blktrans_dev
  15. blktrans_notify_remove
  16. blktrans_notify_add
  17. register_mtd_blktrans
  18. deregister_mtd_blktrans
  19. mtd_blktrans_exit

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Interface to Linux block layer for MTD 'translation layers'.
 *
 * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);
static DEFINE_MUTEX(blktrans_ref_mutex);

static void blktrans_dev_release(struct kref *kref)
{
        struct mtd_blktrans_dev *dev =
                container_of(kref, struct mtd_blktrans_dev, ref);

        dev->disk->private_data = NULL;
        blk_cleanup_queue(dev->rq);
        blk_mq_free_tag_set(dev->tag_set);
        kfree(dev->tag_set);
        put_disk(dev->disk);
        list_del(&dev->list);
        kfree(dev);
}

static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
{
        struct mtd_blktrans_dev *dev;

        mutex_lock(&blktrans_ref_mutex);
        dev = disk->private_data;

        if (!dev)
                goto unlock;
        kref_get(&dev->ref);
unlock:
        mutex_unlock(&blktrans_ref_mutex);
        return dev;
}

static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
        mutex_lock(&blktrans_ref_mutex);
        kref_put(&dev->ref, blktrans_dev_release);
        mutex_unlock(&blktrans_ref_mutex);
}


static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
                               struct mtd_blktrans_dev *dev,
                               struct request *req)
{
        unsigned long block, nsect;
        char *buf;

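        /*
         * blk_rq_pos() counts 512-byte sectors. Shifting left by 9
         * turns that into a byte offset, and shifting right by
         * tr->blkshift turns bytes into translation-layer blocks:
         * e.g. with blksize 4096 (blkshift 12), sector 24 is byte
         * 12288, i.e. block 3.
         */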
        block = blk_rq_pos(req) << 9 >> tr->blkshift;
        nsect = blk_rq_cur_bytes(req) >> tr->blkshift;

        if (req_op(req) == REQ_OP_FLUSH) {
                if (tr->flush(dev))
                        return BLK_STS_IOERR;
                return BLK_STS_OK;
        }

        if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
            get_capacity(req->rq_disk))
                return BLK_STS_IOERR;

        switch (req_op(req)) {
        case REQ_OP_DISCARD:
                if (tr->discard(dev, block, nsect))
                        return BLK_STS_IOERR;
                return BLK_STS_OK;
        case REQ_OP_READ:
                buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
                for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
                        if (tr->readsect(dev, block, buf)) {
                                kunmap(bio_page(req->bio));
                                return BLK_STS_IOERR;
                        }
                }
                kunmap(bio_page(req->bio));
                rq_flush_dcache_pages(req);
                return BLK_STS_OK;
        case REQ_OP_WRITE:
                if (!tr->writesect)
                        return BLK_STS_IOERR;

                rq_flush_dcache_pages(req);
                buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
                for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
                        if (tr->writesect(dev, block, buf)) {
                                kunmap(bio_page(req->bio));
                                return BLK_STS_IOERR;
                        }
                }
                kunmap(bio_page(req->bio));
                return BLK_STS_OK;
        default:
                return BLK_STS_IOERR;
        }
}

int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
{
        return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);

static struct request *mtd_next_request(struct mtd_blktrans_dev *dev)
{
        struct request *rq;

        rq = list_first_entry_or_null(&dev->rq_list, struct request, queuelist);
        if (rq) {
                list_del_init(&rq->queuelist);
                blk_mq_start_request(rq);
                return rq;
        }

        return NULL;
}

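/*
 * Main request loop, entered and exited with dev->queue_lock held:
 * pull requests off dev->rq_list and feed them to do_blktrans_request()
 * under dev->lock. When the list runs dry, the translation layer's
 * ->background() hook (if any) gets one chance to run per idle period;
 * the hook can poll mtd_blktrans_cease_background() to decide when to
 * yield.
 */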
static void mtd_blktrans_work(struct mtd_blktrans_dev *dev)
        __releases(&dev->queue_lock)
        __acquires(&dev->queue_lock)
{
        struct mtd_blktrans_ops *tr = dev->tr;
        struct request *req = NULL;
        int background_done = 0;

        while (1) {
                blk_status_t res;

                dev->bg_stop = false;
                if (!req && !(req = mtd_next_request(dev))) {
                        if (tr->background && !background_done) {
                                spin_unlock_irq(&dev->queue_lock);
                                mutex_lock(&dev->lock);
                                tr->background(dev);
                                mutex_unlock(&dev->lock);
                                spin_lock_irq(&dev->queue_lock);
                                /*
                                 * Do background processing just once per idle
                                 * period.
                                 */
                                background_done = !dev->bg_stop;
                                continue;
                        }
                        break;
                }

                spin_unlock_irq(&dev->queue_lock);

                mutex_lock(&dev->lock);
                res = do_blktrans_request(dev->tr, dev, req);
                mutex_unlock(&dev->lock);

                if (!blk_update_request(req, res, blk_rq_cur_bytes(req))) {
                        __blk_mq_end_request(req, res);
                        req = NULL;
                }

                background_done = 0;
                spin_lock_irq(&dev->queue_lock);
        }
}

static blk_status_t mtd_queue_rq(struct blk_mq_hw_ctx *hctx,
                                 const struct blk_mq_queue_data *bd)
{
        struct mtd_blktrans_dev *dev;

        dev = hctx->queue->queuedata;
        if (!dev) {
                blk_mq_start_request(bd->rq);
                return BLK_STS_IOERR;
        }

        spin_lock_irq(&dev->queue_lock);
        list_add_tail(&bd->rq->queuelist, &dev->rq_list);
        mtd_blktrans_work(dev);
        spin_unlock_irq(&dev->queue_lock);

        return BLK_STS_OK;
}

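/*
 * Open/close bookkeeping: dev->open counts openers, while the kref and
 * the module reference pin the device and the translation layer for as
 * long as the block device stays open. Only the first opener calls the
 * translation layer's ->open() and takes a reference on the MTD device.
 */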
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
        int ret = 0;

        if (!dev)
                return -ERESTARTSYS; /* FIXME: busy loop! -arnd */

        mutex_lock(&mtd_table_mutex);
        mutex_lock(&dev->lock);

        if (dev->open)
                goto unlock;

        kref_get(&dev->ref);
        __module_get(dev->tr->owner);

        if (!dev->mtd)
                goto unlock;

        if (dev->tr->open) {
                ret = dev->tr->open(dev);
                if (ret)
                        goto error_put;
        }

        ret = __get_mtd_device(dev->mtd);
        if (ret)
                goto error_release;
        dev->file_mode = mode;

unlock:
        dev->open++;
        mutex_unlock(&dev->lock);
        mutex_unlock(&mtd_table_mutex);
        blktrans_dev_put(dev);
        return ret;

error_release:
        if (dev->tr->release)
                dev->tr->release(dev);
error_put:
        module_put(dev->tr->owner);
        kref_put(&dev->ref, blktrans_dev_release);
        mutex_unlock(&dev->lock);
        mutex_unlock(&mtd_table_mutex);
        blktrans_dev_put(dev);
        return ret;
}

static void blktrans_release(struct gendisk *disk, fmode_t mode)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);

        if (!dev)
                return;

        mutex_lock(&mtd_table_mutex);
        mutex_lock(&dev->lock);

        if (--dev->open)
                goto unlock;

        kref_put(&dev->ref, blktrans_dev_release);
        module_put(dev->tr->owner);

        if (dev->mtd) {
                if (dev->tr->release)
                        dev->tr->release(dev);
                __put_mtd_device(dev->mtd);
        }
unlock:
        mutex_unlock(&dev->lock);
        mutex_unlock(&mtd_table_mutex);
        blktrans_dev_put(dev);
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
        int ret = -ENXIO;

        if (!dev)
                return ret;

        mutex_lock(&dev->lock);

        if (!dev->mtd)
                goto unlock;

        ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : -ENOTTY;
unlock:
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
}

static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
                              unsigned int cmd, unsigned long arg)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
        int ret = -ENXIO;

        if (!dev)
                return ret;

        mutex_lock(&dev->lock);

        if (!dev->mtd)
                goto unlock;

        switch (cmd) {
        case BLKFLSBUF:
                ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
                break;
        default:
                ret = -ENOTTY;
        }
unlock:
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
}

static const struct block_device_operations mtd_block_ops = {
        .owner          = THIS_MODULE,
        .open           = blktrans_open,
        .release        = blktrans_release,
        .ioctl          = blktrans_ioctl,
        .getgeo         = blktrans_getgeo,
};

static const struct blk_mq_ops mtd_mq_ops = {
        .queue_rq       = mtd_queue_rq,
};

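/*
 * Register one block device on behalf of a translation layer. The
 * caller must hold mtd_table_mutex and have filled in new->mtd,
 * new->tr and new->size; new->devnum may be -1 to request the first
 * free number. Note the minor-number and naming limits checked below.
 */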
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
        struct mtd_blktrans_ops *tr = new->tr;
        struct mtd_blktrans_dev *d;
        int last_devnum = -1;
        struct gendisk *gd;
        int ret;

        if (mutex_trylock(&mtd_table_mutex)) {
                mutex_unlock(&mtd_table_mutex);
                BUG();
        }

        mutex_lock(&blktrans_ref_mutex);
        list_for_each_entry(d, &tr->devs, list) {
                if (new->devnum == -1) {
                        /* Use first free number */
                        if (d->devnum != last_devnum+1) {
                                /* Found a free devnum. Plug it in here */
                                new->devnum = last_devnum+1;
                                list_add_tail(&new->list, &d->list);
                                goto added;
                        }
                } else if (d->devnum == new->devnum) {
                        /* Required number taken */
                        mutex_unlock(&blktrans_ref_mutex);
                        return -EBUSY;
                } else if (d->devnum > new->devnum) {
                        /* Required number was free */
                        list_add_tail(&new->list, &d->list);
                        goto added;
                }
                last_devnum = d->devnum;
        }

        ret = -EBUSY;
        if (new->devnum == -1)
                new->devnum = last_devnum+1;

        /* Check that the device and any partitions will get valid
         * minor numbers and that the disk naming code below can cope
         * with this number. */
        if (new->devnum > (MINORMASK >> tr->part_bits) ||
            (tr->part_bits && new->devnum >= 27 * 26)) {
                mutex_unlock(&blktrans_ref_mutex);
                goto error1;
        }

        list_add_tail(&new->list, &tr->devs);
 added:
        mutex_unlock(&blktrans_ref_mutex);

        mutex_init(&new->lock);
        kref_init(&new->ref);
        if (!tr->writesect)
                new->readonly = 1;

        /* Create gendisk */
        ret = -ENOMEM;
        gd = alloc_disk(1 << tr->part_bits);

        if (!gd)
                goto error2;

        new->disk = gd;
        gd->private_data = new;
        gd->major = tr->major;
        gd->first_minor = (new->devnum) << tr->part_bits;
        gd->fops = &mtd_block_ops;

        if (tr->part_bits)
                if (new->devnum < 26)
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c", tr->name, 'a' + new->devnum);
                else
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c%c", tr->name,
                                 'a' - 1 + new->devnum / 26,
                                 'a' + new->devnum % 26);
        else
                snprintf(gd->disk_name, sizeof(gd->disk_name),
                         "%s%d", tr->name, new->devnum);
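
        /*
         * Partitionable devices get letter suffixes ("ftla" ... "ftlz",
         * then "ftlaa" ... "ftlzz"; hence the 27 * 26 limit above);
         * unpartitioned ones get numeric suffixes ("mtdblock0", ...).
         */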

        set_capacity(gd, ((u64)new->size * tr->blksize) >> 9);

        /* Create the request queue */
        spin_lock_init(&new->queue_lock);
        INIT_LIST_HEAD(&new->rq_list);

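        /*
         * A small single-queue setup suffices: requests are handled
         * synchronously inside ->queue_rq(), and BLK_MQ_F_BLOCKING
         * allows it to sleep on dev->lock and on the MTD I/O itself.
         */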
        new->tag_set = kzalloc(sizeof(*new->tag_set), GFP_KERNEL);
        if (!new->tag_set)
                goto error3;

        new->rq = blk_mq_init_sq_queue(new->tag_set, &mtd_mq_ops, 2,
                                BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
        if (IS_ERR(new->rq)) {
                ret = PTR_ERR(new->rq);
                new->rq = NULL;
                goto error4;
        }

        if (tr->flush)
                blk_queue_write_cache(new->rq, true, false);

        new->rq->queuedata = new;
        blk_queue_logical_block_size(new->rq, tr->blksize);

        blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
        blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);

        if (tr->discard) {
                blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq);
                blk_queue_max_discard_sectors(new->rq, UINT_MAX);
        }

        gd->queue = new->rq;

        if (new->readonly)
                set_disk_ro(gd, 1);

        device_add_disk(&new->mtd->dev, gd, NULL);

        if (new->disk_attributes) {
                ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
                                        new->disk_attributes);
                WARN_ON(ret);
        }
        return 0;
error4:
        kfree(new->tag_set);
error3:
        put_disk(new->disk);
error2:
        list_del(&new->list);
error1:
        return ret;
}

int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
        unsigned long flags;

        if (mutex_trylock(&mtd_table_mutex)) {
                mutex_unlock(&mtd_table_mutex);
                BUG();
        }

        if (old->disk_attributes)
                sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
                                                old->disk_attributes);

        /* Stop new requests from arriving */
        del_gendisk(old->disk);

        /* Kill current requests */
        spin_lock_irqsave(&old->queue_lock, flags);
        old->rq->queuedata = NULL;
        spin_unlock_irqrestore(&old->queue_lock, flags);

        /* freeze+quiesce queue to ensure all requests are flushed */
        blk_mq_freeze_queue(old->rq);
        blk_mq_quiesce_queue(old->rq);
        blk_mq_unquiesce_queue(old->rq);
        blk_mq_unfreeze_queue(old->rq);

        /*
         * If the device is currently open, tell the trans driver to
         * close it, then put the mtd device, and don't touch it again.
         */
        mutex_lock(&old->lock);
        if (old->open) {
                if (old->tr->release)
                        old->tr->release(old);
                __put_mtd_device(old->mtd);
        }

        old->mtd = NULL;

        mutex_unlock(&old->lock);
        blktrans_dev_put(old);
        return 0;
}

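/*
 * MTD notifier hooks: as MTD devices come and go, offer each new one
 * to every registered translation layer, and tear down the matching
 * block devices when one disappears.
 */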
static void blktrans_notify_remove(struct mtd_info *mtd)
{
        struct mtd_blktrans_ops *tr;
        struct mtd_blktrans_dev *dev, *next;

        list_for_each_entry(tr, &blktrans_majors, list)
                list_for_each_entry_safe(dev, next, &tr->devs, list)
                        if (dev->mtd == mtd)
                                tr->remove_dev(dev);
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
        struct mtd_blktrans_ops *tr;

        if (mtd->type == MTD_ABSENT)
                return;

        list_for_each_entry(tr, &blktrans_majors, list)
                tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
        .add = blktrans_notify_add,
        .remove = blktrans_notify_remove,
};

int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        struct mtd_info *mtd;
        int ret;

        /*
         * Register the notifier if/when the first device type is
         * registered, to prevent the link/init ordering from messing
         * us over.
         */
        if (!blktrans_notifier.list.next)
                register_mtd_user(&blktrans_notifier);

        mutex_lock(&mtd_table_mutex);

        ret = register_blkdev(tr->major, tr->name);
        if (ret < 0) {
                printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
                       tr->name, tr->major, ret);
                mutex_unlock(&mtd_table_mutex);
                return ret;
        }

        /* A major of 0 asked register_blkdev() to allocate one for us */
        if (ret)
                tr->major = ret;

        tr->blkshift = ffs(tr->blksize) - 1;

        INIT_LIST_HEAD(&tr->devs);
        list_add(&tr->list, &blktrans_majors);

        mtd_for_each_device(mtd)
                if (mtd->type != MTD_ABSENT)
                        tr->add_mtd(tr, mtd);

        mutex_unlock(&mtd_table_mutex);
        return 0;
}

int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        struct mtd_blktrans_dev *dev, *next;

        mutex_lock(&mtd_table_mutex);

        /* Remove it from the list of active majors */
        list_del(&tr->list);

        list_for_each_entry_safe(dev, next, &tr->devs, list)
                tr->remove_dev(dev);

        unregister_blkdev(tr->major, tr->name);
        mutex_unlock(&mtd_table_mutex);

        BUG_ON(!list_empty(&tr->devs));
        return 0;
}

static void __exit mtd_blktrans_exit(void)
{
        /* No race here -- if someone's currently in register_mtd_blktrans
           we're screwed anyway. */
        if (blktrans_notifier.list.next)
                unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");
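
For orientation, the sketch below shows roughly what a minimal translation layer built on this interface looks like. It is modelled on the in-tree mtdblock_ro driver, but the "simpleblk" name, the function names, and the dynamically allocated major (0) are illustrative assumptions rather than anything defined in this file.

/* Hypothetical minimal read-only translation layer using this interface. */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>

static int simpleblk_readsect(struct mtd_blktrans_dev *dev,
                              unsigned long block, char *buf)
{
        size_t retlen;

        /* One block is 512 bytes here (blksize below). */
        if (mtd_read(dev->mtd, block * 512, 512, &retlen, buf))
                return 1;
        return 0;
}

static void simpleblk_add_mtd(struct mtd_blktrans_ops *tr,
                              struct mtd_info *mtd)
{
        struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

        if (!dev)
                return;

        dev->mtd = mtd;
        dev->devnum = mtd->index;
        dev->size = mtd->size >> 9;     /* capacity in 512-byte blocks */
        dev->tr = tr;
        dev->readonly = 1;              /* set anyway: no ->writesect */

        if (add_mtd_blktrans_dev(dev))
                kfree(dev);             /* on failure we still own dev */
}

static void simpleblk_remove_dev(struct mtd_blktrans_dev *dev)
{
        del_mtd_blktrans_dev(dev);      /* final kref_put() frees dev */
}

static struct mtd_blktrans_ops simpleblk_tr = {
        .name           = "simpleblk",
        .major          = 0,            /* let register_blkdev() pick one */
        .part_bits      = 0,
        .blksize        = 512,
        .readsect       = simpleblk_readsect,
        .add_mtd        = simpleblk_add_mtd,
        .remove_dev     = simpleblk_remove_dev,
        .owner          = THIS_MODULE,
};

static int __init simpleblk_init(void)
{
        return register_mtd_blktrans(&simpleblk_tr);
}

static void __exit simpleblk_exit(void)
{
        deregister_mtd_blktrans(&simpleblk_tr);
}

module_init(simpleblk_init);
module_exit(simpleblk_exit);
MODULE_LICENSE("GPL");

Loading such a module takes the notifier path above, so each existing and future MTD device would appear as a read-only /dev/simpleblk<N> served in 512-byte blocks.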
