root/drivers/s390/char/vmur.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. urdev_alloc
  2. urdev_free
  3. urdev_get
  4. urdev_get_from_cdev
  5. urdev_get_from_devno
  6. urdev_put
  7. ur_pm_suspend
  8. free_chan_prog
  9. alloc_chan_prog
  10. do_ur_io
  11. ur_int_handler
  12. ur_attr_reclen_show
  13. ur_create_attributes
  14. ur_remove_attributes
  15. get_urd_class
  16. urfile_alloc
  17. urfile_free
  18. do_write
  19. ur_write
  20. diag_position_to_record
  21. diag_read_file
  22. diag14_read
  23. ur_read
  24. diag_read_next_file_info
  25. verify_uri_device
  26. verify_device
  27. get_uri_file_reclen
  28. get_file_reclen
  29. ur_open
  30. ur_release
  31. ur_llseek
  32. ur_probe
  33. ur_set_online
  34. ur_set_offline_force
  35. ur_set_offline
  36. ur_remove
  37. ur_init
  38. ur_exit

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * Linux driver for System z and s390 unit record devices
   4  * (z/VM virtual punch, reader, printer)
   5  *
   6  * Copyright IBM Corp. 2001, 2009
   7  * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
   8  *          Michael Holzheu <holzheu@de.ibm.com>
   9  *          Frank Munzert <munzert@de.ibm.com>
  10  */
  11 
  12 #define KMSG_COMPONENT "vmur"
  13 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  14 
  15 #include <linux/cdev.h>
  16 #include <linux/slab.h>
  17 #include <linux/module.h>
  18 
  19 #include <linux/uaccess.h>
  20 #include <asm/cio.h>
  21 #include <asm/ccwdev.h>
  22 #include <asm/debug.h>
  23 #include <asm/diag.h>
  24 
  25 #include "vmur.h"
  26 
  27 /*
  28  * Driver overview
  29  *
  30  * Unit record device support is implemented as a character device driver.
  31  * We can fit at least 16 bits into a device minor number and use the
  32  * simple method of mapping a character device number with minor abcd
  33  * to the unit record device with devno abcd.
  34  * I/O to virtual unit record devices is handled as follows:
  35  * Reads: Diagnose code 0x14 (input spool file manipulation)
  36  * is used to read spool data page-wise.
  37  * Writes: The CCW used is WRITE_CCW_CMD (0x01). The device's record length
  38  * is available by reading sysfs attr reclen. Each write() to the device
  39  * must specify an integral multiple (maximal 511) of reclen.
  40  */
  41 
  42 static char ur_banner[] = "z/VM virtual unit record device driver";
  43 
  44 MODULE_AUTHOR("IBM Corporation");
  45 MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
  46 MODULE_LICENSE("GPL");
  47 
  48 static dev_t ur_first_dev_maj_min;
  49 static struct class *vmur_class;
  50 static struct debug_info *vmur_dbf;
  51 
  52 /* We put the device's record length (for writes) in the driver_info field */
  53 static struct ccw_device_id ur_ids[] = {
  54         { CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) },
  55         { CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) },
  56         { /* end of list */ }
  57 };
  58 
  59 MODULE_DEVICE_TABLE(ccw, ur_ids);
  60 
  61 static int ur_probe(struct ccw_device *cdev);
  62 static void ur_remove(struct ccw_device *cdev);
  63 static int ur_set_online(struct ccw_device *cdev);
  64 static int ur_set_offline(struct ccw_device *cdev);
  65 static int ur_pm_suspend(struct ccw_device *cdev);
  66 
  67 static struct ccw_driver ur_driver = {
  68         .driver = {
  69                 .name   = "vmur",
  70                 .owner  = THIS_MODULE,
  71         },
  72         .ids            = ur_ids,
  73         .probe          = ur_probe,
  74         .remove         = ur_remove,
  75         .set_online     = ur_set_online,
  76         .set_offline    = ur_set_offline,
  77         .freeze         = ur_pm_suspend,
  78         .int_class      = IRQIO_VMR,
  79 };
  80 
  81 static DEFINE_MUTEX(vmur_mutex);
  82 
  83 /*
  84  * Allocation, freeing, getting and putting of urdev structures
  85  *
  86  * Each ur device (urd) contains a reference to its corresponding ccw device
  87  * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
  88  * ur device using dev_get_drvdata(&cdev->dev) pointer.
  89  *
  90  * urd references:
  91  * - ur_probe gets a urd reference, ur_remove drops the reference
  92  *   dev_get_drvdata(&cdev->dev)
  93  * - ur_open gets a urd reference, ur_release drops the reference
  94  *   (urf->urd)
  95  *
  96  * cdev references:
  97  * - urdev_alloc get a cdev reference (urd->cdev)
  98  * - urdev_free drops the cdev reference (urd->cdev)
  99  *
 100  * Setting and clearing of dev_get_drvdata(&cdev->dev) is protected by the ccwdev lock
 101  */
 102 static struct urdev *urdev_alloc(struct ccw_device *cdev)
 103 {
 104         struct urdev *urd;
 105 
 106         urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
 107         if (!urd)
 108                 return NULL;
 109         urd->reclen = cdev->id.driver_info;
 110         ccw_device_get_id(cdev, &urd->dev_id);
 111         mutex_init(&urd->io_mutex);
 112         init_waitqueue_head(&urd->wait);
 113         spin_lock_init(&urd->open_lock);
 114         refcount_set(&urd->ref_count,  1);
 115         urd->cdev = cdev;
 116         get_device(&cdev->dev);
 117         return urd;
 118 }
 119 
/* Drop the cdev reference taken in urdev_alloc() and free the urdev. */
static void urdev_free(struct urdev *urd)
{
	TRACE("urdev_free: %p\n", urd);
	if (urd->cdev)
		put_device(&urd->cdev->dev);
	kfree(urd);
}
 127 
/* Take an additional reference on the urdev. */
static void urdev_get(struct urdev *urd)
{
	refcount_inc(&urd->ref_count);
}
 132 
 133 static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
 134 {
 135         struct urdev *urd;
 136         unsigned long flags;
 137 
 138         spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
 139         urd = dev_get_drvdata(&cdev->dev);
 140         if (urd)
 141                 urdev_get(urd);
 142         spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 143         return urd;
 144 }
 145 
 146 static struct urdev *urdev_get_from_devno(u16 devno)
 147 {
 148         char bus_id[16];
 149         struct ccw_device *cdev;
 150         struct urdev *urd;
 151 
 152         sprintf(bus_id, "0.0.%04x", devno);
 153         cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
 154         if (!cdev)
 155                 return NULL;
 156         urd = urdev_get_from_cdev(cdev);
 157         put_device(&cdev->dev);
 158         return urd;
 159 }
 160 
/* Drop a urdev reference; the last put frees the structure. */
static void urdev_put(struct urdev *urd)
{
	if (refcount_dec_and_test(&urd->ref_count))
		urdev_free(urd);
}
 166 
 167 /*
 168  * State and contents of ur devices can be changed by class D users issuing
 169  * CP commands such as PURGE or TRANSFER, while the Linux guest is suspended.
 170  * Also the Linux guest might be logged off, which causes all active spool
 171  * files to be closed.
 172  * So we cannot guarantee that spool files are still the same when the Linux
 173  * guest is resumed. In order to avoid unpredictable results at resume time
 174  * we simply refuse to suspend if a ur device node is open.
 175  */
 176 static int ur_pm_suspend(struct ccw_device *cdev)
 177 {
 178         struct urdev *urd = dev_get_drvdata(&cdev->dev);
 179 
 180         TRACE("ur_pm_suspend: cdev=%p\n", cdev);
 181         if (urd->open_flag) {
 182                 pr_err("Unit record device %s is busy, %s refusing to "
 183                        "suspend.\n", dev_name(&cdev->dev), ur_banner);
 184                 return -EBUSY;
 185         }
 186         return 0;
 187 }
 188 
 189 /*
 190  * Low-level functions to do I/O to a ur device.
 191  *     alloc_chan_prog
 192  *     free_chan_prog
 193  *     do_ur_io
 194  *     ur_int_handler
 195  *
 196  * alloc_chan_prog allocates and builds the channel program
 197  * free_chan_prog frees memory of the channel program
 198  *
 199  * do_ur_io issues the channel program to the device and blocks waiting
 200  * on a completion event it publishes at urd->io_done. The function
 201  * serialises itself on the device's mutex so that only one I/O
 202  * is issued at a time (and that I/O is synchronous).
 203  *
 204  * ur_int_handler catches the "I/O done" interrupt, writes the
 205  * subchannel status word into the scsw member of the urdev structure
 206  * and complete()s the io_done to wake the waiting do_ur_io.
 207  *
 208  * The caller of do_ur_io is responsible for kfree()ing the channel program
 209  * address pointer that alloc_chan_prog returned.
 210  */
 211 
 212 static void free_chan_prog(struct ccw1 *cpa)
 213 {
 214         struct ccw1 *ptr = cpa;
 215 
 216         while (ptr->cda) {
 217                 kfree((void *)(addr_t) ptr->cda);
 218                 ptr++;
 219         }
 220         kfree(cpa);
 221 }
 222 
 223 /*
 224  * alloc_chan_prog
 225  * The channel program we use is write commands chained together
 226  * with a final NOP CCW command-chained on (which ensures that CE and DE
 227  * are presented together in a single interrupt instead of as separate
 228  * interrupts unless an incorrect length indication kicks in first). The
 229  * data length in each CCW is reclen.
 230  */
static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
				    int reclen)
{
	struct ccw1 *cpa;
	void *kbuf;
	int i;

	TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);

	/*
	 * We chain a NOP onto the writes to force CE+DE together.
	 * That means we allocate room for CCWs to cover count/reclen
	 * records plus a NOP.
	 */
	/*
	 * kcalloc zero-fills, so the trailing NOP's cda stays 0 —
	 * free_chan_prog() relies on that sentinel to stop freeing.
	 * GFP_DMA: buffers are addressed via the 32-bit cda fields
	 * below, so they must be low-address storage.
	 */
	cpa = kcalloc(rec_count + 1, sizeof(struct ccw1),
		      GFP_KERNEL | GFP_DMA);
	if (!cpa)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < rec_count; i++) {
		cpa[i].cmd_code = WRITE_CCW_CMD;
		cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
		cpa[i].count = reclen;
		kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
		if (!kbuf) {
			free_chan_prog(cpa);
			return ERR_PTR(-ENOMEM);
		}
		/*
		 * Set cda before the copy so that free_chan_prog() can
		 * release this buffer on the error paths below.
		 */
		cpa[i].cda = (u32)(addr_t) kbuf;
		if (copy_from_user(kbuf, ubuf, reclen)) {
			free_chan_prog(cpa);
			return ERR_PTR(-EFAULT);
		}
		ubuf += reclen;
	}
	/* The following NOP CCW forces CE+DE to be presented together */
	cpa[i].cmd_code = CCW_CMD_NOOP;
	return cpa;
}
 270 
static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
{
	int rc;
	struct ccw_device *cdev = urd->cdev;
	DECLARE_COMPLETION_ONSTACK(event);

	TRACE("do_ur_io: cpa=%p\n", cpa);

	/* one synchronous I/O per device at a time; abortable by a signal */
	rc = mutex_lock_interruptible(&urd->io_mutex);
	if (rc)
		return rc;

	/* ur_int_handler() completes this when the interrupt arrives */
	urd->io_done = &event;

	/* ccw_device_start() must be called with the ccwdev lock held */
	spin_lock_irq(get_ccwdev_lock(cdev));
	rc = ccw_device_start(cdev, cpa, 1, 0, 0);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	TRACE("do_ur_io: ccw_device_start returned %d\n", rc);
	if (rc)
		goto out;

	wait_for_completion(&event);
	TRACE("do_ur_io: I/O complete\n");
	/* device status is reported separately via urd->io_request_rc */
	rc = 0;

out:
	mutex_unlock(&urd->io_mutex);
	return rc;
}
 301 
 302 /*
 303  * ur interrupt handler, called from the ccw_device layer
 304  */
static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
			   struct irb *irb)
{
	struct urdev *urd;

	if (!IS_ERR(irb)) {
		TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
		      intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
		      irb->scsw.cmd.count);
	}
	/* intparm == 0 means this interrupt was not triggered by do_ur_io() */
	if (!intparm) {
		TRACE("ur_int_handler: unsolicited interrupt\n");
		return;
	}
	urd = dev_get_drvdata(&cdev->dev);
	BUG_ON(!urd);
	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb))
		urd->io_request_rc = PTR_ERR(irb);
	else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		/* exactly channel end + device end: clean completion */
		urd->io_request_rc = 0;
	else
		urd->io_request_rc = -EIO;

	/* wake the do_ur_io() caller waiting on this completion */
	complete(urd->io_done);
}
 331 
 332 /*
 333  * reclen sysfs attribute - The record length to be used for write CCWs
 334  */
 335 static ssize_t ur_attr_reclen_show(struct device *dev,
 336                                    struct device_attribute *attr, char *buf)
 337 {
 338         struct urdev *urd;
 339         int rc;
 340 
 341         urd = urdev_get_from_cdev(to_ccwdev(dev));
 342         if (!urd)
 343                 return -ENODEV;
 344         rc = sprintf(buf, "%zu\n", urd->reclen);
 345         urdev_put(urd);
 346         return rc;
 347 }
 348 
 349 static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);
 350 
/* Create the "reclen" sysfs attribute for a ur device. */
static int ur_create_attributes(struct device *dev)
{
	return device_create_file(dev, &dev_attr_reclen);
}
 355 
/* Remove the "reclen" sysfs attribute again. */
static void ur_remove_attributes(struct device *dev)
{
	device_remove_file(dev, &dev_attr_reclen);
}
 360 
 361 /*
 362  * diagnose code 0x210 - retrieve device information
 363  * cc=0  normal completion, we have a real device
 364  * cc=1  CP paging error
 365  * cc=2  The virtual device exists, but is not associated with a real device
 366  * cc=3  Invalid device address, or the virtual device does not exist
 367  */
static int get_urd_class(struct urdev *urd)
{
	/*
	 * NOTE(review): the diag210 buffer is static rather than on the
	 * stack — presumably an addressing requirement of the diagnose
	 * call; confirm against the diag210() implementation.
	 */
	static struct diag210 ur_diag210;
	int cc;

	ur_diag210.vrdcdvno = urd->dev_id.devno;
	ur_diag210.vrdclen = sizeof(struct diag210);

	cc = diag210(&ur_diag210);
	switch (cc) {
	case 0:
		/* a real device — this driver handles only virtual ones */
		return -EOPNOTSUPP;
	case 2:
		return ur_diag210.vrdcvcla; /* virtual device class */
	case 3:
		return -ENODEV;
	default:
		return -EIO;
	}
}
 388 
 389 /*
 390  * Allocation and freeing of urfile structures
 391  */
 392 static struct urfile *urfile_alloc(struct urdev *urd)
 393 {
 394         struct urfile *urf;
 395 
 396         urf = kzalloc(sizeof(struct urfile), GFP_KERNEL);
 397         if (!urf)
 398                 return NULL;
 399         urf->urd = urd;
 400 
 401         TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf,
 402               urf->dev_reclen);
 403 
 404         return urf;
 405 }
 406 
/* Free a urfile; the urd reference is dropped separately (ur_release). */
static void urfile_free(struct urfile *urf)
{
	TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd);
	kfree(urf);
}
 412 
 413 /*
 414  * The fops implementation of the character device driver
 415  */
 416 static ssize_t do_write(struct urdev *urd, const char __user *udata,
 417                         size_t count, size_t reclen, loff_t *ppos)
 418 {
 419         struct ccw1 *cpa;
 420         int rc;
 421 
 422         cpa = alloc_chan_prog(udata, count / reclen, reclen);
 423         if (IS_ERR(cpa))
 424                 return PTR_ERR(cpa);
 425 
 426         rc = do_ur_io(urd, cpa);
 427         if (rc)
 428                 goto fail_kfree_cpa;
 429 
 430         if (urd->io_request_rc) {
 431                 rc = urd->io_request_rc;
 432                 goto fail_kfree_cpa;
 433         }
 434         *ppos += count;
 435         rc = count;
 436 
 437 fail_kfree_cpa:
 438         free_chan_prog(cpa);
 439         return rc;
 440 }
 441 
 442 static ssize_t ur_write(struct file *file, const char __user *udata,
 443                         size_t count, loff_t *ppos)
 444 {
 445         struct urfile *urf = file->private_data;
 446 
 447         TRACE("ur_write: count=%zu\n", count);
 448 
 449         if (count == 0)
 450                 return 0;
 451 
 452         if (count % urf->dev_reclen)
 453                 return -EINVAL; /* count must be a multiple of reclen */
 454 
 455         if (count > urf->dev_reclen * MAX_RECS_PER_IO)
 456                 count = urf->dev_reclen * MAX_RECS_PER_IO;
 457 
 458         return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
 459 }
 460 
 461 /*
 462  * diagnose code 0x14 subcode 0x0028 - position spool file to designated
 463  *                                     record
 464  * cc=0  normal completion
 465  * cc=2  no file active on the virtual reader or device not ready
 466  * cc=3  record specified is beyond EOF
 467  */
 468 static int diag_position_to_record(int devno, int record)
 469 {
 470         int cc;
 471 
 472         cc = diag14(record, devno, 0x28);
 473         switch (cc) {
 474         case 0:
 475                 return 0;
 476         case 2:
 477                 return -ENOMEDIUM;
 478         case 3:
 479                 return -ENODATA; /* position beyond end of file */
 480         default:
 481                 return -EIO;
 482         }
 483 }
 484 
 485 /*
 486  * diagnose code 0x14 subcode 0x0000 - read next spool file buffer
 487  * cc=0  normal completion
 488  * cc=1  EOF reached
 489  * cc=2  no file active on the virtual reader, and no file eligible
 490  * cc=3  file already active on the virtual reader or specified virtual
 491  *       reader does not exist or is not a reader
 492  */
 493 static int diag_read_file(int devno, char *buf)
 494 {
 495         int cc;
 496 
 497         cc = diag14((unsigned long) buf, devno, 0x00);
 498         switch (cc) {
 499         case 0:
 500                 return 0;
 501         case 1:
 502                 return -ENODATA;
 503         case 2:
 504                 return -ENOMEDIUM;
 505         default:
 506                 return -EIO;
 507         }
 508 }
 509 
static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
			   loff_t *offs)
{
	size_t len, copied, res;
	char *buf;
	int rc;
	u16 reclen;
	struct urdev *urd;

	urd = ((struct urfile *) file->private_data)->urd;
	reclen = ((struct urfile *) file->private_data)->file_reclen;

	/* position the spool file at the 4K buffer that contains *offs */
	rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1);
	if (rc == -ENODATA)
		return 0;	/* offset beyond EOF: read returns 0 */
	if (rc)
		return rc;

	len = min((size_t) PAGE_SIZE, count);
	/* bounce page for diag 0x14; GFP_DMA keeps it 31-bit addressable */
	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;

	copied = 0;
	/* offset into the first page at which the caller's data starts */
	res = (size_t) (*offs % PAGE_SIZE);
	do {
		rc = diag_read_file(urd->dev_id.devno, buf);
		if (rc == -ENODATA) {
			/* EOF: fall through and return what we copied */
			break;
		}
		if (rc)
			goto fail;
		/*
		 * Patch the file record length into the very first page at
		 * FILE_RECLEN_OFFSET — presumably a header field consumed
		 * by user space; see vmur.h for the layout.
		 */
		if (reclen && (copied == 0) && (*offs < PAGE_SIZE))
			*((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
		len = min(count - copied, PAGE_SIZE - res);
		if (copy_to_user(ubuf + copied, buf + res, len)) {
			rc = -EFAULT;
			goto fail;
		}
		res = 0;	/* later pages are copied from their start */
		copied += len;
	} while (copied != count);

	*offs += copied;
	rc = copied;
fail:
	free_page((unsigned long) buf);
	return rc;
}
 559 
 560 static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count,
 561                        loff_t *offs)
 562 {
 563         struct urdev *urd;
 564         int rc;
 565 
 566         TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs);
 567 
 568         if (count == 0)
 569                 return 0;
 570 
 571         urd = ((struct urfile *) file->private_data)->urd;
 572         rc = mutex_lock_interruptible(&urd->io_mutex);
 573         if (rc)
 574                 return rc;
 575         rc = diag14_read(file, ubuf, count, offs);
 576         mutex_unlock(&urd->io_mutex);
 577         return rc;
 578 }
 579 
 580 /*
 581  * diagnose code 0x14 subcode 0x0fff - retrieve next file descriptor
 582  * cc=0  normal completion
 583  * cc=1  no files on reader queue or no subsequent file
 584  * cc=2  spid specified is invalid
 585  */
 586 static int diag_read_next_file_info(struct file_control_block *buf, int spid)
 587 {
 588         int cc;
 589 
 590         cc = diag14((unsigned long) buf, spid, 0xfff);
 591         switch (cc) {
 592         case 0:
 593                 return 0;
 594         default:
 595                 return -ENODATA;
 596         }
 597 }
 598 
/*
 * Verify that the reader device is usable: the file on top of the
 * queue must exist, must not be in hold status, and must go to
 * "in use" state once the first buffer is read.  Returns 0 if the
 * device can be opened for reading, a negative errno otherwise.
 */
static int verify_uri_device(struct urdev *urd)
{
	struct file_control_block *fcb;
	char *buf;
	int rc;

	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
	if (!fcb)
		return -ENOMEM;

	/* check for empty reader device (beginning of chain) */
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free_fcb;

	/* if file is in hold status, we do not read it */
	if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
		rc = -EPERM;
		goto fail_free_fcb;
	}

	/* open file on virtual reader  */
	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!buf) {
		rc = -ENOMEM;
		goto fail_free_fcb;
	}
	/* reading the first buffer is what opens the spool file */
	rc = diag_read_file(urd->dev_id.devno, buf);
	if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
		goto fail_free_buf;

	/* check if the file on top of the queue is open now */
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free_buf;
	if (!(fcb->file_stat & FLG_IN_USE)) {
		rc = -EMFILE;
		goto fail_free_buf;
	}
	rc = 0;

fail_free_buf:
	free_page((unsigned long) buf);
fail_free_fcb:
	kfree(fcb);
	return rc;
}
 646 
 647 static int verify_device(struct urdev *urd)
 648 {
 649         switch (urd->class) {
 650         case DEV_CLASS_UR_O:
 651                 return 0; /* no check needed here */
 652         case DEV_CLASS_UR_I:
 653                 return verify_uri_device(urd);
 654         default:
 655                 return -EOPNOTSUPP;
 656         }
 657 }
 658 
 659 static int get_uri_file_reclen(struct urdev *urd)
 660 {
 661         struct file_control_block *fcb;
 662         int rc;
 663 
 664         fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
 665         if (!fcb)
 666                 return -ENOMEM;
 667         rc = diag_read_next_file_info(fcb, 0);
 668         if (rc)
 669                 goto fail_free;
 670         if (fcb->file_stat & FLG_CP_DUMP)
 671                 rc = 0;
 672         else
 673                 rc = fcb->rec_len;
 674 
 675 fail_free:
 676         kfree(fcb);
 677         return rc;
 678 }
 679 
 680 static int get_file_reclen(struct urdev *urd)
 681 {
 682         switch (urd->class) {
 683         case DEV_CLASS_UR_O:
 684                 return 0;
 685         case DEV_CLASS_UR_I:
 686                 return get_uri_file_reclen(urd);
 687         default:
 688                 return -EOPNOTSUPP;
 689         }
 690 }
 691 
/*
 * open() entry point.  Only one open per device is allowed at a time
 * (urd->open_flag); additional openers block on urd->wait unless
 * O_NONBLOCK is set.  Read access is restricted to input devices and
 * write access to output devices; O_RDWR is always rejected.
 */
static int ur_open(struct inode *inode, struct file *file)
{
	u16 devno;
	struct urdev *urd;
	struct urfile *urf;
	unsigned short accmode;
	int rc;

	accmode = file->f_flags & O_ACCMODE;

	if (accmode == O_RDWR)
		return -EACCES;
	/*
	 * We treat the minor number as the devno of the ur device
	 * to find in the driver tree.
	 */
	devno = MINOR(file_inode(file)->i_rdev);

	urd = urdev_get_from_devno(devno);
	if (!urd) {
		rc = -ENXIO;
		goto out;
	}

	/* single-open exclusion: wait until no other opener holds the device */
	spin_lock(&urd->open_lock);
	while (urd->open_flag) {
		spin_unlock(&urd->open_lock);
		if (file->f_flags & O_NONBLOCK) {
			rc = -EBUSY;
			goto fail_put;
		}
		/* retake the lock after waking; the flag is rechecked above */
		if (wait_event_interruptible(urd->wait, urd->open_flag == 0)) {
			rc = -ERESTARTSYS;
			goto fail_put;
		}
		spin_lock(&urd->open_lock);
	}
	urd->open_flag++;
	spin_unlock(&urd->open_lock);

	TRACE("ur_open\n");

	/* read only from input devices, write only to output devices */
	if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) ||
	    ((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) {
		TRACE("ur_open: unsupported dev class (%d)\n", urd->class);
		rc = -EACCES;
		goto fail_unlock;
	}

	rc = verify_device(urd);
	if (rc)
		goto fail_unlock;

	urf = urfile_alloc(urd);
	if (!urf) {
		rc = -ENOMEM;
		goto fail_unlock;
	}

	urf->dev_reclen = urd->reclen;
	rc = get_file_reclen(urd);
	if (rc < 0)
		goto fail_urfile_free;
	urf->file_reclen = rc;
	file->private_data = urf;
	return 0;

fail_urfile_free:
	urfile_free(urf);
fail_unlock:
	spin_lock(&urd->open_lock);
	urd->open_flag--;
	spin_unlock(&urd->open_lock);
fail_put:
	urdev_put(urd);
out:
	return rc;
}
 770 
 771 static int ur_release(struct inode *inode, struct file *file)
 772 {
 773         struct urfile *urf = file->private_data;
 774 
 775         TRACE("ur_release\n");
 776         spin_lock(&urf->urd->open_lock);
 777         urf->urd->open_flag--;
 778         spin_unlock(&urf->urd->open_lock);
 779         wake_up_interruptible(&urf->urd->wait);
 780         urdev_put(urf->urd);
 781         urfile_free(urf);
 782         return 0;
 783 }
 784 
 785 static loff_t ur_llseek(struct file *file, loff_t offset, int whence)
 786 {
 787         if ((file->f_flags & O_ACCMODE) != O_RDONLY)
 788                 return -ESPIPE; /* seek allowed only for reader */
 789         if (offset % PAGE_SIZE)
 790                 return -ESPIPE; /* only multiples of 4K allowed */
 791         return no_seek_end_llseek(file, offset, whence);
 792 }
 793 
/* fops for the vmrdr-*, vmpun-* and vmprt-* character device nodes */
static const struct file_operations ur_fops = {
	.owner   = THIS_MODULE,
	.open    = ur_open,
	.release = ur_release,
	.read    = ur_read,
	.write   = ur_write,
	.llseek  = ur_llseek,
};
 802 
 803 /*
 804  * ccw_device infrastructure:
 805  *     ur_probe creates the struct urdev (with refcount = 1), the device
 806  *     attributes, sets up the interrupt handler and validates the virtual
 807  *     unit record device.
 808  *     ur_remove removes the device attributes and drops the reference to
 809  *     struct urdev.
 810  *
 811  *     ur_probe, ur_remove, ur_set_online and ur_set_offline are serialized
 812  *     by the vmur_mutex lock.
 813  *
 814  *     urd->char_device is used as indication that the online function has
 815  *     been completed successfully.
 816  */
static int ur_probe(struct ccw_device *cdev)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_probe: cdev=%p\n", cdev);

	mutex_lock(&vmur_mutex);
	/* urd starts with refcount 1; that reference belongs to drvdata */
	urd = urdev_alloc(cdev);
	if (!urd) {
		rc = -ENOMEM;
		goto fail_unlock;
	}

	rc = ur_create_attributes(&cdev->dev);
	if (rc) {
		/* attribute creation failures are reported as -ENOMEM */
		rc = -ENOMEM;
		goto fail_urdev_put;
	}
	cdev->handler = ur_int_handler;

	/* validate virtual unit record device */
	urd->class = get_urd_class(urd);
	if (urd->class < 0) {
		rc = urd->class;
		goto fail_remove_attr;
	}
	if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
		rc = -EOPNOTSUPP;
		goto fail_remove_attr;
	}
	/* publish the urd; lookups take the ccwdev lock (urdev_get_from_cdev) */
	spin_lock_irq(get_ccwdev_lock(cdev));
	dev_set_drvdata(&cdev->dev, urd);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	mutex_unlock(&vmur_mutex);
	return 0;

fail_remove_attr:
	ur_remove_attributes(&cdev->dev);
fail_urdev_put:
	urdev_put(urd);
fail_unlock:
	mutex_unlock(&vmur_mutex);
	return rc;
}
 863 
/*
 * Bring the device online: create the character device and the
 * /dev node (vmrdr-*, vmpun-* or vmprt-* depending on type/class).
 * urd->char_device != NULL marks a completed online transition.
 */
static int ur_set_online(struct ccw_device *cdev)
{
	struct urdev *urd;
	int minor, major, rc;
	char node_id[16];

	TRACE("ur_set_online: cdev=%p\n", cdev);

	mutex_lock(&vmur_mutex);
	urd = urdev_get_from_cdev(cdev);
	if (!urd) {
		/* ur_remove already deleted our urd */
		rc = -ENODEV;
		goto fail_unlock;
	}

	if (urd->char_device) {
		/* Another ur_set_online was faster */
		rc = -EBUSY;
		goto fail_urdev_put;
	}

	/* char device minor number == device number (see ur_open) */
	minor = urd->dev_id.devno;
	major = MAJOR(ur_first_dev_maj_min);

	urd->char_device = cdev_alloc();
	if (!urd->char_device) {
		rc = -ENOMEM;
		goto fail_urdev_put;
	}

	urd->char_device->ops = &ur_fops;
	urd->char_device->owner = ur_fops.owner;

	rc = cdev_add(urd->char_device, MKDEV(major, minor), 1);
	if (rc)
		goto fail_free_cdev;
	/* choose the node name prefix from control unit type and class */
	if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
		if (urd->class == DEV_CLASS_UR_I)
			sprintf(node_id, "vmrdr-%s", dev_name(&cdev->dev));
		if (urd->class == DEV_CLASS_UR_O)
			sprintf(node_id, "vmpun-%s", dev_name(&cdev->dev));
	} else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
		sprintf(node_id, "vmprt-%s", dev_name(&cdev->dev));
	} else {
		rc = -EOPNOTSUPP;
		goto fail_free_cdev;
	}

	urd->device = device_create(vmur_class, &cdev->dev,
				    urd->char_device->dev, NULL, "%s", node_id);
	if (IS_ERR(urd->device)) {
		rc = PTR_ERR(urd->device);
		TRACE("ur_set_online: device_create rc=%d\n", rc);
		goto fail_free_cdev;
	}
	urdev_put(urd);
	mutex_unlock(&vmur_mutex);
	return 0;

fail_free_cdev:
	cdev_del(urd->char_device);
	urd->char_device = NULL;
fail_urdev_put:
	urdev_put(urd);
fail_unlock:
	mutex_unlock(&vmur_mutex);
	return rc;
}
 933 
 934 static int ur_set_offline_force(struct ccw_device *cdev, int force)
 935 {
 936         struct urdev *urd;
 937         int rc;
 938 
 939         TRACE("ur_set_offline: cdev=%p\n", cdev);
 940         urd = urdev_get_from_cdev(cdev);
 941         if (!urd)
 942                 /* ur_remove already deleted our urd */
 943                 return -ENODEV;
 944         if (!urd->char_device) {
 945                 /* Another ur_set_offline was faster */
 946                 rc = -EBUSY;
 947                 goto fail_urdev_put;
 948         }
 949         if (!force && (refcount_read(&urd->ref_count) > 2)) {
 950                 /* There is still a user of urd (e.g. ur_open) */
 951                 TRACE("ur_set_offline: BUSY\n");
 952                 rc = -EBUSY;
 953                 goto fail_urdev_put;
 954         }
 955         device_destroy(vmur_class, urd->char_device->dev);
 956         cdev_del(urd->char_device);
 957         urd->char_device = NULL;
 958         rc = 0;
 959 
 960 fail_urdev_put:
 961         urdev_put(urd);
 962         return rc;
 963 }
 964 
 965 static int ur_set_offline(struct ccw_device *cdev)
 966 {
 967         int rc;
 968 
 969         mutex_lock(&vmur_mutex);
 970         rc = ur_set_offline_force(cdev, 0);
 971         mutex_unlock(&vmur_mutex);
 972         return rc;
 973 }
 974 
/*
 * ccw driver remove callback: force the device offline, remove its sysfs
 * attributes and drop the urdev reference stored in driver data.
 */
static void ur_remove(struct ccw_device *cdev)
{
	unsigned long flags;

	TRACE("ur_remove\n");

	mutex_lock(&vmur_mutex);

	/* Forced offline: tear down even if the node is still open. */
	if (cdev->online)
		ur_set_offline_force(cdev, 1);
	ur_remove_attributes(&cdev->dev);

	/*
	 * Drop the drvdata reference and clear the pointer under the ccwdev
	 * lock — NOTE(review): presumably this serializes against concurrent
	 * drvdata readers such as the interrupt handler; confirm against
	 * ur_int_handler/urdev_get_from_cdev.
	 */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	urdev_put(dev_get_drvdata(&cdev->dev));
	dev_set_drvdata(&cdev->dev, NULL);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	mutex_unlock(&vmur_mutex);
}
 994 
 995 /*
 996  * Module initialisation and cleanup
 997  */
/*
 * Module initialization: register the debug facility, the device class,
 * the ccw driver and the character device region, in that order.  Each
 * failure path unwinds exactly the stages completed so far via the goto
 * ladder below — the statement order is load-bearing.
 */
static int __init ur_init(void)
{
	int rc;
	dev_t dev;

	/* Virtual unit record devices only exist under z/VM. */
	if (!MACHINE_IS_VM) {
		pr_err("The %s cannot be loaded without z/VM\n",
		       ur_banner);
		return -ENODEV;
	}

	/* s390 debug feature area for the TRACE() calls in this driver. */
	vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long));
	if (!vmur_dbf)
		return -ENOMEM;
	rc = debug_register_view(vmur_dbf, &debug_sprintf_view);
	if (rc)
		goto fail_free_dbf;

	debug_set_level(vmur_dbf, 6);

	/* Class under which ur_set_online() creates the device nodes. */
	vmur_class = class_create(THIS_MODULE, "vmur");
	if (IS_ERR(vmur_class)) {
		rc = PTR_ERR(vmur_class);
		goto fail_free_dbf;
	}

	rc = ccw_driver_register(&ur_driver);
	if (rc)
		goto fail_class_destroy;

	/* One minor per possible devno (see mapping in the file header). */
	rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
	if (rc) {
		pr_err("Kernel function alloc_chrdev_region failed with "
		       "error code %d\n", rc);
		goto fail_unregister_driver;
	}
	ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);

	pr_info("%s loaded.\n", ur_banner);
	return 0;

fail_unregister_driver:
	ccw_driver_unregister(&ur_driver);
fail_class_destroy:
	class_destroy(vmur_class);
fail_free_dbf:
	debug_unregister(vmur_dbf);
	return rc;
}
1047 
/*
 * Module cleanup: release resources in the reverse order of ur_init()
 * registration.
 */
static void __exit ur_exit(void)
{
	unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
	ccw_driver_unregister(&ur_driver);
	class_destroy(vmur_class);
	debug_unregister(vmur_dbf);
	pr_info("%s unloaded.\n", ur_banner);
}
1056 
/* Module entry/exit points. */
module_init(ur_init);
module_exit(ur_exit);

/* [<][>][^][v][top][bottom][index][help] */