root/drivers/staging/gasket/gasket_core.c


DEFINITIONS

This source file includes the following definitions.
  1. check_and_invoke_callback
  2. gasket_check_and_invoke_callback_nolock
  3. gasket_owned_by_current_tgid
  4. gasket_find_dev_slot
  5. gasket_alloc_dev
  6. gasket_free_dev
  7. gasket_map_pci_bar
  8. gasket_unmap_pci_bar
  9. gasket_setup_pci
  10. gasket_cleanup_pci
  11. gasket_get_hw_status
  12. gasket_write_mappable_regions
  13. gasket_sysfs_data_show
  14. gasket_add_cdev
  15. gasket_disable_device
  16. lookup_pci_internal_desc
  17. gasket_mmap_has_permissions
  18. gasket_is_coherent_region
  19. gasket_get_bar_index
  20. gasket_mm_get_mapping_addrs
  21. gasket_mm_vma_bar_offset
  22. gasket_mm_unmap_region
  23. do_map_region
  24. gasket_mmap_coherent
  25. gasket_mmap
  26. gasket_open
  27. gasket_release
  28. gasket_ioctl
  29. gasket_enable_device
  30. __gasket_add_device
  31. __gasket_remove_device
  32. gasket_pci_add_device
  33. gasket_pci_remove_device
  34. gasket_num_name_lookup
  35. gasket_reset
  36. gasket_reset_nolock
  37. gasket_get_ioctl_permissions_cb
  38. gasket_get_driver_desc
  39. gasket_get_device
  40. gasket_wait_with_reschedule
  41. gasket_register_device
  42. gasket_unregister_device
  43. gasket_init

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * Gasket generic driver framework. This file contains the implementation
   4  * for the Gasket generic driver framework - the functionality that is common
   5  * across Gasket devices.
   6  *
   7  * Copyright (C) 2018 Google, Inc.
   8  */
   9 
  10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  11 
  12 #include "gasket_core.h"
  13 
  14 #include "gasket_interrupt.h"
  15 #include "gasket_ioctl.h"
  16 #include "gasket_page_table.h"
  17 #include "gasket_sysfs.h"
  18 
  19 #include <linux/capability.h>
  20 #include <linux/compiler.h>
  21 #include <linux/delay.h>
  22 #include <linux/device.h>
  23 #include <linux/fs.h>
  24 #include <linux/init.h>
  25 #include <linux/of.h>
  26 #include <linux/pid_namespace.h>
  27 #include <linux/printk.h>
  28 #include <linux/sched.h>
  29 
  30 #ifdef GASKET_KERNEL_TRACE_SUPPORT
  31 #define CREATE_TRACE_POINTS
  32 #include <trace/events/gasket_mmap.h>
  33 #else
  34 #define trace_gasket_mmap_exit(x)
  35 #define trace_gasket_mmap_entry(x, ...)
  36 #endif
  37 
  38 /*
  39  * "Private" members of gasket_driver_desc.
  40  *
  41  * Contains internal per-device type tracking data, i.e., data not appropriate
  42  * as part of the public interface for the generic framework.
  43  */
  44 struct gasket_internal_desc {
  45         /* Device-specific-driver-provided configuration information. */
  46         const struct gasket_driver_desc *driver_desc;
  47 
  48         /* Protects access to per-driver data (i.e. this structure). */
  49         struct mutex mutex;
  50 
  51         /* Kernel-internal device class. */
  52         struct class *class;
  53 
  54         /* Instantiated / present devices of this type. */
  55         struct gasket_dev *devs[GASKET_DEV_MAX];
  56 };
  57 
   58 /* do_map_region() needs to be able to return more than just true/false. */
  59 enum do_map_region_status {
  60         /* The region was successfully mapped. */
  61         DO_MAP_REGION_SUCCESS,
  62 
  63         /* Attempted to map region and failed. */
  64         DO_MAP_REGION_FAILURE,
  65 
  66         /* The requested region to map was not part of a mappable region. */
  67         DO_MAP_REGION_INVALID,
  68 };
  69 
  70 /* Global data definitions. */
  71 /* Mutex - only for framework-wide data. Other data should be protected by
  72  * finer-grained locks.
  73  */
  74 static DEFINE_MUTEX(g_mutex);
  75 
  76 /* List of all registered device descriptions & their supporting data. */
  77 static struct gasket_internal_desc g_descs[GASKET_FRAMEWORK_DESC_MAX];
  78 
  79 /* Mapping of statuses to human-readable strings. Must end with {0,NULL}. */
  80 static const struct gasket_num_name gasket_status_name_table[] = {
  81         { GASKET_STATUS_DEAD, "DEAD" },
  82         { GASKET_STATUS_ALIVE, "ALIVE" },
  83         { GASKET_STATUS_LAMED, "LAMED" },
  84         { GASKET_STATUS_DRIVER_EXIT, "DRIVER_EXITING" },
  85         { 0, NULL },
  86 };
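      /*
       * For example, gasket_num_name_lookup() (defined later in this file)
       * walks a table like this until the {0, NULL} sentinel, so
       * gasket_num_name_lookup(GASKET_STATUS_ALIVE, gasket_status_name_table)
       * yields the string "ALIVE".
       */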
  87 
  88 /* Enumeration of the automatic Gasket framework sysfs nodes. */
  89 enum gasket_sysfs_attribute_type {
  90         ATTR_BAR_OFFSETS,
  91         ATTR_BAR_SIZES,
  92         ATTR_DRIVER_VERSION,
  93         ATTR_FRAMEWORK_VERSION,
  94         ATTR_DEVICE_TYPE,
  95         ATTR_HARDWARE_REVISION,
  96         ATTR_PCI_ADDRESS,
  97         ATTR_STATUS,
  98         ATTR_IS_DEVICE_OWNED,
  99         ATTR_DEVICE_OWNER,
 100         ATTR_WRITE_OPEN_COUNT,
 101         ATTR_RESET_COUNT,
 102         ATTR_USER_MEM_RANGES
 103 };
 104 
 105 /* Perform a standard Gasket callback. */
 106 static inline int
 107 check_and_invoke_callback(struct gasket_dev *gasket_dev,
 108                           int (*cb_function)(struct gasket_dev *))
 109 {
 110         int ret = 0;
 111 
 112         if (cb_function) {
 113                 mutex_lock(&gasket_dev->mutex);
 114                 ret = cb_function(gasket_dev);
 115                 mutex_unlock(&gasket_dev->mutex);
 116         }
 117         return ret;
 118 }
 119 
 120 /* Perform a standard Gasket callback without grabbing gasket_dev->mutex. */
 121 static inline int
 122 gasket_check_and_invoke_callback_nolock(struct gasket_dev *gasket_dev,
 123                                         int (*cb_function)(struct gasket_dev *))
 124 {
 125         int ret = 0;
 126 
 127         if (cb_function)
 128                 ret = cb_function(gasket_dev);
 129         return ret;
 130 }
 131 
 132 /*
 133  * Return nonzero if the gasket_cdev_info is owned by the current thread group
 134  * ID.
 135  */
 136 static int gasket_owned_by_current_tgid(struct gasket_cdev_info *info)
 137 {
 138         return (info->ownership.is_owned &&
 139                 (info->ownership.owner == current->tgid));
 140 }
 141 
 142 /*
 143  * Find the next free gasket_internal_dev slot.
 144  *
 145  * Returns the located slot number on success or a negative number on failure.
 146  */
 147 static int gasket_find_dev_slot(struct gasket_internal_desc *internal_desc,
 148                                 const char *kobj_name)
 149 {
 150         int i;
 151 
 152         mutex_lock(&internal_desc->mutex);
 153 
 154         /* Search for a previous instance of this device. */
 155         for (i = 0; i < GASKET_DEV_MAX; i++) {
 156                 if (internal_desc->devs[i] &&
 157                     strcmp(internal_desc->devs[i]->kobj_name, kobj_name) == 0) {
 158                         pr_err("Duplicate device %s\n", kobj_name);
 159                         mutex_unlock(&internal_desc->mutex);
 160                         return -EBUSY;
 161                 }
 162         }
 163 
 164         /* Find a free device slot. */
 165         for (i = 0; i < GASKET_DEV_MAX; i++) {
 166                 if (!internal_desc->devs[i])
 167                         break;
 168         }
 169 
 170         if (i == GASKET_DEV_MAX) {
 171                 pr_err("Too many registered devices; max %d\n", GASKET_DEV_MAX);
 172                 mutex_unlock(&internal_desc->mutex);
 173                 return -EBUSY;
 174         }
 175 
 176         mutex_unlock(&internal_desc->mutex);
 177         return i;
 178 }
 179 
 180 /*
  181  * Allocate and initialize a Gasket device structure, and add the device to the
 182  * device list.
 183  *
 184  * Returns 0 if successful, a negative error code otherwise.
 185  */
 186 static int gasket_alloc_dev(struct gasket_internal_desc *internal_desc,
 187                             struct device *parent, struct gasket_dev **pdev)
 188 {
 189         int dev_idx;
 190         const struct gasket_driver_desc *driver_desc =
 191                 internal_desc->driver_desc;
 192         struct gasket_dev *gasket_dev;
 193         struct gasket_cdev_info *dev_info;
 194         const char *parent_name = dev_name(parent);
 195 
 196         pr_debug("Allocating a Gasket device, parent %s.\n", parent_name);
 197 
 198         *pdev = NULL;
 199 
 200         dev_idx = gasket_find_dev_slot(internal_desc, parent_name);
 201         if (dev_idx < 0)
 202                 return dev_idx;
 203 
 204         gasket_dev = *pdev = kzalloc(sizeof(*gasket_dev), GFP_KERNEL);
 205         if (!gasket_dev) {
 206                 pr_err("no memory for device, parent %s\n", parent_name);
 207                 return -ENOMEM;
 208         }
 209         internal_desc->devs[dev_idx] = gasket_dev;
 210 
 211         mutex_init(&gasket_dev->mutex);
 212 
 213         gasket_dev->internal_desc = internal_desc;
 214         gasket_dev->dev_idx = dev_idx;
 215         snprintf(gasket_dev->kobj_name, GASKET_NAME_MAX, "%s", parent_name);
 216         gasket_dev->dev = get_device(parent);
 217         /* gasket_bar_data is uninitialized. */
 218         gasket_dev->num_page_tables = driver_desc->num_page_tables;
 219         /* max_page_table_size and *page table are uninit'ed */
 220         /* interrupt_data is not initialized. */
 221         /* status is 0, or GASKET_STATUS_DEAD */
 222 
 223         dev_info = &gasket_dev->dev_info;
 224         snprintf(dev_info->name, GASKET_NAME_MAX, "%s_%u", driver_desc->name,
 225                  gasket_dev->dev_idx);
 226         dev_info->devt =
 227                 MKDEV(driver_desc->major, driver_desc->minor +
 228                       gasket_dev->dev_idx);
 229         dev_info->device =
 230                 device_create(internal_desc->class, parent, dev_info->devt,
 231                               gasket_dev, dev_info->name);
 232 
 233         /* cdev has not yet been added; cdev_added is 0 */
 234         dev_info->gasket_dev_ptr = gasket_dev;
 235         /* ownership is all 0, indicating no owner or opens. */
 236 
 237         return 0;
 238 }
 239 
 240 /* Free a Gasket device. */
 241 static void gasket_free_dev(struct gasket_dev *gasket_dev)
 242 {
 243         struct gasket_internal_desc *internal_desc = gasket_dev->internal_desc;
 244 
 245         mutex_lock(&internal_desc->mutex);
 246         internal_desc->devs[gasket_dev->dev_idx] = NULL;
 247         mutex_unlock(&internal_desc->mutex);
 248         put_device(gasket_dev->dev);
 249         kfree(gasket_dev);
 250 }
 251 
 252 /*
  253  * Maps the specified BAR into kernel space.
 254  *
 255  * Returns 0 on success, a negative error code otherwise.
 256  * A zero-sized BAR will not be mapped, but is not an error.
 257  */
 258 static int gasket_map_pci_bar(struct gasket_dev *gasket_dev, int bar_num)
 259 {
 260         struct gasket_internal_desc *internal_desc = gasket_dev->internal_desc;
 261         const struct gasket_driver_desc *driver_desc =
 262                 internal_desc->driver_desc;
 263         ulong desc_bytes = driver_desc->bar_descriptions[bar_num].size;
 264         int ret;
 265 
 266         if (desc_bytes == 0)
 267                 return 0;
 268 
 269         if (driver_desc->bar_descriptions[bar_num].type != PCI_BAR) {
 270                 /* not PCI: skip this entry */
 271                 return 0;
 272         }
 273         /*
 274          * pci_resource_start and pci_resource_len return a "resource_size_t",
 275          * which is safely castable to ulong (which itself is the arg to
 276          * request_mem_region).
 277          */
 278         gasket_dev->bar_data[bar_num].phys_base =
 279                 (ulong)pci_resource_start(gasket_dev->pci_dev, bar_num);
 280         if (!gasket_dev->bar_data[bar_num].phys_base) {
 281                 dev_err(gasket_dev->dev, "Cannot get BAR%u base address\n",
 282                         bar_num);
 283                 return -EINVAL;
 284         }
 285 
 286         gasket_dev->bar_data[bar_num].length_bytes =
 287                 (ulong)pci_resource_len(gasket_dev->pci_dev, bar_num);
 288         if (gasket_dev->bar_data[bar_num].length_bytes < desc_bytes) {
 289                 dev_err(gasket_dev->dev,
 290                         "PCI BAR %u space is too small: %lu; expected >= %lu\n",
 291                         bar_num, gasket_dev->bar_data[bar_num].length_bytes,
 292                         desc_bytes);
 293                 return -ENOMEM;
 294         }
 295 
 296         if (!request_mem_region(gasket_dev->bar_data[bar_num].phys_base,
 297                                 gasket_dev->bar_data[bar_num].length_bytes,
 298                                 gasket_dev->dev_info.name)) {
 299                 dev_err(gasket_dev->dev,
 300                         "Cannot get BAR %d memory region %p\n",
 301                         bar_num, &gasket_dev->pci_dev->resource[bar_num]);
 302                 return -EINVAL;
 303         }
 304 
 305         gasket_dev->bar_data[bar_num].virt_base =
 306                 ioremap_nocache(gasket_dev->bar_data[bar_num].phys_base,
 307                                 gasket_dev->bar_data[bar_num].length_bytes);
 308         if (!gasket_dev->bar_data[bar_num].virt_base) {
 309                 dev_err(gasket_dev->dev,
 310                         "Cannot remap BAR %d memory region %p\n",
 311                         bar_num, &gasket_dev->pci_dev->resource[bar_num]);
 312                 ret = -ENOMEM;
 313                 goto fail;
 314         }
 315 
 316         dma_set_mask(&gasket_dev->pci_dev->dev, DMA_BIT_MASK(64));
 317         dma_set_coherent_mask(&gasket_dev->pci_dev->dev, DMA_BIT_MASK(64));
 318 
 319         return 0;
 320 
 321 fail:
 322         iounmap(gasket_dev->bar_data[bar_num].virt_base);
 323         release_mem_region(gasket_dev->bar_data[bar_num].phys_base,
 324                            gasket_dev->bar_data[bar_num].length_bytes);
 325         return ret;
 326 }
 327 
 328 /*
 329  * Releases PCI BAR mapping.
 330  *
 331  * A zero-sized or not-mapped BAR will not be unmapped, but is not an error.
 332  */
 333 static void gasket_unmap_pci_bar(struct gasket_dev *dev, int bar_num)
 334 {
 335         ulong base, bytes;
 336         struct gasket_internal_desc *internal_desc = dev->internal_desc;
 337         const struct gasket_driver_desc *driver_desc =
 338                 internal_desc->driver_desc;
 339 
 340         if (driver_desc->bar_descriptions[bar_num].size == 0 ||
 341             !dev->bar_data[bar_num].virt_base)
 342                 return;
 343 
 344         if (driver_desc->bar_descriptions[bar_num].type != PCI_BAR)
 345                 return;
 346 
 347         iounmap(dev->bar_data[bar_num].virt_base);
 348         dev->bar_data[bar_num].virt_base = NULL;
 349 
 350         base = pci_resource_start(dev->pci_dev, bar_num);
 351         if (!base) {
 352                 dev_err(dev->dev, "cannot get PCI BAR%u base address\n",
 353                         bar_num);
 354                 return;
 355         }
 356 
 357         bytes = pci_resource_len(dev->pci_dev, bar_num);
 358         release_mem_region(base, bytes);
 359 }
 360 
 361 /*
  362  * Set up PCI memory mapping for the specified device.
 363  *
 364  * Reads the BAR registers and sets up pointers to the device's memory mapped
 365  * IO space.
 366  *
 367  * Returns 0 on success and a negative value otherwise.
 368  */
 369 static int gasket_setup_pci(struct pci_dev *pci_dev,
 370                             struct gasket_dev *gasket_dev)
 371 {
 372         int i, mapped_bars, ret;
 373 
 374         for (i = 0; i < GASKET_NUM_BARS; i++) {
 375                 ret = gasket_map_pci_bar(gasket_dev, i);
 376                 if (ret) {
 377                         mapped_bars = i;
 378                         goto fail;
 379                 }
 380         }
 381 
 382         return 0;
 383 
 384 fail:
 385         for (i = 0; i < mapped_bars; i++)
 386                 gasket_unmap_pci_bar(gasket_dev, i);
 387 
 388         return -ENOMEM;
 389 }
 390 
 391 /* Unmaps memory for the specified device. */
 392 static void gasket_cleanup_pci(struct gasket_dev *gasket_dev)
 393 {
 394         int i;
 395 
 396         for (i = 0; i < GASKET_NUM_BARS; i++)
 397                 gasket_unmap_pci_bar(gasket_dev, i);
 398 }
 399 
 400 /* Determine the health of the Gasket device. */
 401 static int gasket_get_hw_status(struct gasket_dev *gasket_dev)
 402 {
 403         int status;
 404         int i;
 405         const struct gasket_driver_desc *driver_desc =
 406                 gasket_dev->internal_desc->driver_desc;
 407 
 408         status = gasket_check_and_invoke_callback_nolock(gasket_dev,
 409                                                          driver_desc->device_status_cb);
 410         if (status != GASKET_STATUS_ALIVE) {
 411                 dev_dbg(gasket_dev->dev, "Hardware reported status %d.\n",
 412                         status);
 413                 return status;
 414         }
 415 
 416         status = gasket_interrupt_system_status(gasket_dev);
 417         if (status != GASKET_STATUS_ALIVE) {
 418                 dev_dbg(gasket_dev->dev,
 419                         "Interrupt system reported status %d.\n", status);
 420                 return status;
 421         }
 422 
 423         for (i = 0; i < driver_desc->num_page_tables; ++i) {
 424                 status = gasket_page_table_system_status(gasket_dev->page_table[i]);
 425                 if (status != GASKET_STATUS_ALIVE) {
 426                         dev_dbg(gasket_dev->dev,
 427                                 "Page table %d reported status %d.\n",
 428                                 i, status);
 429                         return status;
 430                 }
 431         }
 432 
 433         return GASKET_STATUS_ALIVE;
 434 }
 435 
 436 static ssize_t
 437 gasket_write_mappable_regions(char *buf,
 438                               const struct gasket_driver_desc *driver_desc,
 439                               int bar_index)
 440 {
 441         int i;
 442         ssize_t written;
 443         ssize_t total_written = 0;
 444         ulong min_addr, max_addr;
 445         struct gasket_bar_desc bar_desc =
 446                 driver_desc->bar_descriptions[bar_index];
 447 
 448         if (bar_desc.permissions == GASKET_NOMAP)
 449                 return 0;
 450         for (i = 0;
 451              i < bar_desc.num_mappable_regions && total_written < PAGE_SIZE;
 452              i++) {
 453                 min_addr = bar_desc.mappable_regions[i].start -
 454                            driver_desc->legacy_mmap_address_offset;
 455                 max_addr = bar_desc.mappable_regions[i].start -
 456                            driver_desc->legacy_mmap_address_offset +
 457                            bar_desc.mappable_regions[i].length_bytes;
 458                 written = scnprintf(buf, PAGE_SIZE - total_written,
 459                                     "0x%08lx-0x%08lx\n", min_addr, max_addr);
 460                 total_written += written;
 461                 buf += written;
 462         }
 463         return total_written;
 464 }
 465 
 466 static ssize_t gasket_sysfs_data_show(struct device *device,
 467                                       struct device_attribute *attr, char *buf)
 468 {
 469         int i, ret = 0;
 470         ssize_t current_written = 0;
 471         const struct gasket_driver_desc *driver_desc;
 472         struct gasket_dev *gasket_dev;
 473         struct gasket_sysfs_attribute *gasket_attr;
 474         const struct gasket_bar_desc *bar_desc;
 475         enum gasket_sysfs_attribute_type sysfs_type;
 476 
 477         gasket_dev = gasket_sysfs_get_device_data(device);
 478         if (!gasket_dev) {
 479                 dev_err(device, "No sysfs mapping found for device\n");
 480                 return 0;
 481         }
 482 
 483         gasket_attr = gasket_sysfs_get_attr(device, attr);
 484         if (!gasket_attr) {
 485                 dev_err(device, "No sysfs attr found for device\n");
 486                 gasket_sysfs_put_device_data(device, gasket_dev);
 487                 return 0;
 488         }
 489 
 490         driver_desc = gasket_dev->internal_desc->driver_desc;
 491 
 492         sysfs_type =
 493                 (enum gasket_sysfs_attribute_type)gasket_attr->data.attr_type;
 494         switch (sysfs_type) {
 495         case ATTR_BAR_OFFSETS:
 496                 for (i = 0; i < GASKET_NUM_BARS; i++) {
 497                         bar_desc = &driver_desc->bar_descriptions[i];
 498                         if (bar_desc->size == 0)
 499                                 continue;
 500                         current_written =
 501                                 snprintf(buf, PAGE_SIZE - ret, "%d: 0x%lx\n", i,
 502                                          (ulong)bar_desc->base);
 503                         buf += current_written;
 504                         ret += current_written;
 505                 }
 506                 break;
 507         case ATTR_BAR_SIZES:
 508                 for (i = 0; i < GASKET_NUM_BARS; i++) {
 509                         bar_desc = &driver_desc->bar_descriptions[i];
 510                         if (bar_desc->size == 0)
 511                                 continue;
 512                         current_written =
 513                                 snprintf(buf, PAGE_SIZE - ret, "%d: 0x%lx\n", i,
 514                                          (ulong)bar_desc->size);
 515                         buf += current_written;
 516                         ret += current_written;
 517                 }
 518                 break;
 519         case ATTR_DRIVER_VERSION:
 520                 ret = snprintf(buf, PAGE_SIZE, "%s\n",
 521                                gasket_dev->internal_desc->driver_desc->driver_version);
 522                 break;
 523         case ATTR_FRAMEWORK_VERSION:
 524                 ret = snprintf(buf, PAGE_SIZE, "%s\n",
 525                                GASKET_FRAMEWORK_VERSION);
 526                 break;
 527         case ATTR_DEVICE_TYPE:
 528                 ret = snprintf(buf, PAGE_SIZE, "%s\n",
 529                                gasket_dev->internal_desc->driver_desc->name);
 530                 break;
 531         case ATTR_HARDWARE_REVISION:
 532                 ret = snprintf(buf, PAGE_SIZE, "%d\n",
 533                                gasket_dev->hardware_revision);
 534                 break;
 535         case ATTR_PCI_ADDRESS:
 536                 ret = snprintf(buf, PAGE_SIZE, "%s\n", gasket_dev->kobj_name);
 537                 break;
 538         case ATTR_STATUS:
 539                 ret = snprintf(buf, PAGE_SIZE, "%s\n",
 540                                gasket_num_name_lookup(gasket_dev->status,
 541                                                       gasket_status_name_table));
 542                 break;
 543         case ATTR_IS_DEVICE_OWNED:
 544                 ret = snprintf(buf, PAGE_SIZE, "%d\n",
 545                                gasket_dev->dev_info.ownership.is_owned);
 546                 break;
 547         case ATTR_DEVICE_OWNER:
 548                 ret = snprintf(buf, PAGE_SIZE, "%d\n",
 549                                gasket_dev->dev_info.ownership.owner);
 550                 break;
 551         case ATTR_WRITE_OPEN_COUNT:
 552                 ret = snprintf(buf, PAGE_SIZE, "%d\n",
 553                                gasket_dev->dev_info.ownership.write_open_count);
 554                 break;
 555         case ATTR_RESET_COUNT:
 556                 ret = snprintf(buf, PAGE_SIZE, "%d\n", gasket_dev->reset_count);
 557                 break;
 558         case ATTR_USER_MEM_RANGES:
 559                 for (i = 0; i < GASKET_NUM_BARS; ++i) {
 560                         current_written =
 561                                 gasket_write_mappable_regions(buf, driver_desc,
 562                                                               i);
 563                         buf += current_written;
 564                         ret += current_written;
 565                 }
 566                 break;
 567         default:
 568                 dev_dbg(gasket_dev->dev, "Unknown attribute: %s\n",
 569                         attr->attr.name);
 570                 ret = 0;
 571                 break;
 572         }
 573 
 574         gasket_sysfs_put_attr(device, gasket_attr);
 575         gasket_sysfs_put_device_data(device, gasket_dev);
 576         return ret;
 577 }
 578 
 579 /* These attributes apply to all Gasket driver instances. */
 580 static const struct gasket_sysfs_attribute gasket_sysfs_generic_attrs[] = {
 581         GASKET_SYSFS_RO(bar_offsets, gasket_sysfs_data_show, ATTR_BAR_OFFSETS),
 582         GASKET_SYSFS_RO(bar_sizes, gasket_sysfs_data_show, ATTR_BAR_SIZES),
 583         GASKET_SYSFS_RO(driver_version, gasket_sysfs_data_show,
 584                         ATTR_DRIVER_VERSION),
 585         GASKET_SYSFS_RO(framework_version, gasket_sysfs_data_show,
 586                         ATTR_FRAMEWORK_VERSION),
 587         GASKET_SYSFS_RO(device_type, gasket_sysfs_data_show, ATTR_DEVICE_TYPE),
 588         GASKET_SYSFS_RO(revision, gasket_sysfs_data_show,
 589                         ATTR_HARDWARE_REVISION),
 590         GASKET_SYSFS_RO(pci_address, gasket_sysfs_data_show, ATTR_PCI_ADDRESS),
 591         GASKET_SYSFS_RO(status, gasket_sysfs_data_show, ATTR_STATUS),
 592         GASKET_SYSFS_RO(is_device_owned, gasket_sysfs_data_show,
 593                         ATTR_IS_DEVICE_OWNED),
 594         GASKET_SYSFS_RO(device_owner, gasket_sysfs_data_show,
 595                         ATTR_DEVICE_OWNER),
 596         GASKET_SYSFS_RO(write_open_count, gasket_sysfs_data_show,
 597                         ATTR_WRITE_OPEN_COUNT),
 598         GASKET_SYSFS_RO(reset_count, gasket_sysfs_data_show, ATTR_RESET_COUNT),
 599         GASKET_SYSFS_RO(user_mem_ranges, gasket_sysfs_data_show,
 600                         ATTR_USER_MEM_RANGES),
 601         GASKET_END_OF_ATTR_ARRAY
 602 };
 603 
 604 /* Add a char device and related info. */
 605 static int gasket_add_cdev(struct gasket_cdev_info *dev_info,
 606                            const struct file_operations *file_ops,
 607                            struct module *owner)
 608 {
 609         int ret;
 610 
 611         cdev_init(&dev_info->cdev, file_ops);
 612         dev_info->cdev.owner = owner;
 613         ret = cdev_add(&dev_info->cdev, dev_info->devt, 1);
 614         if (ret) {
 615                 dev_err(dev_info->gasket_dev_ptr->dev,
 616                         "cannot add char device [ret=%d]\n", ret);
 617                 return ret;
 618         }
 619         dev_info->cdev_added = 1;
 620 
 621         return 0;
 622 }
 623 
 624 /* Disable device operations. */
 625 void gasket_disable_device(struct gasket_dev *gasket_dev)
 626 {
 627         const struct gasket_driver_desc *driver_desc =
 628                 gasket_dev->internal_desc->driver_desc;
 629         int i;
 630 
 631         /* Only delete the device if it has been successfully added. */
 632         if (gasket_dev->dev_info.cdev_added)
 633                 cdev_del(&gasket_dev->dev_info.cdev);
 634 
 635         gasket_dev->status = GASKET_STATUS_DEAD;
 636 
 637         gasket_interrupt_cleanup(gasket_dev);
 638 
 639         for (i = 0; i < driver_desc->num_page_tables; ++i) {
 640                 if (gasket_dev->page_table[i]) {
 641                         gasket_page_table_reset(gasket_dev->page_table[i]);
 642                         gasket_page_table_cleanup(gasket_dev->page_table[i]);
 643                 }
 644         }
 645 }
 646 EXPORT_SYMBOL(gasket_disable_device);
 647 
 648 /*
 649  * Registered driver descriptor lookup for PCI devices.
 650  *
 651  * Precondition: Called with g_mutex held (to avoid a race on return).
 652  * Returns NULL if no matching device was found.
 653  */
 654 static struct gasket_internal_desc *
 655 lookup_pci_internal_desc(struct pci_dev *pci_dev)
 656 {
 657         int i;
 658 
 659         __must_hold(&g_mutex);
 660         for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
 661                 if (g_descs[i].driver_desc &&
 662                     g_descs[i].driver_desc->pci_id_table &&
 663                     pci_match_id(g_descs[i].driver_desc->pci_id_table, pci_dev))
 664                         return &g_descs[i];
 665         }
 666 
 667         return NULL;
 668 }
 669 
 670 /*
 671  * Verifies that the user has permissions to perform the requested mapping and
 672  * that the provided descriptor/range is of adequate size to hold the range to
 673  * be mapped.
 674  */
 675 static bool gasket_mmap_has_permissions(struct gasket_dev *gasket_dev,
 676                                         struct vm_area_struct *vma,
 677                                         int bar_permissions)
 678 {
 679         int requested_permissions;
 680         /* Always allow sysadmin to access. */
 681         if (capable(CAP_SYS_ADMIN))
 682                 return true;
 683 
 684         /* Never allow non-sysadmins to access to a dead device. */
 685         if (gasket_dev->status != GASKET_STATUS_ALIVE) {
 686                 dev_dbg(gasket_dev->dev, "Device is dead.\n");
 687                 return false;
 688         }
 689 
 690         /* Make sure that no wrong flags are set. */
 691         requested_permissions =
 692                 (vma->vm_flags & (VM_WRITE | VM_READ | VM_EXEC));
 693         if (requested_permissions & ~(bar_permissions)) {
 694                 dev_dbg(gasket_dev->dev,
 695                         "Attempting to map a region with requested permissions "
 696                         "0x%x, but region has permissions 0x%x.\n",
 697                         requested_permissions, bar_permissions);
 698                 return false;
 699         }
 700 
 701         /* Do not allow a non-owner to write. */
 702         if ((vma->vm_flags & VM_WRITE) &&
 703             !gasket_owned_by_current_tgid(&gasket_dev->dev_info)) {
 704                 dev_dbg(gasket_dev->dev,
 705                         "Attempting to mmap a region for write without owning device.\n");
 706                 return false;
 707         }
 708 
 709         return true;
 710 }
 711 
 712 /*
  713  * Verifies that the input address is within the region allocated to the coherent
 714  * buffer.
 715  */
 716 static bool
 717 gasket_is_coherent_region(const struct gasket_driver_desc *driver_desc,
 718                           ulong address)
 719 {
 720         struct gasket_coherent_buffer_desc coh_buff_desc =
 721                 driver_desc->coherent_buffer_description;
 722 
 723         if (coh_buff_desc.permissions != GASKET_NOMAP) {
 724                 if ((address >= coh_buff_desc.base) &&
 725                     (address < coh_buff_desc.base + coh_buff_desc.size)) {
 726                         return true;
 727                 }
 728         }
 729         return false;
 730 }
 731 
 732 static int gasket_get_bar_index(const struct gasket_dev *gasket_dev,
 733                                 ulong phys_addr)
 734 {
 735         int i;
 736         const struct gasket_driver_desc *driver_desc;
 737 
 738         driver_desc = gasket_dev->internal_desc->driver_desc;
 739         for (i = 0; i < GASKET_NUM_BARS; ++i) {
 740                 struct gasket_bar_desc bar_desc =
 741                         driver_desc->bar_descriptions[i];
 742 
 743                 if (bar_desc.permissions != GASKET_NOMAP) {
 744                         if (phys_addr >= bar_desc.base &&
 745                             phys_addr < (bar_desc.base + bar_desc.size)) {
 746                                 return i;
 747                         }
 748                 }
 749         }
 750         /* If we haven't found the address by now, it is invalid. */
 751         return -EINVAL;
 752 }
 753 
 754 /*
 755  * Sets the actual bounds to map, given the device's mappable region.
 756  *
 757  * Given the device's mappable region, along with the user-requested mapping
 758  * start offset and length of the user region, determine how much of this
 759  * mappable region can be mapped into the user's region (start/end offsets),
 760  * and the physical offset (phys_offset) into the BAR where the mapping should
 761  * begin (either the VMA's or region lower bound).
 762  *
 763  * In other words, this calculates the overlap between the VMA
 764  * (bar_offset, requested_length) and the given gasket_mappable_region.
 765  *
 766  * Returns true if there's anything to map, and false otherwise.
 767  */
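      /*
       * Worked example with illustrative numbers: for a mappable region of
       * {start = 0x1000, length_bytes = 0x3000} (covering 0x1000..0x4000), a
       * VMA with bar_offset = 0 and requested_length = 0x2000 hits the second
       * case below: mappable_region->start = 0x1000, *virt_offset = 0x1000,
       * and length_bytes = min(0x2000 - 0x1000, 0x3000) = 0x1000.
       */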
 768 static bool
 769 gasket_mm_get_mapping_addrs(const struct gasket_mappable_region *region,
 770                             ulong bar_offset, ulong requested_length,
 771                             struct gasket_mappable_region *mappable_region,
 772                             ulong *virt_offset)
 773 {
 774         ulong range_start = region->start;
 775         ulong range_length = region->length_bytes;
 776         ulong range_end = range_start + range_length;
 777 
 778         *virt_offset = 0;
 779         if (bar_offset + requested_length < range_start) {
 780                 /*
 781                  * If the requested region is completely below the range,
 782                  * there is nothing to map.
 783                  */
 784                 return false;
 785         } else if (bar_offset <= range_start) {
 786                 /* If the bar offset is below this range's start
 787                  * but the requested length continues into it:
 788                  * 1) Only map starting from the beginning of this
 789                  *      range's phys. offset, so we don't map unmappable
 790                  *      memory.
 791                  * 2) The length of the virtual memory to not map is the
 792                  *      delta between the bar offset and the
 793                  *      mappable start (and since the mappable start is
 794                  *      bigger, start - req.)
 795                  * 3) The map length is the minimum of the mappable
 796                  *      requested length (requested_length - virt_offset)
 797                  *      and the actual mappable length of the range.
 798                  */
 799                 mappable_region->start = range_start;
 800                 *virt_offset = range_start - bar_offset;
 801                 mappable_region->length_bytes =
 802                         min(requested_length - *virt_offset, range_length);
 803                 return true;
 804         } else if (bar_offset > range_start &&
 805                    bar_offset < range_end) {
 806                 /*
 807                  * If the bar offset is within this range:
 808                  * 1) Map starting from the bar offset.
 809                  * 2) Because there is no forbidden memory between the
 810                  *      bar offset and the range start,
 811                  *      virt_offset is 0.
 812                  * 3) The map length is the minimum of the requested
 813                  *      length and the remaining length in the buffer
 814                  *      (range_end - bar_offset)
 815                  */
 816                 mappable_region->start = bar_offset;
 817                 *virt_offset = 0;
 818                 mappable_region->length_bytes =
 819                         min(requested_length, range_end - bar_offset);
 820                 return true;
 821         }
 822 
 823         /*
 824          * If the requested [start] offset is above range_end,
 825          * there's nothing to map.
 826          */
 827         return false;
 828 }
 829 
 830 /*
 831  * Calculates the offset where the VMA range begins in its containing BAR.
 832  * The offset is written into bar_offset on success.
 833  * Returns zero on success, anything else on error.
 834  */
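      /*
       * For example (illustrative values, assuming 4 KiB pages): with
       * legacy_mmap_address_offset = 0 and vma->vm_pgoff = 0x48, raw_offset is
       * 0x48000; if that address falls in a BAR whose descriptor base is
       * 0x40000, *bar_offset becomes 0x8000.
       */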
 835 static int gasket_mm_vma_bar_offset(const struct gasket_dev *gasket_dev,
 836                                     const struct vm_area_struct *vma,
 837                                     ulong *bar_offset)
 838 {
 839         ulong raw_offset;
 840         int bar_index;
 841         const struct gasket_driver_desc *driver_desc =
 842                 gasket_dev->internal_desc->driver_desc;
 843 
 844         raw_offset = (vma->vm_pgoff << PAGE_SHIFT) +
 845                 driver_desc->legacy_mmap_address_offset;
 846         bar_index = gasket_get_bar_index(gasket_dev, raw_offset);
 847         if (bar_index < 0) {
 848                 dev_err(gasket_dev->dev,
 849                         "Unable to find matching bar for address 0x%lx\n",
 850                         raw_offset);
 851                 trace_gasket_mmap_exit(bar_index);
 852                 return bar_index;
 853         }
 854         *bar_offset =
 855                 raw_offset - driver_desc->bar_descriptions[bar_index].base;
 856 
 857         return 0;
 858 }
 859 
 860 int gasket_mm_unmap_region(const struct gasket_dev *gasket_dev,
 861                            struct vm_area_struct *vma,
 862                            const struct gasket_mappable_region *map_region)
 863 {
 864         ulong bar_offset;
 865         ulong virt_offset;
 866         struct gasket_mappable_region mappable_region;
 867         int ret;
 868 
 869         if (map_region->length_bytes == 0)
 870                 return 0;
 871 
 872         ret = gasket_mm_vma_bar_offset(gasket_dev, vma, &bar_offset);
 873         if (ret)
 874                 return ret;
 875 
 876         if (!gasket_mm_get_mapping_addrs(map_region, bar_offset,
 877                                          vma->vm_end - vma->vm_start,
 878                                          &mappable_region, &virt_offset))
 879                 return 1;
 880 
 881         /*
 882          * The length passed to zap_vma_ptes MUST BE A MULTIPLE OF
 883          * PAGE_SIZE! Trust me. I have the scars.
 884          *
 885          * Next multiple of y: ceil_div(x, y) * y
 886          */
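              /*
               * For instance, a mappable_region.length_bytes of 0x1800 with a
               * 4 KiB PAGE_SIZE rounds up to 0x2000 bytes here.
               */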
 887         zap_vma_ptes(vma, vma->vm_start + virt_offset,
 888                      DIV_ROUND_UP(mappable_region.length_bytes, PAGE_SIZE) *
 889                      PAGE_SIZE);
 890         return 0;
 891 }
 892 EXPORT_SYMBOL(gasket_mm_unmap_region);
 893 
 894 /* Maps a virtual address + range to a physical offset of a BAR. */
 895 static enum do_map_region_status
 896 do_map_region(const struct gasket_dev *gasket_dev, struct vm_area_struct *vma,
 897               struct gasket_mappable_region *mappable_region)
 898 {
 899         /* Maximum size of a single call to io_remap_pfn_range. */
 900         /* I pulled this number out of thin air. */
 901         const ulong max_chunk_size = 64 * 1024 * 1024;
 902         ulong chunk_size, mapped_bytes = 0;
 903 
 904         const struct gasket_driver_desc *driver_desc =
 905                 gasket_dev->internal_desc->driver_desc;
 906 
 907         ulong bar_offset, virt_offset;
 908         struct gasket_mappable_region region_to_map;
 909         ulong phys_offset, map_length;
 910         ulong virt_base, phys_base;
 911         int bar_index, ret;
 912 
 913         ret = gasket_mm_vma_bar_offset(gasket_dev, vma, &bar_offset);
 914         if (ret)
 915                 return DO_MAP_REGION_INVALID;
 916 
 917         if (!gasket_mm_get_mapping_addrs(mappable_region, bar_offset,
 918                                          vma->vm_end - vma->vm_start,
 919                                          &region_to_map, &virt_offset))
 920                 return DO_MAP_REGION_INVALID;
 921         phys_offset = region_to_map.start;
 922         map_length = region_to_map.length_bytes;
 923 
 924         virt_base = vma->vm_start + virt_offset;
 925         bar_index =
 926                 gasket_get_bar_index(gasket_dev,
 927                                      (vma->vm_pgoff << PAGE_SHIFT) +
 928                                      driver_desc->legacy_mmap_address_offset);
 929 
 930         if (bar_index < 0)
 931                 return DO_MAP_REGION_INVALID;
 932 
 933         phys_base = gasket_dev->bar_data[bar_index].phys_base + phys_offset;
 934         while (mapped_bytes < map_length) {
 935                 /*
 936                  * io_remap_pfn_range can take a while, so we chunk its
 937                  * calls and call cond_resched between each.
 938                  */
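                      /*
                       * e.g. a 200 MiB mapping becomes four io_remap_pfn_range()
                       * calls of 64 + 64 + 64 + 8 MiB, with cond_resched()
                       * before each one.
                       */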
 939                 chunk_size = min(max_chunk_size, map_length - mapped_bytes);
 940 
 941                 cond_resched();
 942                 ret = io_remap_pfn_range(vma, virt_base + mapped_bytes,
 943                                          (phys_base + mapped_bytes) >>
 944                                          PAGE_SHIFT, chunk_size,
 945                                          vma->vm_page_prot);
 946                 if (ret) {
 947                         dev_err(gasket_dev->dev,
 948                                 "Error remapping PFN range.\n");
 949                         goto fail;
 950                 }
 951                 mapped_bytes += chunk_size;
 952         }
 953 
 954         return DO_MAP_REGION_SUCCESS;
 955 
 956 fail:
 957         /* Unmap the partial chunk we mapped. */
 958         mappable_region->length_bytes = mapped_bytes;
 959         if (gasket_mm_unmap_region(gasket_dev, vma, mappable_region))
 960                 dev_err(gasket_dev->dev,
 961                         "Error unmapping partial region 0x%lx (0x%lx bytes)\n",
 962                         (ulong)virt_offset,
 963                         (ulong)mapped_bytes);
 964 
 965         return DO_MAP_REGION_FAILURE;
 966 }
 967 
 968 /* Map a region of coherent memory. */
 969 static int gasket_mmap_coherent(struct gasket_dev *gasket_dev,
 970                                 struct vm_area_struct *vma)
 971 {
 972         const struct gasket_driver_desc *driver_desc =
 973                 gasket_dev->internal_desc->driver_desc;
 974         const ulong requested_length = vma->vm_end - vma->vm_start;
 975         int ret;
 976         ulong permissions;
 977 
 978         if (requested_length == 0 || requested_length >
 979             gasket_dev->coherent_buffer.length_bytes) {
 980                 trace_gasket_mmap_exit(-EINVAL);
 981                 return -EINVAL;
 982         }
 983 
 984         permissions = driver_desc->coherent_buffer_description.permissions;
 985         if (!gasket_mmap_has_permissions(gasket_dev, vma, permissions)) {
 986                 dev_err(gasket_dev->dev, "Permission checking failed.\n");
 987                 trace_gasket_mmap_exit(-EPERM);
 988                 return -EPERM;
 989         }
 990 
 991         vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 992 
 993         ret = remap_pfn_range(vma, vma->vm_start,
 994                               (gasket_dev->coherent_buffer.phys_base) >>
 995                               PAGE_SHIFT, requested_length, vma->vm_page_prot);
 996         if (ret) {
 997                 dev_err(gasket_dev->dev, "Error remapping PFN range err=%d.\n",
 998                         ret);
 999                 trace_gasket_mmap_exit(ret);
1000                 return ret;
1001         }
1002 
1003         /* Record the user virtual to dma_address mapping that was
1004          * created by the kernel.
1005          */
1006         gasket_set_user_virt(gasket_dev, requested_length,
1007                              gasket_dev->coherent_buffer.phys_base,
1008                              vma->vm_start);
1009         return 0;
1010 }
1011 
1012 /* Map a device's BARs into user space. */
1013 static int gasket_mmap(struct file *filp, struct vm_area_struct *vma)
1014 {
1015         int i, ret;
1016         int bar_index;
1017         int has_mapped_anything = 0;
1018         ulong permissions;
1019         ulong raw_offset, vma_size;
1020         bool is_coherent_region;
1021         const struct gasket_driver_desc *driver_desc;
1022         struct gasket_dev *gasket_dev = (struct gasket_dev *)filp->private_data;
1023         const struct gasket_bar_desc *bar_desc;
1024         struct gasket_mappable_region *map_regions = NULL;
1025         int num_map_regions = 0;
1026         enum do_map_region_status map_status;
1027 
1028         driver_desc = gasket_dev->internal_desc->driver_desc;
1029 
1030         if (vma->vm_start & ~PAGE_MASK) {
1031                 dev_err(gasket_dev->dev,
1032                         "Base address not page-aligned: 0x%lx\n",
1033                         vma->vm_start);
1034                 trace_gasket_mmap_exit(-EINVAL);
1035                 return -EINVAL;
1036         }
1037 
1038         /* Calculate the offset of this range into physical mem. */
1039         raw_offset = (vma->vm_pgoff << PAGE_SHIFT) +
1040                 driver_desc->legacy_mmap_address_offset;
1041         vma_size = vma->vm_end - vma->vm_start;
1042         trace_gasket_mmap_entry(gasket_dev->dev_info.name, raw_offset,
1043                                 vma_size);
1044 
1045         /*
1046          * Check if the raw offset is within a bar region. If not, check if it
1047          * is a coherent region.
1048          */
1049         bar_index = gasket_get_bar_index(gasket_dev, raw_offset);
1050         is_coherent_region = gasket_is_coherent_region(driver_desc, raw_offset);
1051         if (bar_index < 0 && !is_coherent_region) {
1052                 dev_err(gasket_dev->dev,
1053                         "Unable to find matching bar for address 0x%lx\n",
1054                         raw_offset);
1055                 trace_gasket_mmap_exit(bar_index);
1056                 return bar_index;
1057         }
 1058         if (bar_index >= 0 && is_coherent_region) {
1059                 dev_err(gasket_dev->dev,
1060                         "double matching bar and coherent buffers for address 0x%lx\n",
1061                         raw_offset);
1062                 trace_gasket_mmap_exit(bar_index);
1063                 return -EINVAL;
1064         }
1065 
1066         vma->vm_private_data = gasket_dev;
1067 
1068         if (is_coherent_region)
1069                 return gasket_mmap_coherent(gasket_dev, vma);
1070 
1071         /* Everything in the rest of this function is for normal BAR mapping. */
1072 
1073         /*
1074          * Subtract the base of the bar from the raw offset to get the
1075          * memory location within the bar to map.
1076          */
1077         bar_desc = &driver_desc->bar_descriptions[bar_index];
1078         permissions = bar_desc->permissions;
1079         if (!gasket_mmap_has_permissions(gasket_dev, vma, permissions)) {
1080                 dev_err(gasket_dev->dev, "Permission checking failed.\n");
1081                 trace_gasket_mmap_exit(-EPERM);
1082                 return -EPERM;
1083         }
1084 
1085         if (driver_desc->get_mappable_regions_cb) {
1086                 ret = driver_desc->get_mappable_regions_cb(gasket_dev,
1087                                                            bar_index,
1088                                                            &map_regions,
1089                                                            &num_map_regions);
1090                 if (ret)
1091                         return ret;
1092         } else {
1093                 if (!gasket_mmap_has_permissions(gasket_dev, vma,
1094                                                  bar_desc->permissions)) {
1095                         dev_err(gasket_dev->dev,
1096                                 "Permission checking failed.\n");
1097                         trace_gasket_mmap_exit(-EPERM);
1098                         return -EPERM;
1099                 }
1100                 num_map_regions = bar_desc->num_mappable_regions;
1101                 map_regions = kcalloc(num_map_regions,
1102                                       sizeof(*bar_desc->mappable_regions),
1103                                       GFP_KERNEL);
1104                 if (map_regions) {
1105                         memcpy(map_regions, bar_desc->mappable_regions,
1106                                num_map_regions *
1107                                         sizeof(*bar_desc->mappable_regions));
1108                 }
1109         }
1110 
1111         if (!map_regions || num_map_regions == 0) {
1112                 dev_err(gasket_dev->dev, "No mappable regions returned!\n");
1113                 return -EINVAL;
1114         }
1115 
1116         /* Marks the VMA's pages as uncacheable. */
1117         vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1118         for (i = 0; i < num_map_regions; i++) {
1119                 map_status = do_map_region(gasket_dev, vma, &map_regions[i]);
1120                 /* Try the next region if this one was not mappable. */
1121                 if (map_status == DO_MAP_REGION_INVALID)
1122                         continue;
1123                 if (map_status == DO_MAP_REGION_FAILURE) {
1124                         ret = -ENOMEM;
1125                         goto fail;
1126                 }
1127 
1128                 has_mapped_anything = 1;
1129         }
1130 
1131         kfree(map_regions);
1132 
1133         /* If we could not map any memory, the request was invalid. */
1134         if (!has_mapped_anything) {
1135                 dev_err(gasket_dev->dev,
1136                         "Map request did not contain a valid region.\n");
1137                 trace_gasket_mmap_exit(-EINVAL);
1138                 return -EINVAL;
1139         }
1140 
1141         trace_gasket_mmap_exit(0);
1142         return 0;
1143 
1144 fail:
1145         /* Need to unmap any mapped ranges. */
1146         num_map_regions = i;
1147         for (i = 0; i < num_map_regions; i++)
1148                 if (gasket_mm_unmap_region(gasket_dev, vma,
1149                                            &bar_desc->mappable_regions[i]))
1150                         dev_err(gasket_dev->dev, "Error unmapping range %d.\n",
1151                                 i);
1152         kfree(map_regions);
1153 
1154         return ret;
1155 }
1156 
1157 /*
1158  * Open the char device file.
1159  *
1160  * If the open is for writing, and the device is not owned, this process becomes
1161  * the owner.  If the open is for writing and the device is already owned by
1162  * some other process, it is an error.  If this process is the owner, increment
1163  * the open count.
1164  *
1165  * Returns 0 if successful, a negative error number otherwise.
1166  */
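      /*
       * Illustrative scenario: process A opens the node for writing and becomes
       * the owner (write_open_count = 1); a second writable open by A raises
       * the count to 2, while a writable open by an unrelated, non-root process
       * B fails with -EPERM.  Read-only opens always succeed and do not affect
       * ownership.
       */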
1167 static int gasket_open(struct inode *inode, struct file *filp)
1168 {
1169         int ret;
1170         struct gasket_dev *gasket_dev;
1171         const struct gasket_driver_desc *driver_desc;
1172         struct gasket_ownership *ownership;
1173         char task_name[TASK_COMM_LEN];
1174         struct gasket_cdev_info *dev_info =
1175             container_of(inode->i_cdev, struct gasket_cdev_info, cdev);
1176         struct pid_namespace *pid_ns = task_active_pid_ns(current);
1177         bool is_root = ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN);
1178 
1179         gasket_dev = dev_info->gasket_dev_ptr;
1180         driver_desc = gasket_dev->internal_desc->driver_desc;
1181         ownership = &dev_info->ownership;
1182         get_task_comm(task_name, current);
1183         filp->private_data = gasket_dev;
1184         inode->i_size = 0;
1185 
1186         dev_dbg(gasket_dev->dev,
1187                 "Attempting to open with tgid %u (%s) (f_mode: 0%03o, "
1188                 "fmode_write: %d is_root: %u)\n",
1189                 current->tgid, task_name, filp->f_mode,
1190                 (filp->f_mode & FMODE_WRITE), is_root);
1191 
1192         /* Always allow non-writing accesses. */
1193         if (!(filp->f_mode & FMODE_WRITE)) {
1194                 dev_dbg(gasket_dev->dev, "Allowing read-only opening.\n");
1195                 return 0;
1196         }
1197 
1198         mutex_lock(&gasket_dev->mutex);
1199 
1200         dev_dbg(gasket_dev->dev,
1201                 "Current owner open count (owning tgid %u): %d.\n",
1202                 ownership->owner, ownership->write_open_count);
1203 
1204         /* Opening a node owned by another TGID is an error (unless root) */
1205         if (ownership->is_owned && ownership->owner != current->tgid &&
1206             !is_root) {
1207                 dev_err(gasket_dev->dev,
1208                         "Process %u is opening a node held by %u.\n",
1209                         current->tgid, ownership->owner);
1210                 mutex_unlock(&gasket_dev->mutex);
1211                 return -EPERM;
1212         }
1213 
1214         /* If the node is not owned, assign it to the current TGID. */
1215         if (!ownership->is_owned) {
1216                 ret = gasket_check_and_invoke_callback_nolock(gasket_dev,
1217                                                               driver_desc->device_open_cb);
1218                 if (ret) {
1219                         dev_err(gasket_dev->dev,
1220                                 "Error in device open cb: %d\n", ret);
1221                         mutex_unlock(&gasket_dev->mutex);
1222                         return ret;
1223                 }
1224                 ownership->is_owned = 1;
1225                 ownership->owner = current->tgid;
1226                 dev_dbg(gasket_dev->dev, "Device owner is now tgid %u\n",
1227                         ownership->owner);
1228         }
1229 
1230         ownership->write_open_count++;
1231 
1232         dev_dbg(gasket_dev->dev, "New open count (owning tgid %u): %d\n",
1233                 ownership->owner, ownership->write_open_count);
1234 
1235         mutex_unlock(&gasket_dev->mutex);
1236         return 0;
1237 }
1238 
1239 /*
1240  * Called on a close of the device file.  If this process is the owner,
1241  * decrement the open count.  On last close by the owner, free up buffers and
1242  * eventfd contexts, and release ownership.
1243  *
1244  * Returns 0 if successful, a negative error number otherwise.
1245  */
1246 static int gasket_release(struct inode *inode, struct file *file)
1247 {
1248         int i;
1249         struct gasket_dev *gasket_dev;
1250         struct gasket_ownership *ownership;
1251         const struct gasket_driver_desc *driver_desc;
1252         char task_name[TASK_COMM_LEN];
1253         struct gasket_cdev_info *dev_info =
1254                 container_of(inode->i_cdev, struct gasket_cdev_info, cdev);
1255         struct pid_namespace *pid_ns = task_active_pid_ns(current);
1256         bool is_root = ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN);
1257 
1258         gasket_dev = dev_info->gasket_dev_ptr;
1259         driver_desc = gasket_dev->internal_desc->driver_desc;
1260         ownership = &dev_info->ownership;
1261         get_task_comm(task_name, current);
1262         mutex_lock(&gasket_dev->mutex);
1263 
1264         dev_dbg(gasket_dev->dev,
1265                 "Releasing device node. Call origin: tgid %u (%s) "
1266                 "(f_mode: 0%03o, fmode_write: %d, is_root: %u)\n",
1267                 current->tgid, task_name, file->f_mode,
1268                 (file->f_mode & FMODE_WRITE), is_root);
1269         dev_dbg(gasket_dev->dev, "Current open count (owning tgid %u): %d\n",
1270                 ownership->owner, ownership->write_open_count);
1271 
1272         if (file->f_mode & FMODE_WRITE) {
1273                 ownership->write_open_count--;
1274                 if (ownership->write_open_count == 0) {
1275                         dev_dbg(gasket_dev->dev, "Device is now free\n");
1276                         ownership->is_owned = 0;
1277                         ownership->owner = 0;
1278 
1279                         /* Forces chip reset before we unmap the page tables. */
1280                         driver_desc->device_reset_cb(gasket_dev);
1281 
1282                         for (i = 0; i < driver_desc->num_page_tables; ++i) {
1283                                 gasket_page_table_unmap_all(gasket_dev->page_table[i]);
1284                                 gasket_page_table_garbage_collect(gasket_dev->page_table[i]);
1285                                 gasket_free_coherent_memory_all(gasket_dev, i);
1286                         }
1287 
1288                         /* Closes device, enters power save. */
1289                         gasket_check_and_invoke_callback_nolock(gasket_dev,
1290                                                                 driver_desc->device_close_cb);
1291                 }
1292         }
1293 
1294         dev_dbg(gasket_dev->dev, "New open count (owning tgid %u): %d\n",
1295                 ownership->owner, ownership->write_open_count);
1296         mutex_unlock(&gasket_dev->mutex);
1297         return 0;
1298 }
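
/*
 * Illustrative sketch (not part of the original file): the reset and close
 * callbacks that gasket_release() invokes are supplied by the device-specific
 * driver through its gasket_driver_desc.  A hypothetical implementation could
 * look like the following; my_hw_reset() and my_enter_power_save() are
 * assumed, device-specific helpers.
 */
static int my_device_reset(struct gasket_dev *gasket_dev)
{
        /* Hypothetical chip reset, run before the page tables are unmapped. */
        return my_hw_reset(gasket_dev);
}

static int my_device_close(struct gasket_dev *gasket_dev)
{
        /* Hypothetical transition into the device's power-save state. */
        return my_enter_power_save(gasket_dev);
}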
1299 
1300 /*
1301  * Gasket ioctl dispatch function.
1302  *
1303  * Check if the ioctl is a generic Gasket ioctl. If not, pass the ioctl to
1304  * the ioctl_handler_cb registered in the driver description.
1305  * If the ioctl is a generic ioctl, pass it to gasket_handle_ioctl.
1306  */
1307 static long gasket_ioctl(struct file *filp, uint cmd, ulong arg)
1308 {
1309         struct gasket_dev *gasket_dev;
1310         const struct gasket_driver_desc *driver_desc;
1311         void __user *argp = (void __user *)arg;
1312         char path[256];
1313 
1314         gasket_dev = (struct gasket_dev *)filp->private_data;
1315         driver_desc = gasket_dev->internal_desc->driver_desc;
1316         if (!driver_desc) {
1317                 dev_dbg(gasket_dev->dev,
1318                         "Unable to find device descriptor for file %s\n",
1319                         d_path(&filp->f_path, path, 256));
1320                 return -ENODEV;
1321         }
1322 
1323         if (!gasket_is_supported_ioctl(cmd)) {
1324                 /*
1325                  * The ioctl handler is not a standard Gasket callback, since
1326                  * it requires different arguments. This means we can't use
1327                  * check_and_invoke_callback.
1328                  */
1329                 if (driver_desc->ioctl_handler_cb)
1330                         return driver_desc->ioctl_handler_cb(filp, cmd, argp);
1331 
1332                 dev_dbg(gasket_dev->dev, "Received unknown ioctl 0x%x\n", cmd);
1333                 return -EINVAL;
1334         }
1335 
1336         return gasket_handle_ioctl(filp, cmd, argp);
1337 }
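
/*
 * Illustrative sketch (not part of the original file): an ioctl_handler_cb
 * that a device-specific driver might register to service non-generic
 * ioctls.  The ioctl number MY_EXAMPLE_IOCTL and struct my_example_args are
 * hypothetical.
 */
static long my_example_ioctl_handler(struct file *filp, uint cmd,
                                     void __user *argp)
{
        struct gasket_dev *gasket_dev = filp->private_data;
        struct my_example_args args;

        switch (cmd) {
        case MY_EXAMPLE_IOCTL:
                if (copy_from_user(&args, argp, sizeof(args)))
                        return -EFAULT;
                /* ... device-specific handling of args ... */
                return 0;
        default:
                dev_dbg(gasket_dev->dev, "unknown ioctl 0x%x\n", cmd);
                return -EINVAL;
        }
}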
1338 
1339 /* File operations for all Gasket devices. */
1340 static const struct file_operations gasket_file_ops = {
1341         .owner = THIS_MODULE,
1342         .llseek = no_llseek,
1343         .mmap = gasket_mmap,
1344         .open = gasket_open,
1345         .release = gasket_release,
1346         .unlocked_ioctl = gasket_ioctl,
1347 };
1348 
1349 /* Performs final init and marks the device as active. */
1350 int gasket_enable_device(struct gasket_dev *gasket_dev)
1351 {
1352         int tbl_idx;
1353         int ret;
1354         const struct gasket_driver_desc *driver_desc =
1355                 gasket_dev->internal_desc->driver_desc;
1356 
1357         ret = gasket_interrupt_init(gasket_dev);
1358         if (ret) {
1359                 dev_err(gasket_dev->dev,
1360                         "Critical failure to allocate interrupts: %d\n", ret);
1361                 gasket_interrupt_cleanup(gasket_dev);
1362                 return ret;
1363         }
1364 
1365         for (tbl_idx = 0; tbl_idx < driver_desc->num_page_tables; tbl_idx++) {
1366                 dev_dbg(gasket_dev->dev, "Initializing page table %d.\n",
1367                         tbl_idx);
1368                 ret = gasket_page_table_init(&gasket_dev->page_table[tbl_idx],
1369                                              &gasket_dev->bar_data[driver_desc->page_table_bar_index],
1370                                              &driver_desc->page_table_configs[tbl_idx],
1371                                              gasket_dev->dev,
1372                                              gasket_dev->pci_dev);
1373                 if (ret) {
1374                         dev_err(gasket_dev->dev,
1375                                 "Couldn't init page table %d: %d\n",
1376                                 tbl_idx, ret);
1377                         return ret;
1378                 }
1379                 /*
1380                  * Make sure that the page table is clear and set to simple
1381                  * addresses.
1382                  */
1383                 gasket_page_table_reset(gasket_dev->page_table[tbl_idx]);
1384         }
1385 
1386         /*
1387          * hardware_revision_cb returns a positive integer (the rev) if
1388          * successful.
1389          */
1390         ret = check_and_invoke_callback(gasket_dev,
1391                                         driver_desc->hardware_revision_cb);
1392         if (ret < 0) {
1393                 dev_err(gasket_dev->dev,
1394                         "Error getting hardware revision: %d\n", ret);
1395                 return ret;
1396         }
1397         gasket_dev->hardware_revision = ret;
1398 
1399         /* device_status_cb returns a device status, not an error code. */
1400         gasket_dev->status = gasket_get_hw_status(gasket_dev);
1401         if (gasket_dev->status == GASKET_STATUS_DEAD)
1402                 dev_err(gasket_dev->dev, "Device reported as unhealthy.\n");
1403 
1404         ret = gasket_add_cdev(&gasket_dev->dev_info, &gasket_file_ops,
1405                               driver_desc->module);
1406         if (ret)
1407                 return ret;
1408 
1409         return 0;
1410 }
1411 EXPORT_SYMBOL(gasket_enable_device);
1412 
1413 static int __gasket_add_device(struct device *parent_dev,
1414                                struct gasket_internal_desc *internal_desc,
1415                                struct gasket_dev **gasket_devp)
1416 {
1417         int ret;
1418         struct gasket_dev *gasket_dev;
1419         const struct gasket_driver_desc *driver_desc =
1420             internal_desc->driver_desc;
1421 
1422         ret = gasket_alloc_dev(internal_desc, parent_dev, &gasket_dev);
1423         if (ret)
1424                 return ret;
1425         if (IS_ERR(gasket_dev->dev_info.device)) {
1426                 dev_err(parent_dev, "Cannot create %s device %s [ret = %ld]\n",
1427                         driver_desc->name, gasket_dev->dev_info.name,
1428                         PTR_ERR(gasket_dev->dev_info.device));
1429                 ret = -ENODEV;
1430                 goto free_gasket_dev;
1431         }
1432 
1433         ret = gasket_sysfs_create_mapping(gasket_dev->dev_info.device,
1434                                           gasket_dev);
1435         if (ret)
1436                 goto remove_device;
1437 
1438         ret = gasket_sysfs_create_entries(gasket_dev->dev_info.device,
1439                                           gasket_sysfs_generic_attrs);
1440         if (ret)
1441                 goto remove_sysfs_mapping;
1442 
1443         *gasket_devp = gasket_dev;
1444         return 0;
1445 
1446 remove_sysfs_mapping:
1447         gasket_sysfs_remove_mapping(gasket_dev->dev_info.device);
1448 remove_device:
1449         device_destroy(internal_desc->class, gasket_dev->dev_info.devt);
1450 free_gasket_dev:
1451         gasket_free_dev(gasket_dev);
1452         return ret;
1453 }
1454 
1455 static void __gasket_remove_device(struct gasket_internal_desc *internal_desc,
1456                                    struct gasket_dev *gasket_dev)
1457 {
1458         gasket_sysfs_remove_mapping(gasket_dev->dev_info.device);
1459         device_destroy(internal_desc->class, gasket_dev->dev_info.devt);
1460         gasket_free_dev(gasket_dev);
1461 }
1462 
1463 /*
1464  * Add PCI gasket device.
1465  *
1466  * Called by Gasket device probe function.
1467  * Allocates device metadata and maps device memory.  The device driver must
1468  * call gasket_enable_device after driver init is complete to place the device
1469  * in active use.
1470  */
1471 int gasket_pci_add_device(struct pci_dev *pci_dev,
1472                           struct gasket_dev **gasket_devp)
1473 {
1474         int ret;
1475         struct gasket_internal_desc *internal_desc;
1476         struct gasket_dev *gasket_dev;
1477         struct device *parent;
1478 
1479         dev_dbg(&pci_dev->dev, "add PCI gasket device\n");
1480 
1481         mutex_lock(&g_mutex);
1482         internal_desc = lookup_pci_internal_desc(pci_dev);
1483         mutex_unlock(&g_mutex);
1484         if (!internal_desc) {
1485                 dev_err(&pci_dev->dev,
1486                         "PCI add device called for unknown driver type\n");
1487                 return -ENODEV;
1488         }
1489 
1490         parent = &pci_dev->dev;
1491         ret = __gasket_add_device(parent, internal_desc, &gasket_dev);
1492         if (ret)
1493                 return ret;
1494 
1495         gasket_dev->pci_dev = pci_dev;
1496         ret = gasket_setup_pci(pci_dev, gasket_dev);
1497         if (ret)
1498                 goto cleanup_pci;
1499 
1500         /*
1501          * Once we've created the mapping structures successfully, attempt to
1502          * create a symlink to the pci directory of this object.
1503          */
1504         ret = sysfs_create_link(&gasket_dev->dev_info.device->kobj,
1505                                 &pci_dev->dev.kobj, dev_name(&pci_dev->dev));
1506         if (ret) {
1507                 dev_err(gasket_dev->dev,
1508                         "Cannot create sysfs pci link: %d\n", ret);
1509                 goto cleanup_pci;
1510         }
1511 
1512         *gasket_devp = gasket_dev;
1513         return 0;
1514 
1515 cleanup_pci:
1516         gasket_cleanup_pci(gasket_dev);
1517         __gasket_remove_device(internal_desc, gasket_dev);
1518         return ret;
1519 }
1520 EXPORT_SYMBOL(gasket_pci_add_device);
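
/*
 * Illustrative sketch (not part of the original file): a device-specific PCI
 * probe routine would typically call gasket_pci_add_device() and, once its
 * own setup is complete, gasket_enable_device().  The my_pci_probe() name is
 * hypothetical.
 */
static int my_pci_probe(struct pci_dev *pci_dev,
                        const struct pci_device_id *id)
{
        struct gasket_dev *gasket_dev;
        int ret;

        ret = pci_enable_device(pci_dev);
        if (ret)
                return ret;

        ret = gasket_pci_add_device(pci_dev, &gasket_dev);
        if (ret)
                goto disable_pci;

        /* ... device-specific initialization using gasket_dev ... */

        ret = gasket_enable_device(gasket_dev);
        if (ret)
                goto remove_device;

        return 0;

remove_device:
        gasket_pci_remove_device(pci_dev);
disable_pci:
        pci_disable_device(pci_dev);
        return ret;
}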
1521 
1522 /* Remove a PCI gasket device. */
1523 void gasket_pci_remove_device(struct pci_dev *pci_dev)
1524 {
1525         int i;
1526         struct gasket_internal_desc *internal_desc;
1527         struct gasket_dev *gasket_dev = NULL;
1528         /* Find the device desc. */
1529         mutex_lock(&g_mutex);
1530         internal_desc = lookup_pci_internal_desc(pci_dev);
1531         if (!internal_desc) {
1532                 mutex_unlock(&g_mutex);
1533                 return;
1534         }
1535         mutex_unlock(&g_mutex);
1536 
1537         /* Now find the specific device */
1538         mutex_lock(&internal_desc->mutex);
1539         for (i = 0; i < GASKET_DEV_MAX; i++) {
1540                 if (internal_desc->devs[i] &&
1541                     internal_desc->devs[i]->pci_dev == pci_dev) {
1542                         gasket_dev = internal_desc->devs[i];
1543                         break;
1544                 }
1545         }
1546         mutex_unlock(&internal_desc->mutex);
1547 
1548         if (!gasket_dev)
1549                 return;
1550 
1551         dev_dbg(gasket_dev->dev, "remove %s PCI gasket device\n",
1552                 internal_desc->driver_desc->name);
1553 
1554         gasket_cleanup_pci(gasket_dev);
1555         __gasket_remove_device(internal_desc, gasket_dev);
1556 }
1557 EXPORT_SYMBOL(gasket_pci_remove_device);
1558 
1559 /**
1560  * Look up a name by number in a num_name table.
1561  * @num: Number to look up.
1562  * @table: Array of num_name structures, the table for the lookup.
1563  *
1564  * Description: Searches for num in the table.  If found, the
1565  *              corresponding name is returned; otherwise NULL
1566  *              is returned.
1567  *
1568  *              The table must have a NULL name pointer at the end.
1569  */
1570 const char *gasket_num_name_lookup(uint num,
1571                                    const struct gasket_num_name *table)
1572 {
1573         uint i = 0;
1574 
1575         while (table[i].snn_name) {
1576                 if (num == table[i].snn_num)
1577                         break;
1578                 ++i;
1579         }
1580 
1581         return table[i].snn_name;
1582 }
1583 EXPORT_SYMBOL(gasket_num_name_lookup);
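
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * status-name table for gasket_num_name_lookup().  The table must end with
 * an entry whose snn_name is NULL; GASKET_STATUS_ALIVE is assumed to be
 * defined alongside GASKET_STATUS_DEAD in gasket_core.h.
 */
static const struct gasket_num_name my_status_names[] = {
        { .snn_num = GASKET_STATUS_ALIVE, .snn_name = "ALIVE" },
        { .snn_num = GASKET_STATUS_DEAD, .snn_name = "DEAD" },
        { .snn_name = NULL },
};

/* Example: gasket_num_name_lookup(gasket_dev->status, my_status_names) */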
1584 
1585 int gasket_reset(struct gasket_dev *gasket_dev)
1586 {
1587         int ret;
1588 
1589         mutex_lock(&gasket_dev->mutex);
1590         ret = gasket_reset_nolock(gasket_dev);
1591         mutex_unlock(&gasket_dev->mutex);
1592         return ret;
1593 }
1594 EXPORT_SYMBOL(gasket_reset);
1595 
1596 int gasket_reset_nolock(struct gasket_dev *gasket_dev)
1597 {
1598         int ret;
1599         int i;
1600         const struct gasket_driver_desc *driver_desc;
1601 
1602         driver_desc = gasket_dev->internal_desc->driver_desc;
1603         if (!driver_desc->device_reset_cb)
1604                 return 0;
1605 
1606         ret = driver_desc->device_reset_cb(gasket_dev);
1607         if (ret) {
1608                 dev_dbg(gasket_dev->dev, "Device reset cb returned %d.\n",
1609                         ret);
1610                 return ret;
1611         }
1612 
1613         /* Reinitialize the page tables and interrupt framework. */
1614         for (i = 0; i < driver_desc->num_page_tables; ++i)
1615                 gasket_page_table_reset(gasket_dev->page_table[i]);
1616 
1617         ret = gasket_interrupt_reinit(gasket_dev);
1618         if (ret) {
1619                 dev_dbg(gasket_dev->dev, "Unable to reinit interrupts: %d.\n",
1620                         ret);
1621                 return ret;
1622         }
1623 
1624         /* Get current device health. */
1625         gasket_dev->status = gasket_get_hw_status(gasket_dev);
1626         if (gasket_dev->status == GASKET_STATUS_DEAD) {
1627                 dev_dbg(gasket_dev->dev, "Device reported as dead.\n");
1628                 return -EINVAL;
1629         }
1630 
1631         return 0;
1632 }
1633 EXPORT_SYMBOL(gasket_reset_nolock);
1634 
1635 gasket_ioctl_permissions_cb_t
1636 gasket_get_ioctl_permissions_cb(struct gasket_dev *gasket_dev)
1637 {
1638         return gasket_dev->internal_desc->driver_desc->ioctl_permissions_cb;
1639 }
1640 EXPORT_SYMBOL(gasket_get_ioctl_permissions_cb);
1641 
1642 /* Get the driver structure for a given gasket_dev.
1643  * @dev: pointer to the gasket_dev whose driver description is requested.
1644  */
1645 const struct gasket_driver_desc *gasket_get_driver_desc(struct gasket_dev *dev)
1646 {
1647         return dev->internal_desc->driver_desc;
1648 }
1649 
1650 /* Get the device structure for a given gasket_dev.
1651  * @dev: pointer to the gasket_dev whose struct device is requested.
1652  */
1653 struct device *gasket_get_device(struct gasket_dev *dev)
1654 {
1655         return dev->dev;
1656 }
1657 
1658 /**
1659  * Sleep-polls a device register until an expected value appears.
1660  * @gasket_dev: Device struct.
1661  * @bar: BAR containing the register.
1662  * @offset: Register offset within the BAR.
1663  * @mask: Mask applied to the register value before comparison.
1664  * @val: Expected (masked) value.
1665  * @max_retries: Maximum number of read/sleep iterations.
1666  * @delay_ms: Sleep time between retries, in milliseconds.
1667  *
1668  * Description: Repeatedly reads the register, sleeping between reads, until
1669  * the masked value equals @val or the retry limit is reached.
1670  **/
1671 int gasket_wait_with_reschedule(struct gasket_dev *gasket_dev, int bar,
1672                                 u64 offset, u64 mask, u64 val,
1673                                 uint max_retries, u64 delay_ms)
1674 {
1675         uint retries = 0;
1676         u64 tmp;
1677 
1678         while (retries < max_retries) {
1679                 tmp = gasket_dev_read_64(gasket_dev, bar, offset);
1680                 if ((tmp & mask) == val)
1681                         return 0;
1682                 msleep(delay_ms);
1683                 retries++;
1684         }
1685         dev_dbg(gasket_dev->dev, "%s: timeout waiting on reg 0x%llx (%llu ms)\n",
1686                 __func__, offset, max_retries * delay_ms);
1687         return -ETIMEDOUT;
1688 }
1689 EXPORT_SYMBOL(gasket_wait_with_reschedule);
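
/*
 * Illustrative sketch (not part of the original file): polling a hypothetical
 * "ready" bit with gasket_wait_with_reschedule().  The BAR index, register
 * offset, mask, and retry parameters below are made-up values.
 */
static int my_wait_for_ready(struct gasket_dev *gasket_dev)
{
        /* Poll bit 0 of a BAR 2 status register every 10 ms, ~1 s in total. */
        return gasket_wait_with_reschedule(gasket_dev, 2, 0x48, 0x1, 0x1,
                                           100, 10);
}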
1690 
1691 /* See gasket_core.h for description. */
1692 int gasket_register_device(const struct gasket_driver_desc *driver_desc)
1693 {
1694         int i, ret;
1695         int desc_idx = -1;
1696         struct gasket_internal_desc *internal;
1697 
1698         pr_debug("Loading %s driver version %s\n", driver_desc->name,
1699                  driver_desc->driver_version);
1700         /* Check for duplicates and find a free slot. */
1701         mutex_lock(&g_mutex);
1702 
1703         for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
1704                 if (g_descs[i].driver_desc == driver_desc) {
1705                         pr_err("%s driver already loaded/registered\n",
1706                                driver_desc->name);
1707                         mutex_unlock(&g_mutex);
1708                         return -EBUSY;
1709                 }
1710         }
1711 
1712         /* This and the above loop could be combined, but this reads easier. */
1713         for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
1714                 if (!g_descs[i].driver_desc) {
1715                         g_descs[i].driver_desc = driver_desc;
1716                         desc_idx = i;
1717                         break;
1718                 }
1719         }
1720         mutex_unlock(&g_mutex);
1721 
1722         if (desc_idx == -1) {
1723                 pr_err("too many drivers loaded, max %d\n",
1724                        GASKET_FRAMEWORK_DESC_MAX);
1725                 return -EBUSY;
1726         }
1727 
1728         internal = &g_descs[desc_idx];
1729         mutex_init(&internal->mutex);
1730         memset(internal->devs, 0, sizeof(struct gasket_dev *) * GASKET_DEV_MAX);
1731         internal->class =
1732                 class_create(driver_desc->module, driver_desc->name);
1733 
1734         if (IS_ERR(internal->class)) {
1735                 pr_err("Cannot register %s class [ret=%ld]\n",
1736                        driver_desc->name, PTR_ERR(internal->class));
1737                 ret = PTR_ERR(internal->class);
1738                 goto unregister_gasket_driver;
1739         }
1740 
1741         ret = register_chrdev_region(MKDEV(driver_desc->major,
1742                                            driver_desc->minor), GASKET_DEV_MAX,
1743                                      driver_desc->name);
1744         if (ret) {
1745                 pr_err("cannot register %s char driver [ret=%d]\n",
1746                        driver_desc->name, ret);
1747                 goto destroy_class;
1748         }
1749 
1750         return 0;
1751 
1752 destroy_class:
1753         class_destroy(internal->class);
1754 
1755 unregister_gasket_driver:
1756         mutex_lock(&g_mutex);
1757         g_descs[desc_idx].driver_desc = NULL;
1758         mutex_unlock(&g_mutex);
1759         return ret;
1760 }
1761 EXPORT_SYMBOL(gasket_register_device);
1762 
1763 /* See gasket_core.h for description. */
1764 void gasket_unregister_device(const struct gasket_driver_desc *driver_desc)
1765 {
1766         int i, desc_idx;
1767         struct gasket_internal_desc *internal_desc = NULL;
1768 
1769         mutex_lock(&g_mutex);
1770         for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
1771                 if (g_descs[i].driver_desc == driver_desc) {
1772                         internal_desc = &g_descs[i];
1773                         desc_idx = i;
1774                         break;
1775                 }
1776         }
1777 
1778         if (!internal_desc) {
1779                 mutex_unlock(&g_mutex);
1780                 pr_err("request to unregister unknown desc: %s, %d:%d\n",
1781                        driver_desc->name, driver_desc->major,
1782                        driver_desc->minor);
1783                 return;
1784         }
1785 
1786         unregister_chrdev_region(MKDEV(driver_desc->major, driver_desc->minor),
1787                                  GASKET_DEV_MAX);
1788 
1789         class_destroy(internal_desc->class);
1790 
1791         /* Finally, effectively "remove" the driver. */
1792         g_descs[desc_idx].driver_desc = NULL;
1793         mutex_unlock(&g_mutex);
1794 
1795         pr_debug("removed %s driver\n", driver_desc->name);
1796 }
1797 EXPORT_SYMBOL(gasket_unregister_device);
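
/*
 * Illustrative sketch (not part of the original file): a device-specific
 * module pairing gasket_register_device()/gasket_unregister_device() with
 * its PCI driver registration.  my_driver_desc and my_pci_driver are
 * hypothetical definitions supplied elsewhere by that driver; its own
 * module_init()/module_exit() would reference these functions.
 */
static int __init my_driver_init(void)
{
        int ret;

        ret = gasket_register_device(&my_driver_desc);
        if (ret)
                return ret;

        ret = pci_register_driver(&my_pci_driver);
        if (ret)
                gasket_unregister_device(&my_driver_desc);
        return ret;
}

static void __exit my_driver_exit(void)
{
        pci_unregister_driver(&my_pci_driver);
        gasket_unregister_device(&my_driver_desc);
}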
1798 
1799 static int __init gasket_init(void)
1800 {
1801         int i;
1802 
1803         mutex_lock(&g_mutex);
1804         for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
1805                 g_descs[i].driver_desc = NULL;
1806                 mutex_init(&g_descs[i].mutex);
1807         }
1808 
1809         gasket_sysfs_init();
1810 
1811         mutex_unlock(&g_mutex);
1812         return 0;
1813 }
1814 
1815 MODULE_DESCRIPTION("Google Gasket driver framework");
1816 MODULE_VERSION(GASKET_FRAMEWORK_VERSION);
1817 MODULE_LICENSE("GPL v2");
1818 MODULE_AUTHOR("Rob Springer <rspringer@google.com>");
1819 module_init(gasket_init);
