drivers/misc/sgi-gru/grufile.c


DEFINITIONS

This source file includes the following definitions:
  1. gru_supported
  2. gru_vma_close
  3. gru_file_mmap
  4. gru_create_new_context
  5. gru_get_config_info
  6. gru_file_unlocked_ioctl
  7. gru_init_chiplet
  8. gru_init_tables
  9. gru_free_tables
  10. gru_chiplet_cpu_to_mmr
  11. gru_noop
  12. gru_chiplet_setup_tlb_irq
  13. gru_chiplet_teardown_tlb_irq
  14. gru_chiplet_setup_tlb_irq
  15. gru_chiplet_teardown_tlb_irq
  16. gru_teardown_tlb_irqs
  17. gru_setup_tlb_irqs
  18. gru_init
  19. gru_exit

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SN Platform GRU Driver
 *
 *              FILE OPERATIONS & DRIVER INITIALIZATION
 *
 * This file supports the user system call for file open, close, mmap, etc.
 * This also includes the driver initialization code.
 *
 *  Copyright (c) 2008-2014 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/uaccess.h>
#ifdef CONFIG_X86_64
#include <asm/uv/uv_irq.h>
#endif
#include <asm/uv/uv.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"

#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_mmrs.h>

struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly;
unsigned long gru_start_paddr __read_mostly;
void *gru_start_vaddr __read_mostly;
unsigned long gru_end_paddr __read_mostly;
unsigned int gru_max_gids __read_mostly;
struct gru_stats_s gru_stats;

/* Guaranteed user available resources on each node */
static int max_user_cbrs, max_user_dsr_bytes;

static struct miscdevice gru_miscdev;

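/*
 * The GRU is usable only on UV systems whose hub revision predates
 * UV3; everywhere else gru_init() and gru_exit() return without
 * doing anything.
 */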
static int gru_supported(void)
{
        return is_uv_system() &&
                (uv_hub_info->hub_revision < UV3_HUB_REVISION_BASE);
}

/*
 * gru_vma_close
 *
 * Called when unmapping a device mapping. Frees all gru resources
 * and tables belonging to the vma.
 */
static void gru_vma_close(struct vm_area_struct *vma)
{
        struct gru_vma_data *vdata;
        struct gru_thread_state *gts;
        struct list_head *entry, *next;

        if (!vma->vm_private_data)
                return;

        vdata = vma->vm_private_data;
        vma->vm_private_data = NULL;
        gru_dbg(grudev, "vma %p, file %p, vdata %p\n", vma, vma->vm_file,
                                vdata);
        list_for_each_safe(entry, next, &vdata->vd_head) {
                gts = list_entry(entry, struct gru_thread_state, ts_next);
                list_del(&gts->ts_next);
                mutex_lock(&gts->ts_ctxlock);
                if (gts->ts_gru)
                        gru_unload_context(gts, 0);
                mutex_unlock(&gts->ts_ctxlock);
                gts_drop(gts);
        }
        kfree(vdata);
        STAT(vdata_free);
}

/*
 * gru_file_mmap
 *
 * Called when mmapping the device.  Initializes the vma with a fault handler
 * and private data structure necessary to allocate, track, and free the
 * underlying pages.
 */
static int gru_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) != (VM_SHARED | VM_WRITE))
                return -EPERM;

        if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) ||
                                vma->vm_end & (GRU_GSEG_PAGESIZE - 1))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_LOCKED |
                         VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_page_prot = PAGE_SHARED;
        vma->vm_ops = &gru_vm_ops;

        vma->vm_private_data = gru_alloc_vma_data(vma, 0);
        if (!vma->vm_private_data)
                return -ENOMEM;

        gru_dbg(grudev, "file %p, vaddr 0x%lx, vma %p, vdata %p\n",
                file, vma->vm_start, vma, vma->vm_private_data);
        return 0;
}
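
/*
 * Userspace sketch (illustrative only, not part of the driver): the
 * checks above require a shared, writable mapping whose start and end
 * are GRU_GSEG_PAGESIZE aligned, so a direct caller would do roughly:
 *
 *      int fd = open("/dev/gru", O_RDWR);
 *      void *gseg = mmap(aligned_hint, GRU_GSEG_PAGESIZE,
 *                        PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * where aligned_hint is an address the caller has already arranged to
 * be GRU_GSEG_PAGESIZE aligned.  Real applications normally reach this
 * path through the GRU user library (see grulib.h) rather than by
 * calling mmap() directly.
 */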

/*
 * Create a new GRU context
 */
static int gru_create_new_context(unsigned long arg)
{
        struct gru_create_context_req req;
        struct vm_area_struct *vma;
        struct gru_vma_data *vdata;
        int ret = -EINVAL;

        if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
                return -EFAULT;

        if (req.data_segment_bytes > max_user_dsr_bytes)
                return -EINVAL;
        if (req.control_blocks > max_user_cbrs || !req.maximum_thread_count)
                return -EINVAL;

        if (!(req.options & GRU_OPT_MISS_MASK))
                req.options |= GRU_OPT_MISS_FMM_INTR;

        down_write(&current->mm->mmap_sem);
        vma = gru_find_vma(req.gseg);
        if (vma) {
                vdata = vma->vm_private_data;
                vdata->vd_user_options = req.options;
                vdata->vd_dsr_au_count =
                    GRU_DS_BYTES_TO_AU(req.data_segment_bytes);
                vdata->vd_cbr_au_count = GRU_CB_COUNT_TO_AU(req.control_blocks);
                vdata->vd_tlb_preload_count = req.tlb_preload_count;
                ret = 0;
        }
        up_write(&current->mm->mmap_sem);

        return ret;
}
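
/*
 * Userspace sketch (illustrative only; the field values below are
 * placeholders, not a definitive API walkthrough): a thread binds GRU
 * resources to an mmapped GSEG with the GRU_CREATE_CONTEXT ioctl,
 * using the request layout consumed above:
 *
 *      struct gru_create_context_req req = {
 *              .gseg                   = (unsigned long)gseg,
 *              .data_segment_bytes     = dsr_bytes_needed,
 *              .control_blocks         = cbs_needed,
 *              .maximum_thread_count   = 1,
 *              .options                = 0,
 *      };
 *      ioctl(fd, GRU_CREATE_CONTEXT, (unsigned long)&req);
 *
 * Requests beyond max_user_dsr_bytes/max_user_cbrs, or with a zero
 * maximum_thread_count, are rejected with -EINVAL.
 */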

/*
 * Get GRU configuration info (temp - for emulator testing)
 */
static long gru_get_config_info(unsigned long arg)
{
        struct gru_config_info info;
        int nodesperblade;

        if (num_online_nodes() > 1 &&
                        (uv_node_to_blade_id(1) == uv_node_to_blade_id(0)))
                nodesperblade = 2;
        else
                nodesperblade = 1;
        memset(&info, 0, sizeof(info));
        info.cpus = num_online_cpus();
        info.nodes = num_online_nodes();
        info.blades = info.nodes / nodesperblade;
        info.chiplets = GRU_CHIPLETS_PER_BLADE * info.blades;

        if (copy_to_user((void __user *)arg, &info, sizeof(info)))
                return -EFAULT;
        return 0;
}

/*
 * gru_file_unlocked_ioctl
 *
 * Called to update file attributes via IOCTL calls.
 */
static long gru_file_unlocked_ioctl(struct file *file, unsigned int req,
                                    unsigned long arg)
{
        int err = -EBADRQC;

        gru_dbg(grudev, "file %p, req 0x%x, 0x%lx\n", file, req, arg);

        switch (req) {
        case GRU_CREATE_CONTEXT:
                err = gru_create_new_context(arg);
                break;
        case GRU_SET_CONTEXT_OPTION:
                err = gru_set_context_option(arg);
                break;
        case GRU_USER_GET_EXCEPTION_DETAIL:
                err = gru_get_exception_detail(arg);
                break;
        case GRU_USER_UNLOAD_CONTEXT:
                err = gru_user_unload_context(arg);
                break;
        case GRU_USER_FLUSH_TLB:
                err = gru_user_flush_tlb(arg);
                break;
        case GRU_USER_CALL_OS:
                err = gru_handle_user_call_os(arg);
                break;
        case GRU_GET_GSEG_STATISTICS:
                err = gru_get_gseg_statistics(arg);
                break;
        case GRU_KTEST:
                err = gru_ktest(arg);
                break;
        case GRU_GET_CONFIG_INFO:
                err = gru_get_config_info(arg);
                break;
        case GRU_DUMP_CHIPLET_STATE:
                err = gru_dump_chiplet_request(arg);
                break;
        }
        return err;
}

/*
 * Called at init time to build tables for all GRUs that are present in the
 * system.
 */
static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr,
                             void *vaddr, int blade_id, int chiplet_id)
{
        spin_lock_init(&gru->gs_lock);
        spin_lock_init(&gru->gs_asid_lock);
        gru->gs_gru_base_paddr = paddr;
        gru->gs_gru_base_vaddr = vaddr;
        gru->gs_gid = blade_id * GRU_CHIPLETS_PER_BLADE + chiplet_id;
        gru->gs_blade = gru_base[blade_id];
        gru->gs_blade_id = blade_id;
        gru->gs_chiplet_id = chiplet_id;
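        /*
         * Build all-ones bitmaps with one bit per allocation unit.
         * The conditional on the CBR map sidesteps the undefined
         * behavior of shifting a 64-bit value by 64 bits when
         * GRU_CBR_AU == 64.
         */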
        gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1;
        gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1;
        gru->gs_asid_limit = MAX_ASID;
        gru_tgh_flush_init(gru);
        if (gru->gs_gid >= gru_max_gids)
                gru_max_gids = gru->gs_gid + 1;
        gru_dbg(grudev, "bid %d, gid %d, vaddr %p (0x%lx)\n",
                blade_id, gru->gs_gid, gru->gs_gru_base_vaddr,
                gru->gs_gru_base_paddr);
}

static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
{
        int pnode, nid, bid, chip;
        int cbrs, dsrbytes, n;
        int order = get_order(sizeof(struct gru_blade_state));
        struct page *page;
        struct gru_state *gru;
        unsigned long paddr;
        void *vaddr;

        max_user_cbrs = GRU_NUM_CB;
        max_user_dsr_bytes = GRU_NUM_DSR_BYTES;
        for_each_possible_blade(bid) {
                pnode = uv_blade_to_pnode(bid);
                nid = uv_blade_to_memory_nid(bid);/* -1 if no memory on blade */
                page = alloc_pages_node(nid, GFP_KERNEL, order);
                if (!page)
                        goto fail;
                gru_base[bid] = page_address(page);
                memset(gru_base[bid], 0, sizeof(struct gru_blade_state));
                gru_base[bid]->bs_lru_gru = &gru_base[bid]->bs_grus[0];
                spin_lock_init(&gru_base[bid]->bs_lock);
                init_rwsem(&gru_base[bid]->bs_kgts_sema);

                dsrbytes = 0;
                cbrs = 0;
                for (gru = gru_base[bid]->bs_grus, chip = 0;
                                chip < GRU_CHIPLETS_PER_BLADE;
                                chip++, gru++) {
                        paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip);
                        vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip);
                        gru_init_chiplet(gru, paddr, vaddr, bid, chip);
                        n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
                        cbrs = max(cbrs, n);
                        n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
                        dsrbytes = max(dsrbytes, n);
                }
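                /*
                 * Track the smallest totals seen on any blade so that
                 * every blade can satisfy what user space is allowed
                 * to request.
                 */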
                max_user_cbrs = min(max_user_cbrs, cbrs);
                max_user_dsr_bytes = min(max_user_dsr_bytes, dsrbytes);
        }

        return 0;

fail:
        for (bid--; bid >= 0; bid--)
                free_pages((unsigned long)gru_base[bid], order);
        return -ENOMEM;
}

static void gru_free_tables(void)
{
        int bid;
        int order = get_order(sizeof(struct gru_state) *
                              GRU_CHIPLETS_PER_BLADE);

        for (bid = 0; bid < GRU_MAX_BLADES; bid++)
                free_pages((unsigned long)gru_base[bid], order);
}

static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep)
{
        unsigned long mmr = 0;
        int core;

        /*
         * We target the cores of a blade and not the hyperthreads themselves.
         * There is a max of 8 cores per socket and 2 sockets per blade,
         * making for a max total of 16 cores (i.e., 16 CPUs without
         * hyperthreading and 32 CPUs with hyperthreading).
         */
        core = uv_cpu_core_number(cpu) + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
        if (core >= GRU_NUM_TFM || uv_cpu_ht_number(cpu))
                return 0;

        if (chiplet == 0) {
                mmr = UVH_GR0_TLB_INT0_CONFIG +
                    core * (UVH_GR0_TLB_INT1_CONFIG - UVH_GR0_TLB_INT0_CONFIG);
        } else if (chiplet == 1) {
                mmr = UVH_GR1_TLB_INT0_CONFIG +
                    core * (UVH_GR1_TLB_INT1_CONFIG - UVH_GR1_TLB_INT0_CONFIG);
        } else {
                BUG();
        }

        *corep = core;
        return mmr;
}
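
/*
 * Worked example: the code above assumes the per-core TLB interrupt
 * config MMRs are evenly spaced, so (INT1_CONFIG - INT0_CONFIG) is the
 * per-core stride.  For chiplet 0 and core 3 the result is
 * UVH_GR0_TLB_INT0_CONFIG + 3 * (UVH_GR0_TLB_INT1_CONFIG -
 * UVH_GR0_TLB_INT0_CONFIG), i.e. the fourth entry of that register
 * array.
 */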

#ifdef CONFIG_IA64

static int gru_irq_count[GRU_CHIPLETS_PER_BLADE];

static void gru_noop(struct irq_data *d)
{
}

static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = {
        [0 ... GRU_CHIPLETS_PER_BLADE - 1] = {
                .irq_mask       = gru_noop,
                .irq_unmask     = gru_noop,
                .irq_ack        = gru_noop
        }
};

static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
                        irq_handler_t irq_handler, int cpu, int blade)
{
        unsigned long mmr;
        int irq = IRQ_GRU + chiplet;
        int ret, core;

        mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
        if (mmr == 0)
                return 0;

        if (gru_irq_count[chiplet] == 0) {
                gru_chip[chiplet].name = irq_name;
                ret = irq_set_chip(irq, &gru_chip[chiplet]);
                if (ret) {
                        printk(KERN_ERR "%s: irq_set_chip failed, errno=%d\n",
                               GRU_DRIVER_ID_STR, -ret);
                        return ret;
                }

                ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
                if (ret) {
                        printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
                               GRU_DRIVER_ID_STR, -ret);
                        return ret;
                }
        }
        gru_irq_count[chiplet]++;

        return 0;
}

static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
{
        unsigned long mmr;
        int core, irq = IRQ_GRU + chiplet;

        if (gru_irq_count[chiplet] == 0)
                return;

        mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
        if (mmr == 0)
                return;

        if (--gru_irq_count[chiplet] == 0)
                free_irq(irq, NULL);
}

#elif defined CONFIG_X86_64

static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
                        irq_handler_t irq_handler, int cpu, int blade)
{
        unsigned long mmr;
        int irq, core;
        int ret;

        mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
        if (mmr == 0)
                return 0;

        irq = uv_setup_irq(irq_name, cpu, blade, mmr, UV_AFFINITY_CPU);
        if (irq < 0) {
                printk(KERN_ERR "%s: uv_setup_irq failed, errno=%d\n",
                       GRU_DRIVER_ID_STR, -irq);
                return irq;
        }

        ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
        if (ret) {
                uv_teardown_irq(irq);
                printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
                       GRU_DRIVER_ID_STR, -ret);
                return ret;
        }
        gru_base[blade]->bs_grus[chiplet].gs_irq[core] = irq;
        return 0;
}

static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
{
        int irq, core;
        unsigned long mmr;

        mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
        if (mmr) {
                irq = gru_base[blade]->bs_grus[chiplet].gs_irq[core];
                if (irq) {
                        free_irq(irq, NULL);
                        uv_teardown_irq(irq);
                }
        }
}

#endif

static void gru_teardown_tlb_irqs(void)
{
        int blade;
        int cpu;

        for_each_online_cpu(cpu) {
                blade = uv_cpu_to_blade_id(cpu);
                gru_chiplet_teardown_tlb_irq(0, cpu, blade);
                gru_chiplet_teardown_tlb_irq(1, cpu, blade);
        }
        for_each_possible_blade(blade) {
                if (uv_blade_nr_possible_cpus(blade))
                        continue;
                gru_chiplet_teardown_tlb_irq(0, 0, blade);
                gru_chiplet_teardown_tlb_irq(1, 0, blade);
        }
}

static int gru_setup_tlb_irqs(void)
{
        int blade;
        int cpu;
        int ret;

        for_each_online_cpu(cpu) {
                blade = uv_cpu_to_blade_id(cpu);
                ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru0_intr, cpu, blade);
                if (ret != 0)
                        goto exit1;

                ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru1_intr, cpu, blade);
                if (ret != 0)
                        goto exit1;
        }
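        /*
         * Blades with no possible CPUs (e.g. memory-only blades) were
         * not visited above; register their chiplets' TLB interrupts
         * via gru_intr_mblade, targeting CPU 0.
         */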
        for_each_possible_blade(blade) {
                if (uv_blade_nr_possible_cpus(blade))
                        continue;
                ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru_intr_mblade, 0, blade);
                if (ret != 0)
                        goto exit1;

                ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru_intr_mblade, 0, blade);
                if (ret != 0)
                        goto exit1;
        }

        return 0;

exit1:
        gru_teardown_tlb_irqs();
        return ret;
}

/*
 * gru_init
 *
 * Called at boot or module load time to initialize the GRUs.
 */
static int __init gru_init(void)
{
        int ret;

        if (!gru_supported())
                return 0;

#if defined CONFIG_IA64
        gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */
#else
        gru_start_paddr = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR) &
                                0x7fffffffffffUL;
#endif
        gru_start_vaddr = __va(gru_start_paddr);
        gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE;
        printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n",
               gru_start_paddr, gru_end_paddr);
        ret = misc_register(&gru_miscdev);
        if (ret) {
                printk(KERN_ERR "%s: misc_register failed\n",
                       GRU_DRIVER_ID_STR);
                goto exit0;
        }

        ret = gru_proc_init();
        if (ret) {
                printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR);
                goto exit1;
        }

        ret = gru_init_tables(gru_start_paddr, gru_start_vaddr);
        if (ret) {
                printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR);
                goto exit2;
        }

        ret = gru_setup_tlb_irqs();
        if (ret != 0)
                goto exit3;

        gru_kservices_init();

        printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR,
               GRU_DRIVER_VERSION_STR);
        return 0;

exit3:
        gru_free_tables();
exit2:
        gru_proc_exit();
exit1:
        misc_deregister(&gru_miscdev);
exit0:
        return ret;
}

static void __exit gru_exit(void)
{
        if (!gru_supported())
                return;

        gru_teardown_tlb_irqs();
        gru_kservices_exit();
        gru_free_tables();
        misc_deregister(&gru_miscdev);
        gru_proc_exit();
        mmu_notifier_synchronize();
}

static const struct file_operations gru_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = gru_file_unlocked_ioctl,
        .mmap           = gru_file_mmap,
        .llseek         = noop_llseek,
};

static struct miscdevice gru_miscdev = {
        .minor          = MISC_DYNAMIC_MINOR,
        .name           = "gru",
        .fops           = &gru_fops,
};

const struct vm_operations_struct gru_vm_ops = {
        .close          = gru_vma_close,
        .fault          = gru_fault,
};

#ifndef MODULE
fs_initcall(gru_init);
#else
module_init(gru_init);
#endif
module_exit(gru_exit);

module_param(gru_options, ulong, 0644);
MODULE_PARM_DESC(gru_options, "Various debug options");

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(GRU_DRIVER_ID_STR GRU_DRIVER_VERSION_STR);
MODULE_VERSION(GRU_DRIVER_VERSION_STR);
