arch/s390/pci/pci_clp.c


DEFINITIONS

This source file includes the following definitions.
  1. update_uid_checking
  2. zpci_err_clp
  3. clp_get_ilp
  4. clp_req
  5. clp_alloc_block
  6. clp_free_block
  7. clp_store_query_pci_fngrp
  8. clp_query_pci_fngrp
  9. clp_store_query_pci_fn
  10. clp_query_pci_fn
  11. clp_add_pci_device
  12. clp_set_pci_fn
  13. clp_enable_fh
  14. clp_disable_fh
  15. clp_list_pci
  16. __clp_add
  17. __clp_update
  18. clp_scan_pci_devices
  19. clp_rescan_pci_devices
  20. clp_rescan_pci_devices_simple
  21. __clp_get_state
  22. clp_get_state
  23. clp_base_slpc
  24. clp_base_command
  25. clp_pci_slpc
  26. clp_pci_list
  27. clp_pci_query
  28. clp_pci_query_grp
  29. clp_pci_command
  30. clp_normal_command
  31. clp_immediate_command
  32. clp_misc_ioctl
  33. clp_misc_release
  34. clp_misc_init

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <asm/pci_debug.h>
#include <asm/pci_clp.h>
#include <asm/clp.h>
#include <uapi/asm/clp.h>

bool zpci_unique_uid;

static void update_uid_checking(bool new)
{
        if (zpci_unique_uid != new)
                zpci_dbg(1, "uid checking:%d\n", new);

        zpci_unique_uid = new;
}

static inline void zpci_err_clp(unsigned int rsp, int rc)
{
        struct {
                unsigned int rsp;
                int rc;
        } __packed data = {rsp, rc};

        zpci_err_hex(&data, sizeof(data));
}

/*
 * Call Logical Processor with c=1, lps=0 and command 1
 * to get the bit mask of installed logical processors
 */
static inline int clp_get_ilp(unsigned long *ilp)
{
        unsigned long mask;
        int cc = 3;

        asm volatile (
                "       .insn   rrf,0xb9a00000,%[mask],%[cmd],8,0\n"
                "0:     ipm     %[cc]\n"
                "       srl     %[cc],28\n"
                "1:\n"
                EX_TABLE(0b, 1b)
                : [cc] "+d" (cc), [mask] "=d" (mask) : [cmd] "a" (1)
                : "cc");
        *ilp = mask;
        return cc;
}

/*
 * Call Logical Processor with c=0, the given constant lps and an lpcb request.
 */
static __always_inline int clp_req(void *data, unsigned int lps)
{
        struct { u8 _[CLP_BLK_SIZE]; } *req = data;
        u64 ignored;
        int cc = 3;

        asm volatile (
                "       .insn   rrf,0xb9a00000,%[ign],%[req],0,%[lps]\n"
                "0:     ipm     %[cc]\n"
                "       srl     %[cc],28\n"
                "1:\n"
                EX_TABLE(0b, 1b)
                : [cc] "+d" (cc), [ign] "=d" (ignored), "+m" (*req)
                : [req] "a" (req), [lps] "i" (lps)
                : "cc");
        return cc;
}

static void *clp_alloc_block(gfp_t gfp_mask)
{
        return (void *) __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE));
}

static void clp_free_block(void *ptr)
{
        free_pages((unsigned long) ptr, get_order(CLP_BLK_SIZE));
}

static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
                                      struct clp_rsp_query_pci_grp *response)
{
        zdev->tlb_refresh = response->refresh;
        zdev->dma_mask = response->dasm;
        zdev->msi_addr = response->msia;
        zdev->max_msi = response->noi;
        zdev->fmb_update = response->mui;

        switch (response->version) {
        case 1:
                zdev->max_bus_speed = PCIE_SPEED_5_0GT;
                break;
        default:
                zdev->max_bus_speed = PCI_SPEED_UNKNOWN;
                break;
        }
}

static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
{
        struct clp_req_rsp_query_pci_grp *rrb;
        int rc;

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        memset(rrb, 0, sizeof(*rrb));
        rrb->request.hdr.len = sizeof(rrb->request);
        rrb->request.hdr.cmd = CLP_QUERY_PCI_FNGRP;
        rrb->response.hdr.len = sizeof(rrb->response);
        rrb->request.pfgid = pfgid;

        rc = clp_req(rrb, CLP_LPS_PCI);
        if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
                clp_store_query_pci_fngrp(zdev, &rrb->response);
        else {
                zpci_err("Q PCI FGRP:\n");
                zpci_err_clp(rrb->response.hdr.rsp, rc);
                rc = -EIO;
        }
        clp_free_block(rrb);
        return rc;
}

static int clp_store_query_pci_fn(struct zpci_dev *zdev,
                                  struct clp_rsp_query_pci *response)
{
        int i;

        for (i = 0; i < PCI_BAR_COUNT; i++) {
                zdev->bars[i].val = le32_to_cpu(response->bar[i]);
                zdev->bars[i].size = response->bar_size[i];
        }
        zdev->start_dma = response->sdma;
        zdev->end_dma = response->edma;
        zdev->pchid = response->pchid;
        zdev->pfgid = response->pfgid;
        zdev->pft = response->pft;
        zdev->vfn = response->vfn;
        zdev->uid = response->uid;
        zdev->fmb_length = sizeof(u32) * response->fmb_len;

        memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
        if (response->util_str_avail) {
                memcpy(zdev->util_str, response->util_str,
                       sizeof(zdev->util_str));
        }
        zdev->mio_capable = response->mio_addr_avail;
        for (i = 0; i < PCI_BAR_COUNT; i++) {
                if (!(response->mio.valid & (1 << (PCI_BAR_COUNT - i - 1))))
                        continue;

                zdev->bars[i].mio_wb = (void __iomem *) response->mio.addr[i].wb;
                zdev->bars[i].mio_wt = (void __iomem *) response->mio.addr[i].wt;
        }
        return 0;
}

static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
{
        struct clp_req_rsp_query_pci *rrb;
        int rc;

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        memset(rrb, 0, sizeof(*rrb));
        rrb->request.hdr.len = sizeof(rrb->request);
        rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
        rrb->response.hdr.len = sizeof(rrb->response);
        rrb->request.fh = fh;

        rc = clp_req(rrb, CLP_LPS_PCI);
        if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
                rc = clp_store_query_pci_fn(zdev, &rrb->response);
                if (rc)
                        goto out;
                rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
        } else {
                zpci_err("Q PCI FN:\n");
                zpci_err_clp(rrb->response.hdr.rsp, rc);
                rc = -EIO;
        }
out:
        clp_free_block(rrb);
        return rc;
}

int clp_add_pci_device(u32 fid, u32 fh, int configured)
{
        struct zpci_dev *zdev;
        int rc = -ENOMEM;

        zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
        zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
        if (!zdev)
                goto error;

        zdev->fh = fh;
        zdev->fid = fid;

        /* Query function properties and update zdev */
        rc = clp_query_pci_fn(zdev, fh);
        if (rc)
                goto error;

        if (configured)
                zdev->state = ZPCI_FN_STATE_CONFIGURED;
        else
                zdev->state = ZPCI_FN_STATE_STANDBY;

        rc = zpci_create_device(zdev);
        if (rc)
                goto error;
        return 0;

error:
        zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
        kfree(zdev);
        return rc;
}

/*
 * Enable/Disable a given PCI function and update its function handle if
 * necessary
 */
static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command)
{
        struct clp_req_rsp_set_pci *rrb;
        int rc, retries = 100;
        u32 fid = zdev->fid;

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        do {
                memset(rrb, 0, sizeof(*rrb));
                rrb->request.hdr.len = sizeof(rrb->request);
                rrb->request.hdr.cmd = CLP_SET_PCI_FN;
                rrb->response.hdr.len = sizeof(rrb->response);
                rrb->request.fh = zdev->fh;
                rrb->request.oc = command;
                rrb->request.ndas = nr_dma_as;

                rc = clp_req(rrb, CLP_LPS_PCI);
                if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
                        retries--;
                        if (retries < 0)
                                break;
                        msleep(20);
                }
        } while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);

        if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
                zpci_err("Set PCI FN:\n");
                zpci_err_clp(rrb->response.hdr.rsp, rc);
        }

        if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
                zdev->fh = rrb->response.fh;
        } else if (!rc && rrb->response.hdr.rsp == CLP_RC_SETPCIFN_ALRDY &&
                        rrb->response.fh == 0) {
                /* Function is already in desired state - update handle */
                rc = clp_rescan_pci_devices_simple(&fid);
        }
        clp_free_block(rrb);
        return rc;
}

int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
{
        int rc;

        rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
        zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
        if (rc)
                goto out;

        if (zpci_use_mio(zdev)) {
                rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_MIO);
                zpci_dbg(3, "ena mio fid:%x, fh:%x, rc:%d\n",
                                zdev->fid, zdev->fh, rc);
                if (rc)
                        clp_disable_fh(zdev);
        }
out:
        return rc;
}

int clp_disable_fh(struct zpci_dev *zdev)
{
        int rc;

        if (!zdev_enabled(zdev))
                return 0;

        rc = clp_set_pci_fn(zdev, 0, CLP_SET_DISABLE_PCI_FN);
        zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
        return rc;
}

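/*
 * Walk the complete function-handle list returned by the CLP List PCI
 * command, following resume tokens across response blocks, and invoke
 * @cb with @data for every reported entry.
 */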
static int clp_list_pci(struct clp_req_rsp_list_pci *rrb, void *data,
                        void (*cb)(struct clp_fh_list_entry *, void *))
{
        u64 resume_token = 0;
        int entries, i, rc;

        do {
                memset(rrb, 0, sizeof(*rrb));
                rrb->request.hdr.len = sizeof(rrb->request);
                rrb->request.hdr.cmd = CLP_LIST_PCI;
                /* store as many entries as possible */
                rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
                rrb->request.resume_token = resume_token;

                /* Get PCI function handle list */
                rc = clp_req(rrb, CLP_LPS_PCI);
                if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
                        zpci_err("List PCI FN:\n");
                        zpci_err_clp(rrb->response.hdr.rsp, rc);
                        rc = -EIO;
                        goto out;
                }

                update_uid_checking(rrb->response.uid_checking);
                WARN_ON_ONCE(rrb->response.entry_size !=
                        sizeof(struct clp_fh_list_entry));

                entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
                        rrb->response.entry_size;

                resume_token = rrb->response.resume_token;
                for (i = 0; i < entries; i++)
                        cb(&rrb->response.fh_list[i], data);
        } while (resume_token);
out:
        return rc;
}

static void __clp_add(struct clp_fh_list_entry *entry, void *data)
{
        struct zpci_dev *zdev;

        if (!entry->vendor_id)
                return;

        zdev = get_zdev_by_fid(entry->fid);
        if (!zdev)
                clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
}

static void __clp_update(struct clp_fh_list_entry *entry, void *data)
{
        struct zpci_dev *zdev;
        u32 *fid = data;

        if (!entry->vendor_id)
                return;

        if (fid && *fid != entry->fid)
                return;

        zdev = get_zdev_by_fid(entry->fid);
        if (!zdev)
                return;

        zdev->fh = entry->fh;
}

int clp_scan_pci_devices(void)
{
        struct clp_req_rsp_list_pci *rrb;
        int rc;

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        rc = clp_list_pci(rrb, NULL, __clp_add);

        clp_free_block(rrb);
        return rc;
}

int clp_rescan_pci_devices(void)
{
        struct clp_req_rsp_list_pci *rrb;
        int rc;

        zpci_remove_reserved_devices();

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        rc = clp_list_pci(rrb, NULL, __clp_add);

        clp_free_block(rrb);
        return rc;
}

/* Rescan PCI functions and refresh function handles. If fid is non-NULL, only
 * refresh the handle of the function matching @fid.
 */
int clp_rescan_pci_devices_simple(u32 *fid)
{
        struct clp_req_rsp_list_pci *rrb;
        int rc;

        rrb = clp_alloc_block(GFP_NOWAIT);
        if (!rrb)
                return -ENOMEM;

        rc = clp_list_pci(rrb, fid, __clp_update);

        clp_free_block(rrb);
        return rc;
}

struct clp_state_data {
        u32 fid;
        enum zpci_state state;
};

static void __clp_get_state(struct clp_fh_list_entry *entry, void *data)
{
        struct clp_state_data *sd = data;

        if (entry->fid != sd->fid)
                return;

        sd->state = entry->config_state;
}

int clp_get_state(u32 fid, enum zpci_state *state)
{
        struct clp_req_rsp_list_pci *rrb;
        struct clp_state_data sd = {fid, ZPCI_FN_STATE_RESERVED};
        int rc;

        rrb = clp_alloc_block(GFP_ATOMIC);
        if (!rrb)
                return -ENOMEM;

        rc = clp_list_pci(rrb, &sd, __clp_get_state);
        if (!rc)
                *state = sd.state;

        clp_free_block(rrb);
        return rc;
}

static int clp_base_slpc(struct clp_req *req, struct clp_req_rsp_slpc *lpcb)
{
        unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

        if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
            lpcb->response.hdr.len > limit)
                return -EINVAL;
        return clp_req(lpcb, CLP_LPS_BASE) ? -EOPNOTSUPP : 0;
}

static int clp_base_command(struct clp_req *req, struct clp_req_hdr *lpcb)
{
        switch (lpcb->cmd) {
        case 0x0001: /* store logical-processor characteristics */
                return clp_base_slpc(req, (void *) lpcb);
        default:
                return -EINVAL;
        }
}

static int clp_pci_slpc(struct clp_req *req, struct clp_req_rsp_slpc *lpcb)
{
        unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

        if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
            lpcb->response.hdr.len > limit)
                return -EINVAL;
        return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_list(struct clp_req *req, struct clp_req_rsp_list_pci *lpcb)
{
        unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

        if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
            lpcb->response.hdr.len > limit)
                return -EINVAL;
        if (lpcb->request.reserved2 != 0)
                return -EINVAL;
        return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_query(struct clp_req *req,
                         struct clp_req_rsp_query_pci *lpcb)
{
        unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

        if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
            lpcb->response.hdr.len > limit)
                return -EINVAL;
        if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0)
                return -EINVAL;
        return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_query_grp(struct clp_req *req,
                             struct clp_req_rsp_query_pci_grp *lpcb)
{
        unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

        if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
            lpcb->response.hdr.len > limit)
                return -EINVAL;
        if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0 ||
            lpcb->request.reserved4 != 0)
                return -EINVAL;
        return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_command(struct clp_req *req, struct clp_req_hdr *lpcb)
{
        switch (lpcb->cmd) {
        case 0x0001: /* store logical-processor characteristics */
                return clp_pci_slpc(req, (void *) lpcb);
        case 0x0002: /* list PCI functions */
                return clp_pci_list(req, (void *) lpcb);
        case 0x0003: /* query PCI function */
                return clp_pci_query(req, (void *) lpcb);
        case 0x0004: /* query PCI function group */
                return clp_pci_query_grp(req, (void *) lpcb);
        default:
                return -EINVAL;
        }
}

static int clp_normal_command(struct clp_req *req)
{
        struct clp_req_hdr *lpcb;
        void __user *uptr;
        int rc;

        rc = -EINVAL;
        if (req->lps != 0 && req->lps != 2)
                goto out;

        rc = -ENOMEM;
        lpcb = clp_alloc_block(GFP_KERNEL);
        if (!lpcb)
                goto out;

        rc = -EFAULT;
        uptr = (void __force __user *)(unsigned long) req->data_p;
        if (copy_from_user(lpcb, uptr, PAGE_SIZE) != 0)
                goto out_free;

        rc = -EINVAL;
        if (lpcb->fmt != 0 || lpcb->reserved1 != 0 || lpcb->reserved2 != 0)
                goto out_free;

        switch (req->lps) {
        case 0:
                rc = clp_base_command(req, lpcb);
                break;
        case 2:
                rc = clp_pci_command(req, lpcb);
                break;
        }
        if (rc)
                goto out_free;

        rc = -EFAULT;
        if (copy_to_user(uptr, lpcb, PAGE_SIZE) != 0)
                goto out_free;

        rc = 0;

out_free:
        clp_free_block(lpcb);
out:
        return rc;
}

static int clp_immediate_command(struct clp_req *req)
{
        void __user *uptr;
        unsigned long ilp;
        int exists;

        if (req->cmd > 1 || clp_get_ilp(&ilp) != 0)
                return -EINVAL;

        uptr = (void __force __user *)(unsigned long) req->data_p;
        if (req->cmd == 0) {
                /* Command code 0: test for a specific processor */
                exists = test_bit_inv(req->lps, &ilp);
                return put_user(exists, (int __user *) uptr);
        }
        /* Command code 1: return bit mask of installed processors */
        return put_user(ilp, (unsigned long __user *) uptr);
}

static long clp_misc_ioctl(struct file *filp, unsigned int cmd,
                           unsigned long arg)
{
        struct clp_req req;
        void __user *argp;

        if (cmd != CLP_SYNC)
                return -EINVAL;

        argp = is_compat_task() ? compat_ptr(arg) : (void __user *) arg;
        if (copy_from_user(&req, argp, sizeof(req)))
                return -EFAULT;
        if (req.r != 0)
                return -EINVAL;
        return req.c ? clp_immediate_command(&req) : clp_normal_command(&req);
}

static int clp_misc_release(struct inode *inode, struct file *filp)
{
        return 0;
}

static const struct file_operations clp_misc_fops = {
        .owner = THIS_MODULE,
        .open = nonseekable_open,
        .release = clp_misc_release,
        .unlocked_ioctl = clp_misc_ioctl,
        .compat_ioctl = clp_misc_ioctl,
        .llseek = no_llseek,
};

static struct miscdevice clp_misc_device = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "clp",
        .fops = &clp_misc_fops,
};

static int __init clp_misc_init(void)
{
        return misc_register(&clp_misc_device);
}

device_initcall(clp_misc_init);
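
Usage illustration (not part of the kernel file above): a minimal user-space sketch that drives the /dev/clp CLP_SYNC ioctl handled by clp_misc_ioctl(), taking the immediate-command path (clp_immediate_command) to test whether the PCI logical-processor set (lps 2) is installed. The struct clp_req fields (c, r, cmd, lps, data_p) mirror those used by the kernel code; the assumption is that the exported uapi header is installed as <asm/clp.h> and defines struct clp_req and CLP_SYNC.

/*
 * Hypothetical user-space sketch: issue an immediate CLP command
 * (c=1, cmd=0) through /dev/clp to test whether the PCI
 * logical-processor set (lps=2) is installed.
 */
#include <asm/clp.h>            /* struct clp_req, CLP_SYNC (uapi header) */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        struct clp_req req;
        int fd, rc, installed = 0;

        fd = open("/dev/clp", O_RDWR);
        if (fd < 0) {
                perror("open /dev/clp");
                return 1;
        }

        memset(&req, 0, sizeof(req));
        req.c = 1;                      /* immediate command */
        req.r = 0;                      /* reserved, must be zero */
        req.cmd = 0;                    /* command code 0: test a specific lps */
        req.lps = 2;                    /* PCI logical-processor set */
        req.data_p = (uint64_t)(unsigned long)&installed; /* kernel writes an int here */

        rc = ioctl(fd, CLP_SYNC, &req);
        if (rc == 0)
                printf("PCI CLP support: %s\n", installed ? "yes" : "no");
        else
                perror("CLP_SYNC");

        close(fd);
        return rc ? 1 : 0;
}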
