This source file includes the following definitions:
- csio_nondata_isr
- csio_fwevt_handler
- csio_fwevt_isr
- csio_fwevt_intx_handler
- csio_process_scsi_cmpl
- csio_scsi_isr_handler
- csio_scsi_isr
- csio_scsi_intx_handler
- csio_fcoe_isr
- csio_add_msix_desc
- csio_request_irqs
- csio_reduce_sqsets
- csio_calc_sets
- csio_enable_msix
- csio_intr_enable
- csio_intr_disable
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/string.h>

#include "csio_init.h"
#include "csio_hw.h"

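/*
 * csio_nondata_isr() - MSIX ISR for non-data interrupts.
 * @irq: IRQ number.
 * @dev_id: The csio_hw structure for this adapter.
 *
 * Handles slow-path interrupts and mailbox completions, and schedules
 * the FW event worker when events are pending.
 */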
static irqreturn_t
csio_nondata_isr(int irq, void *dev_id)
{
        struct csio_hw *hw = (struct csio_hw *) dev_id;
        int rv;
        unsigned long flags;

        if (unlikely(!hw))
                return IRQ_NONE;

        if (unlikely(pci_channel_offline(hw->pdev))) {
                CSIO_INC_STATS(hw, n_pcich_offline);
                return IRQ_NONE;
        }

        spin_lock_irqsave(&hw->lock, flags);
        csio_hw_slow_intr_handler(hw);
        rv = csio_mb_isr_handler(hw);

        if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
                hw->flags |= CSIO_HWF_FWEVT_PENDING;
                spin_unlock_irqrestore(&hw->lock, flags);
                schedule_work(&hw->evtq_work);
                return IRQ_HANDLED;
        }
        spin_unlock_irqrestore(&hw->lock, flags);
        return IRQ_HANDLED;
}
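/*
 * csio_fwevt_handler - Common FW event handler routine.
 * @hw: HW module.
 *
 * This is the ISR for FW events. It is shared b/w MSIX
 * and INTx handlers.
 */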
static void
csio_fwevt_handler(struct csio_hw *hw)
{
        int rv;
        unsigned long flags;

        rv = csio_fwevtq_handler(hw);

        spin_lock_irqsave(&hw->lock, flags);
        if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
                hw->flags |= CSIO_HWF_FWEVT_PENDING;
                spin_unlock_irqrestore(&hw->lock, flags);
                schedule_work(&hw->evtq_work);
                return;
        }
        spin_unlock_irqrestore(&hw->lock, flags);

} /* csio_fwevt_handler */
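/*
 * csio_fwevt_isr() - FW events MSIX ISR.
 * @irq: IRQ number.
 * @dev_id: The csio_hw structure for this adapter.
 *
 * Processes WRs on the FW event queue.
 */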
static irqreturn_t
csio_fwevt_isr(int irq, void *dev_id)
{
        struct csio_hw *hw = (struct csio_hw *) dev_id;

        if (unlikely(!hw))
                return IRQ_NONE;

        if (unlikely(pci_channel_offline(hw->pdev))) {
                CSIO_INC_STATS(hw, n_pcich_offline);
                return IRQ_NONE;
        }

        csio_fwevt_handler(hw);

        return IRQ_HANDLED;
}
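/*
 * csio_fwevt_intx_handler() - FW events INTx handler.
 * @hw: HW module.
 * @wr: The work request.
 * @len: Length of the work request.
 * @flb: Freelist buffer array.
 * @priv: Private data, unused here.
 *
 * INTx wrapper around the common FW event handler.
 */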
void
csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
                           struct csio_fl_dma_buf *flb, void *priv)
{
        csio_fwevt_handler(hw);
} /* csio_fwevt_intx_handler */
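/*
 * csio_process_scsi_cmpl() - Process a SCSI WR completion.
 * @hw: HW module.
 * @wr: The completed work request.
 * @len: Length of the work request.
 * @flb: Freelist buffer array.
 * @cbfn_q: Completion queue: completed ioreqs are queued here for the
 *          caller to invoke their completion callbacks.
 */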
static void
csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len,
                        struct csio_fl_dma_buf *flb, void *cbfn_q)
{
        struct csio_ioreq *ioreq;
        uint8_t *scsiwr;
        uint8_t subop;
        void *cmnd;
        unsigned long flags;

        ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr);
        if (likely(ioreq)) {
                if (unlikely(*scsiwr == FW_SCSI_ABRT_CLS_WR)) {
                        subop = FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(
                                        ((struct fw_scsi_abrt_cls_wr *)
                                            scsiwr)->sub_opcode_to_chk_all_io);

                        csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n",
                                    subop ? "Close" : "Abort",
                                    ioreq, ioreq->wr_status);

                        spin_lock_irqsave(&hw->lock, flags);
                        if (subop)
                                csio_scsi_closed(ioreq,
                                                 (struct list_head *)cbfn_q);
                        else
                                csio_scsi_aborted(ioreq,
                                                  (struct list_head *)cbfn_q);
                        /*
                         * If the original command is gone by the time its
                         * abort/close completes, there is no one left to
                         * call back: unlink the ioreq from the state
                         * machine list and return it to the free pool.
                         */
                        cmnd = csio_scsi_cmnd(ioreq);
                        if (unlikely(cmnd == NULL))
                                list_del_init(&ioreq->sm.sm_list);

                        spin_unlock_irqrestore(&hw->lock, flags);

                        if (unlikely(cmnd == NULL))
                                csio_put_scsi_ioreq_lock(hw,
                                                csio_hw_to_scsim(hw), ioreq);
                } else {
                        spin_lock_irqsave(&hw->lock, flags);
                        csio_scsi_completed(ioreq, (struct list_head *)cbfn_q);
                        spin_unlock_irqrestore(&hw->lock, flags);
                }
        }
}
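/*
 * csio_scsi_isr_handler() - Common SCSI ISR handler.
 * @iq: Ingress queue pointer.
 *
 * Processes SCSI completions on the given ingress queue, invokes the
 * completion callbacks, and returns the completed ioreqs (and any DDP
 * buffers) to their free pools. Shared b/w MSIX and INTx handlers.
 */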
static inline irqreturn_t
csio_scsi_isr_handler(struct csio_q *iq)
{
        struct csio_hw *hw = (struct csio_hw *)iq->owner;
        LIST_HEAD(cbfn_q);
        struct list_head *tmp;
        struct csio_scsim *scm;
        struct csio_ioreq *ioreq;
        int isr_completions = 0;

        scm = csio_hw_to_scsim(hw);

        if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl,
                                        &cbfn_q) != 0))
                return IRQ_NONE;

        /* Call back the completion routines */
        list_for_each(tmp, &cbfn_q) {
                ioreq = (struct csio_ioreq *)tmp;
                isr_completions++;
                ioreq->io_cbfn(hw, ioreq);
                /* Release ddp buffer if used for this I/O */
                if (unlikely(ioreq->dcopy))
                        csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list,
                                                    ioreq->nsge);
        }

        if (isr_completions) {
                /* Return the ioreqs back to the free pool */
                csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q,
                                              isr_completions);
        }

        return IRQ_HANDLED;
}
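/*
 * csio_scsi_isr() - SCSI MSIX ISR.
 * @irq: IRQ number.
 * @dev_id: The ingress queue this vector is bound to.
 *
 * Top-level SCSI MSIX handler; defers to csio_scsi_isr_handler().
 */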
static irqreturn_t
csio_scsi_isr(int irq, void *dev_id)
{
        struct csio_q *iq = (struct csio_q *) dev_id;
        struct csio_hw *hw;

        if (unlikely(!iq))
                return IRQ_NONE;

        hw = (struct csio_hw *)iq->owner;

        if (unlikely(pci_channel_offline(hw->pdev))) {
                CSIO_INC_STATS(hw, n_pcich_offline);
                return IRQ_NONE;
        }

        csio_scsi_isr_handler(iq);

        return IRQ_HANDLED;
}
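/*
 * csio_scsi_intx_handler() - SCSI INTx handler.
 * @hw: HW module.
 * @wr: The work request.
 * @len: Length of the work request.
 * @flb: Freelist buffer array.
 * @priv: The ingress queue on which this completion arrived.
 *
 * Top-level SCSI INTx handler; defers to csio_scsi_isr_handler().
 */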
void
csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
                        struct csio_fl_dma_buf *flb, void *priv)
{
        struct csio_q *iq = priv;

        csio_scsi_isr_handler(iq);

} /* csio_scsi_intx_handler */
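/*
 * csio_fcoe_isr() - INTx/MSI ISR for FCoE.
 * @irq: IRQ number.
 * @dev_id: The csio_hw structure for this adapter.
 *
 * Single ISR used in INTx and MSI modes: checks for slow-path
 * interrupts, processes the forward interrupt queue, and handles
 * mailbox completions, scheduling the FW event worker if needed.
 */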
static irqreturn_t
csio_fcoe_isr(int irq, void *dev_id)
{
        struct csio_hw *hw = (struct csio_hw *) dev_id;
        struct csio_q *intx_q = NULL;
        int rv;
        irqreturn_t ret = IRQ_NONE;
        unsigned long flags;

        if (unlikely(!hw))
                return IRQ_NONE;

        if (unlikely(pci_channel_offline(hw->pdev))) {
                CSIO_INC_STATS(hw, n_pcich_offline);
                return IRQ_NONE;
        }

        /* Disable the interrupt for this PCI function. */
        if (hw->intr_mode == CSIO_IM_INTX)
                csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI_A));

        /*
         * The read in the following function will flush the
         * above write.
         */
        if (csio_hw_slow_intr_handler(hw))
                ret = IRQ_HANDLED;

        /* Get the INTx Forward interrupt queue. */
        intx_q = csio_get_q(hw, hw->intr_iq_idx);

        CSIO_DB_ASSERT(intx_q);

        /* IQ handler is not possible for intx_q, hence pass in NULL */
        if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0))
                ret = IRQ_HANDLED;

        spin_lock_irqsave(&hw->lock, flags);
        rv = csio_mb_isr_handler(hw);
        if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
                hw->flags |= CSIO_HWF_FWEVT_PENDING;
                spin_unlock_irqrestore(&hw->lock, flags);
                schedule_work(&hw->evtq_work);
                return IRQ_HANDLED;
        }
        spin_unlock_irqrestore(&hw->lock, flags);

        return ret;
}

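/*
 * csio_add_msix_desc() - Fill in the MSI-X vector descriptions used to
 * identify each vector (e.g. in /proc/interrupts).
 * @hw: HW module.
 */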
static void
csio_add_msix_desc(struct csio_hw *hw)
{
        int i;
        struct csio_msix_entries *entryp = &hw->msix_entries[0];
        int k = CSIO_EXTRA_VECS;
        int len = sizeof(entryp->desc) - 1;
        int cnt = hw->num_sqsets + k;

        /* Non-data vector */
        memset(entryp->desc, 0, len + 1);
        snprintf(entryp->desc, len, "csio-%02x:%02x:%x-nondata",
                 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));

        entryp++;
        memset(entryp->desc, 0, len + 1);
        snprintf(entryp->desc, len, "csio-%02x:%02x:%x-fwevt",
                 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
        entryp++;

        /* Name SCSI vecs */
        for (i = k; i < cnt; i++, entryp++) {
                memset(entryp->desc, 0, len + 1);
                snprintf(entryp->desc, len, "csio-%02x:%02x:%x-scsi%d",
                         CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw),
                         CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS);
        }
}

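/*
 * csio_request_irqs() - Request IRQs for this adapter.
 * @hw: HW module.
 *
 * Registers a single shared handler for INTx/MSI, or one handler per
 * vector for MSI-X: non-data, FW event, and one per SCSI queue set.
 * Frees everything acquired so far on failure.
 */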
int
csio_request_irqs(struct csio_hw *hw)
{
        int rv, i, j, k = 0;
        struct csio_msix_entries *entryp = &hw->msix_entries[0];
        struct csio_scsi_cpu_info *info;
        struct pci_dev *pdev = hw->pdev;

        if (hw->intr_mode != CSIO_IM_MSIX) {
                rv = request_irq(pci_irq_vector(pdev, 0), csio_fcoe_isr,
                                hw->intr_mode == CSIO_IM_MSI ? 0 : IRQF_SHARED,
                                KBUILD_MODNAME, hw);
                if (rv) {
                        csio_err(hw, "Failed to allocate interrupt line.\n");
                        goto out_free_irqs;
                }

                goto out;
        }

        /* Add the MSIX vector descriptions */
        csio_add_msix_desc(hw);

        rv = request_irq(pci_irq_vector(pdev, k), csio_nondata_isr, 0,
                         entryp[k].desc, hw);
        if (rv) {
                csio_err(hw, "IRQ request failed for vec %d err:%d\n",
                         pci_irq_vector(pdev, k), rv);
                goto out_free_irqs;
        }

        entryp[k++].dev_id = hw;

        rv = request_irq(pci_irq_vector(pdev, k), csio_fwevt_isr, 0,
                         entryp[k].desc, hw);
        if (rv) {
                csio_err(hw, "IRQ request failed for vec %d err:%d\n",
                         pci_irq_vector(pdev, k), rv);
                goto out_free_irqs;
        }

        entryp[k++].dev_id = hw;

        /* Allocate IRQs for SCSI */
        for (i = 0; i < hw->num_pports; i++) {
                info = &hw->scsi_cpu_info[i];
                for (j = 0; j < info->max_cpus; j++, k++) {
                        struct csio_scsi_qset *sqset = &hw->sqset[i][j];
                        struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];

                        rv = request_irq(pci_irq_vector(pdev, k), csio_scsi_isr, 0,
                                         entryp[k].desc, q);
                        if (rv) {
                                csio_err(hw,
                                       "IRQ request failed for vec %d err:%d\n",
                                       pci_irq_vector(pdev, k), rv);
                                goto out_free_irqs;
                        }

                        entryp[k].dev_id = q;

                } /* for all scsi cpus */
        } /* for all ports */

out:
        hw->flags |= CSIO_HWF_HOST_INTR_ENABLED;
        return 0;

out_free_irqs:
        for (i = 0; i < k; i++)
                free_irq(pci_irq_vector(pdev, i), hw->msix_entries[i].dev_id);
        pci_free_irq_vectors(hw->pdev);
        return -EINVAL;
}
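/*
 * csio_reduce_sqsets() - Reduce the number of SCSI queue sets to @cnt,
 * trimming the per-port CPU counts round-robin across ports.
 * @hw: HW module.
 * @cnt: Target number of queue sets.
 */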
static void
csio_reduce_sqsets(struct csio_hw *hw, int cnt)
{
        int i;
        struct csio_scsi_cpu_info *info;

        while (cnt < hw->num_sqsets) {
                for (i = 0; i < hw->num_pports; i++) {
                        info = &hw->scsi_cpu_info[i];
                        if (info->max_cpus > 1) {
                                info->max_cpus--;
                                hw->num_sqsets--;
                                if (hw->num_sqsets <= cnt)
                                        break;
                        }
                }
        }

        csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets);
}

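/*
 * csio_calc_sets() - irq_affinity calc_sets() callback.
 *
 * Splits the non-reserved vectors into one affinity set per port; if
 * fewer vectors than ports were granted, uses a single set instead.
 */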
static void csio_calc_sets(struct irq_affinity *affd, unsigned int nvecs)
{
        struct csio_hw *hw = affd->priv;
        u8 i;

        if (!nvecs)
                return;

        if (nvecs < hw->num_pports) {
                affd->nr_sets = 1;
                affd->set_size[0] = nvecs;
                return;
        }

        affd->nr_sets = hw->num_pports;
        for (i = 0; i < hw->num_pports; i++)
                affd->set_size[i] = nvecs / hw->num_pports;
}

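/*
 * csio_enable_msix() - Enable MSI-X and distribute vectors.
 * @hw: HW module.
 *
 * Allocates between (num_pports + CSIO_EXTRA_VECS) and
 * (num_sqsets + CSIO_EXTRA_VECS) vectors with managed affinity,
 * shrinks the queue sets if fewer vectors were granted, and records
 * the interrupt index for the mailbox, FW event, and SCSI queues.
 */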
static int
csio_enable_msix(struct csio_hw *hw)
{
        int i, j, k, n, min, cnt;
        int extra = CSIO_EXTRA_VECS;
        struct csio_scsi_cpu_info *info;
        struct irq_affinity desc = {
                .pre_vectors = CSIO_EXTRA_VECS,
                .calc_sets = csio_calc_sets,
                .priv = hw,
        };

        if (hw->num_pports > IRQ_AFFINITY_MAX_SETS)
                return -ENOSPC;

        min = hw->num_pports + extra;
        cnt = hw->num_sqsets + extra;

        /* Max vectors required based on #niqs configured in fw */
        if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw))
                cnt = min_t(uint8_t, hw->cfg_niq, cnt);

        csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);

        cnt = pci_alloc_irq_vectors_affinity(hw->pdev, min, cnt,
                        PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc);
        if (cnt < 0)
                return cnt;

        if (cnt < (hw->num_sqsets + extra)) {
                csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
                csio_reduce_sqsets(hw, cnt - extra);
        }

        /* Distribute vectors */
        k = 0;
        csio_set_nondata_intr_idx(hw, k);
        csio_set_mb_intr_idx(csio_hw_to_mbm(hw), k++);
        csio_set_fwevt_intr_idx(hw, k++);

        for (i = 0; i < hw->num_pports; i++) {
                info = &hw->scsi_cpu_info[i];

                for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
                        n = (j % info->max_cpus) + k;
                        hw->sqset[i][j].intr_idx = n;
                }

                k += info->max_cpus;
        }

        return 0;
}

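/*
 * csio_intr_enable() - Enable interrupts for the adapter.
 * @hw: HW module.
 *
 * Tries MSI-X first (csio_msi == 2), then MSI (csio_msi == 1), and
 * finally falls back to legacy INTx.
 */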
void
csio_intr_enable(struct csio_hw *hw)
{
        hw->intr_mode = CSIO_IM_NONE;
        hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;

        /* Try MSI-X first */
        if ((csio_msi == 2) && !csio_enable_msix(hw))
                hw->intr_mode = CSIO_IM_MSIX;
        else {
                /* Max iqs required based on #niqs configured in fw */
                if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS ||
                        !csio_is_hw_master(hw)) {
                        int extra = CSIO_EXTRA_MSI_IQS;

                        if (hw->cfg_niq < (hw->num_sqsets + extra)) {
                                csio_dbg(hw, "Reducing sqsets to %d\n",
                                         hw->cfg_niq - extra);
                                csio_reduce_sqsets(hw, hw->cfg_niq - extra);
                        }
                }

                if ((csio_msi == 1) && !pci_enable_msi(hw->pdev))
                        hw->intr_mode = CSIO_IM_MSI;
                else
                        hw->intr_mode = CSIO_IM_INTX;
        }

        csio_dbg(hw, "Using %s interrupt mode.\n",
                (hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" :
                ((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx"));
}

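/*
 * csio_intr_disable() - Disable interrupts for the adapter.
 * @hw: HW module.
 * @free: If true, also free the registered IRQ handlers.
 *
 * Disables interrupts at the hardware and releases the allocated
 * vectors.
 */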
void
csio_intr_disable(struct csio_hw *hw, bool free)
{
        csio_hw_intr_disable(hw);

        if (free) {
                int i;

                switch (hw->intr_mode) {
                case CSIO_IM_MSIX:
                        for (i = 0; i < hw->num_sqsets + CSIO_EXTRA_VECS; i++) {
                                free_irq(pci_irq_vector(hw->pdev, i),
                                         hw->msix_entries[i].dev_id);
                        }
                        break;
                case CSIO_IM_MSI:
                case CSIO_IM_INTX:
                        free_irq(pci_irq_vector(hw->pdev, 0), hw);
                        break;
                default:
                        break;
                }
        }

        pci_free_irq_vectors(hw->pdev);
        hw->intr_mode = CSIO_IM_NONE;
        hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
}