root/drivers/infiniband/hw/qib/qib_driver.c


DEFINITIONS

This source file includes the following definitions.
  1. qib_get_pci_dev
  2. qib_count_active_units
  3. qib_count_units
  4. qib_wait_linkstate
  5. qib_set_linkstate
  6. qib_get_egrbuf
  7. qib_rcv_hdrerr
  8. qib_kreceive
  9. qib_set_mtu
  10. qib_set_lid
  11. qib_run_led_override
  12. qib_set_led_override
  13. qib_reset_device

/*
 * Copyright (c) 2013 Intel Corporation. All rights reserved.
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/prefetch.h>

#include "qib.h"

/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the init code.
 */
const char ib_qib_version[] = QIB_DRIVER_VERSION "\n";

DEFINE_MUTEX(qib_mutex);        /* general driver use */

unsigned qib_ibmtu;
module_param_named(ibmtu, qib_ibmtu, uint, S_IRUGO);
MODULE_PARM_DESC(ibmtu, "Set max IB MTU (0=2KB, 1=256, 2=512, ... 5=4096)");

unsigned qib_compat_ddr_negotiate = 1;
module_param_named(compat_ddr_negotiate, qib_compat_ddr_negotiate, uint,
                   S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(compat_ddr_negotiate,
                 "Attempt pre-IBTA 1.2 DDR speed negotiation");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel <ibsupport@intel.com>");
MODULE_DESCRIPTION("Intel IB driver");

/*
 * QIB_PIO_MAXIBHDR is the max IB header size allowed for in our
 * PIO send buffers.  This is well beyond anything currently
 * defined in the InfiniBand spec.
 */
#define QIB_PIO_MAXIBHDR 128

/*
 * QIB_MAX_PKT_RECV is the max # of packets processed per receive interrupt.
 */
#define QIB_MAX_PKT_RECV 64

struct qlogic_ib_stats qib_stats;

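/*
 * Map the rdma_vt device info back to the enclosing qib_devdata and
 * return its PCI device.
 */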
struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi)
{
        struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
        struct qib_devdata *dd = container_of(ibdev,
                                              struct qib_devdata, verbs_dev);
        return dd->pcidev;
}

/*
 * Return count of units with at least one port ACTIVE.
 */
int qib_count_active_units(void)
{
        struct qib_devdata *dd;
        struct qib_pportdata *ppd;
        unsigned long index, flags;
        int pidx, nunits_active = 0;

        xa_lock_irqsave(&qib_dev_table, flags);
        xa_for_each(&qib_dev_table, index, dd) {
                if (!(dd->flags & QIB_PRESENT) || !dd->kregbase)
                        continue;
                for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                        ppd = dd->pport + pidx;
                        if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
                                         QIBL_LINKARMED | QIBL_LINKACTIVE))) {
                                nunits_active++;
                                break;
                        }
                }
        }
        xa_unlock_irqrestore(&qib_dev_table, flags);
        return nunits_active;
}

/*
 * Return count of all units, optionally returning in arguments
 * the number of usable (present) units and the number of
 * ports that are up.
 */
int qib_count_units(int *npresentp, int *nupp)
{
        int nunits = 0, npresent = 0, nup = 0;
        struct qib_devdata *dd;
        unsigned long index, flags;
        int pidx;
        struct qib_pportdata *ppd;

        xa_lock_irqsave(&qib_dev_table, flags);
        xa_for_each(&qib_dev_table, index, dd) {
                nunits++;
                if ((dd->flags & QIB_PRESENT) && dd->kregbase)
                        npresent++;
                for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                        ppd = dd->pport + pidx;
                        if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
                                         QIBL_LINKARMED | QIBL_LINKACTIVE)))
                                nup++;
                }
        }
        xa_unlock_irqrestore(&qib_dev_table, flags);

        if (npresentp)
                *npresentp = npresent;
        if (nupp)
                *nupp = nup;

        return nunits;
}


/**
 * qib_wait_linkstate - wait for an IB link state change to occur
 * @ppd: the qlogic_ib per-port data
 * @state: the state to wait for
 * @msecs: the number of milliseconds to wait
 *
 * Wait up to msecs milliseconds for an IB link state change to occur.
 * For now, take the easy polling route.  Currently used only by
 * qib_set_linkstate.  Returns 0 if the state was reached, otherwise
 * -ETIMEDOUT.  state can have multiple bits set, to accept any of
 * several transitions.
 */
int qib_wait_linkstate(struct qib_pportdata *ppd, u32 state, int msecs)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(&ppd->lflags_lock, flags);
        if (ppd->state_wanted) {
                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
                ret = -EBUSY;
                goto bail;
        }
        ppd->state_wanted = state;
        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
        wait_event_interruptible_timeout(ppd->state_wait,
                                         (ppd->lflags & state),
                                         msecs_to_jiffies(msecs));
        spin_lock_irqsave(&ppd->lflags_lock, flags);
        ppd->state_wanted = 0;
        spin_unlock_irqrestore(&ppd->lflags_lock, flags);

        if (!(ppd->lflags & state))
                ret = -ETIMEDOUT;
        else
                ret = 0;
bail:
        return ret;
}

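/**
 * qib_set_linkstate - set the IB link state
 * @ppd: the qlogic_ib per-port data
 * @newstate: the QIB_IB_LINK* state to move to
 *
 * The LINKDOWN variants just issue the command and return without
 * waiting; for ARMED and ACTIVE we briefly wait for the transition
 * via qib_wait_linkstate().  Returns 0 on success, otherwise a
 * negative errno.
 */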
int qib_set_linkstate(struct qib_pportdata *ppd, u8 newstate)
{
        u32 lstate;
        int ret;
        struct qib_devdata *dd = ppd->dd;
        unsigned long flags;

        switch (newstate) {
        case QIB_IB_LINKDOWN_ONLY:
                dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
                                 IB_LINKCMD_DOWN | IB_LINKINITCMD_NOP);
                /* don't wait */
                ret = 0;
                goto bail;

        case QIB_IB_LINKDOWN:
                dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
                                 IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
                /* don't wait */
                ret = 0;
                goto bail;

        case QIB_IB_LINKDOWN_SLEEP:
                dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
                                 IB_LINKCMD_DOWN | IB_LINKINITCMD_SLEEP);
                /* don't wait */
                ret = 0;
                goto bail;

        case QIB_IB_LINKDOWN_DISABLE:
                dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
                                 IB_LINKCMD_DOWN | IB_LINKINITCMD_DISABLE);
                /* don't wait */
                ret = 0;
                goto bail;

        case QIB_IB_LINKARM:
                if (ppd->lflags & QIBL_LINKARMED) {
                        ret = 0;
                        goto bail;
                }
                if (!(ppd->lflags & (QIBL_LINKINIT | QIBL_LINKACTIVE))) {
                        ret = -EINVAL;
                        goto bail;
                }
                /*
                 * Since the port can be ACTIVE when we ask for ARMED,
                 * clear QIBL_LINKV so we can wait for a transition.
                 * If the link isn't ARMED, then something else happened
                 * and there is no point waiting for ARMED.
                 */
                spin_lock_irqsave(&ppd->lflags_lock, flags);
                ppd->lflags &= ~QIBL_LINKV;
                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
                dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
                                 IB_LINKCMD_ARMED | IB_LINKINITCMD_NOP);
                lstate = QIBL_LINKV;
                break;

        case QIB_IB_LINKACTIVE:
                if (ppd->lflags & QIBL_LINKACTIVE) {
                        ret = 0;
                        goto bail;
                }
                if (!(ppd->lflags & QIBL_LINKARMED)) {
                        ret = -EINVAL;
                        goto bail;
                }
                dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
                                 IB_LINKCMD_ACTIVE | IB_LINKINITCMD_NOP);
                lstate = QIBL_LINKACTIVE;
                break;

        default:
                ret = -EINVAL;
                goto bail;
        }
        ret = qib_wait_linkstate(ppd, lstate, 10);

bail:
        return ret;
}

/*
 * Get address of eager buffer from its index (allocated in chunks, not
 * contiguous).
 */
static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
{
        const u32 chunk = etail >> rcd->rcvegrbufs_perchunk_shift;
        const u32 idx =  etail & ((u32)rcd->rcvegrbufs_perchunk - 1);

        return rcd->rcvegrbuf[chunk] + (idx << rcd->dd->rcvegrbufsize_shift);
}

/*
 * Returns 1 if error was a CRC, else 0.
 * Needed for some chips' synthesized error counters.
 */
static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
                          u32 ctxt, u32 eflags, u32 l, u32 etail,
                          __le32 *rhf_addr, struct qib_message_header *rhdr)
{
        u32 ret = 0;

        if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
                ret = 1;
        else if (eflags == QLOGIC_IB_RHF_H_TIDERR) {
                /* For TIDERR and RC QPs, preemptively schedule a NAK */
                struct ib_header *hdr = (struct ib_header *)rhdr;
                struct ib_other_headers *ohdr = NULL;
                struct qib_ibport *ibp = &ppd->ibport_data;
                struct qib_devdata *dd = ppd->dd;
                struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
                struct rvt_qp *qp = NULL;
                u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
                u16 lid  = be16_to_cpu(hdr->lrh[1]);
                int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
                u32 qp_num;
                u32 opcode;
                u32 psn;
                int diff;

                /* Sanity check packet */
                if (tlen < 24)
                        goto drop;

                if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
                        lid &= ~((1 << ppd->lmc) - 1);
                        if (unlikely(lid != ppd->lid))
                                goto drop;
                }

                /* Check for GRH */
                if (lnh == QIB_LRH_BTH)
                        ohdr = &hdr->u.oth;
                else if (lnh == QIB_LRH_GRH) {
                        u32 vtf;

                        ohdr = &hdr->u.l.oth;
                        if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
                                goto drop;
                        vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
                        if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
                                goto drop;
                } else
                        goto drop;

                /* Get opcode and PSN from packet */
                opcode = be32_to_cpu(ohdr->bth[0]);
                opcode >>= 24;
                psn = be32_to_cpu(ohdr->bth[2]);

                /* Get the destination QP number. */
                qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
                if (qp_num != QIB_MULTICAST_QPN) {
                        int ruc_res;

                        rcu_read_lock();
                        qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
                        if (!qp) {
                                rcu_read_unlock();
                                goto drop;
                        }

                        /*
                         * Handle only RC QPs - for other QP types drop error
                         * packet.
                         */
                        spin_lock(&qp->r_lock);

                        /* Check for valid receive state. */
                        if (!(ib_rvt_state_ops[qp->state] &
                              RVT_PROCESS_RECV_OK)) {
                                ibp->rvp.n_pkt_drops++;
                                goto unlock;
                        }

                        switch (qp->ibqp.qp_type) {
                        case IB_QPT_RC:
                                ruc_res =
                                        qib_ruc_check_hdr(
                                                ibp, hdr,
                                                lnh == QIB_LRH_GRH,
                                                qp,
                                                be32_to_cpu(ohdr->bth[0]));
                                if (ruc_res)
                                        goto unlock;

                                /* Only deal with RDMA Writes for now */
                                if (opcode <
                                    IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
                                        diff = qib_cmp24(psn, qp->r_psn);
                                        if (!qp->r_nak_state && diff >= 0) {
                                                ibp->rvp.n_rc_seqnak++;
                                                qp->r_nak_state =
                                                        IB_NAK_PSN_ERROR;
                                                /* Use the expected PSN. */
                                                qp->r_ack_psn = qp->r_psn;
                                                /*
                                                 * Wait to send the sequence
                                                 * NAK until all packets
                                                 * in the receive queue have
                                                 * been processed.
                                                 * Otherwise, we end up
                                                 * propagating congestion.
                                                 */
                                                if (list_empty(&qp->rspwait)) {
                                                        qp->r_flags |=
                                                                RVT_R_RSP_NAK;
                                                        rvt_get_qp(qp);
                                                        list_add_tail(
                                                         &qp->rspwait,
                                                         &rcd->qp_wait_list);
                                                }
                                        } /* Out of sequence NAK */
                                } /* QP Request NAKs */
                                break;
                        case IB_QPT_SMI:
                        case IB_QPT_GSI:
                        case IB_QPT_UD:
                        case IB_QPT_UC:
                        default:
                                /* For now don't handle any other QP types */
                                break;
                        }

unlock:
                        spin_unlock(&qp->r_lock);
                        rcu_read_unlock();
                } /* Unicast QP */
        } /* Valid packet with TIDErr */

drop:
        return ret;
}

/**
 * qib_kreceive - receive a packet
 * @rcd: the qlogic_ib context
 * @llic: gets count of good packets needed to clear lli,
 *          (used with chips that need to track crcs for lli)
 * @npkts: returns the number of packets processed
 *
 * Called from interrupt handler for errors or receive interrupt.
 * Returns number of CRC error packets, needed by some chips for
 * local link integrity tracking.  crcs are adjusted down by following
 * good packets, if any, and count of good packets is also tracked.
 */
u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
{
        struct qib_devdata *dd = rcd->dd;
        struct qib_pportdata *ppd = rcd->ppd;
        __le32 *rhf_addr;
        void *ebuf;
        const u32 rsize = dd->rcvhdrentsize;        /* words */
        const u32 maxcnt = dd->rcvhdrcnt * rsize;   /* words */
        u32 etail = -1, l, hdrqtail;
        struct qib_message_header *hdr;
        u32 eflags, etype, tlen, i = 0, updegr = 0, crcs = 0;
        int last;
        u64 lval;
        struct rvt_qp *qp, *nqp;

        l = rcd->head;
        rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
        if (dd->flags & QIB_NODMA_RTAIL) {
                u32 seq = qib_hdrget_seq(rhf_addr);

                if (seq != rcd->seq_cnt)
                        goto bail;
                hdrqtail = 0;
        } else {
                hdrqtail = qib_get_rcvhdrtail(rcd);
                if (l == hdrqtail)
                        goto bail;
                smp_rmb();  /* prevent speculative reads of dma'ed hdrq */
        }

        for (last = 0, i = 1; !last; i += !last) {
                hdr = dd->f_get_msgheader(dd, rhf_addr);
                eflags = qib_hdrget_err_flags(rhf_addr);
                etype = qib_hdrget_rcv_type(rhf_addr);
                /* total length */
                tlen = qib_hdrget_length_in_bytes(rhf_addr);
                ebuf = NULL;
                if ((dd->flags & QIB_NODMA_RTAIL) ?
                    qib_hdrget_use_egr_buf(rhf_addr) :
                    (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
                        etail = qib_hdrget_index(rhf_addr);
                        updegr = 1;
                        if (tlen > sizeof(*hdr) ||
                            etype >= RCVHQ_RCV_TYPE_NON_KD) {
                                ebuf = qib_get_egrbuf(rcd, etail);
                                prefetch_range(ebuf, tlen - sizeof(*hdr));
                        }
                }
                if (!eflags) {
                        u16 lrh_len = be16_to_cpu(hdr->lrh[2]) << 2;

                        if (lrh_len != tlen) {
                                qib_stats.sps_lenerrs++;
                                goto move_along;
                        }
                }
                if (etype == RCVHQ_RCV_TYPE_NON_KD && !eflags &&
                    ebuf == NULL &&
                    tlen > (dd->rcvhdrentsize - 2 + 1 -
                                qib_hdrget_offset(rhf_addr)) << 2) {
                        goto move_along;
                }

                /*
                 * Both tiderr and qibhdrerr are set for all plain IB
                 * packets; only qibhdrerr should be set.
                 */
                if (unlikely(eflags))
                        crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l,
                                               etail, rhf_addr, hdr);
                else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
                        qib_ib_rcv(rcd, hdr, ebuf, tlen);
                        if (crcs)
                                crcs--;
                        else if (llic && *llic)
                                --*llic;
                }
move_along:
                l += rsize;
                if (l >= maxcnt)
                        l = 0;
                if (i == QIB_MAX_PKT_RECV)
                        last = 1;

                rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
                if (dd->flags & QIB_NODMA_RTAIL) {
                        u32 seq = qib_hdrget_seq(rhf_addr);

                        if (++rcd->seq_cnt > 13)
                                rcd->seq_cnt = 1;
                        if (seq != rcd->seq_cnt)
                                last = 1;
                } else if (l == hdrqtail)
                        last = 1;
                /*
                 * Update head regs etc., every 16 packets, if not last pkt,
                 * to help prevent rcvhdrq overflows, when many packets
                 * are processed and queue is nearly full.
                 * Don't request an interrupt for intermediate updates.
                 */
                lval = l;
                if (!last && !(i & 0xf)) {
                        dd->f_update_usrhead(rcd, lval, updegr, etail, i);
                        updegr = 0;
                }
        }

        rcd->head = l;

        /*
         * Iterate over all QPs waiting to respond.
         * The list won't change since the IRQ is only run on one CPU.
         */
        list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
                list_del_init(&qp->rspwait);
                if (qp->r_flags & RVT_R_RSP_NAK) {
                        qp->r_flags &= ~RVT_R_RSP_NAK;
                        qib_send_rc_ack(qp);
                }
                if (qp->r_flags & RVT_R_RSP_SEND) {
                        unsigned long flags;

                        qp->r_flags &= ~RVT_R_RSP_SEND;
                        spin_lock_irqsave(&qp->s_lock, flags);
                        if (ib_rvt_state_ops[qp->state] &
                                        RVT_PROCESS_OR_FLUSH_SEND)
                                qib_schedule_send(qp);
                        spin_unlock_irqrestore(&qp->s_lock, flags);
                }
                rvt_put_qp(qp);
        }

bail:
        /* Report number of packets consumed */
        if (npkts)
                *npkts = i;

        /*
         * Always write head at end, and setup rcv interrupt, even
         * if no packets were processed.
         */
        lval = (u64)rcd->head | dd->rhdrhead_intr_off;
        dd->f_update_usrhead(rcd, lval, updegr, etail, i);
        return crcs;
}

/**
 * qib_set_mtu - set the MTU
 * @ppd: the per-port data
 * @arg: the new MTU
 *
 * We can handle "any" incoming size; the issue here is whether we
 * need to restrict our outgoing size.  For now, we don't do any
 * sanity checking on this, and we don't deal with what happens to
 * programs that are already running when the size changes.
 * NOTE: changing the MTU will usually cause the IBC to go back to
 * link INIT state...
 */
int qib_set_mtu(struct qib_pportdata *ppd, u16 arg)
{
        u32 piosize;
        int ret, chk;

        if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
            arg != 4096) {
                ret = -EINVAL;
                goto bail;
        }
        chk = ib_mtu_enum_to_int(qib_ibmtu);
        if (chk > 0 && arg > chk) {
                ret = -EINVAL;
                goto bail;
        }

        piosize = ppd->ibmaxlen;
        ppd->ibmtu = arg;

        if (arg >= (piosize - QIB_PIO_MAXIBHDR)) {
                /* Only if it's not the initial value (or reset to it) */
                if (piosize != ppd->init_ibmaxlen) {
                        if (arg > piosize && arg <= ppd->init_ibmaxlen)
                                piosize = ppd->init_ibmaxlen - 2 * sizeof(u32);
                        ppd->ibmaxlen = piosize;
                }
        } else if ((arg + QIB_PIO_MAXIBHDR) != ppd->ibmaxlen) {
                piosize = arg + QIB_PIO_MAXIBHDR - 2 * sizeof(u32);
                ppd->ibmaxlen = piosize;
        }

        ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_MTU, 0);

        ret = 0;

bail:
        return ret;
}

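/*
 * Record the port's assigned LID and LMC, and push the combined
 * LID/LMC mask down to the chip-specific code.
 */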
int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
{
        struct qib_devdata *dd = ppd->dd;

        ppd->lid = lid;
        ppd->lmc = lmc;

        dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LIDLMC,
                         lid | (~((1U << lmc) - 1)) << 16);

        qib_devinfo(dd->pcidev, "IB%u:%u got a lid: 0x%x\n",
                    dd->unit, ppd->port, lid);

        return 0;
}

/*
 * The following deals with the "obviously simple" task of overriding the
 * state of the LEDs, which normally indicate link physical and logical
 * status.  The complications arise in dealing with different hardware
 * mappings and the board-dependent routine being called from interrupts.
 * And then there's the requirement to _flash_ them.
 */
#define LED_OVER_FREQ_SHIFT 8
#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
/* Below is "non-zero" to force override, but both actual LEDs are off */
#define LED_OVER_BOTH_OFF (8)

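/*
 * Timer callback for LED overrides: alternate between the two override
 * phases, push the current value out to the LEDs, and re-arm the timer
 * while either phase is still non-zero.
 */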
static void qib_run_led_override(struct timer_list *t)
{
        struct qib_pportdata *ppd = from_timer(ppd, t,
                                                    led_override_timer);
        struct qib_devdata *dd = ppd->dd;
        int timeoff;
        int ph_idx;

        if (!(dd->flags & QIB_INITTED))
                return;

        ph_idx = ppd->led_override_phase++ & 1;
        ppd->led_override = ppd->led_override_vals[ph_idx];
        timeoff = ppd->led_override_timeoff;

        dd->f_setextled(ppd, 1);
        /*
         * Don't re-fire the timer if the user asked for it to be off; we
         * let it fire one more time after they turn it off to simplify
         * the logic.
         */
        if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
                mod_timer(&ppd->led_override_timer, jiffies + timeoff);
}

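/*
 * Apply an LED override request.  When blinking, the low two nybbles
 * of val hold the two blink phases and the byte above them the blink
 * frequency; with a zero frequency the low nybble is applied steadily.
 * Starts the override timer if it is not already running.
 */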
void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val)
{
        struct qib_devdata *dd = ppd->dd;
        int timeoff, freq;

        if (!(dd->flags & QIB_INITTED))
                return;

        /* First check if we are blinking. If not, use 1 Hz polling */
        timeoff = HZ;
        freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;

        if (freq) {
                /* For blink, set each phase from one nybble of val */
                ppd->led_override_vals[0] = val & 0xF;
                ppd->led_override_vals[1] = (val >> 4) & 0xF;
                timeoff = (HZ << 4)/freq;
        } else {
                /* For non-blink, set both phases the same. */
                ppd->led_override_vals[0] = val & 0xF;
                ppd->led_override_vals[1] = val & 0xF;
        }
        ppd->led_override_timeoff = timeoff;

        /*
         * If the timer has not already been started, do so. Use a "quick"
         * timeout so the function will be called soon, to look at our request.
         */
        if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
                /* Need to start timer */
                timer_setup(&ppd->led_override_timer, qib_run_led_override, 0);
                ppd->led_override_timer.expires = jiffies + 1;
                add_timer(&ppd->led_override_timer);
        } else {
                if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
                        mod_timer(&ppd->led_override_timer, jiffies + 1);
                atomic_dec(&ppd->led_override_timer_active);
        }
}


/**
 * qib_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload).  We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize.  For
 * now, we only allow this if no user contexts are open that use chip
 * resources.
 */
int qib_reset_device(int unit)
{
        int ret, i;
        struct qib_devdata *dd = qib_lookup(unit);
        struct qib_pportdata *ppd;
        unsigned long flags;
        int pidx;

        if (!dd) {
                ret = -ENODEV;
                goto bail;
        }

        qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit);

        if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) {
                qib_devinfo(dd->pcidev,
                        "Invalid unit number %u or not initialized or not present\n",
                        unit);
                ret = -ENXIO;
                goto bail;
        }

        spin_lock_irqsave(&dd->uctxt_lock, flags);
        if (dd->rcd)
                for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
                        if (!dd->rcd[i] || !dd->rcd[i]->cnt)
                                continue;
                        spin_unlock_irqrestore(&dd->uctxt_lock, flags);
                        ret = -EBUSY;
                        goto bail;
                }
        spin_unlock_irqrestore(&dd->uctxt_lock, flags);

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                if (atomic_read(&ppd->led_override_timer_active)) {
                        /* Need to stop LED timer, _then_ shut off LEDs */
                        del_timer_sync(&ppd->led_override_timer);
                        atomic_set(&ppd->led_override_timer_active, 0);
                }

                /* Shut off LEDs after we are sure timer is not running */
                ppd->led_override = LED_OVER_BOTH_OFF;
                dd->f_setextled(ppd, 0);
                if (dd->flags & QIB_HAS_SEND_DMA)
                        qib_teardown_sdma(ppd);
        }

        ret = dd->f_reset(dd);
        if (ret == 1)
                ret = qib_init(dd, 1);
        else
                ret = -EAGAIN;
        if (ret)
                qib_dev_err(dd,
                        "Reinitialize unit %u after reset failed with %d\n",
                        unit, ret);
        else
                qib_devinfo(dd->pcidev,
                        "Reinitialized unit %u after resetting\n",
                        unit);

bail:
        return ret;
}
