Searched refs:sq (Results 1 - 98 of 98) sorted by relevance

/linux-4.1.27/drivers/infiniband/hw/cxgb4/
qp.c
94 static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) dealloc_oc_sq() argument
96 c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize); dealloc_oc_sq()
99 static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) dealloc_host_sq() argument
101 dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue, dealloc_host_sq()
102 pci_unmap_addr(sq, mapping)); dealloc_host_sq()
105 static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) dealloc_sq() argument
107 if (t4_sq_onchip(sq)) dealloc_sq()
108 dealloc_oc_sq(rdev, sq); dealloc_sq()
110 dealloc_host_sq(rdev, sq); dealloc_sq()
113 static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) alloc_oc_sq() argument
117 sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize); alloc_oc_sq()
118 if (!sq->dma_addr) alloc_oc_sq()
120 sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr - alloc_oc_sq()
122 sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr - alloc_oc_sq()
124 sq->flags |= T4_SQ_ONCHIP; alloc_oc_sq()
128 static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) alloc_host_sq() argument
130 sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize, alloc_host_sq()
131 &(sq->dma_addr), GFP_KERNEL); alloc_host_sq()
132 if (!sq->queue) alloc_host_sq()
134 sq->phys_addr = virt_to_phys(sq->queue); alloc_host_sq()
135 pci_unmap_addr_set(sq, mapping, sq->dma_addr); alloc_host_sq()
139 static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user) alloc_sq() argument
143 ret = alloc_oc_sq(rdev, sq); alloc_sq()
145 ret = alloc_host_sq(rdev, sq); alloc_sq()
159 dealloc_sq(rdev, &wq->sq); destroy_qp()
162 kfree(wq->sq.sw_sq); destroy_qp()
164 c4iw_put_qpid(rdev, wq->sq.qid, uctx); destroy_qp()
181 wq->sq.qid = c4iw_get_qpid(rdev, uctx); create_qp()
182 if (!wq->sq.qid) create_qp()
192 wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq, create_qp()
194 if (!wq->sq.sw_sq) { create_qp()
217 ret = alloc_sq(rdev, &wq->sq, user); create_qp()
220 memset(wq->sq.queue, 0, wq->sq.memsize); create_qp()
221 dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr); create_qp()
230 PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n", create_qp()
231 __func__, wq->sq.queue, create_qp()
232 (unsigned long long)virt_to_phys(wq->sq.queue), create_qp()
243 off = (wq->sq.qid << rdev->qpshift) & PAGE_MASK; create_qp()
245 wq->sq.udb = (u64 __iomem *)(rdev->bar2_pa + off); create_qp()
247 off += 128 * (wq->sq.qid & rdev->qpmask) + 8; create_qp()
248 wq->sq.udb = (u64 __iomem *)(rdev->bar2_kva + off); create_qp()
286 eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + create_qp()
293 (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) | create_qp()
303 res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid); create_qp()
304 res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr); create_qp()
335 ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__); create_qp()
340 __func__, wq->sq.qid, wq->rq.qid, wq->db, create_qp()
341 (__force unsigned long) wq->sq.udb, create_qp()
350 dealloc_sq(rdev, &wq->sq); create_qp()
356 kfree(wq->sq.sw_sq); create_qp()
360 c4iw_put_qpid(rdev, wq->sq.qid, uctx); create_qp()
364 static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp, build_immd() argument
380 if (dstp == (u8 *)&sq->queue[sq->size]) build_immd()
381 dstp = (u8 *)sq->queue; build_immd()
382 if (rem <= (u8 *)&sq->queue[sq->size] - dstp) build_immd()
385 len = (u8 *)&sq->queue[sq->size] - dstp; build_immd()
434 static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, build_rdma_send() argument
472 ret = build_immd(sq, wqe->send.u.immd_src, wr, build_rdma_send()
479 ret = build_isgl((__be64 *)sq->queue, build_rdma_send()
480 (__be64 *)&sq->queue[sq->size], build_rdma_send()
501 static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe, build_rdma_write() argument
515 ret = build_immd(sq, wqe->write.u.immd_src, wr, build_rdma_write()
522 ret = build_isgl((__be64 *)sq->queue, build_rdma_write()
523 (__be64 *)&sq->queue[sq->size], build_rdma_write()
588 static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe, build_fastreg() argument
644 if (++p == (__be64 *)&sq->queue[sq->size]) build_fastreg()
645 p = (__be64 *)sq->queue; build_fastreg()
651 if (++p == (__be64 *)&sq->queue[sq->size]) build_fastreg()
652 p = (__be64 *)sq->queue; build_fastreg()
699 qhp->wq.sq.wq_pidx_inc += inc; ring_kernel_sq_db()
755 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue + c4iw_post_send()
756 qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE); c4iw_post_send()
763 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx]; c4iw_post_send()
774 err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16); c4iw_post_send()
779 err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16); c4iw_post_send()
793 if (!qhp->wq.sq.oldest_read) c4iw_post_send()
794 qhp->wq.sq.oldest_read = swsqe; c4iw_post_send()
799 err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16, c4iw_post_send()
820 swsqe->idx = qhp->wq.sq.pidx; c4iw_post_send()
832 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16); c4iw_post_send()
835 __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx, c4iw_post_send()
1075 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, post_terminate()
1196 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, rdma_fini()
1220 qhp->wq.sq.qid, __func__); rdma_fini()
1258 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord); rdma_init()
1304 wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid); rdma_init()
1305 wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid); rdma_init()
1324 qhp->ep->hwtid, qhp->wq.sq.qid, __func__); rdma_init()
1348 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state, c4iw_modify_qp()
1531 qhp->wq.sq.qid); c4iw_modify_qp()
1588 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); c4iw_destroy_qp()
1603 PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid); c4iw_destroy_qp()
1654 qhp->wq.sq.size = sqsize; c4iw_create_qp()
1655 qhp->wq.sq.memsize = c4iw_create_qp()
1657 sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64); c4iw_create_qp()
1658 qhp->wq.sq.flush_cidx = -1; c4iw_create_qp()
1665 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE); c4iw_create_qp()
1700 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); c4iw_create_qp()
1725 if (t4_sq_onchip(&qhp->wq.sq)) { c4iw_create_qp()
1735 uresp.sqid = qhp->wq.sq.qid; c4iw_create_qp()
1736 uresp.sq_size = qhp->wq.sq.size; c4iw_create_qp()
1737 uresp.sq_memsize = qhp->wq.sq.memsize; c4iw_create_qp()
1761 mm1->addr = qhp->wq.sq.phys_addr; c4iw_create_qp()
1762 mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize); c4iw_create_qp()
1769 mm3->addr = (__force unsigned long)qhp->wq.sq.udb; c4iw_create_qp()
1784 qhp->ibqp.qp_num = qhp->wq.sq.qid; c4iw_create_qp()
1787 PDBG("%s sq id %u size %u memsize %zu num_entries %u " c4iw_create_qp()
1789 qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize, c4iw_create_qp()
1804 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); c4iw_create_qp()
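
The build_immd() hits above (lines 380-385) wrap the destination pointer back to the start of the send queue when immediate data crosses the end of the ring. A minimal userspace sketch of that wrap-around copy; names here are illustrative, not the driver's own:

    #include <stdio.h>
    #include <string.h>

    /* Copy len bytes into a circular buffer starting at offset dst,
     * wrapping to the start of the ring when the end is reached. */
    static void ring_copy(char *ring, size_t ring_size, size_t dst,
                          const char *src, size_t len)
    {
            while (len) {
                    size_t chunk = ring_size - dst;

                    if (chunk > len)
                            chunk = len;
                    memcpy(ring + dst, src, chunk);
                    src += chunk;
                    len -= chunk;
                    dst = (dst + chunk) % ring_size;
            }
    }

    int main(void)
    {
            char ring[9] = "........";

            ring_copy(ring, 8, 6, "ABCD", 4);
            printf("%s\n", ring); /* prints CD....AB */
            return 0;
    }
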
cq.c
196 CQE_QPID_V(wq->sq.qid)); insert_recv_cqe()
229 CQE_QPID_V(wq->sq.qid)); insert_sq_cqe()
247 if (wq->sq.flush_cidx == -1) c4iw_flush_sq()
248 wq->sq.flush_cidx = wq->sq.cidx; c4iw_flush_sq()
249 idx = wq->sq.flush_cidx; c4iw_flush_sq()
250 BUG_ON(idx >= wq->sq.size); c4iw_flush_sq()
251 while (idx != wq->sq.pidx) { c4iw_flush_sq()
252 swsqe = &wq->sq.sw_sq[idx]; c4iw_flush_sq()
256 if (wq->sq.oldest_read == swsqe) { c4iw_flush_sq()
261 if (++idx == wq->sq.size) c4iw_flush_sq()
264 wq->sq.flush_cidx += flushed; c4iw_flush_sq()
265 if (wq->sq.flush_cidx >= wq->sq.size) c4iw_flush_sq()
266 wq->sq.flush_cidx -= wq->sq.size; c4iw_flush_sq()
275 if (wq->sq.flush_cidx == -1) flush_completed_wrs()
276 wq->sq.flush_cidx = wq->sq.cidx; flush_completed_wrs()
277 cidx = wq->sq.flush_cidx; flush_completed_wrs()
278 BUG_ON(cidx > wq->sq.size); flush_completed_wrs()
280 while (cidx != wq->sq.pidx) { flush_completed_wrs()
281 swsqe = &wq->sq.sw_sq[cidx]; flush_completed_wrs()
283 if (++cidx == wq->sq.size) flush_completed_wrs()
292 PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n", flush_completed_wrs()
298 if (++cidx == wq->sq.size) flush_completed_wrs()
300 wq->sq.flush_cidx = cidx; flush_completed_wrs()
309 read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx; create_read_req_cqe()
310 read_cqe->len = htonl(wq->sq.oldest_read->read_len); create_read_req_cqe()
321 u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1; advance_oldest_read()
323 if (rptr == wq->sq.size) advance_oldest_read()
325 while (rptr != wq->sq.pidx) { advance_oldest_read()
326 wq->sq.oldest_read = &wq->sq.sw_sq[rptr]; advance_oldest_read()
328 if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ) advance_oldest_read()
330 if (++rptr == wq->sq.size) advance_oldest_read()
333 wq->sq.oldest_read = NULL; advance_oldest_read()
385 if (!qhp->wq.sq.oldest_read->signaled) { c4iw_flush_hw_cq()
403 swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)]; c4iw_flush_hw_cq()
446 (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq)) c4iw_count_rcqes()
547 if (!wq->sq.oldest_read->signaled) { poll_cq()
603 if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) { poll_cq()
608 swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)]; poll_cq()
624 BUG_ON(idx >= wq->sq.size); poll_cq()
634 if (idx < wq->sq.cidx) poll_cq()
635 wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx; poll_cq()
637 wq->sq.in_use -= idx - wq->sq.cidx; poll_cq()
638 BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size); poll_cq()
640 wq->sq.cidx = (uint16_t)idx; poll_cq()
641 PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx); poll_cq()
642 *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id; poll_cq()
t4.h
332 struct t4_sq sq; member in struct:t4_wq
388 static inline int t4_sq_onchip(struct t4_sq *sq) t4_sq_onchip() argument
390 return sq->flags & T4_SQ_ONCHIP; t4_sq_onchip()
395 return wq->sq.in_use == 0; t4_sq_empty()
400 return wq->sq.in_use == (wq->sq.size - 1); t4_sq_full()
405 return wq->sq.size - 1 - wq->sq.in_use; t4_sq_avail()
410 wq->sq.in_use++; t4_sq_produce()
411 if (++wq->sq.pidx == wq->sq.size) t4_sq_produce()
412 wq->sq.pidx = 0; t4_sq_produce()
413 wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); t4_sq_produce()
414 if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS) t4_sq_produce()
415 wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS; t4_sq_produce()
420 BUG_ON(wq->sq.in_use < 1); t4_sq_consume()
421 if (wq->sq.cidx == wq->sq.flush_cidx) t4_sq_consume()
422 wq->sq.flush_cidx = -1; t4_sq_consume()
423 wq->sq.in_use--; t4_sq_consume()
424 if (++wq->sq.cidx == wq->sq.size) t4_sq_consume()
425 wq->sq.cidx = 0; t4_sq_consume()
430 return wq->sq.queue[wq->sq.size].status.host_wq_pidx; t4_sq_host_wq_pidx()
435 return wq->sq.size * T4_SQ_NUM_SLOTS; t4_sq_wq_size()
462 PDBG("%s: WC wq->sq.pidx = %d\n", t4_ring_sq_db()
463 __func__, wq->sq.pidx); t4_ring_sq_db()
464 pio_copy(wq->sq.udb + 7, (void *)wqe); t4_ring_sq_db()
466 PDBG("%s: DB wq->sq.pidx = %d\n", t4_ring_sq_db()
467 __func__, wq->sq.pidx); t4_ring_sq_db()
468 writel(PIDX_T5_V(inc), wq->sq.udb); t4_ring_sq_db()
475 writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db); t4_ring_sq_db()
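
The t4.h hits above (t4_sq_empty/full/avail/produce/consume) implement a circular queue that deliberately leaves one slot unused, so that empty (in_use == 0) and full (in_use == size - 1) stay distinguishable. A compilable toy model of the same index discipline, assuming a stripped-down queue struct:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    struct toy_sq {
            uint16_t pidx;   /* producer index, advanced on post */
            uint16_t cidx;   /* consumer index, advanced on completion */
            uint16_t in_use; /* outstanding entries */
            uint16_t size;   /* ring capacity in slots */
    };

    static int toy_sq_full(const struct toy_sq *sq)
    {
            return sq->in_use == sq->size - 1;
    }

    static void toy_sq_produce(struct toy_sq *sq)
    {
            assert(!toy_sq_full(sq));
            sq->in_use++;
            if (++sq->pidx == sq->size)
                    sq->pidx = 0;
    }

    static void toy_sq_consume(struct toy_sq *sq)
    {
            assert(sq->in_use > 0);
            sq->in_use--;
            if (++sq->cidx == sq->size)
                    sq->cidx = 0;
    }

    int main(void)
    {
            struct toy_sq sq = { .size = 4 };

            toy_sq_produce(&sq);
            toy_sq_produce(&sq);
            toy_sq_consume(&sq);
            printf("pidx=%u cidx=%u in_use=%u\n", (unsigned)sq.pidx,
                   (unsigned)sq.cidx, (unsigned)sq.in_use); /* 2 1 1 */
            return 0;
    }
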
device.c
132 le.qid = wq->sq.qid; c4iw_log_wr_stats()
134 le.post_host_ts = wq->sq.sw_sq[wq->sq.cidx].host_ts; c4iw_log_wr_stats()
135 le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts; c4iw_log_wr_stats()
235 if (id != qp->wq.sq.qid) dump_qp()
254 "rc qp sq id %u rq id %u state %u " dump_qp()
257 qp->wq.sq.qid, qp->wq.rq.qid, dump_qp()
259 qp->wq.sq.flags & T4_SQ_ONCHIP, dump_qp()
278 "rc qp sq id %u rq id %u state %u " dump_qp()
281 qp->wq.sq.qid, qp->wq.rq.qid, dump_qp()
283 qp->wq.sq.flags & T4_SQ_ONCHIP, dump_qp()
294 "qp sq id %u rq id %u state %u onchip %u\n", dump_qp()
295 qp->wq.sq.qid, qp->wq.rq.qid, dump_qp()
297 qp->wq.sq.flags & T4_SQ_ONCHIP); dump_qp()
1278 t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc, resume_rc_qp()
1280 qp->wq.sq.wq_pidx_inc = 0; resume_rc_qp()
1385 qp->wq.sq.qid, recover_lost_dbs()
1392 pci_name(ctx->lldi.pdev), qp->wq.sq.qid); recover_lost_dbs()
1397 qp->wq.sq.wq_pidx_inc = 0; recover_lost_dbs()
ev.c
211 CQE_STATUS(err_cqe), qhp->wq.sq.qid); c4iw_ev_dispatch()
cm.c
1708 __func__, ep->com.qp->wq.sq.qid, ep, rx_data()
2744 ep->com.qp->wq.sq.qid); terminate()
/linux-4.1.27/sound/oss/dmasound/
dmasound_core.c
414 static int sq_allocate_buffers(struct sound_queue *sq, int num, int size) sq_allocate_buffers() argument
418 if (sq->buffers) sq_allocate_buffers()
420 sq->numBufs = num; sq_allocate_buffers()
421 sq->bufSize = size; sq_allocate_buffers()
422 sq->buffers = kmalloc (num * sizeof(char *), GFP_KERNEL); sq_allocate_buffers()
423 if (!sq->buffers) sq_allocate_buffers()
426 sq->buffers[i] = dmasound.mach.dma_alloc(size, GFP_KERNEL); sq_allocate_buffers()
427 if (!sq->buffers[i]) { sq_allocate_buffers()
429 dmasound.mach.dma_free(sq->buffers[i], size); sq_allocate_buffers()
430 kfree(sq->buffers); sq_allocate_buffers()
431 sq->buffers = NULL; sq_allocate_buffers()
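
sq_allocate_buffers() above rolls back on partial failure: when the i-th DMA buffer cannot be allocated, the buffers already obtained are freed along with the pointer array itself. A hypothetical userspace sketch of the same pattern:

    #include <stdlib.h>

    static char **alloc_bufs(int num, size_t size)
    {
            char **bufs = malloc(num * sizeof(*bufs));
            int i;

            if (!bufs)
                    return NULL;
            for (i = 0; i < num; i++) {
                    bufs[i] = malloc(size);
                    if (!bufs[i]) {
                            while (--i >= 0) /* undo what succeeded */
                                    free(bufs[i]);
                            free(bufs);
                            return NULL;
                    }
            }
            return bufs;
    }

    int main(void)
    {
            char **b = alloc_bufs(4, 4096);
            int i;

            if (b) {
                    for (i = 0; i < 4; i++)
                            free(b[i]);
                    free(b);
            }
            return 0;
    }
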
438 static void sq_release_buffers(struct sound_queue *sq) sq_release_buffers() argument
442 if (sq->buffers) { sq_release_buffers()
443 for (i = 0; i < sq->numBufs; i++) sq_release_buffers()
444 dmasound.mach.dma_free(sq->buffers[i], sq->bufSize); sq_release_buffers()
445 kfree(sq->buffers); sq_release_buffers()
446 sq->buffers = NULL; sq_release_buffers()
451 static int sq_setup(struct sound_queue *sq) sq_setup() argument
456 if (sq->locked) { /* are we already set? - and not changeable */ sq_setup()
462 sq->locked = 1 ; /* don't think we have a race prob. here _check_ */ sq_setup()
482 if (sq->user_frags <= 0) { sq_setup()
483 sq->max_count = sq->numBufs ; sq_setup()
484 sq->max_active = sq->numBufs ; sq_setup()
485 sq->block_size = sq->bufSize; sq_setup()
487 sq->user_frags = sq->numBufs ; sq_setup()
488 sq->user_frag_size = sq->bufSize ; sq_setup()
489 sq->user_frag_size *= sq_setup()
491 sq->user_frag_size /= sq_setup()
495 sq->block_size = sq->user_frag_size ; sq_setup()
496 sq->block_size *= sq_setup()
498 sq->block_size /= sq_setup()
501 sq->block_size *= dmasound.hard.speed ; sq_setup()
502 sq->block_size /= dmasound.soft.speed ; sq_setup()
506 sq->block_size += (hard_frame - 1) ; sq_setup()
507 sq->block_size &= ~(hard_frame - 1) ; /* make sure we are aligned */ sq_setup()
509 if ( sq->block_size <= 0 || sq->block_size > sq->bufSize) { sq_setup()
511 printk("dmasound_core: invalid frag size (user set %d)\n", sq->user_frag_size) ; sq_setup()
513 sq->block_size = sq->bufSize ; sq_setup()
515 if ( sq->user_frags <= sq->numBufs ) { sq_setup()
516 sq->max_count = sq->user_frags ; sq_setup()
518 sq->max_active = (sq->max_active <= sq->max_count) ? sq_setup()
519 sq->max_active : sq->max_count ; sq_setup()
522 printk("dmasound_core: invalid frag count (user set %d)\n", sq->user_frags) ; sq_setup()
524 sq->max_count = sq_setup()
525 sq->max_active = sq->numBufs ; sq_setup()
528 sq->front = sq->count = sq->rear_size = 0; sq_setup()
529 sq->syncing = 0; sq_setup()
530 sq->active = 0; sq_setup()
532 if (sq == &write_sq) { sq_setup()
533 sq->rear = -1; sq_setup()
571 /* set up the sq if it is not already done. This may seem a dumb place sq_write()
691 static inline void sq_init_waitqueue(struct sound_queue *sq) sq_init_waitqueue() argument
693 init_waitqueue_head(&sq->action_queue); sq_init_waitqueue()
694 init_waitqueue_head(&sq->open_queue); sq_init_waitqueue()
695 init_waitqueue_head(&sq->sync_queue); sq_init_waitqueue()
696 sq->busy = 0; sq_init_waitqueue()
700 static inline void sq_wake_up(struct sound_queue *sq, struct file *file,
704 sq->busy = 0; /* CHECK: IS THIS OK??? */
705 WAKE_UP(sq->open_queue);
710 static int sq_open2(struct sound_queue *sq, struct file *file, fmode_t mode, sq_open2() argument
716 if (sq->busy) { sq_open2()
722 if (wait_event_interruptible(sq->open_queue, !sq->busy)) sq_open2()
732 sq->busy = 1; /* Let's play spot-the-race-condition */ sq_open2()
739 if (( rc = sq_allocate_buffers(sq, numbufs, bufsize))) { sq_open2()
741 sq_wake_up(sq, file, mode); sq_open2()
743 sq->busy = 0 ; sq_open2()
748 sq->non_blocking = file->f_flags & O_NONBLOCK; sq_open2()
956 static int set_queue_frags(struct sound_queue *sq, int bufs, int size) set_queue_frags() argument
958 if (sq->locked) { set_queue_frags()
968 if (size > sq->bufSize) set_queue_frags()
973 if (bufs > sq->numBufs) /* the user is allowed say "don't care" with 0x7fff */ set_queue_frags()
974 bufs = sq->numBufs ; set_queue_frags()
981 sq->user_frags = set_queue_frags()
982 sq->max_active = bufs ; set_queue_frags()
983 sq->user_frag_size = size ; set_queue_frags()
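
sq_setup() above (lines 506-507) rounds the computed block size up to a whole hardware frame by adding (hard_frame - 1) and masking off the low bits, which is correct whenever the alignment is a power of two. The idiom in isolation:

    #include <stdio.h>

    /* Round n up to the next multiple of align (align a power of two). */
    static unsigned int align_up(unsigned int n, unsigned int align)
    {
            return (n + align - 1) & ~(align - 1);
    }

    int main(void)
    {
            printf("%u %u\n", align_up(1000, 16), align_up(1024, 16));
            /* prints 1008 1024 */
            return 0;
    }
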
dmasound_q40.c
486 * the sq variables, so better don't do anything here. Q40Interrupt()
dmasound_paula.c
564 * the sq variables, so better don't do anything here. AmiInterrupt()
dmasound_atari.c
1285 * the sq variables, so better don't do anything here. AtaInterrupt()
/linux-4.1.27/tools/perf/config/
utilities.mak
78 # escape-for-shell-sq
80 # Usage: embeddable-text = $(call escape-for-shell-sq,text)
86 escape-for-shell-sq = $(subst ','\'',$(1))
88 # shell-sq
90 # Usage: single-quoted-and-escaped-text = $(call shell-sq,text)
92 shell-sq = '$(escape-for-shell-sq)'
124 # produces the same results as the `$(shell-sq)' function.
126 shell-wordify = $(if $(findstring $(newline),$(1)),$(_sw-esc-nl),$(shell-sq))
128 "$$(echo $(call escape-nl,$(shell-sq),$(2)) | $(call shell-unescape-nl,$(2)))"
135 is-absolute = $(shell echo $(shell-sq) | grep -q ^/ && echo y)
145 _l-sh = $(call shell-sq,command -v $(shell-sq) | $(call shell-escape-nl,))
154 is-executable = $(call _is-executable-helper,$(shell-sq))
156 _is-executable-sh = $(call shell-sq,test -f $(1) -a -x $(1) && echo y)
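
escape-for-shell-sq above is $(subst ','\'',$(1)): every single quote in the text is replaced by '\'' (close the quote, emit an escaped quote, reopen), so the result can be embedded in a single-quoted shell word by shell-sq. A hypothetical C rendering of the same quoting rule:

    #include <stdio.h>

    /* Print s as a single shell-safe single-quoted word. */
    static void shell_sq(const char *s, FILE *out)
    {
            fputc('\'', out);
            for (; *s; s++) {
                    if (*s == '\'')
                            fputs("'\\''", out); /* emits '\'' */
                    else
                            fputc(*s, out);
            }
            fputc('\'', out);
    }

    int main(void)
    {
            shell_sq("it's a test", stdout); /* prints 'it'\''s a test' */
            fputc('\n', stdout);
            return 0;
    }
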
Makefile
155 PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG))
451 PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG))
/linux-4.1.27/arch/sh/include/cpu-sh4/cpu/
sq.h
2 * include/asm-sh/cpu-sh4/sq.h
30 /* arch/sh/kernel/cpu/sh4/sq.c */
/linux-4.1.27/drivers/infiniband/hw/mthca/
mthca_qp.c
220 (n << qp->sq.wqe_shift); get_send_wqe()
223 (n << qp->sq.wqe_shift)) >> get_send_wqe()
225 ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) & get_send_wqe()
498 qp_attr->cap.max_send_wr = qp->sq.max; mthca_query_qp()
500 qp_attr->cap.max_send_sge = qp->sq.max_gs; mthca_query_qp()
602 if (qp->sq.max) __mthca_modify_qp()
603 qp_context->sq_size_stride = ilog2(qp->sq.max) << 3; __mthca_modify_qp()
604 qp_context->sq_size_stride |= qp->sq.wqe_shift - 4; __mthca_modify_qp()
723 qp_context->snd_db_index = cpu_to_be32(qp->sq.db_index); __mthca_modify_qp()
824 mthca_wq_reset(&qp->sq); __mthca_modify_qp()
825 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); __mthca_modify_qp()
831 *qp->sq.db = 0; __mthca_modify_qp()
854 spin_lock_irq(&qp->sq.lock); mthca_modify_qp()
858 spin_unlock_irq(&qp->sq.lock); mthca_modify_qp()
951 1 << qp->sq.wqe_shift)); mthca_adjust_qp_caps()
955 qp->sq.max_gs = min_t(int, dev->limits.max_sg, mthca_adjust_qp_caps()
964 * Allocate and register buffer for WQEs. qp->rq.max, sq.max,
965 * rq.max_gs and sq.max_gs must all be assigned.
967 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
987 size = qp->sq.max_gs * sizeof (struct mthca_data_seg); mthca_alloc_wqe_buf()
1027 for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size; mthca_alloc_wqe_buf()
1028 qp->sq.wqe_shift++) mthca_alloc_wqe_buf()
1032 1 << qp->sq.wqe_shift); mthca_alloc_wqe_buf()
1043 (qp->sq.max << qp->sq.wqe_shift)); mthca_alloc_wqe_buf()
1045 qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64), mthca_alloc_wqe_buf()
1066 (qp->sq.max << qp->sq.wqe_shift)), mthca_free_wqe_buf()
1121 qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ, mthca_alloc_memfree()
1122 qp->qpn, &qp->sq.db); mthca_alloc_memfree()
1123 if (qp->sq.db_index < 0) { mthca_alloc_memfree()
1136 mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index); mthca_free_memfree()
1159 mthca_wq_reset(&qp->sq); mthca_alloc_qp_common()
1162 spin_lock_init(&qp->sq.lock); mthca_alloc_qp_common()
1209 for (i = 0; i < qp->sq.max; ++i) { mthca_alloc_qp_common()
1211 next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) << mthca_alloc_qp_common()
1212 qp->sq.wqe_shift) + mthca_alloc_qp_common()
1224 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); mthca_alloc_qp_common()
1253 qp->sq.max = cap->max_send_wr ? mthca_set_qp_size()
1257 qp->sq.max = cap->max_send_wr; mthca_set_qp_size()
1261 qp->sq.max_gs = max_t(int, cap->max_send_sge, mthca_set_qp_size()
1361 sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE; mthca_alloc_sqp()
1626 spin_lock_irqsave(&qp->sq.lock, flags); mthca_tavor_post_send()
1630 ind = qp->sq.next_ind; mthca_tavor_post_send()
1633 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { mthca_tavor_post_send()
1636 qp->sq.head, qp->sq.tail, mthca_tavor_post_send()
1637 qp->sq.max, nreq); mthca_tavor_post_send()
1644 prev_wqe = qp->sq.last; mthca_tavor_post_send()
1645 qp->sq.last = wqe; mthca_tavor_post_send()
1729 if (wr->num_sge > qp->sq.max_gs) { mthca_tavor_post_send()
1761 cpu_to_be32(((ind << qp->sq.wqe_shift) + mthca_tavor_post_send()
1778 if (unlikely(ind >= qp->sq.max)) mthca_tavor_post_send()
1779 ind -= qp->sq.max; mthca_tavor_post_send()
1786 mthca_write64(((qp->sq.next_ind << qp->sq.wqe_shift) + mthca_tavor_post_send()
1798 qp->sq.next_ind = ind; mthca_tavor_post_send()
1799 qp->sq.head += nreq; mthca_tavor_post_send()
1801 spin_unlock_irqrestore(&qp->sq.lock, flags); mthca_tavor_post_send()
1941 spin_lock_irqsave(&qp->sq.lock, flags); mthca_arbel_post_send()
1945 ind = qp->sq.head & (qp->sq.max - 1); mthca_arbel_post_send()
1952 ((qp->sq.head & 0xffff) << 8) | f0 | op0; mthca_arbel_post_send()
1954 qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB; mthca_arbel_post_send()
1961 *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); mthca_arbel_post_send()
1974 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { mthca_arbel_post_send()
1977 qp->sq.head, qp->sq.tail, mthca_arbel_post_send()
1978 qp->sq.max, nreq); mthca_arbel_post_send()
1985 prev_wqe = qp->sq.last; mthca_arbel_post_send()
1986 qp->sq.last = wqe; mthca_arbel_post_send()
2070 if (wr->num_sge > qp->sq.max_gs) { mthca_arbel_post_send()
2102 cpu_to_be32(((ind << qp->sq.wqe_shift) + mthca_arbel_post_send()
2119 if (unlikely(ind >= qp->sq.max)) mthca_arbel_post_send()
2120 ind -= qp->sq.max; mthca_arbel_post_send()
2125 dbhi = (nreq << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0; mthca_arbel_post_send()
2127 qp->sq.head += nreq; mthca_arbel_post_send()
2134 *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); mthca_arbel_post_send()
2152 spin_unlock_irqrestore(&qp->sq.lock, flags); mthca_arbel_post_send()
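
mthca_alloc_wqe_buf() above (lines 1027-1028) chooses the send-WQE stride as the smallest power of two, no smaller than 64 bytes (shift 6), that covers the descriptor size. The loop in isolation:

    #include <stdio.h>

    static int wqe_shift_for(int size)
    {
            int shift;

            /* smallest shift >= 6 with (1 << shift) >= size */
            for (shift = 6; (1 << shift) < size; shift++)
                    ;
            return shift;
    }

    int main(void)
    {
            printf("%d %d %d\n", wqe_shift_for(48), wqe_shift_for(100),
                   wqe_shift_for(128)); /* prints 6 7 7 */
            return 0;
    }
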
mthca_provider.c
561 qp->sq.db_index = ucmd.sq_db_index; mthca_create_qp()
618 init_attr->cap.max_send_wr = qp->sq.max; mthca_create_qp()
620 init_attr->cap.max_send_sge = qp->sq.max_gs; mthca_create_qp()
633 to_mqp(qp)->sq.db_index); mthca_destroy_qp()
mthca_provider.h
276 struct mthca_wq sq; member in struct:mthca_qp
mthca_cq.c
539 wq = &(*cur_qp)->sq; mthca_poll_one()
/linux-4.1.27/block/
blk-throttle.c
202 * @sq: the throtl_service_queue of interest
204 * Return the throtl_grp @sq belongs to. If @sq is the top-level one
207 static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq) sq_to_tg() argument
209 if (sq && sq->parent_sq) sq_to_tg()
210 return container_of(sq, struct throtl_grp, service_queue); sq_to_tg()
217 * @sq: the throtl_service_queue of interest
222 static struct throtl_data *sq_to_td(struct throtl_service_queue *sq) sq_to_td() argument
224 struct throtl_grp *tg = sq_to_tg(sq); sq_to_td()
229 return container_of(sq, struct throtl_data, service_queue); sq_to_td()
234 * @sq: the service_queue being reported
238 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
244 #define throtl_log(sq, fmt, args...) do { \
245 struct throtl_grp *__tg = sq_to_tg((sq)); \
246 struct throtl_data *__td = sq_to_td((sq)); \
390 static void throtl_service_queue_init(struct throtl_service_queue *sq, throtl_service_queue_init() argument
393 INIT_LIST_HEAD(&sq->queued[0]); throtl_service_queue_init()
394 INIT_LIST_HEAD(&sq->queued[1]); throtl_service_queue_init()
395 sq->pending_tree = RB_ROOT; throtl_service_queue_init()
396 sq->parent_sq = parent_sq; throtl_service_queue_init()
397 setup_timer(&sq->pending_timer, throtl_pending_timer_fn, throtl_service_queue_init()
398 (unsigned long)sq); throtl_service_queue_init()
401 static void throtl_service_queue_exit(struct throtl_service_queue *sq) throtl_service_queue_exit() argument
403 del_timer_sync(&sq->pending_timer); throtl_service_queue_exit()
648 static void throtl_schedule_pending_timer(struct throtl_service_queue *sq, throtl_schedule_pending_timer() argument
651 mod_timer(&sq->pending_timer, expires); throtl_schedule_pending_timer()
652 throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu", throtl_schedule_pending_timer()
658 * @sq: the service_queue to schedule dispatch for
661 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
671 * delay before dispatch starts even if @sq->first_pending_disptime is not
674 static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq, throtl_schedule_next_dispatch() argument
678 if (!sq->nr_pending) throtl_schedule_next_dispatch()
681 update_min_dispatch_time(sq); throtl_schedule_next_dispatch()
684 if (force || time_after(sq->first_pending_disptime, jiffies)) { throtl_schedule_next_dispatch()
685 throtl_schedule_pending_timer(sq, sq->first_pending_disptime); throtl_schedule_next_dispatch()
1023 struct throtl_service_queue *sq = &tg->service_queue; throtl_add_bio_tg() local
1035 if (!sq->nr_queued[rw]) throtl_add_bio_tg()
1038 throtl_qnode_add_bio(bio, qn, &sq->queued[rw]); throtl_add_bio_tg()
1040 sq->nr_queued[rw]++; throtl_add_bio_tg()
1046 struct throtl_service_queue *sq = &tg->service_queue; tg_update_disptime() local
1050 if ((bio = throtl_peek_queued(&sq->queued[READ]))) tg_update_disptime()
1053 if ((bio = throtl_peek_queued(&sq->queued[WRITE]))) tg_update_disptime()
1080 struct throtl_service_queue *sq = &tg->service_queue; tg_dispatch_one_bio() local
1081 struct throtl_service_queue *parent_sq = sq->parent_sq; tg_dispatch_one_bio()
1092 bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put); tg_dispatch_one_bio()
1093 sq->nr_queued[rw]--; tg_dispatch_one_bio()
1122 struct throtl_service_queue *sq = &tg->service_queue; throtl_dispatch_tg() local
1130 while ((bio = throtl_peek_queued(&sq->queued[READ])) && throtl_dispatch_tg()
1140 while ((bio = throtl_peek_queued(&sq->queued[WRITE])) && throtl_dispatch_tg()
1159 struct throtl_service_queue *sq = &tg->service_queue; throtl_select_dispatch() local
1171 if (sq->nr_queued[0] || sq->nr_queued[1]) throtl_select_dispatch()
1198 struct throtl_service_queue *sq = (void *)arg; throtl_pending_timer_fn() local
1199 struct throtl_grp *tg = sq_to_tg(sq); throtl_pending_timer_fn()
1200 struct throtl_data *td = sq_to_td(sq); throtl_pending_timer_fn()
1208 parent_sq = sq->parent_sq; throtl_pending_timer_fn()
1212 throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u", throtl_pending_timer_fn()
1213 sq->nr_queued[READ] + sq->nr_queued[WRITE], throtl_pending_timer_fn()
1214 sq->nr_queued[READ], sq->nr_queued[WRITE]); throtl_pending_timer_fn()
1216 ret = throtl_select_dispatch(sq); throtl_pending_timer_fn()
1218 throtl_log(sq, "bios disp=%u", ret); throtl_pending_timer_fn()
1222 if (throtl_schedule_next_dispatch(sq, false)) throtl_pending_timer_fn()
1240 sq = parent_sq; throtl_pending_timer_fn()
1241 tg = sq_to_tg(sq); throtl_pending_timer_fn()
1358 struct throtl_service_queue *sq; tg_set_conf() local
1368 sq = &tg->service_queue; tg_set_conf()
1406 throtl_schedule_next_dispatch(sq->parent_sq, true); tg_set_conf()
1485 struct throtl_service_queue *sq; blk_throtl_bio() local
1519 sq = &tg->service_queue; blk_throtl_bio()
1523 if (sq->nr_queued[rw]) blk_throtl_bio()
1552 sq = sq->parent_sq; blk_throtl_bio()
1553 tg = sq_to_tg(sq); blk_throtl_bio()
1559 throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d", blk_throtl_bio()
1563 sq->nr_queued[READ], sq->nr_queued[WRITE]); blk_throtl_bio()
1606 struct throtl_service_queue *sq = &tg->service_queue; tg_drain_bios() local
1611 while ((bio = throtl_peek_queued(&sq->queued[READ]))) tg_drain_bios()
1613 while ((bio = throtl_peek_queued(&sq->queued[WRITE]))) tg_drain_bios()
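
sq_to_tg() above (lines 207-210) recovers the throtl_grp that embeds a given throtl_service_queue by subtracting the member offset, the standard container_of() pattern. A minimal userspace sketch; the struct names are illustrative, not the kernel's:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct toy_service_queue { int nr_pending; };

    struct toy_grp {
            int id;
            struct toy_service_queue service_queue;
    };

    int main(void)
    {
            struct toy_grp tg = { .id = 7 };
            struct toy_service_queue *sq = &tg.service_queue;

            printf("%d\n", container_of(sq, struct toy_grp,
                                        service_queue)->id); /* prints 7 */
            return 0;
    }
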
/linux-4.1.27/arch/sh/kernel/cpu/sh4/
Makefile
10 obj-$(CONFIG_SH_STORE_QUEUES) += sq.o
sq.c
2 * arch/sh/kernel/cpu/sh4/sq.c
26 #include <cpu/sq.h>
62 unsigned long *sq = (unsigned long *)start; sq_flush_range() local
65 for (len >>= 5; len--; sq += 8) sq_flush_range()
66 prefetchw(sq); sq_flush_range()
352 "%s", "sq"); sq_dev_add()
368 .name = "sq",
380 printk(KERN_NOTICE "sq: Registering store queue API.\n"); sq_api_init()
/linux-4.1.27/drivers/infiniband/hw/mlx5/
qp.c
101 return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE)); mlx5_get_send_wqe()
126 struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq; mlx5_ib_read_user_wqe()
360 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; calc_sq_size()
361 if (qp->sq.wqe_cnt > gen->max_wqes) { calc_sq_size()
363 qp->sq.wqe_cnt, gen->max_wqes); calc_sq_size()
366 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); calc_sq_size()
367 qp->sq.max_gs = attr->cap.max_send_sge; calc_sq_size()
368 qp->sq.max_post = wq_size / wqe_size; calc_sq_size()
369 attr->cap.max_send_wr = qp->sq.max_post; calc_sq_size()
379 int desc_sz = 1 << qp->sq.wqe_shift; set_user_buf_size()
394 qp->sq.wqe_cnt = ucmd->sq_wqe_count; set_user_buf_size()
396 if (qp->sq.wqe_cnt > gen->max_wqes) { set_user_buf_size()
398 qp->sq.wqe_cnt, gen->max_wqes); set_user_buf_size()
403 (qp->sq.wqe_cnt << 6); set_user_buf_size()
646 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); create_user_qp()
647 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; create_user_qp()
768 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; create_kernel_qp()
777 qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt); create_kernel_qp()
799 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL); create_kernel_qp()
800 qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL); create_kernel_qp()
802 qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL); create_kernel_qp()
803 qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL); create_kernel_qp()
805 if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid || create_kernel_qp()
806 !qp->sq.w_list || !qp->sq.wqe_head) { create_kernel_qp()
816 kfree(qp->sq.wqe_head); create_kernel_qp()
817 kfree(qp->sq.w_list); create_kernel_qp()
818 kfree(qp->sq.wrid); create_kernel_qp()
819 kfree(qp->sq.wr_data); create_kernel_qp()
836 kfree(qp->sq.wqe_head); destroy_qp_kernel()
837 kfree(qp->sq.w_list); destroy_qp_kernel()
838 kfree(qp->sq.wrid); destroy_qp_kernel()
839 kfree(qp->sq.wr_data); destroy_qp_kernel()
880 spin_lock_init(&qp->sq.lock); create_qp_common()
993 if (qp->sq.wqe_cnt) create_qp_common()
994 in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11); create_qp_common()
1761 qp->sq.head = 0; __mlx5_ib_modify_qp()
1762 qp->sq.tail = 0; __mlx5_ib_modify_qp()
1763 qp->sq.cur_post = 0; __mlx5_ib_modify_qp()
1764 qp->sq.last_poll = 0; __mlx5_ib_modify_qp()
2097 void *qend = qp->sq.qend; set_data_inl_seg()
2324 if (unlikely((*seg == qp->sq.qend))) set_sig_data_segment()
2334 if (unlikely((*seg == qp->sq.qend))) set_sig_data_segment()
2405 if (unlikely((*seg == qp->sq.qend))) set_sig_umr_wr()
2411 if (unlikely((*seg == qp->sq.qend))) set_sig_umr_wr()
2461 if (unlikely((*seg == qp->sq.qend))) set_frwr_li_wr()
2466 if (unlikely((*seg == qp->sq.qend))) set_frwr_li_wr()
2490 tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1); dump_wqe()
2513 if (unlikely(src == qp->sq.qend)) mlx5_bf_copy()
2542 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) { begin_wqe()
2547 *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); begin_wqe()
2572 ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | finish_wqe()
2580 qp->sq.wrid[idx] = wr_id; finish_wqe()
2581 qp->sq.w_list[idx].opcode = mlx5_opcode; finish_wqe()
2582 qp->sq.wqe_head[idx] = qp->sq.head + nreq; finish_wqe()
2583 qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB); finish_wqe()
2584 qp->sq.w_list[idx].next = qp->sq.cur_post; finish_wqe()
2600 void *qend = qp->sq.qend; mlx5_ib_post_send()
2612 spin_lock_irqsave(&qp->sq.lock, flags); mlx5_ib_post_send()
2624 if (unlikely(num_sge > qp->sq.max_gs)) { mlx5_ib_post_send()
2667 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV; mlx5_ib_post_send()
2680 qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR; mlx5_ib_post_send()
2692 qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR; mlx5_ib_post_send()
2794 qp->sq.wr_data[idx] = MLX5_IB_WR_UMR; mlx5_ib_post_send()
2848 qp->sq.head += nreq; mlx5_ib_post_send()
2855 qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post); mlx5_ib_post_send()
2885 spin_unlock_irqrestore(&qp->sq.lock, flags); mlx5_ib_post_send()
3104 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; mlx5_ib_query_qp()
3105 qp_attr->cap.max_send_sge = qp->sq.max_gs; mlx5_ib_query_qp()
cq.c
354 idx = tail & (qp->sq.wqe_cnt - 1); handle_atomics()
359 tail = qp->sq.w_list[idx].next; handle_atomics()
361 tail = qp->sq.w_list[idx].next; handle_atomics()
362 qp->sq.last_poll = tail; handle_atomics()
466 wq = &(*cur_qp)->sq; mlx5_poll_one()
494 wq = &(*cur_qp)->sq; mlx5_poll_one()
mlx5_ib.h
188 struct mlx5_ib_wq sq; member in struct:mlx5_ib_qp
/linux-4.1.27/drivers/net/wireless/rtl818x/rtl8180/
sa2400.c
91 static u8 sa2400_rf_calc_rssi(u8 agc, u8 sq) sa2400_rf_calc_rssi() argument
93 if (sq == 0x80) sa2400_rf_calc_rssi()
96 if (sq > 78) sa2400_rf_calc_rssi()
100 return 65 * sa2400_rf_rssi_map[sq] / 100; sa2400_rf_calc_rssi()
max2820.c
76 static u8 max2820_rf_calc_rssi(u8 agc, u8 sq) max2820_rf_calc_rssi() argument
grf5101.c
71 static u8 grf5101_rf_calc_rssi(u8 agc, u8 sq) grf5101_rf_calc_rssi() argument
dev.c
216 u8 agc, sq; rtl8180_handle_rx() local
291 sq = flags2 & 0xff; rtl8180_handle_rx()
292 signal = priv->rf->calc_rssi(agc, sq); rtl8180_handle_rx()
/linux-4.1.27/drivers/staging/vt6656/
dpc.c
52 u8 *rx_sts, *rx_rate, *sq, *sq_3; vnt_rx_data() local
130 sq = sq_3; vnt_rx_data()
132 sq = skb_data + 8 + pay_load_with_padding + 8; vnt_rx_data()
133 sq_3 = sq; vnt_rx_data()
/linux-4.1.27/drivers/infiniband/hw/mlx4/
qp.c
195 return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift)); get_send_wqe()
218 s = roundup(size, 1U << qp->sq.wqe_shift); stamp_send_wqe()
220 ind = (i >> qp->sq.wqe_shift) + n; stamp_send_wqe()
221 stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) : stamp_send_wqe()
223 buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); stamp_send_wqe()
224 wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1)); stamp_send_wqe()
228 ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); stamp_send_wqe()
244 ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); post_nop_wqe()
269 (n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0); post_nop_wqe()
277 unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1)); pad_wraparound()
279 post_nop_wqe(qp, ind, s << qp->sq.wqe_shift); pad_wraparound()
479 qp->sq.wqe_shift = ilog2(64); set_kernel_sq_size()
481 qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s)); set_kernel_sq_size()
484 qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift); set_kernel_sq_size()
490 qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr; set_kernel_sq_size()
491 qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr * set_kernel_sq_size()
495 if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes) set_kernel_sq_size()
501 ++qp->sq.wqe_shift; set_kernel_sq_size()
504 qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz, set_kernel_sq_size()
505 (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) - set_kernel_sq_size()
510 (qp->sq.wqe_cnt << qp->sq.wqe_shift); set_kernel_sq_size()
511 if (qp->rq.wqe_shift > qp->sq.wqe_shift) { set_kernel_sq_size()
513 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; set_kernel_sq_size()
515 qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift; set_kernel_sq_size()
516 qp->sq.offset = 0; set_kernel_sq_size()
519 cap->max_send_wr = qp->sq.max_post = set_kernel_sq_size()
520 (qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr; set_kernel_sq_size()
521 cap->max_send_sge = min(qp->sq.max_gs, set_kernel_sq_size()
541 qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; set_user_sq_size()
542 qp->sq.wqe_shift = ucmd->log_sq_stride; set_user_sq_size()
545 (qp->sq.wqe_cnt << qp->sq.wqe_shift); set_user_sq_size()
697 spin_lock_init(&qp->sq.lock); create_qp_common()
789 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof (u64), gfp); create_qp_common()
791 if (!qp->sq.wrid || !qp->rq.wrid) { create_qp_common()
877 kfree(qp->sq.wrid); create_qp_common()
1053 kfree(qp->sq.wrid); destroy_qp_common()
1514 if (qp->sq.wqe_cnt) __mlx4_ib_modify_qp()
1515 context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3; __mlx4_ib_modify_qp()
1516 context->sq_size_stride |= qp->sq.wqe_shift - 4; __mlx4_ib_modify_qp()
1753 for (i = 0; i < qp->sq.wqe_cnt; ++i) { __mlx4_ib_modify_qp()
1757 ctrl->fence_size = 1 << (qp->sq.wqe_shift - 4); __mlx4_ib_modify_qp()
1759 stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift); __mlx4_ib_modify_qp()
1813 qp->sq.head = 0; __mlx4_ib_modify_qp()
1814 qp->sq.tail = 0; __mlx4_ib_modify_qp()
2607 wr->num_sge > qp->sq.max_gs - (halign >> 4))) build_lso_seg()
2660 spin_lock_irqsave(&qp->sq.lock, flags); mlx4_ib_post_send()
2674 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { mlx4_ib_post_send()
2680 if (unlikely(wr->num_sge > qp->sq.max_gs)) { mlx4_ib_post_send()
2686 ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); mlx4_ib_post_send()
2687 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; mlx4_ib_post_send()
2903 (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh; mlx4_ib_post_send()
2906 ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift); mlx4_ib_post_send()
2925 qp->sq.head += nreq; mlx4_ib_post_send()
2948 spin_unlock_irqrestore(&qp->sq.lock, flags); mlx4_ib_post_send()
3184 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; mlx4_ib_query_qp()
3185 qp_attr->cap.max_send_sge = qp->sq.max_gs; mlx4_ib_query_qp()
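
post_nop_wqe() and mlx4_ib_post_send() above test (n & qp->sq.wqe_cnt) to decide the high "owner" bit of a WQE: with a power-of-two ring of wqe_cnt entries indexed by n & (wqe_cnt - 1), the bit just above the index mask flips on every full pass, letting hardware distinguish newly posted WQEs from stale ones. A toy illustration assuming a 256-entry ring:

    #include <stdio.h>

    #define WQE_CNT 256 /* illustrative power-of-two ring size */

    /* n counts posted WQEs monotonically; the slot is n % WQE_CNT and
     * the ownership parity is the bit just above the index mask. */
    static unsigned int owner_bit(unsigned int n)
    {
            return (n & WQE_CNT) ? 1 : 0;
    }

    int main(void)
    {
            printf("%u %u %u\n", owner_bit(0), owner_bit(256),
                   owner_bit(512)); /* prints 0 1 0 */
            return 0;
    }
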
cq.c
605 wq = is_send ? &qp->sq : &qp->rq; mlx4_ib_qp_sw_comp()
740 wq = &(*cur_qp)->sq; mlx4_ib_poll_one()
main.c
2643 spin_lock_irqsave(&mqp->sq.lock, flags_qp); mlx4_ib_handle_catas_error()
2644 if (mqp->sq.tail != mqp->sq.head) { mlx4_ib_handle_catas_error()
2657 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp); mlx4_ib_handle_catas_error()
mlx4_ib.h
284 struct mlx4_ib_wq sq; member in struct:mlx4_ib_qp
/linux-4.1.27/drivers/net/wireless/rtlwifi/rtl8192se/
trx.c
153 u8 sq; _rtl92se_query_rxphystatus() local
155 sq = 100; _rtl92se_query_rxphystatus()
157 sq = cck_buf->sq_rpt; _rtl92se_query_rxphystatus()
158 if (sq > 64) _rtl92se_query_rxphystatus()
159 sq = 0; _rtl92se_query_rxphystatus()
160 else if (sq < 20) _rtl92se_query_rxphystatus()
161 sq = 100; _rtl92se_query_rxphystatus()
163 sq = ((64 - sq) * 100) / 44; _rtl92se_query_rxphystatus()
166 pstats->signalquality = sq; _rtl92se_query_rxphystatus()
167 pstats->rx_mimo_sig_qual[0] = sq; _rtl92se_query_rxphystatus()
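
The CCK branch above clamps the raw hardware quality report and rescales it to a percentage: reports above 64 map to 0, reports below 20 map to 100, and the rest go through ((64 - sq) * 100) / 44. The identical clamp-and-rescale recurs in the rtl8723ae, rtl8188ee, rtl8192ce/cu/de/ee, rtl8723be, rtl8821ae and staging rtl8712/rtl8192e/rtl8192u hits below. As a standalone, hypothetical helper:

    #include <stdio.h>

    /* Map a raw CCK quality report (0..64, lower is better) to 0..100. */
    static unsigned char cck_sq_to_percent(unsigned char sq_rpt)
    {
            if (sq_rpt > 64)
                    return 0;   /* out of range: worst quality */
            if (sq_rpt < 20)
                    return 100; /* strong report: best quality */
            return ((64 - sq_rpt) * 100) / 44;
    }

    int main(void)
    {
            printf("%u %u %u\n", cck_sq_to_percent(20),
                   cck_sq_to_percent(42), cck_sq_to_percent(64));
            /* prints 100 50 0 */
            return 0;
    }
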
/linux-4.1.27/drivers/net/wireless/rtlwifi/rtl8723ae/
trx.c
150 u8 sq; _rtl8723e_query_rxphystatus() local
153 sq = 100; _rtl8723e_query_rxphystatus()
155 sq = cck_buf->sq_rpt; _rtl8723e_query_rxphystatus()
156 if (sq > 64) _rtl8723e_query_rxphystatus()
157 sq = 0; _rtl8723e_query_rxphystatus()
158 else if (sq < 20) _rtl8723e_query_rxphystatus()
159 sq = 100; _rtl8723e_query_rxphystatus()
161 sq = ((64 - sq) * 100) / 44; _rtl8723e_query_rxphystatus()
164 pstatus->signalquality = sq; _rtl8723e_query_rxphystatus()
165 pstatus->rx_mimo_signalquality[0] = sq; _rtl8723e_query_rxphystatus()
/linux-4.1.27/drivers/net/
virtio_net.c
104 struct send_queue *sq; member in struct:virtnet_info
823 static void free_old_xmit_skbs(struct send_queue *sq) free_old_xmit_skbs() argument
827 struct virtnet_info *vi = sq->vq->vdev->priv; free_old_xmit_skbs()
830 while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { free_old_xmit_skbs()
842 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) xmit_skb() argument
846 struct virtnet_info *vi = sq->vq->vdev->priv; xmit_skb()
896 sg_init_table(sq->sg, MAX_SKB_FRAGS + 2); xmit_skb()
899 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); xmit_skb()
903 sg_set_buf(sq->sg, hdr, hdr_len); xmit_skb()
904 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1; xmit_skb()
906 return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); xmit_skb()
913 struct send_queue *sq = &vi->sq[qnum]; start_xmit() local
919 free_old_xmit_skbs(sq); start_xmit()
925 err = xmit_skb(sq, skb); start_xmit()
952 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { start_xmit()
954 if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { start_xmit()
956 free_old_xmit_skbs(sq); start_xmit()
957 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { start_xmit()
959 virtqueue_disable_cb(sq->vq); start_xmit()
965 virtqueue_kick(sq->vq); start_xmit()
1256 virtqueue_set_affinity(vi->sq[i].vq, -1); virtnet_clean_affinity()
1281 virtqueue_set_affinity(vi->sq[i].vq, cpu); for_each_online_cpu()
1316 ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); virtnet_get_ringparam()
1465 kfree(vi->sq); virtnet_free_queues()
1492 struct virtqueue *vq = vi->sq[i].vq; free_unused_bufs()
1562 sprintf(vi->sq[i].name, "output.%d", i); virtnet_find_vqs()
1564 names[txq2vq(i)] = vi->sq[i].name; virtnet_find_vqs()
1580 vi->sq[i].vq = vqs[txq2vq(i)]; virtnet_find_vqs()
1603 vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); virtnet_alloc_queues()
1604 if (!vi->sq) virtnet_alloc_queues()
1619 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); virtnet_alloc_queues()
1625 kfree(vi->sq); virtnet_alloc_queues()
/linux-4.1.27/drivers/staging/vt6655/
dpc.c
50 u8 *rx_sts, *rx_rate, *sq; vnt_rx_data() local
90 sq = skb_data + bytes_received - 4; vnt_rx_data()
/linux-4.1.27/drivers/net/wireless/rtlwifi/rtl8188ee/
trx.c
169 u8 sq; _rtl88ee_query_rxphystatus() local
172 sq = 100; _rtl88ee_query_rxphystatus()
174 sq = cck_buf->sq_rpt; _rtl88ee_query_rxphystatus()
175 if (sq > 64) _rtl88ee_query_rxphystatus()
176 sq = 0; _rtl88ee_query_rxphystatus()
177 else if (sq < 20) _rtl88ee_query_rxphystatus()
178 sq = 100; _rtl88ee_query_rxphystatus()
180 sq = ((64 - sq) * 100) / 44; _rtl88ee_query_rxphystatus()
183 pstatus->signalquality = sq; _rtl88ee_query_rxphystatus()
184 pstatus->rx_mimo_signalquality[0] = sq; _rtl88ee_query_rxphystatus()
/linux-4.1.27/drivers/net/wireless/rtlwifi/rtl8192ce/
trx.c
210 u8 sq; _rtl92ce_query_rxphystatus() local
212 sq = 100; _rtl92ce_query_rxphystatus()
214 sq = cck_buf->sq_rpt; _rtl92ce_query_rxphystatus()
215 if (sq > 64) _rtl92ce_query_rxphystatus()
216 sq = 0; _rtl92ce_query_rxphystatus()
217 else if (sq < 20) _rtl92ce_query_rxphystatus()
218 sq = 100; _rtl92ce_query_rxphystatus()
220 sq = ((64 - sq) * 100) / 44; _rtl92ce_query_rxphystatus()
223 pstats->signalquality = sq; _rtl92ce_query_rxphystatus()
224 pstats->rx_mimo_sig_qual[0] = sq; _rtl92ce_query_rxphystatus()
/linux-4.1.27/drivers/net/wireless/rtlwifi/rtl8192cu/
mac.c
845 u8 sq; _rtl92c_query_rxphystatus() local
847 sq = 100; _rtl92c_query_rxphystatus()
849 sq = cck_buf->sq_rpt; _rtl92c_query_rxphystatus()
850 if (sq > 64) _rtl92c_query_rxphystatus()
851 sq = 0; _rtl92c_query_rxphystatus()
852 else if (sq < 20) _rtl92c_query_rxphystatus()
853 sq = 100; _rtl92c_query_rxphystatus()
855 sq = ((64 - sq) * 100) / 44; _rtl92c_query_rxphystatus()
857 pstats->signalquality = sq; _rtl92c_query_rxphystatus()
858 pstats->RX_SIGQ[0] = sq; _rtl92c_query_rxphystatus()
/linux-4.1.27/drivers/net/wireless/rtlwifi/rtl8192de/
trx.c
202 u8 sq; _rtl92de_query_rxphystatus() local
204 sq = 100; _rtl92de_query_rxphystatus()
206 sq = cck_buf->sq_rpt; _rtl92de_query_rxphystatus()
207 if (sq > 64) _rtl92de_query_rxphystatus()
208 sq = 0; _rtl92de_query_rxphystatus()
209 else if (sq < 20) _rtl92de_query_rxphystatus()
210 sq = 100; _rtl92de_query_rxphystatus()
212 sq = ((64 - sq) * 100) / 44; _rtl92de_query_rxphystatus()
214 pstats->signalquality = sq; _rtl92de_query_rxphystatus()
215 pstats->rx_mimo_sig_qual[0] = sq; _rtl92de_query_rxphystatus()
/linux-4.1.27/drivers/net/wireless/rtlwifi/rtl8821ae/
trx.c
200 u8 sq; query_rxphystatus() local
203 sq = 100; query_rxphystatus()
205 sq = p_phystrpt->pwdb_all; query_rxphystatus()
206 if (sq > 64) query_rxphystatus()
207 sq = 0; query_rxphystatus()
208 else if (sq < 20) query_rxphystatus()
209 sq = 100; query_rxphystatus()
211 sq = ((64 - sq) * 100) / 44; query_rxphystatus()
214 pstatus->signalquality = sq; query_rxphystatus()
215 pstatus->rx_mimo_signalquality[0] = sq; query_rxphystatus()
/linux-4.1.27/drivers/infiniband/hw/ocrdma/
ocrdma_hw.c
121 return dev->mq.sq.va + (dev->mq.sq.head * sizeof(struct ocrdma_mqe)); ocrdma_get_mqe()
126 dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1); ocrdma_mq_inc_head()
131 return dev->mq.sq.va + (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe)); ocrdma_get_mqe_rsp()
305 val |= dev->mq.sq.id & OCRDMA_MQ_ID_MASK; ocrdma_ring_mq_db()
603 status = ocrdma_alloc_q(dev, &dev->mq.sq, OCRDMA_MQ_LEN, ocrdma_create_mq()
607 status = ocrdma_mbx_create_mq(dev, &dev->mq.sq, &dev->mq.cq); ocrdma_create_mq()
614 ocrdma_free_q(dev, &dev->mq.sq); ocrdma_create_mq()
629 mbxq = &dev->mq.sq; ocrdma_destroy_mq()
838 struct ocrdma_cq *cq, bool sq) _ocrdma_qp_buddy_cq_handler()
843 struct list_head *head = sq?(&cq->sq_head):(&cq->rq_head); _ocrdma_qp_buddy_cq_handler()
846 if (sq) list_for_each()
858 /* if completion came on sq, rq's cq is buddy cq. list_for_each()
859 * if completion came on rq, sq's cq is buddy cq. list_for_each()
978 dev->mqe_ctx.tag = dev->mq.sq.head; ocrdma_post_mqe()
981 cmd->hdr.tag_lo = dev->mq.sq.head; ocrdma_post_mqe()
2065 qp->sq.head = 0; ocrdma_init_hwq_ptr()
2066 qp->sq.tail = 0; ocrdma_init_hwq_ptr()
2143 qp->sq.max_cnt = max_wqe_allocated; ocrdma_set_create_qp_sq_cmd()
2146 qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); ocrdma_set_create_qp_sq_cmd()
2147 if (!qp->sq.va) ocrdma_set_create_qp_sq_cmd()
2149 memset(qp->sq.va, 0, len); ocrdma_set_create_qp_sq_cmd()
2150 qp->sq.len = len; ocrdma_set_create_qp_sq_cmd()
2151 qp->sq.pa = pa; ocrdma_set_create_qp_sq_cmd()
2152 qp->sq.entry_size = dev->attr.wqe_size; ocrdma_set_create_qp_sq_cmd()
2166 cmd->max_wqe_rqe |= (ilog2(qp->sq.max_cnt) << ocrdma_set_create_qp_sq_cmd()
2281 qp->sq.dbid = rsp->sq_rq_id >> OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT; ocrdma_get_create_qp_rsp()
2299 qp->sq.max_cnt = max_wqe_allocated; ocrdma_get_create_qp_rsp()
2300 qp->sq.max_wqe_idx = max_wqe_allocated - 1; ocrdma_get_create_qp_rsp()
2400 dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa); ocrdma_mbx_create_qp()
2656 if (qp->sq.va) ocrdma_mbx_destroy_qp()
2657 dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa); ocrdma_mbx_destroy_qp()
837 _ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev, struct ocrdma_cq *cq, bool sq) _ocrdma_qp_buddy_cq_handler() argument
ocrdma_verbs.c
1228 uresp.sq_dbid = qp->sq.dbid; ocrdma_copy_qp_uresp()
1230 uresp.sq_page_size = PAGE_ALIGN(qp->sq.len); ocrdma_copy_qp_uresp()
1231 uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va); ocrdma_copy_qp_uresp()
1232 uresp.num_wqe_allocated = qp->sq.max_cnt; ocrdma_copy_qp_uresp()
1296 kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt, ocrdma_alloc_wr_id_tbl()
1320 qp->sq.max_sges = attrs->cap.max_send_sge; ocrdma_set_qp_init_params()
1530 qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1; ocrdma_query_qp()
1532 qp_attr->cap.max_send_sge = qp->sq.max_sges; ocrdma_query_qp()
1603 return (qp->sq.tail == qp->sq.head); is_hw_sq_empty()
1670 ocrdma_hwq_inc_tail(&qp->sq); ocrdma_discard_cqes()
1765 ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa, ocrdma_destroy_qp()
1766 PAGE_ALIGN(qp->sq.len)); ocrdma_destroy_qp()
2156 u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT); ocrdma_ring_sq_db()
2184 if (ocrdma_hwq_free_cnt(&qp->sq) == 0 || ocrdma_post_send()
2185 wr->num_sge > qp->sq.max_sges) { ocrdma_post_send()
2190 hdr = ocrdma_hwq_head(&qp->sq); ocrdma_post_send()
2245 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1; ocrdma_post_send()
2247 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0; ocrdma_post_send()
2248 qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id; ocrdma_post_send()
2257 ocrdma_hwq_inc_head(&qp->sq); ocrdma_post_send()
2482 hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx); ocrdma_update_wc()
2580 ocrdma_update_wc(qp, ibwc, qp->sq.tail); ocrdma_update_err_scqe()
2581 ocrdma_hwq_inc_tail(&qp->sq); ocrdma_update_err_scqe()
2598 /* when hw sq is empty, but rq is not empty, so we continue ocrdma_poll_err_scqe()
2602 /* when cq for rq and sq is same, it is safe to return ocrdma_poll_err_scqe()
2636 int tail = qp->sq.tail; ocrdma_poll_success_scqe()
2649 OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx; ocrdma_poll_success_scqe()
2653 ocrdma_hwq_inc_tail(&qp->sq); ocrdma_poll_success_scqe()
2891 ocrdma_update_wc(qp, ibwc, qp->sq.tail); ocrdma_add_err_cqe()
2892 ocrdma_hwq_inc_tail(&qp->sq); ocrdma_add_err_cqe()
ocrdma.h
142 struct ocrdma_queue_info sq; member in struct:ocrdma_mq
322 /* head of all qp's sq and rq for which cqes need to be flushed
375 struct ocrdma_qp_hwq_info sq; member in struct:ocrdma_qp
/linux-4.1.27/drivers/infiniband/hw/ehca/
ehca_uverbs.c
228 ehca_dbg(qp->ib_qp.device, "qp_num=%x sq", qp->ib_qp.qp_num); ehca_mmap_qp()
233 "ehca_mmap_queue(sq) failed rc=%i qp_num=%x", ehca_mmap_qp()
253 u32 rsrc_type = (fileoffset >> 25) & 0x3; /* sq,rq,cmnd_window */ ehca_mmap()
/linux-4.1.27/drivers/infiniband/hw/cxgb3/
cxio_hal.c
292 wq->sq = kzalloc(depth * sizeof(struct t3_swsq), GFP_KERNEL); cxio_create_qp()
293 if (!wq->sq) cxio_create_qp()
313 kfree(wq->sq); cxio_create_qp()
343 kfree(wq->sq); cxio_destroy_qp()
411 struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2); cxio_flush_sq()
414 sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2); cxio_flush_sq()
419 sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2); cxio_flush_sq()
1079 sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2); flush_completed_wrs()
1083 sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2); flush_completed_wrs()
1089 PDBG("%s moving cqe into swcq sq idx %ld cq idx %ld\n", flush_completed_wrs()
1119 u32 rptr = wq->oldest_read - wq->sq + 1; advance_oldest_read()
1123 wq->oldest_read = wq->sq + Q_PTR2IDX(rptr, wq->sq_size_log2); advance_oldest_read()
1284 sqp = wq->sq + cxio_poll_cq()
1301 PDBG("%s completing sq idx %ld\n", __func__, cxio_poll_cq()
1303 *cookie = wq->sq[Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2)].wr_id; cxio_poll_cq()
cxio_wr.h
699 struct t3_swsq *sq; /* SW SQ */ member in struct:t3_wq
703 u32 sq_size_log2; /* sq size */
iwch_qp.c
391 sqp = qhp->wq.sq + iwch_post_send()
589 sqp = qhp->wq.sq + Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2); iwch_bind_mw()
/linux-4.1.27/drivers/net/wireless/hostap/
hostap_main.c
1008 struct hfa384x_comms_quality sq; prism2_update_comms_qual() local
1017 &sq, sizeof(sq), 1) >= 0) { prism2_update_comms_qual()
1018 local->comms_qual = (s16) le16_to_cpu(sq.comm_qual); prism2_update_comms_qual()
1019 local->avg_signal = (s16) le16_to_cpu(sq.signal_level); prism2_update_comms_qual()
1020 local->avg_noise = (s16) le16_to_cpu(sq.noise_level); prism2_update_comms_qual()
1026 &sq, sizeof(sq), 1) >= 0) { prism2_update_comms_qual()
1027 local->comms_qual = le16_to_cpu(sq.comm_qual); prism2_update_comms_qual()
1029 le16_to_cpu(sq.signal_level)); prism2_update_comms_qual()
1031 le16_to_cpu(sq.noise_level)); prism2_update_comms_qual()
hostap_80211_rx.c
147 LWNG_SETVAL(sq, 5, 1 /* no value */, 4, 0); prism2_rx_80211()
hostap_wlan.h
41 struct linux_wlan_ng_val hosttime, mactime, channel, rssi, sq, signal, member in struct:linux_wlan_ng_prism_hdr
/linux-4.1.27/drivers/net/wireless/rtlwifi/rtl8192ee/
trx.c
141 u8 sq, sq_rpt; _rtl92ee_query_rxphystatus() local
144 sq = 100; _rtl92ee_query_rxphystatus()
148 sq = 0; _rtl92ee_query_rxphystatus()
150 sq = 100; _rtl92ee_query_rxphystatus()
152 sq = ((64 - sq_rpt) * 100) / 44; _rtl92ee_query_rxphystatus()
155 pstatus->signalquality = sq; _rtl92ee_query_rxphystatus()
156 pstatus->rx_mimo_signalquality[0] = sq; _rtl92ee_query_rxphystatus()
/linux-4.1.27/drivers/scsi/bnx2fc/
bnx2fc_tgt.c
402 /* initialize sq doorbell */ bnx2fc_init_tgt()
681 tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, bnx2fc_alloc_session_resc()
683 if (!tgt->sq) { bnx2fc_alloc_session_resc()
688 memset(tgt->sq, 0, tgt->sq_mem_size); bnx2fc_alloc_session_resc()
901 if (tgt->sq) { bnx2fc_free_session_resc()
903 tgt->sq, tgt->sq_dma); bnx2fc_free_session_resc()
904 tgt->sq = NULL; bnx2fc_free_session_resc()
bnx2fc.h
319 struct fcoe_sqe *sq; member in struct:bnx2fc_rport
bnx2fc_hwi.c
1387 sqe = &tgt->sq[tgt->sq_prod_idx]; bnx2fc_add_2_sq()
/linux-4.1.27/drivers/net/wireless/rtlwifi/rtl8723be/
trx.c
118 u8 sq, sq_rpt; _rtl8723be_query_rxphystatus() local
120 sq = 100; _rtl8723be_query_rxphystatus()
124 sq = 0; _rtl8723be_query_rxphystatus()
126 sq = 100; _rtl8723be_query_rxphystatus()
128 sq = ((64 - sq_rpt) * 100) / 44; _rtl8723be_query_rxphystatus()
130 pstatus->signalquality = sq; _rtl8723be_query_rxphystatus()
131 pstatus->rx_mimo_signalquality[0] = sq; _rtl8723be_query_rxphystatus()
/linux-4.1.27/drivers/isdn/hardware/eicon/
cardtype.h
954 "di_etsi.sq", CARD_FT_S,
955 "di_1tr6.sq", CARD_FT_S,
956 "di_belg.sq", CARD_FT_S,
957 "di_franc.sq", CARD_FT_S,
958 "di_atel.sq", CARD_FT_S,
959 "di_ni.sq", CARD_FT_S,
960 "di_5ess.sq", CARD_FT_S,
961 "di_japan.sq", CARD_FT_S,
981 "di_swed.sq", CARD_FT_S,
/linux-4.1.27/drivers/staging/rtl8712/
rtl8712_recv.c
737 u8 sq; query_rx_phy_status() local
817 sq = 100; query_rx_phy_status()
819 sq = pcck_buf->sq_rpt; query_rx_phy_status()
821 sq = 0; query_rx_phy_status()
823 sq = 100; query_rx_phy_status()
825 sq = ((64-sq) * 100) / 44; query_rx_phy_status()
827 prframe->u.hdr.attrib.signal_qual = sq; query_rx_phy_status()
828 prframe->u.hdr.attrib.rx_mimo_signal_qual[0] = sq; query_rx_phy_status()
/linux-4.1.27/drivers/infiniband/hw/nes/
nes_user.h
62 __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */
nes_hw.h
913 struct nes_hw_nic_sq_wqe *sq_vbase; /* virtual address of sq */
nes_verbs.c
240 /* nes_debug(NES_DBG_MR, "processing sq wqe at %p, head = %u.\n", wqe, head); */ nes_bind_mw()
946 /* Now to get to sq.. we need to calculate how many */ nes_setup_virt_qp()
1153 nes_debug(NES_DBG_QP, "ERROR bad rq (%u) or sq (%u) size\n", nes_create_qp()
3298 /* nes_debug(NES_DBG_IW_TX, "processing sq wqe for QP%u at %p, head = %u.\n", nes_post_send()
3673 /* parse CQE, get completion context from WQE (either rq or sq) */ nes_poll_cq()
nes_mgt.c
889 /* We are not sending from this NIC so sq is not allocated */ nes_init_mgt_qp()
/linux-4.1.27/drivers/staging/rtl8192e/rtl8192e/
r8192E_dev.c
1586 u8 sq; rtl8192_query_rxphystatus() local
1589 sq = 100; rtl8192_query_rxphystatus()
1591 sq = pcck_buf->sq_rpt; rtl8192_query_rxphystatus()
1594 sq = 0; rtl8192_query_rxphystatus()
1596 sq = 100; rtl8192_query_rxphystatus()
1598 sq = ((64-sq) * 100) / 44; rtl8192_query_rxphystatus()
1600 pstats->SignalQuality = sq; rtl8192_query_rxphystatus()
1601 precord_stats->SignalQuality = sq; rtl8192_query_rxphystatus()
1602 pstats->RxMIMOSignalQuality[0] = sq; rtl8192_query_rxphystatus()
1603 precord_stats->RxMIMOSignalQuality[0] = sq; rtl8192_query_rxphystatus()
/linux-4.1.27/drivers/media/platform/vivid/
vivid-tpg.c
1760 const struct v4l2_rect *sq = &tpg->square; tpg_fill_plane_extras() local
1802 if (tpg->show_square && frame_line >= sq->top && tpg_fill_plane_extras()
1803 frame_line < sq->top + sq->height && tpg_fill_plane_extras()
1804 sq->left < c->left + c->width && tpg_fill_plane_extras()
1805 sq->left + sq->width >= c->left) { tpg_fill_plane_extras()
1806 unsigned left = sq->left; tpg_fill_plane_extras()
1807 unsigned width = sq->width; tpg_fill_plane_extras()
/linux-4.1.27/drivers/isdn/hisax/
amd7930_fn.c
523 if ((cs->tx_skb = skb_dequeue(&cs->sq))) { Amd7930_interrupt()
575 skb_queue_tail(&cs->sq, skb); Amd7930_l1hw()
597 skb_queue_tail(&cs->sq, skb); Amd7930_l1hw()
652 skb_queue_purge(&cs->sq); Amd7930_l1hw()
icc.c
251 if ((cs->tx_skb = skb_dequeue(&cs->sq))) { icc_interrupt()
450 skb_queue_tail(&cs->sq, skb); ICC_l1hw()
471 skb_queue_tail(&cs->sq, skb); ICC_l1hw()
551 skb_queue_purge(&cs->sq); ICC_l1hw()
isac.c
254 if ((cs->tx_skb = skb_dequeue(&cs->sq))) { isac_interrupt()
453 skb_queue_tail(&cs->sq, skb); ISAC_l1hw()
474 skb_queue_tail(&cs->sq, skb); ISAC_l1hw()
549 skb_queue_purge(&cs->sq); ISAC_l1hw()
ipacx.c
106 skb_queue_tail(&cs->sq, skb); dch_l2l1()
124 skb_queue_tail(&cs->sq, skb); dch_l2l1()
183 skb_queue_purge(&cs->sq); dch_l2l1()
387 if ((cs->tx_skb = skb_dequeue(&cs->sq))) { dch_int()
w6692.c
470 if ((cs->tx_skb = skb_dequeue(&cs->sq))) { W6692_interrupt()
574 skb_queue_tail(&cs->sq, skb); W6692_l1hw()
595 skb_queue_tail(&cs->sq, skb); W6692_l1hw()
655 skb_queue_purge(&cs->sq); W6692_l1hw()
config.c
787 skb_queue_purge(&csta->sq); closecard()
1137 skb_queue_head_init(&cs->sq); hisax_cs_setup()
1712 skb = skb_dequeue(&cs->sq); hisax_d_l1l2()
1800 skb_queue_tail(&cs->sq, skb); hisax_d_l2l1()
st5481_b.c
93 /* if (!(bcs->tx_skb = skb_dequeue(&bcs->sq))) { */ usb_b_out()
hfc_2bds0.c
880 if ((cs->tx_skb = skb_dequeue(&cs->sq))) { hfc2bds0_interrupt()
917 skb_queue_tail(&cs->sq, skb); HFCD_l1hw()
943 skb_queue_tail(&cs->sq, skb); HFCD_l1hw()
hfc_sx.c
835 if ((cs->tx_skb = skb_dequeue(&cs->sq))) { hfcsx_interrupt()
885 skb_queue_tail(&cs->sq, skb); HFCSX_l1hw()
911 skb_queue_tail(&cs->sq, skb); HFCSX_l1hw()
hfc_pci.c
1070 if ((cs->tx_skb = skb_dequeue(&cs->sq))) { hfcpci_interrupt()
1120 skb_queue_tail(&cs->sq, skb); HFCPCI_l1hw()
1146 skb_queue_tail(&cs->sq, skb); HFCPCI_l1hw()
hisax.h
943 struct sk_buff_head rq, sq; /* D-channel queues */ member in struct:IsdnCardState
/linux-4.1.27/include/linux/
dmar.h
207 sq : 2, member in struct:irte::__anon11614::__anon11615
/linux-4.1.27/drivers/net/wireless/ath/wil6210/
rx_reorder.c
28 static inline u16 seq_inc(u16 sq) seq_inc() argument
30 return (sq + 1) & SEQ_MASK; seq_inc()
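
seq_inc() above wraps a sequence number with increment-and-mask, valid because the sequence space is a power of two. A sketch; the 0xfff mask (the 12-bit 802.11 sequence space) is an assumption here, not quoted from the wil6210 headers:

    #include <stdio.h>

    #define SEQ_MASK 0xfff /* assumed 12-bit sequence space */

    static unsigned short seq_inc(unsigned short sq)
    {
            return (sq + 1) & SEQ_MASK;
    }

    int main(void)
    {
            printf("%u %u\n", (unsigned)seq_inc(5),
                   (unsigned)seq_inc(0xfff)); /* prints 6 0 */
            return 0;
    }
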
/linux-4.1.27/drivers/iommu/
intel_irq_remapping.c
309 unsigned int sq, unsigned int sid) set_irte_sid()
314 irte->sq = sq; set_irte_sid()
1012 irte.sid, irte.sq, irte.svt); intel_setup_ioapic_entry()
308 set_irte_sid(struct irte *irte, unsigned int svt, unsigned int sq, unsigned int sid) set_irte_sid() argument
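
set_irte_sid() above fills three bitfields of an interrupt remap table entry, and the dmar.h hit higher up shows sq as a 2-bit member. A toy struct illustrating the packing; the widths used here (16-bit sid, 2-bit sq, 2-bit svt) follow the VT-d IRTE layout, but the struct itself is illustrative:

    #include <stdio.h>

    struct toy_irte_sid {
            unsigned int sid : 16; /* requester source id */
            unsigned int sq  : 2;  /* source-id qualifier */
            unsigned int svt : 2;  /* source validation type */
    };

    int main(void)
    {
            struct toy_irte_sid f = { .sid = 0xf8, .sq = 0, .svt = 1 };

            printf("sid=%#x sq=%u svt=%u\n", (unsigned)f.sid,
                   (unsigned)f.sq, (unsigned)f.svt);
            return 0;
    }
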
/linux-4.1.27/drivers/net/wireless/rtl818x/rtl8187/
rtl8187.h
59 u8 sq; member in struct:rtl8187b_rx_hdr
/linux-4.1.27/drivers/net/wireless/rtl818x/
rtl818x.h
357 u8 (*calc_rssi)(u8 agc, u8 sq);
/linux-4.1.27/arch/tile/include/arch/
mpipe_shm.h
284 uint_reg_t sq : 1; member in struct:__anon2767::__anon2768
320 uint_reg_t sq : 1;
/linux-4.1.27/drivers/infiniband/hw/amso1100/
c2_wr.h
958 * Flags used for all post-sq WRs. These must fit in the flags
968 * Common fields for all post-sq WRs. Namely the standard header and a
969 * secondary header with fields common to all post-sq WRs.
/linux-4.1.27/drivers/staging/rtl8192u/
r8192U_core.c
3993 u8 sq; rtl8192_query_rxphystatus() local
4082 sq = 100; rtl8192_query_rxphystatus()
4084 sq = pcck_buf->sq_rpt; rtl8192_query_rxphystatus()
4087 sq = 0; rtl8192_query_rxphystatus()
4089 sq = 100; rtl8192_query_rxphystatus()
4091 sq = ((64-sq) * 100) / 44; rtl8192_query_rxphystatus()
4093 pstats->SignalQuality = precord_stats->SignalQuality = sq; rtl8192_query_rxphystatus()
4094 pstats->RxMIMOSignalQuality[0] = precord_stats->RxMIMOSignalQuality[0] = sq; rtl8192_query_rxphystatus()
/linux-4.1.27/arch/mips/cavium-octeon/executive/
cvmx-pko.c
273 "queue. sq: %d\n", cvmx_pko_config_port()
/linux-4.1.27/drivers/net/ethernet/ibm/ehea/
ehea_qmr.c
450 pr_err("can't register for sq ret=%x\n", ret); ehea_create_qp()
ehea_main.c
2537 pr_err("WARNING: sq not flushed completely\n"); ehea_flush_sq()
/linux-4.1.27/drivers/staging/rtl8188eu/os_dep/
ioctl_linux.c
119 u8 ss, sq; translate_scan() local
310 sq = padapter->recvpriv.signal_qual; translate_scan()
313 sq = pnetwork->network.PhyInfo.SignalQuality; translate_scan()
317 iwe.u.qual.qual = (u8)sq; /* signal quality */ translate_scan()
/linux-4.1.27/drivers/net/wireless/ath/carl9170/
debug.c
290 ADD(buf, *off, bufsize, "%s %p, c:%2x, DA:%pM, sq:%4d, mc:%.4x, " carl9170_debugfs_format_frame()
/linux-4.1.27/net/9p/
trans_rdma.c
158 {Opt_sq_depth, "sq=%u"},
/linux-4.1.27/drivers/isdn/capi/
capi.c
1150 pr_debug("capinc_tty_chars_in_buffer = %d nack=%d sq=%d rq=%d\n", capinc_tty_chars_in_buffer()
/linux-4.1.27/drivers/scsi/bnx2i/
bnx2i.h
589 * @sqe_left: number sq entry left
bnx2i_iscsi.c
1127 * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe
/linux-4.1.27/drivers/video/fbdev/
pvr2fb.c
72 #include <cpu/sq.h>
/linux-4.1.27/drivers/net/wireless/
airo.c
5743 u16 sq; airo_get_quality() local
5751 sq = le16_to_cpu(status_rid->signalQuality); airo_get_quality()
5753 if (sq > 0x20) airo_get_quality()
5756 quality = 0x20 - sq; airo_get_quality()
5758 if (sq > 0xb0) airo_get_quality()
5760 else if (sq < 0x10) airo_get_quality()
5763 quality = 0xb0 - sq; airo_get_quality()
/linux-4.1.27/net/sunrpc/xprtrdma/
svc_rdma_transport.c
405 dprintk("svcrdma: sq wc err status %d\n", sq_cq_reap()
/linux-4.1.27/drivers/staging/rtl8723au/core/
rtw_recv.c
1302 /* psta->signal_quality = prxcmd->sq; */ validate_recv_data_frame()
/linux-4.1.27/drivers/staging/rtl8188eu/core/
rtw_recv.c
1206 /* psta->signal_quality = prxcmd->sq; */ validate_recv_data_frame()
/linux-4.1.27/drivers/net/wireless/ipw2x00/
ipw2100.c
2676 struct ipw2100_status_queue *sq = &priv->status_queue; __ipw2100_rx_process() local
2713 frame_type = sq->drv[i].status_fields & STATUS_TYPE_MASK; __ipw2100_rx_process()
2714 stats.rssi = sq->drv[i].rssi + IPW2100_RSSI_TO_DBM; __ipw2100_rx_process()
2715 stats.len = sq->drv[i].frame_size; __ipw2100_rx_process()

Completed in 2494 milliseconds