/linux-4.1.27/drivers/infiniband/hw/cxgb4/ |
D | resource.c | 100 u32 qid; in c4iw_get_cqid() local 108 qid = entry->qid; in c4iw_get_cqid() 111 qid = c4iw_get_resource(&rdev->resource.qid_table); in c4iw_get_cqid() 112 if (!qid) in c4iw_get_cqid() 115 rdev->stats.qid.cur += rdev->qpmask + 1; in c4iw_get_cqid() 117 for (i = qid+1; i & rdev->qpmask; i++) { in c4iw_get_cqid() 121 entry->qid = i; in c4iw_get_cqid() 132 entry->qid = qid; in c4iw_get_cqid() 134 for (i = qid+1; i & rdev->qpmask; i++) { in c4iw_get_cqid() 138 entry->qid = i; in c4iw_get_cqid() [all …]
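The c4iw_get_cqid() fragments above allocate queue ids in blocks of qpmask + 1 and park the unused ids of each block on a per-context free list. A minimal sketch of that block-walk idiom, with hypothetical names (stash_spare_qids() stands in for the real allocator and its locking):

#include <linux/types.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct qid_entry {
	struct list_head entry;
	u32 qid;
};

/*
 * After taking one qid from a block-aligned allocator, push the rest of
 * the block onto a free list so later callers can reuse them cheaply.
 * qpmask is block_size - 1, with block_size a power of two, so
 * (i & qpmask) becomes 0 exactly when i crosses into the next block.
 */
static int stash_spare_qids(struct list_head *free_list, u32 base_qid, u32 qpmask)
{
	u32 i;

	for (i = base_qid + 1; i & qpmask; i++) {
		struct qid_entry *e = kmalloc(sizeof(*e), GFP_KERNEL);

		if (!e)
			return -ENOMEM;
		e->qid = i;
		list_add_tail(&e->entry, free_list);
	}
	return 0;
}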
|
D | qp.c | 163 c4iw_put_qpid(rdev, wq->rq.qid, uctx); in destroy_qp() 164 c4iw_put_qpid(rdev, wq->sq.qid, uctx); in destroy_qp() 181 wq->sq.qid = c4iw_get_qpid(rdev, uctx); in create_qp() 182 if (!wq->sq.qid) in create_qp() 185 wq->rq.qid = c4iw_get_qpid(rdev, uctx); in create_qp() 186 if (!wq->rq.qid) { in create_qp() 243 off = (wq->sq.qid << rdev->qpshift) & PAGE_MASK; in create_qp() 247 off += 128 * (wq->sq.qid & rdev->qpmask) + 8; in create_qp() 250 off = (wq->rq.qid << rdev->qpshift) & PAGE_MASK; in create_qp() 254 off += 128 * (wq->rq.qid & rdev->qpmask) + 8; in create_qp() [all …]
|
D | device.c | 132 le.qid = wq->sq.qid; in c4iw_log_wr_stats() 138 le.qid = wq->rq.qid; in c4iw_log_wr_stats() 179 lep->qid, lep->opcode, in wr_log_show() 235 if (id != qp->wq.sq.qid) in dump_qp() 257 qp->wq.sq.qid, qp->wq.rq.qid, in dump_qp() 281 qp->wq.sq.qid, qp->wq.rq.qid, in dump_qp() 295 qp->wq.sq.qid, qp->wq.rq.qid, in dump_qp() 467 dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur, in stats_show() 468 dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail); in stats_show() 511 dev->rdev.stats.qid.max = 0; in stats_clear() [all …]
|
D | ev.c | 211 CQE_STATUS(err_cqe), qhp->wq.sq.qid); in c4iw_ev_dispatch() 223 int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid) in c4iw_ev_handler() argument 229 chp = get_chp(dev, qid); in c4iw_ev_handler() 240 PDBG("%s unknown cqid 0x%x\n", __func__, qid); in c4iw_ev_handler()
|
D | t4.h | 49 __be16 qid; member 295 u32 qid; member 319 u32 qid; member 475 writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db); in t4_ring_sq_db() 499 writel(QID_V(wq->rq.qid) | PIDX_V(inc), wq->db); in t4_ring_rq_db()
|
D | iw_cxgb4.h | 103 u32 qid; member 126 struct c4iw_stat qid; member 160 u16 qid; member 1022 int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid); 1026 void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid, 1029 void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
|
D | cq.c | 196 CQE_QPID_V(wq->sq.qid)); in insert_recv_cqe() 229 CQE_QPID_V(wq->sq.qid)); in insert_sq_cqe() 446 (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq)) in c4iw_count_rcqes()
|
D | cm.c | 1708 __func__, ep->com.qp->wq.sq.qid, ep, in rx_data() 2744 ep->com.qp->wq.sq.qid); in terminate()
|
/linux-4.1.27/drivers/gpu/drm/amd/amdkfd/ |
D | kfd_process_queue_manager.c | 31 struct process_queue_manager *pqm, unsigned int qid) in get_queue_by_qid() argument 38 if (pqn->q && pqn->q->properties.queue_id == qid) in get_queue_by_qid() 40 if (pqn->kq && pqn->kq->queue->properties.queue_id == qid) in get_queue_by_qid() 48 unsigned int *qid) in find_available_queue_slot() argument 52 BUG_ON(!pqm || !qid); in find_available_queue_slot() 68 *qid = found; in find_available_queue_slot() 116 struct file *f, unsigned int qid) in create_cp_queue() argument 126 kfd_queue_id_to_doorbell(dev, pqm->process, qid); in create_cp_queue() 130 q_properties->queue_id = qid; in create_cp_queue() 153 unsigned int *qid) in pqm_create_queue() argument [all …]
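find_available_queue_slot() above hands out process-local queue ids from a bitmap: the first clear bit becomes the new qid. A small sketch of that pattern, assuming a fixed MAX_QUEUES_SKETCH limit and that the caller already holds the process lock:

#include <linux/bitops.h>
#include <linux/errno.h>

#define MAX_QUEUES_SKETCH 128	/* stand-in for the per-process queue limit */

/* Returns 0 and writes the newly claimed qid, or -ENOMEM if every slot
 * in the bitmap is already taken. */
static int sketch_find_queue_slot(unsigned long *slot_bitmap, unsigned int *qid)
{
	unsigned long found = find_first_zero_bit(slot_bitmap, MAX_QUEUES_SKETCH);

	if (found >= MAX_QUEUES_SKETCH)
		return -ENOMEM;

	set_bit(found, slot_bitmap);
	*qid = found;
	return 0;
}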
|
D | kfd_priv.h | 605 unsigned int *qid); 606 int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid); 607 int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
|
/linux-4.1.27/drivers/scsi/lpfc/ |
D | lpfc_debugfs.h | 551 lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid) in lpfc_debug_dump_wq_by_id() argument 556 if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid) in lpfc_debug_dump_wq_by_id() 559 printk(KERN_ERR "FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid); in lpfc_debug_dump_wq_by_id() 564 if (phba->sli4_hba.els_wq->queue_id == qid) { in lpfc_debug_dump_wq_by_id() 565 printk(KERN_ERR "ELS WQ[Qid:%d]\n", qid); in lpfc_debug_dump_wq_by_id() 579 lpfc_debug_dump_mq_by_id(struct lpfc_hba *phba, int qid) in lpfc_debug_dump_mq_by_id() argument 581 if (phba->sli4_hba.mbx_wq->queue_id == qid) { in lpfc_debug_dump_mq_by_id() 582 printk(KERN_ERR "MBX WQ[Qid:%d]\n", qid); in lpfc_debug_dump_mq_by_id() 596 lpfc_debug_dump_rq_by_id(struct lpfc_hba *phba, int qid) in lpfc_debug_dump_rq_by_id() argument 598 if (phba->sli4_hba.hdr_rq->queue_id == qid) { in lpfc_debug_dump_rq_by_id() [all …]
|
/linux-4.1.27/drivers/s390/crypto/ |
D | ap_bus.c | 191 ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type) in ap_test_queue() argument 193 register unsigned long reg0 asm ("0") = qid; in ap_test_queue() 211 static inline unsigned long ap_query_facilities(ap_qid_t qid) in ap_query_facilities() argument 213 register unsigned long reg0 asm ("0") = qid | 0x00800000UL; in ap_query_facilities() 228 static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid) in ap_reset_queue() argument 230 register unsigned long reg0 asm ("0") = qid | 0x01000000UL; in ap_reset_queue() 248 ap_queue_interruption_control(ap_qid_t qid, void *ind) in ap_queue_interruption_control() argument 250 register unsigned long reg0 asm ("0") = qid | 0x03000000UL; in ap_queue_interruption_control() 263 __ap_query_functions(ap_qid_t qid, unsigned int *functions) in __ap_query_functions() argument 265 register unsigned long reg0 asm ("0") = 0UL | qid | (1UL << 23); in __ap_query_functions() [all …]
|
D | zcrypt_error.h | 115 zdev->ap_dev->qid); in convert_error() 117 zdev->ap_dev->qid, zdev->online, ehdr->reply_code); in convert_error() 126 zdev->ap_dev->qid); in convert_error() 128 zdev->ap_dev->qid, zdev->online, ehdr->reply_code); in convert_error() 133 zdev->ap_dev->qid); in convert_error() 135 zdev->ap_dev->qid, zdev->online, ehdr->reply_code); in convert_error()
|
D | zcrypt_msgtype6.c | 207 msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid); in ICAMEX_msg_to_type6MEX_msgX() 275 msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid); in ICACRT_msg_to_type6CRT_msgX() 463 AP_QID_QUEUE(zdev->ap_dev->qid); in xcrb_msg_to_type6_ep11cprb_msgx() 481 AP_QID_QUEUE(zdev->ap_dev->qid); in xcrb_msg_to_type6_ep11cprb_msgx() 575 zdev->ap_dev->qid); in convert_type86_ica() 577 zdev->ap_dev->qid, zdev->online, in convert_type86_ica() 718 zdev->ap_dev->qid); in convert_response_ica() 720 zdev->ap_dev->qid, zdev->online); in convert_response_ica() 750 zdev->ap_dev->qid); in convert_response_xcrb() 752 zdev->ap_dev->qid, zdev->online); in convert_response_xcrb() [all …]
|
D | zcrypt_pcixcc.c | 179 rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, msg, sizeof(msg)); in zcrypt_pcixcc_mcl() 186 rc = ap_recv(ap_dev->qid, &psmid, reply, 4096); in zcrypt_pcixcc_mcl() 231 rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, ap_msg.message, in zcrypt_pcixcc_rng_supported() 239 rc = ap_recv(ap_dev->qid, &psmid, ap_msg.message, 4096); in zcrypt_pcixcc_rng_supported()
|
D | zcrypt_pcicc.c | 223 msg->cprb.usage_domain[0]= AP_QID_QUEUE(zdev->ap_dev->qid); in ICAMEX_msg_to_type6MEX_msg() 286 msg->cprb.usage_domain[0] = AP_QID_QUEUE(zdev->ap_dev->qid); in ICACRT_msg_to_type6CRT_msg() 379 zdev->ap_dev->qid); in convert_type86() 381 zdev->ap_dev->qid, zdev->online, in convert_type86() 437 zdev->ap_dev->qid); in convert_response() 439 zdev->ap_dev->qid, zdev->online); in convert_response()
|
D | zcrypt_pcica.c | 206 zdev->ap_dev->qid); in convert_type84() 208 zdev->ap_dev->qid, zdev->online, t84h->code); in convert_type84() 234 zdev->ap_dev->qid); in convert_response() 236 zdev->ap_dev->qid, zdev->online); in convert_response()
|
D | zcrypt_msgtype50.c | 339 zdev->ap_dev->qid); in convert_type80() 341 zdev->ap_dev->qid, zdev->online, t80h->code); in convert_type80() 371 zdev->ap_dev->qid); in convert_response() 373 zdev->ap_dev->qid, zdev->online); in convert_response()
|
D | zcrypt_api.c | 105 ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dman", zdev->ap_dev->qid, in zcrypt_online_store() 266 ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dreg", zdev->ap_dev->qid, in zcrypt_device_register() 560 AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined)) in zcrypt_send_cprb() 642 if (!is_desired_ep11dev(zdev->ap_dev->qid, ep11_dev_list) && in zcrypt_send_ep11_cprb() 707 status[AP_QID_DEVICE(zdev->ap_dev->qid)] = in zcrypt_status_mask() 720 qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] = in zcrypt_qdepth_mask() 736 reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] = in zcrypt_perdev_reqcnt() 1263 if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) { in zcrypt_disable_card() 1277 if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) { in zcrypt_enable_card()
|
D | zcrypt_msgtype6.h | 159 msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid); in rng_type6CPRB_msgX()
|
D | ap_bus.h | 162 ap_qid_t qid; /* AP queue id. */ member
|
/linux-4.1.27/drivers/staging/lustre/lustre/osc/ |
D | osc_quota.c | 45 int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[]) in osc_quota_chkdq() argument 52 oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]); in osc_quota_chkdq() 60 type == USRQUOTA ? "user" : "grout", qid[type]); in osc_quota_chkdq() 73 int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[], in osc_quota_setdq() argument 89 oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]); in osc_quota_setdq() 96 oqi = osc_oqi_alloc(qid[type]); in osc_quota_setdq() 103 &qid[type], &oqi->oqi_hash); in osc_quota_setdq() 113 qid[type], rc); in osc_quota_setdq() 121 &qid[type]); in osc_quota_setdq() 128 qid[type], oqi); in osc_quota_setdq()
|
D | osc_internal.h | 194 int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[], 196 int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[]);
|
D | osc_cache.c | 2260 unsigned int qid[MAXQUOTAS]; in osc_queue_async_io() local 2269 qid[USRQUOTA] = attr->cat_uid; in osc_queue_async_io() 2270 qid[GRPQUOTA] = attr->cat_gid; in osc_queue_async_io() 2271 if (rc == 0 && osc_quota_chkdq(cli, qid) == NO_QUOTA) in osc_queue_async_io()
|
D | osc_request.c | 1518 unsigned int qid[MAXQUOTAS] = { body->oa.o_uid, body->oa.o_gid }; in osc_brw_fini_request() local 1523 osc_quota_setdq(cli, qid, body->oa.o_valid, body->oa.o_flags); in osc_brw_fini_request()
|
/linux-4.1.27/fs/9p/ |
D | cache.c | 111 memcpy(buffer, &v9inode->qid.path, sizeof(v9inode->qid.path)); in v9fs_cache_inode_get_key() 113 &v9inode->vfs_inode, v9inode->qid.path); in v9fs_cache_inode_get_key() 114 return sizeof(v9inode->qid.path); in v9fs_cache_inode_get_key() 131 memcpy(buffer, &v9inode->qid.version, sizeof(v9inode->qid.version)); in v9fs_cache_inode_get_aux() 133 &v9inode->vfs_inode, v9inode->qid.version); in v9fs_cache_inode_get_aux() 134 return sizeof(v9inode->qid.version); in v9fs_cache_inode_get_aux() 144 if (buflen != sizeof(v9inode->qid.version)) in v9fs_cache_inode_check_aux() 147 if (memcmp(buffer, &v9inode->qid.version, in v9fs_cache_inode_check_aux() 148 sizeof(v9inode->qid.version))) in v9fs_cache_inode_check_aux()
|
D | vfs_inode_dotl.c | 84 if (memcmp(&v9inode->qid.version, in v9fs_test_inode_dotl() 85 &st->qid.version, sizeof(v9inode->qid.version))) in v9fs_test_inode_dotl() 88 if (v9inode->qid.type != st->qid.type) in v9fs_test_inode_dotl() 104 memcpy(&v9inode->qid, &st->qid, sizeof(st->qid)); in v9fs_set_inode_dotl() 110 struct p9_qid *qid, in v9fs_qid_iget_dotl() argument 126 i_ino = v9fs_qid2ino(qid); in v9fs_qid_iget_dotl() 168 inode = v9fs_qid_iget_dotl(sb, &st->qid, fid, st, new); in v9fs_inode_from_fid_dotl() 248 struct p9_qid qid; in v9fs_vfs_atomic_open_dotl() local 302 mode, gid, &qid); in v9fs_vfs_atomic_open_dotl() 391 struct p9_qid qid; in v9fs_vfs_mkdir_dotl() local [all …]
|
D | vfs_inode.c | 217 wstat->qid.type = ~0; in v9fs_blank_wstat() 218 wstat->qid.version = ~0; in v9fs_blank_wstat() 219 *((long long *)&wstat->qid.path) = ~0; in v9fs_blank_wstat() 480 if (memcmp(&v9inode->qid.version, in v9fs_test_inode() 481 &st->qid.version, sizeof(v9inode->qid.version))) in v9fs_test_inode() 484 if (v9inode->qid.type != st->qid.type) in v9fs_test_inode() 499 memcpy(&v9inode->qid, &st->qid, sizeof(st->qid)); in v9fs_set_inode() 504 struct p9_qid *qid, in v9fs_qid_iget() argument 521 i_ino = v9fs_qid2ino(qid); in v9fs_qid_iget() 559 inode = v9fs_qid_iget(sb, &st->qid, st, new); in v9fs_inode_from_fid() [all …]
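v9fs_test_inode() and v9fs_set_inode() above use the server's qid as the inode's identity: the 64-bit qid.path becomes the inode number (via v9fs_qid2ino()) while qid.version and qid.type must still match for a cached inode to be reused. A simplified sketch of that match, using the field names of struct p9_qid:

#include <linux/types.h>
#include <linux/string.h>
#include <net/9p/9p.h>		/* struct p9_qid: u8 type, u32 version, u64 path */

/* An icache hit is only valid if the qid the server just reported still
 * matches the qid cached in the in-core inode. */
static bool sketch_qid_matches(const struct p9_qid *cached, const struct p9_qid *reported)
{
	if (memcmp(&cached->version, &reported->version, sizeof(cached->version)))
		return false;
	return cached->type == reported->type;
}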
|
D | vfs_dir.c | 159 v9fs_qid2ino(&st.qid), dt_type(&st)); in v9fs_dir_readdir() 216 v9fs_qid2ino(&curdirent.qid), in v9fs_dir_readdir_dotl()
|
D | v9fs_vfs.h | 63 ino_t v9fs_qid2ino(struct p9_qid *qid);
|
D | vfs_super.c | 171 d_inode(root)->i_ino = v9fs_qid2ino(&st->qid); in v9fs_mount() 182 d_inode(root)->i_ino = v9fs_qid2ino(&st->qid); in v9fs_mount()
|
D | v9fs.h | 129 struct p9_qid qid; member
|
D | v9fs.c | 571 memset(&v9inode->qid, 0, sizeof(v9inode->qid)); in v9fs_inode_init_once()
|
/linux-4.1.27/fs/quota/ |
D | kqid.c | 119 bool qid_valid(struct kqid qid) in qid_valid() argument 121 switch (qid.type) { in qid_valid() 123 return uid_valid(qid.uid); in qid_valid() 125 return gid_valid(qid.gid); in qid_valid() 127 return projid_valid(qid.projid); in qid_valid()
|
D | netlink.c | 44 void quota_send_warning(struct kqid qid, dev_t dev, in quota_send_warning() argument 70 ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, qid.type); in quota_send_warning() 74 from_kqid_munged(&init_user_ns, qid)); in quota_send_warning()
|
D | quota.c | 206 struct kqid qid; in quota_getquota() local 213 qid = make_kqid(current_user_ns(), type, id); in quota_getquota() 214 if (!qid_valid(qid)) in quota_getquota() 216 ret = sb->s_qcop->get_dqblk(sb, qid, &fdq); in quota_getquota() 256 struct kqid qid; in quota_setquota() local 262 qid = make_kqid(current_user_ns(), type, id); in quota_setquota() 263 if (!qid_valid(qid)) in quota_setquota() 266 return sb->s_qcop->set_dqblk(sb, qid, &fdq); in quota_setquota() 549 struct kqid qid; in quota_setxquota() local 555 qid = make_kqid(current_user_ns(), type, id); in quota_setxquota() [all …]
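quota_getquota() and quota_setquota() above convert the raw id from userspace into a struct kqid before calling into the filesystem, and reject ids that do not map into the kernel's namespace-aware type. A short sketch of that round trip, assuming the caller's user namespace:

#include <linux/quota.h>
#include <linux/cred.h>
#include <linux/printk.h>
#include <linux/errno.h>

/* Map a raw qid_t into a kernel-internal kqid and back, the way the
 * quotactl handlers do before touching ->get_dqblk()/->set_dqblk(). */
static int sketch_check_qid(enum quota_type type, qid_t id)
{
	struct kqid qid = make_kqid(current_user_ns(), type, id);

	if (!qid_valid(qid))
		return -EINVAL;

	/* from_kqid() converts back for reporting in a given namespace */
	pr_debug("qid maps back to %u\n", from_kqid(current_user_ns(), qid));
	return 0;
}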
|
D | dquot.c | 253 hashfn(const struct super_block *sb, struct kqid qid) in hashfn() argument 255 unsigned int id = from_kqid(&init_user_ns, qid); in hashfn() 256 int type = qid.type; in hashfn() 279 struct kqid qid) in find_dquot() argument 286 if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid)) in find_dquot() 832 struct dquot *dqget(struct super_block *sb, struct kqid qid) in dqget() argument 834 unsigned int hashent = hashfn(sb, qid); in dqget() 837 if (!sb_has_quota_active(sb, qid.type)) in dqget() 842 if (!sb_has_quota_active(sb, qid.type)) { in dqget() 849 dquot = find_dquot(hashent, sb, qid); in dqget() [all …]
|
D | quota_tree.c | 25 static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth) in get_index() argument 28 qid_t id = from_kqid(&init_user_ns, qid); in get_index()
|
/linux-4.1.27/net/9p/ |
D | client.c | 900 memset(&fid->qid, 0, sizeof(struct p9_qid)); in p9_fid_create() 1109 struct p9_qid qid; in p9_client_attach() local 1129 err = p9pdu_readf(req->rc, clnt->proto_version, "Q", &qid); in p9_client_attach() 1137 qid.type, (unsigned long long)qid.path, qid.version); in p9_client_attach() 1139 memmove(&fid->qid, &qid, sizeof(struct p9_qid)); in p9_client_attach() 1209 memmove(&fid->qid, &wqids[nwqids - 1], sizeof(struct p9_qid)); in p9_client_walk() 1211 fid->qid = oldfid->qid; in p9_client_walk() 1234 struct p9_qid qid; in p9_client_open() local 1254 err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", &qid, &iounit); in p9_client_open() 1261 p9_is_proto_dotl(clnt) ? "RLOPEN" : "ROPEN", qid.type, in p9_client_open() [all …]
|
D | protocol.c | 191 struct p9_qid *qid = in p9pdu_vreadf() local 195 &qid->type, &qid->version, in p9pdu_vreadf() 196 &qid->path); in p9pdu_vreadf() 211 &stbuf->dev, &stbuf->qid, in p9pdu_vreadf() 324 &stbuf->qid, in p9pdu_vreadf() 420 const struct p9_qid *qid = in p9pdu_vwritef() local 424 qid->type, qid->version, in p9pdu_vwritef() 425 qid->path); in p9pdu_vwritef() 434 stbuf->dev, &stbuf->qid, in p9pdu_vwritef() 614 ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "Qqbs", &dirent->qid, in p9dirent_read()
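The "Q" conversion used by p9pdu_vreadf()/p9pdu_vwritef() above moves a qid across the wire as 13 bytes: one byte of type, a 32-bit version and a 64-bit path, little-endian as 9P requires. An annotated mirror of that layout (the kernel's real definition is struct p9_qid in include/net/9p/9p.h):

#include <linux/types.h>

/* Wire layout packed/unpacked by the "Q" format:
 *   type    - 1 byte, the file class (directory, append-only, plain file, ...)
 *   version - 4 bytes, bumped by the server whenever the file changes
 *   path    - 8 bytes, unique per file on the exporting server
 */
struct p9_qid_sketch {
	u8  type;
	u32 version;
	u64 path;
};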
|
/linux-4.1.27/include/linux/ |
D | quota.h | 79 extern qid_t from_kqid(struct user_namespace *to, struct kqid qid); 80 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid); 81 extern bool qid_valid(struct kqid qid); 98 enum quota_type type, qid_t qid) in make_kqid() argument 105 kqid.uid = make_kuid(from, qid); in make_kqid() 108 kqid.gid = make_kgid(from, qid); in make_kqid() 111 kqid.projid = make_kprojid(from, qid); in make_kqid() 495 extern void quota_send_warning(struct kqid qid, dev_t dev, 498 static inline void quota_send_warning(struct kqid qid, dev_t dev, in quota_send_warning() argument
|
D | quotaops.h | 48 struct dquot *dqget(struct super_block *sb, struct kqid qid);
|
/linux-4.1.27/drivers/net/ethernet/broadcom/bnx2x/ |
D | bnx2x_sriov.c | 240 struct bnx2x_virtf *vf, int qid, in bnx2x_vf_queue_create() argument 246 DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); in bnx2x_vf_queue_create() 250 q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj); in bnx2x_vf_queue_create() 273 bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)), in bnx2x_vf_queue_create() 280 int qid) in bnx2x_vf_queue_destroy() argument 292 q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj); in bnx2x_vf_queue_destroy() 312 if (bnx2x_vfq(vf, qid, cxt)) { in bnx2x_vf_queue_destroy() 313 bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0; in bnx2x_vf_queue_destroy() 314 bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0; in bnx2x_vf_queue_destroy() 361 int qid, bool drv_only, bool mac) in bnx2x_vf_vlan_mac_clear() argument [all …]
|
D | bnx2x_sriov.h | 436 int qid, bool drv_only); 438 int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, 441 int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid); 447 int qid, unsigned long accept_flags);
|
D | bnx2x_vfpf.c | 1816 int qid = mbx->msg->req.q_op.vf_qid; in bnx2x_vf_mbx_teardown_q() local 1820 vf->abs_vfid, qid); in bnx2x_vf_mbx_teardown_q() 1822 rc = bnx2x_vf_queue_teardown(bp, vf, qid); in bnx2x_vf_mbx_teardown_q()
|
/linux-4.1.27/drivers/net/wireless/rt2x00/ |
D | rt2800mmio.c | 254 u8 qid; in rt2800mmio_txdone() local 258 qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE); in rt2800mmio_txdone() 259 if (unlikely(qid >= QID_RX)) { in rt2800mmio_txdone() 265 qid); in rt2800mmio_txdone() 269 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); in rt2800mmio_txdone() 276 qid); in rt2800mmio_txdone() 286 qid); in rt2800mmio_txdone() 305 qid); in rt2800mmio_txdone() 561 switch (queue->qid) { in rt2800mmio_start_queue() 589 switch (queue->qid) { in rt2800mmio_kick_queue() [all …]
|
D | rt2x00queue.c | 510 entry->queue->qid, DRV_PROJECT); in rt2x00queue_write_tx_data() 675 queue->qid); in rt2x00queue_write_tx_frame() 687 queue->qid, DRV_PROJECT); in rt2x00queue_write_tx_frame() 896 switch (queue->qid) { in rt2x00queue_pause_queue_nocheck() 905 ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid); in rt2x00queue_pause_queue_nocheck() 929 switch (queue->qid) { in rt2x00queue_unpause_queue() 938 ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid); in rt2x00queue_unpause_queue() 992 (queue->qid == QID_AC_VO) || in rt2x00queue_flush_queue() 993 (queue->qid == QID_AC_VI) || in rt2x00queue_flush_queue() 994 (queue->qid == QID_AC_BE) || in rt2x00queue_flush_queue() [all …]
|
D | rt2800usb.c | 62 switch (queue->qid) { in rt2800usb_start_queue() 85 switch (queue->qid) { in rt2800usb_stop_queue() 130 entry->entry_idx, entry->queue->qid); in rt2800usb_entry_txstatus_timeout() 487 if (entry->queue->qid == QID_BEACON) in rt2800usb_get_txwi() 583 entry->queue->qid, entry->entry_idx); in rt2800usb_txdone_entry_check() 595 u8 qid; in rt2800usb_txdone() local 603 qid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_QUEUE); in rt2800usb_txdone() 604 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); in rt2800usb_txdone() 608 qid); in rt2800usb_txdone() 617 entry->entry_idx, qid); in rt2800usb_txdone() [all …]
|
D | rt2x00usb.c | 418 switch (queue->qid) { in rt2x00usb_kick_queue() 458 if ((entry->queue->qid == QID_BEACON) && in rt2x00usb_flush_entry() 477 switch (queue->qid) { in rt2x00usb_flush_queue() 518 queue->qid); in rt2x00usb_watchdog_tx_dma() 563 if (entry->queue->qid == QID_RX) in rt2x00usb_clear_entry() 576 if (queue->qid == QID_RX) { in rt2x00usb_assign_endpoint() 657 if (queue->qid != QID_BEACON || in rt2x00usb_alloc_entries() 692 if (queue->qid != QID_BEACON || in rt2x00usb_free_entries()
|
D | rt2x00mac.c | 106 enum data_queue_qid qid = skb_get_queue_mapping(skb); in rt2x00mac_tx() local 123 qid = QID_ATIM; in rt2x00mac_tx() 125 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); in rt2x00mac_tx() 129 "Please file bug report to %s\n", qid, DRV_PROJECT); in rt2x00mac_tx()
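rt2x00mac_tx() above reuses the skb's mac80211 queue mapping as the hardware qid and overrides it with QID_ATIM for ATIM frames before looking up the TX queue. A sketch of that selection, assuming the driver-local rt2x00.h header and its rt2x00queue_get_tx_queue() helper:

#include <linux/skbuff.h>
#include "rt2x00.h"

/* The mac80211 queue index doubles as the rt2x00 data_queue_qid, except
 * for ATIM frames, which always go to the dedicated ATIM queue. */
static struct data_queue *sketch_pick_tx_queue(struct rt2x00_dev *rt2x00dev,
					       struct sk_buff *skb, bool is_atim)
{
	enum data_queue_qid qid = skb_get_queue_mapping(skb);

	if (is_atim)
		qid = QID_ATIM;

	return rt2x00queue_get_tx_queue(rt2x00dev, qid);
}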
|
D | rt61pci.c | 1139 switch (queue->qid) { in rt61pci_start_queue() 1162 switch (queue->qid) { in rt61pci_kick_queue() 1193 switch (queue->qid) { in rt61pci_stop_queue() 1385 if (entry->queue->qid == QID_RX) { in rt61pci_get_entry_state() 1403 if (entry->queue->qid == QID_RX) { in rt61pci_clear_entry() 1880 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, entry->queue->qid); in rt61pci_write_tx_desc() 1905 rt2x00_set_field32(&word, TXD_W5_PID_TYPE, entry->queue->qid); in rt61pci_write_tx_desc() 1913 if (entry->queue->qid != QID_BEACON) { in rt61pci_write_tx_desc() 1959 skbdesc->desc_len = (entry->queue->qid == QID_BEACON) ? TXINFO_SIZE : in rt61pci_write_tx_desc() 3028 switch (queue->qid) { in rt61pci_queue_init()
|
D | rt2400pci.c | 640 switch (queue->qid) { in rt2400pci_start_queue() 663 switch (queue->qid) { in rt2400pci_kick_queue() 689 switch (queue->qid) { in rt2400pci_stop_queue() 727 if (entry->queue->qid == QID_RX) { in rt2400pci_get_entry_state() 745 if (entry->queue->qid == QID_RX) { in rt2400pci_clear_entry() 1769 switch (queue->qid) { in rt2400pci_queue_init()
|
D | rt2x00queue.h | 463 enum data_queue_qid qid; member
|
D | rt2500pci.c | 729 switch (queue->qid) { in rt2500pci_start_queue() 752 switch (queue->qid) { in rt2500pci_kick_queue() 778 switch (queue->qid) { in rt2500pci_stop_queue() 816 if (entry->queue->qid == QID_RX) { in rt2500pci_get_entry_state() 834 if (entry->queue->qid == QID_RX) { in rt2500pci_clear_entry() 2068 switch (queue->qid) { in rt2500pci_queue_init()
|
D | rt2x00debug.c | 199 dump_hdr->queue_index = skbdesc->entry->queue->qid; in rt2x00debug_dump_frame() 352 queue->qid, (unsigned int)queue->flags, in rt2x00debug_read_queue_stats()
|
D | rt73usb.c | 1026 switch (queue->qid) { in rt73usb_start_queue() 1049 switch (queue->qid) { in rt73usb_stop_queue() 1489 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, entry->queue->qid); in rt73usb_write_tx_desc() 2362 switch (queue->qid) { in rt73usb_queue_init()
|
D | rt2500usb.c | 742 switch (queue->qid) { in rt2500usb_start_queue() 765 switch (queue->qid) { in rt2500usb_stop_queue() 1872 switch (queue->qid) { in rt2500usb_queue_init()
|
D | rt2800lib.c | 752 rt2x00_set_field32(&word, TXWI_W1_PACKETID_QUEUE, entry->queue->qid); in rt2800_write_tx_data()
|
/linux-4.1.27/fs/xfs/ |
D | xfs_quotaops.c | 230 struct kqid qid, in xfs_fs_get_dqblk() argument 240 return xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid), in xfs_fs_get_dqblk() 241 xfs_quota_type(qid.type), qdq); in xfs_fs_get_dqblk() 247 struct kqid qid, in xfs_fs_set_dqblk() argument 259 return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid), in xfs_fs_set_dqblk() 260 xfs_quota_type(qid.type), qdq); in xfs_fs_set_dqblk()
|
/linux-4.1.27/fs/ntfs/ |
D | quota.c | 41 const le32 qid = QUOTA_DEFAULTS_ID; in ntfs_mark_quotas_out_of_date() local 57 err = ntfs_index_lookup(&qid, sizeof(qid), ictx); in ntfs_mark_quotas_out_of_date()
|
/linux-4.1.27/fs/gfs2/ |
D | quota.c | 89 const struct kqid qid) in gfs2_qd_hash() argument 94 h = jhash(&qid, sizeof(struct kqid), h); in gfs2_qd_hash() 199 struct kqid qid = qd->qd_id; in qd2index() local 200 return (2 * (u64)from_kqid(&init_user_ns, qid)) + in qd2index() 201 ((qid.type == USRQUOTA) ? 0 : 1); in qd2index() 214 static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid) in qd_alloc() argument 226 qd->qd_id = qid; in qd_alloc() 245 struct kqid qid) in gfs2_qd_search_bucket() argument 251 if (!qid_eq(qd->qd_id, qid)) in gfs2_qd_search_bucket() 265 static int qd_get(struct gfs2_sbd *sdp, struct kqid qid, in qd_get() argument [all …]
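qd2index() above interleaves user and group quotas in one on-disk index space: user id N lands at slot 2N, group id N at slot 2N + 1. A one-function sketch of that mapping:

#include <linux/types.h>
#include <linux/quota.h>

/* Even slots hold user quotas, odd slots hold group quotas, so both
 * types can share a single quota file without colliding. */
static u64 sketch_qd2index(struct kqid qid)
{
	u64 id = from_kqid(&init_user_ns, qid);

	return (2 * id) + ((qid.type == USRQUOTA) ? 0 : 1);
}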
|
D | sys.c | 177 struct kqid qid; in quota_refresh_user_store() local 186 qid = make_kqid(current_user_ns(), USRQUOTA, id); in quota_refresh_user_store() 187 if (!qid_valid(qid)) in quota_refresh_user_store() 190 error = gfs2_quota_refresh(sdp, qid); in quota_refresh_user_store() 197 struct kqid qid; in quota_refresh_group_store() local 206 qid = make_kqid(current_user_ns(), GRPQUOTA, id); in quota_refresh_group_store() 207 if (!qid_valid(qid)) in quota_refresh_group_store() 210 error = gfs2_quota_refresh(sdp, qid); in quota_refresh_group_store()
|
D | quota.h | 33 extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid);
|
/linux-4.1.27/include/net/9p/ |
D | client.h | 189 struct p9_qid qid; member 208 struct p9_qid qid; member 234 struct p9_qid *qid); 236 kgid_t gid, struct p9_qid *qid);
|
D | 9p.h | 400 struct p9_qid qid; member 417 struct p9_qid qid; member
|
/linux-4.1.27/drivers/net/wireless/ti/wl1251/ |
D | init.c | 231 static int wl1251_hw_init_txq_fill(u8 qid, in wl1251_hw_init_txq_fill() argument 235 config->qid = qid; in wl1251_hw_init_txq_fill() 237 switch (qid) { in wl1251_hw_init_txq_fill() 263 wl1251_error("Invalid TX queue id: %d", qid); in wl1251_hw_init_txq_fill()
|
D | acx.h | 318 u8 qid; member
|
/linux-4.1.27/drivers/staging/lustre/lustre/include/ |
D | lustre_fid.h | 532 fid_build_quota_res_name(const struct lu_fid *glb_fid, union lquota_id *qid, in fid_build_quota_res_name() argument 536 res->name[LUSTRE_RES_ID_QUOTA_SEQ_OFF] = fid_seq(&qid->qid_fid); in fid_build_quota_res_name() 537 res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF] = fid_ver_oid(&qid->qid_fid); in fid_build_quota_res_name() 546 union lquota_id *qid, in fid_extract_from_quota_res() argument 550 qid->qid_fid.f_seq = res->name[LUSTRE_RES_ID_QUOTA_SEQ_OFF]; in fid_extract_from_quota_res() 551 qid->qid_fid.f_oid = (__u32)res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF]; in fid_extract_from_quota_res() 552 qid->qid_fid.f_ver = in fid_extract_from_quota_res()
|
/linux-4.1.27/drivers/block/ |
D | virtio_blk.c | 138 int qid = vq->index; in virtblk_done() local 143 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); in virtblk_done() 146 while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { in virtblk_done() 157 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); in virtblk_done() 168 int qid = hctx->queue_num; in virtio_queue_rq() local 212 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); in virtio_queue_rq() 213 err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num); in virtio_queue_rq() 215 virtqueue_kick(vblk->vqs[qid].vq); in virtio_queue_rq() 217 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); in virtio_queue_rq() 225 if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq)) in virtio_queue_rq() [all …]
|
D | nvme-core.c | 113 u16 qid; member 1107 c.delete_queue.qid = cpu_to_le16(id); in adapter_delete_queue() 1112 static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, in adapter_alloc_cq() argument 1121 c.create_cq.cqid = cpu_to_le16(qid); in adapter_alloc_cq() 1129 static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, in adapter_alloc_sq() argument 1138 c.create_sq.sqid = cpu_to_le16(qid); in adapter_alloc_sq() 1141 c.create_sq.cqid = cpu_to_le16(qid); in adapter_alloc_sq() 1213 if (!nvmeq->qid || cmd_rq->aborted) { in nvme_abort_req() 1222 req->tag, nvmeq->qid); in nvme_abort_req() 1244 cmd.abort.sqid = cpu_to_le16(nvmeq->qid); in nvme_abort_req() [all …]
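adapter_delete_queue(), adapter_alloc_cq() and adapter_alloc_sq() above all place the queue id into the admin command as a little-endian 16-bit field via cpu_to_le16(). A hedged sketch with a hypothetical, cut-down command struct (the real layout is struct nvme_command in include/uapi/linux/nvme.h):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical minimal delete-queue command: just the fields the
 * fragments above touch.  Queue ids are little-endian on the wire,
 * hence the cpu_to_le16() conversions in nvme-core.c. */
struct sketch_nvme_delete_queue {
	__u8   opcode;
	__le16 qid;	/* id of the submission or completion queue to delete */
};

static void sketch_fill_delete_queue(struct sketch_nvme_delete_queue *c,
				     u8 opcode, u16 id)
{
	c->opcode = opcode;
	c->qid = cpu_to_le16(id);
}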
|
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4/ |
D | cxgb4_uld.h | 158 void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid); 298 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size); 307 unsigned int qid,
|
D | t4_hw.h | 138 __be32 qid; member
|
D | t4_hw.c | 3644 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid) in t4_mk_filtdelwr() argument 3650 FW_FILTER_WR_NOREPLY_V(qid < 0)); in t4_mk_filtdelwr() 3652 if (qid >= 0) in t4_mk_filtdelwr() 3653 wr->rx_chan_rx_rpl_iq = htons(FW_FILTER_WR_RX_RPL_IQ_V(qid)); in t4_mk_filtdelwr() 5126 unsigned int qid, in cxgb4_t4_bar2_sge_qregs() argument 5157 bar2_page_offset = ((qid >> qpp_shift) << page_shift); in cxgb4_t4_bar2_sge_qregs() 5158 bar2_qid = qid & qpp_mask; in cxgb4_t4_bar2_sge_qregs() 5481 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n) in t4_read_cim_ibq() argument 5487 if (qid > 5 || (n & 3)) in t4_read_cim_ibq() 5490 addr = qid * nwords; in t4_read_cim_ibq() [all …]
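cxgb4_t4_bar2_sge_qregs() above turns a queue id into a BAR2 doorbell location: the bits above qpp_shift ("queues per page") select the BAR2 page, the bits below index the queue within that page. A sketch of just that arithmetic:

#include <linux/types.h>

/* qpp_shift = log2(queues per BAR2 page), page_shift = log2(page size). */
static void sketch_bar2_qregs(unsigned int qid, unsigned int qpp_shift,
			      unsigned int page_shift,
			      u64 *bar2_page_offset, unsigned int *bar2_qid)
{
	unsigned int qpp_mask = (1U << qpp_shift) - 1;

	*bar2_page_offset = (u64)(qid >> qpp_shift) << page_shift;
	*bar2_qid = qid & qpp_mask;
}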
|
D | cxgb4.h | 1186 unsigned int qid, 1220 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, 1222 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, 1244 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
|
D | cxgb4_main.c | 611 unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid)); in fwevtq_handler() local 614 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start]; in fwevtq_handler() 903 static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid) in rxq_to_chan() argument 905 qid -= p->ingr_start; in rxq_to_chan() 906 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan; in rxq_to_chan() 1994 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx) in read_eq_indices() argument 1996 u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8; in read_eq_indices() 2012 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, in cxgb4_sync_txq_pidx() argument 2019 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx); in cxgb4_sync_txq_pidx() 2038 QID_V(qid) | val); in cxgb4_sync_txq_pidx() [all …]
|
D | cxgb4_debugfs.c | 267 unsigned int qid = (uintptr_t)inode->i_private & 7; in cim_ibq_open() local 268 struct adapter *adap = inode->i_private - qid; in cim_ibq_open() 274 ret = t4_read_cim_ibq(adap, qid, (u32 *)p->data, CIM_IBQ_SIZE * 4); in cim_ibq_open() 294 unsigned int qid = (uintptr_t)inode->i_private & 7; in cim_obq_open() local 295 struct adapter *adap = inode->i_private - qid; in cim_obq_open() 301 ret = t4_read_cim_obq(adap, qid, (u32 *)p->data, 6 * CIM_OBQ_SIZE * 4); in cim_obq_open()
|
D | sge.c | 2207 unsigned int qid = ntohl(rc->pldbuflen_qid); in process_intrq() local 2209 qid -= adap->sge.ingr_start; in process_intrq() 2210 napi_schedule(&adap->sge.ingr_map[qid]->napi); in process_intrq() 2425 unsigned int qid, in bar2_address() argument 2432 ret = cxgb4_t4_bar2_sge_qregs(adapter, qid, qtype, in bar2_address()
|
D | t4_msg.h | 210 __be16 qid; member
|
/linux-4.1.27/drivers/net/ethernet/sfc/ |
D | siena_sriov.c | 1400 unsigned qid, seq, type, data; in efx_siena_sriov_event() local 1402 qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID); in efx_siena_sriov_event() 1412 qid, seq, type, data); in efx_siena_sriov_event() 1414 if (map_vi_index(efx, qid, &vf, NULL)) in efx_siena_sriov_event() 1485 unsigned queue, qid; in efx_siena_sriov_tx_flush_done() local 1488 if (map_vi_index(efx, queue, &vf, &qid)) in efx_siena_sriov_tx_flush_done() 1491 if (!test_bit(qid, vf->txq_mask)) in efx_siena_sriov_tx_flush_done() 1494 __clear_bit(qid, vf->txq_mask); in efx_siena_sriov_tx_flush_done() 1504 unsigned ev_failed, queue, qid; in efx_siena_sriov_rx_flush_done() local 1509 if (map_vi_index(efx, queue, &vf, &qid)) in efx_siena_sriov_rx_flush_done() [all …]
|
D | farch.c | 1099 int qid; in efx_farch_handle_tx_flush_done() local 1101 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); in efx_farch_handle_tx_flush_done() 1102 if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) { in efx_farch_handle_tx_flush_done() 1103 tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES, in efx_farch_handle_tx_flush_done() 1104 qid % EFX_TXQ_TYPES); in efx_farch_handle_tx_flush_done() 1121 int qid; in efx_farch_handle_rx_flush_done() local 1124 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); in efx_farch_handle_rx_flush_done() 1126 if (qid >= efx->n_channels) in efx_farch_handle_rx_flush_done() 1128 channel = efx_get_channel(efx, qid); in efx_farch_handle_rx_flush_done() 1135 "RXQ %d flush retry\n", qid); in efx_farch_handle_rx_flush_done()
|
/linux-4.1.27/net/sched/ |
D | sch_api.c | 1531 u32 qid; in tc_ctl_tclass() local 1563 qid = TC_H_MAJ(clid); in tc_ctl_tclass() 1568 if (qid && qid1) { in tc_ctl_tclass() 1570 if (qid != qid1) in tc_ctl_tclass() 1573 qid = qid1; in tc_ctl_tclass() 1574 } else if (qid == 0) in tc_ctl_tclass() 1575 qid = dev->qdisc->handle; in tc_ctl_tclass() 1583 portid = TC_H_MAKE(qid, portid); in tc_ctl_tclass() 1585 if (qid == 0) in tc_ctl_tclass() 1586 qid = dev->qdisc->handle; in tc_ctl_tclass() [all …]
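tc_ctl_tclass() above treats a classid as a 32-bit "major:minor" handle: TC_H_MAJ() yields the owning qdisc's id (kept in the local variable qid), TC_H_MIN() the class within it, and TC_H_MAKE() glues them back together. A short sketch of that handle arithmetic using the uapi macros:

#include <linux/types.h>
#include <linux/pkt_sched.h>	/* TC_H_MAJ(), TC_H_MIN(), TC_H_MAKE() */

/* Resolve a classid against a default qdisc handle, the way the class
 * control path does when the major half is left as zero. */
static u32 sketch_resolve_classid(u32 clid, u32 root_qdisc_handle)
{
	u32 qid = TC_H_MAJ(clid);	/* upper 16 bits, still in place */

	if (qid == 0)
		qid = root_qdisc_handle;

	return TC_H_MAKE(qid, TC_H_MIN(clid));
}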
|
/linux-4.1.27/drivers/scsi/bfa/ |
D | bfa_core.c | 696 bfa_reqq_resume(struct bfa_s *bfa, int qid) in bfa_reqq_resume() argument 701 waitq = bfa_reqq(bfa, qid); in bfa_reqq_resume() 706 if (bfa_reqq_full(bfa, qid)) in bfa_reqq_resume() 716 bfa_isr_rspq(struct bfa_s *bfa, int qid) in bfa_isr_rspq() argument 723 ci = bfa_rspq_ci(bfa, qid); in bfa_isr_rspq() 724 pi = bfa_rspq_pi(bfa, qid); in bfa_isr_rspq() 729 m = bfa_rspq_elem(bfa, qid, ci); in bfa_isr_rspq() 739 bfa_isr_rspq_ack(bfa, qid, ci); in bfa_isr_rspq() 744 waitq = bfa_reqq(bfa, qid); in bfa_isr_rspq() 746 bfa_reqq_resume(bfa, qid); in bfa_isr_rspq() [all …]
|
D | bfa.h | 58 (__mh).mtag.h2i.qid = (__bfa)->iocfc.hw_qid[__reqq];\
|
D | bfi.h | 59 u8 qid; member
|
/linux-4.1.27/drivers/scsi/aacraid/ |
D | commsup.c | 313 static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, uns… in aac_get_entry() argument 325 q = &dev->queues->queue[qid]; in aac_get_entry() 331 if (qid == AdapNormCmdQueue) in aac_get_entry() 340 if (qid == AdapNormCmdQueue) { in aac_get_entry() 351 qid, atomic_read(&q->numpending)); in aac_get_entry() 375 int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, str… in aac_queue_get() argument 380 if (qid == AdapNormCmdQueue) { in aac_queue_get() 382 while (!aac_get_entry(dev, qid, &entry, index, nonotify)) { in aac_queue_get() 391 while (!aac_get_entry(dev, qid, &entry, index, nonotify)) { in aac_queue_get() 690 void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid) in aac_consumer_free() argument [all …]
|
D | aacraid.h | 2132 int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, str…
|
/linux-4.1.27/drivers/net/vmxnet3/ |
D | vmxnet3_int.h | 239 int qid; member 277 u32 qid; /* rqID in RCD for buffer from 1st ring */ member
|
D | vmxnet3_drv.c | 1080 VMXNET3_REG_TXPROD + tq->qid * 8, in vmxnet3_tq_xmit() 1195 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2); in vmxnet3_rq_rx_complete() 1213 rcd->rqID != rq->qid); in vmxnet3_rq_rx_complete() 1347 rxprod_reg[ring_idx] + rq->qid * 8, in vmxnet3_rq_rx_complete() 1891 rq->qid = i; in vmxnet3_request_irqs() 2547 tq->qid = i; in vmxnet3_create_queues()
|
/linux-4.1.27/arch/mips/include/asm/netlogic/xlp-hal/ |
D | pic.h | 186 #define PIC_IRT_MSG_Q_INDEX(qid) ((qid) + PIC_IRT_MSG_Q0_INDEX) argument
|
/linux-4.1.27/drivers/net/bonding/ |
D | bond_options.c | 1244 u16 qid; in bond_option_queue_id_set() local 1255 if (sscanf(++delim, "%hd\n", &qid) != 1) in bond_option_queue_id_set() 1260 qid > bond->dev->real_num_tx_queues) in bond_option_queue_id_set() 1276 else if (qid && qid == slave->queue_id) { in bond_option_queue_id_set() 1285 update_slave->queue_id = qid; in bond_option_queue_id_set()
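bond_option_queue_id_set() above parses a "device:qid" string, then range-checks the qid against the bond's real_num_tx_queues (with 0 meaning "clear the override", as the bonding documentation at the end of this listing notes). A minimal sketch of the parsing step, with a hypothetical helper name:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Split "eth1:2" into an interface name and a queue id.  The caller
 * still has to validate the name and the qid range. */
static int sketch_parse_queue_id(char *buf, char **ifname, u16 *qid)
{
	char *delim = strchr(buf, ':');

	if (!delim || delim == buf)
		return -EINVAL;

	*delim = '\0';		/* terminate the interface name in place */
	*ifname = buf;

	if (sscanf(delim + 1, "%hu", qid) != 1)
		return -EINVAL;

	return 0;
}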
|
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4vf/ |
D | sge.c | 1922 unsigned int qid, iq_idx; in process_intrq() local 1954 qid = RSPD_QID(be32_to_cpu(rc->pldbuflen_qid)); in process_intrq() 1955 iq_idx = IQ_IDX(s, qid); in process_intrq() 1958 "Ingress QID %d out of range\n", qid); in process_intrq() 1964 "Ingress QID %d RSPQ=NULL\n", qid); in process_intrq() 1967 if (unlikely(rspq->abs_id != qid)) { in process_intrq() 1970 qid, rspq->abs_id); in process_intrq() 2150 unsigned int qid, in bar2_address() argument 2157 ret = t4_bar2_sge_qregs(adapter, qid, qtype, in bar2_address()
|
D | t4vf_common.h | 288 unsigned int qid,
|
D | cxgb4vf_main.c | 474 unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid)); in fwevtq_handler() local 487 eq_idx = EQ_IDX(s, qid); in fwevtq_handler() 490 "Egress Update QID %d out of range\n", qid); in fwevtq_handler() 496 "Egress Update QID %d TXQ=NULL\n", qid); in fwevtq_handler() 500 if (unlikely(tq->abs_id != qid)) { in fwevtq_handler() 503 qid, tq->abs_id); in fwevtq_handler()
|
D | t4vf_hw.c | 456 unsigned int qid, in t4_bar2_sge_qregs() argument 487 bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift); in t4_bar2_sge_qregs() 488 bar2_qid = qid & qpp_mask; in t4_bar2_sge_qregs()
|
/linux-4.1.27/drivers/net/ethernet/brocade/bna/ |
D | bfi.h | 37 u8 qid; member 46 #define bfi_mhdr_2_qid(_mh) ((_mh)->mtag.h2i.qid)
|
/linux-4.1.27/drivers/net/ethernet/intel/i40e/ |
D | i40e_virtchnl_pf.c | 196 u8 qid) in i40e_vc_isvalid_queue_id() argument 201 return (vsi && (qid < vsi->alloc_queue_pairs)); in i40e_vc_isvalid_queue_id() 600 u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j); in i40e_enable_vf_mappings() local 601 reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK); in i40e_enable_vf_mappings() 611 u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, in i40e_enable_vf_mappings() local 613 reg = qid; in i40e_enable_vf_mappings() 614 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, in i40e_enable_vf_mappings() 616 reg |= qid << 16; in i40e_enable_vf_mappings()
|
/linux-4.1.27/drivers/net/wireless/ath/ar5523/ |
D | ar5523_hw.h | 284 __be32 qid; member
|
D | ar5523.c | 443 __be32 qid = cpu_to_be32(0); in ar5523_reset_tx_queues() local 447 &qid, sizeof(qid), 0); in ar5523_reset_tx_queues() 475 qinfo.qid = cpu_to_be32(0); in ar5523_queue_init()
|
/linux-4.1.27/drivers/scsi/csiostor/ |
D | csio_wr.c | 1132 uint32_t wr_type, fw_qid, qid; in csio_wr_process_iq() local 1165 qid = fw_qid - wrm->fw_iq_start; in csio_wr_process_iq() 1166 q_completed = hw->wrm.intr_map[qid]; in csio_wr_process_iq() 1168 if (unlikely(qid == in csio_wr_process_iq()
|
D | csio_wr.h | 274 __be32 qid; member
|
/linux-4.1.27/arch/mips/include/asm/octeon/ |
D | cvmx-pko-defs.h | 1292 uint64_t qid:8; member 1294 uint64_t qid:8; 1318 uint64_t qid:8; member 1320 uint64_t qid:8; 1526 uint64_t qid:7; member 1528 uint64_t qid:7;
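The cvmx-pko-defs.h fragments declare the 8-bit qid bitfield twice, once per bit ordering, so the hardware register maps to the same bits on big- and little-endian kernels. A hypothetical cut-down union showing the idiom (the neighbouring field names and widths here are illustrative, not the real register map):

#include <linux/types.h>
#include <asm/byteorder.h>

union sketch_pko_queue_reg {
	uint64_t u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved:48;
		uint64_t qid:8;		/* same register bits either way */
		uint64_t qos_mask:8;
#else
		uint64_t qos_mask:8;
		uint64_t qid:8;
		uint64_t reserved:48;
#endif
	} s;
};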
|
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb/ |
D | sge.c | 1661 unsigned int qid, struct net_device *dev) in t1_sge_tx() argument 1664 struct cmdQ *q = &sge->cmdQ[qid]; in t1_sge_tx() 1699 if (sge->tx_sched && !qid && skb->dev) { in t1_sge_tx() 1734 if (qid) in t1_sge_tx()
|
/linux-4.1.27/include/uapi/linux/ |
D | nvme.h | 423 __le16 qid; member
|
/linux-4.1.27/arch/mips/cavium-octeon/executive/ |
D | cvmx-helper-rgmii.c | 336 pko_mem_queue_qos.s.qid = queue; in __cvmx_helper_rgmii_link_set()
|
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb3/ |
D | t3_cpl.h | 1167 __u8 qid:4; member 1171 __u8 qid:4; member
|
D | sge.c | 1372 unsigned int qid) in check_desc_avail() argument 1379 struct sge_qset *qs = txq_to_qset(q, qid); in check_desc_avail() 1381 set_bit(qid, &qs->txq_stopped); in check_desc_avail() 1385 test_and_clear_bit(qid, &qs->txq_stopped)) in check_desc_avail()
|
/linux-4.1.27/drivers/net/ethernet/emulex/benet/ |
D | be_main.c | 190 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted) in be_rxq_notify() argument 194 val |= qid & DB_RQ_RING_ID_MASK; in be_rxq_notify() 213 static void be_eq_notify(struct be_adapter *adapter, u16 qid, in be_eq_notify() argument 218 val |= qid & DB_EQ_RING_ID_MASK; in be_eq_notify() 219 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT); in be_eq_notify() 233 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped) in be_cq_notify() argument 237 val |= qid & DB_CQ_RING_ID_MASK; in be_cq_notify() 238 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) << in be_cq_notify()
|
D | be.h | 797 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
|
/linux-4.1.27/drivers/net/wireless/ath/ath9k/ |
D | mac.h | 116 u8 qid; member
|
D | ar9003_mac.c | 380 ts->qid = MS(ads->ds_info, AR_TxQcuNum); in ar9003_hw_proc_txdesc()
|
D | xmit.c | 2749 if (ts.qid == sc->beacon.beaconq) { in ath_tx_edma_tasklet() 2762 txq = &sc->tx.txq[ts.qid]; in ath_tx_edma_tasklet()
|
/linux-4.1.27/drivers/scsi/be2iscsi/ |
D | be_main.h | 726 u8 qid[10]; member
|
/linux-4.1.27/drivers/net/wireless/ath/wil6210/ |
D | wmi.h | 1317 u8 qid; member
|
D | wmi.c | 329 data->info.qid, data->info.mid, data->info.cid); in wmi_evt_rx_mgmt()
|
/linux-4.1.27/drivers/scsi/qla2xxx/ |
D | qla_nx2.c | 2614 uint32_t r_stride, r_value, r_cnt, qid = 0; in qla8044_minidump_process_queue() local 2627 qla8044_wr_reg_indirect(vha, s_addr, qid); in qla8044_minidump_process_queue() 2634 qid += q_hdr->q_strd.queue_id_stride; in qla8044_minidump_process_queue()
|
D | qla_nx.c | 4002 uint32_t r_stride, r_value, r_cnt, qid = 0; in qla82xx_minidump_process_queue() local 4014 qla82xx_md_rw_32(ha, s_addr, qid, 1); in qla82xx_minidump_process_queue() 4021 qid += q_hdr->q_strd.queue_id_stride; in qla82xx_minidump_process_queue()
|
/linux-4.1.27/drivers/scsi/qla4xxx/ |
D | ql4_nx.c | 2413 uint32_t r_stride, r_value, r_cnt, qid = 0; in qla4_8xxx_minidump_process_queue() local 2426 ha->isp_ops->wr_reg_indirect(ha, s_addr, qid); in qla4_8xxx_minidump_process_queue() 2433 qid += q_hdr->q_strd.queue_id_stride; in qla4_8xxx_minidump_process_queue()
|
/linux-4.1.27/Documentation/networking/ |
D | bonding.txt | 1612 selection policy to be overridden, selecting instead qid 2, which maps to eth1. 1614 Note that qid values begin at 1. Qid 0 is reserved to initiate to the driver 1616 leaving the qid for a slave to 0 is the multiqueue awareness in the bonding
|