Lines matching refs:crq in the ibmvfc driver (drivers/scsi/ibmvfc/ibmvfc.c)

163 entry->fmt = evt->crq.format; in ibmvfc_trc_start()
196 entry->fmt = evt->crq.format; in ibmvfc_trc_end()
654 struct ibmvfc_crq_queue *crq = &vhost->crq; in ibmvfc_release_crq_queue() local
667 dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); in ibmvfc_release_crq_queue()
668 free_page((unsigned long)crq->msgs); in ibmvfc_release_crq_queue()
708 struct ibmvfc_crq_queue *crq = &vhost->crq; in ibmvfc_reset_crq() local
723 memset(crq->msgs, 0, PAGE_SIZE); in ibmvfc_reset_crq()
724 crq->cur = 0; in ibmvfc_reset_crq()
728 crq->msg_token, PAGE_SIZE); in ibmvfc_reset_crq()
1227 evt->crq.valid = 0x80; in ibmvfc_init_event_pool()
1228 evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i)); in ibmvfc_init_event_pool()
1295 evt->crq.format = format; in ibmvfc_init_event()
1407 __be64 *crq_as_u64 = (__be64 *) &evt->crq; in ibmvfc_send_event()
1412 if (evt->crq.format == IBMVFC_CMD_FORMAT) in ibmvfc_send_event()
1414 else if (evt->crq.format == IBMVFC_MAD_FORMAT) in ibmvfc_send_event()
1634 vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp)); in ibmvfc_queuecommand_lck()
1902 mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + in ibmvfc_bsg_request()
1980 tmf->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp)); in ibmvfc_reset_device()
2245 if (evt->crq.format == IBMVFC_CMD_FORMAT && in ibmvfc_match_key()
2310 tmf->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp)); in ibmvfc_abort_task_set()
2632 static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, in ibmvfc_handle_async() argument
2635 const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event)); in ibmvfc_handle_async()
2639 " node_name: %llx%s\n", desc->desc, crq->scsi_id, crq->wwpn, crq->node_name, in ibmvfc_handle_async()
2640 ibmvfc_get_link_state(crq->link_state)); in ibmvfc_handle_async()
2642 switch (be64_to_cpu(crq->event)) { in ibmvfc_handle_async()
2644 switch (crq->link_state) { in ibmvfc_handle_async()
2683 if (!crq->scsi_id && !crq->wwpn && !crq->node_name) in ibmvfc_handle_async()
2685 if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id) in ibmvfc_handle_async()
2687 if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn) in ibmvfc_handle_async()
2689 if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name) in ibmvfc_handle_async()
2691 if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO) in ibmvfc_handle_async()
2693 if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) { in ibmvfc_handle_async()
2710 dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event); in ibmvfc_handle_async()
2721 static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost) in ibmvfc_handle_crq() argument
2724 struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba); in ibmvfc_handle_crq()
2726 switch (crq->valid) { in ibmvfc_handle_crq()
2728 switch (crq->format) { in ibmvfc_handle_crq()
2743 dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format); in ibmvfc_handle_crq()
2750 if (crq->format == IBMVFC_PARTITION_MIGRATED) { in ibmvfc_handle_crq()
2758 dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format); in ibmvfc_handle_crq()
2767 dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid); in ibmvfc_handle_crq()
2771 if (crq->format == IBMVFC_ASYNC_EVENT) in ibmvfc_handle_crq()
2780 crq->ioba); in ibmvfc_handle_crq()
2786 crq->ioba); in ibmvfc_handle_crq()
3112 struct ibmvfc_async_crq *crq; in ibmvfc_next_async_crq() local
3114 crq = &async_crq->msgs[async_crq->cur]; in ibmvfc_next_async_crq()
3115 if (crq->valid & 0x80) { in ibmvfc_next_async_crq()
3120 crq = NULL; in ibmvfc_next_async_crq()
3122 return crq; in ibmvfc_next_async_crq()
3134 struct ibmvfc_crq_queue *queue = &vhost->crq; in ibmvfc_next_crq()
3135 struct ibmvfc_crq *crq; in ibmvfc_next_crq() local
3137 crq = &queue->msgs[queue->cur]; in ibmvfc_next_crq()
3138 if (crq->valid & 0x80) { in ibmvfc_next_crq()
3143 crq = NULL; in ibmvfc_next_crq()
3145 return crq; in ibmvfc_next_crq()
3179 struct ibmvfc_crq *crq; in ibmvfc_tasklet() local
3194 while ((crq = ibmvfc_next_crq(vhost)) != NULL) { in ibmvfc_tasklet()
3195 ibmvfc_handle_crq(crq, vhost); in ibmvfc_tasklet()
3196 crq->valid = 0; in ibmvfc_tasklet()
3206 } else if ((crq = ibmvfc_next_crq(vhost)) != NULL) { in ibmvfc_tasklet()
3208 ibmvfc_handle_crq(crq, vhost); in ibmvfc_tasklet()
3209 crq->valid = 0; in ibmvfc_tasklet()
3639 mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + in ibmvfc_init_passthru()
3644 mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + in ibmvfc_init_passthru()
3648 mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + in ibmvfc_init_passthru()
4514 struct ibmvfc_crq_queue *crq = &vhost->crq; in ibmvfc_init_crq() local
4517 crq->msgs = (struct ibmvfc_crq *)get_zeroed_page(GFP_KERNEL); in ibmvfc_init_crq()
4519 if (!crq->msgs) in ibmvfc_init_crq()
4522 crq->size = PAGE_SIZE / sizeof(*crq->msgs); in ibmvfc_init_crq()
4523 crq->msg_token = dma_map_single(dev, crq->msgs, in ibmvfc_init_crq()
4526 if (dma_mapping_error(dev, crq->msg_token)) in ibmvfc_init_crq()
4530 crq->msg_token, PAGE_SIZE); in ibmvfc_init_crq()
4557 crq->cur = 0; in ibmvfc_init_crq()
4567 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); in ibmvfc_init_crq()
4569 free_page((unsigned long)crq->msgs); in ibmvfc_init_crq()
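
Note: the cursor/valid-bit logic visible above at lines 3112-3122 (ibmvfc_next_async_crq()) and 3134-3145 (ibmvfc_next_crq()) is the driver's CRQ ring-buffer consumption pattern: the current slot is consumed only if bit 0x80 of its valid byte is set, and the cursor wraps when it reaches the end of the page-sized ring. The sketch below is a minimal, self-contained illustration of that pattern; the struct and function names are illustrative stand-ins, not the driver's own, and details of the real code such as locking and memory-barrier handling are omitted.

/* Illustrative stand-ins for the driver's queue types; the field names
 * (msgs, size, cur, valid) follow the listing above, everything else is
 * assumed for the example. */
struct demo_crq {
	unsigned char valid;		/* sender sets 0x80 when the slot holds a message */
	unsigned char payload[15];	/* placeholder for the rest of the entry */
};

struct demo_crq_queue {
	struct demo_crq *msgs;		/* page-sized array of slots */
	int size;			/* PAGE_SIZE / sizeof(*msgs), as set up in ibmvfc_init_crq() */
	int cur;			/* next slot to inspect */
};

/* Same shape as ibmvfc_next_crq() in the listing: return the next valid
 * entry and advance the cursor, wrapping at the end of the ring, or
 * return NULL if the current slot has not been filled in yet. */
static struct demo_crq *demo_next_crq(struct demo_crq_queue *queue)
{
	struct demo_crq *crq = &queue->msgs[queue->cur];

	if (crq->valid & 0x80) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}

As the ibmvfc_tasklet() lines (3194-3209) show, the consumer clears crq->valid back to 0 after ibmvfc_handle_crq() so the slot can be reused on the next pass around the ring.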