Lines matching refs: qp_info
(Cross-reference hits for the identifier qp_info, pointers and fields of type struct ib_mad_qp_info, in the kernel's InfiniBand MAD implementation, drivers/infiniband/core/mad.c. Each hit shows the source line number, the matched code, and the enclosing function.)

77 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
327 if (!port_priv->qp_info[qpn].qp) { in ib_register_mad_agent()
350 mad_agent_priv->qp_info = &port_priv->qp_info[qpn]; in ib_register_mad_agent()
357 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp; in ib_register_mad_agent()
447 static int register_snoop_agent(struct ib_mad_qp_info *qp_info, in register_snoop_agent() argument
454 spin_lock_irqsave(&qp_info->snoop_lock, flags); in register_snoop_agent()
456 for (i = 0; i < qp_info->snoop_table_size; i++) in register_snoop_agent()
457 if (!qp_info->snoop_table[i]) in register_snoop_agent()
460 if (i == qp_info->snoop_table_size) { in register_snoop_agent()
462 new_snoop_table = krealloc(qp_info->snoop_table, in register_snoop_agent()
464 (qp_info->snoop_table_size + 1), in register_snoop_agent()
471 qp_info->snoop_table = new_snoop_table; in register_snoop_agent()
472 qp_info->snoop_table_size++; in register_snoop_agent()
474 qp_info->snoop_table[i] = mad_snoop_priv; in register_snoop_agent()
475 atomic_inc(&qp_info->snoop_count); in register_snoop_agent()
477 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in register_snoop_agent()
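
register_snoop_agent() (lines 447-477) takes snoop_lock, scans the table for a previously freed slot, and grows the table by exactly one entry with krealloc() when none is free; the unregister path (lines 592-596) simply NULLs the slot so a later registration can reuse it. A minimal user-space model of that grow-by-one registration, with a pthread mutex standing in for spin_lock_irqsave() and realloc() for krealloc(..., GFP_ATOMIC); all names here are illustrative:

#include <pthread.h>
#include <stdlib.h>

struct snoop_table {
    pthread_mutex_t lock;   /* kernel: spin_lock_irqsave(&qp_info->snoop_lock) */
    void **slots;           /* kernel: qp_info->snoop_table */
    int size;               /* kernel: qp_info->snoop_table_size */
    int count;              /* kernel: atomic_t qp_info->snoop_count */
};

/* Returns the slot index assigned to 'agent', or -1 if allocation fails. */
static int register_snoop(struct snoop_table *t, void *agent)
{
    int i;

    pthread_mutex_lock(&t->lock);
    for (i = 0; i < t->size; i++)   /* prefer a previously freed slot */
        if (!t->slots[i])
            break;
    if (i == t->size) {             /* no free slot: grow by one entry */
        void **bigger = realloc(t->slots, sizeof(*bigger) * (t->size + 1));
        if (!bigger) {
            pthread_mutex_unlock(&t->lock);
            return -1;
        }
        t->slots = bigger;
        t->size++;
    }
    t->slots[i] = agent;
    t->count++;
    pthread_mutex_unlock(&t->lock);
    return i;
}

Growing one slot at a time is reasonable here because snoop agents are rare and registration is nowhere near a hot path.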
518 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn]; in ib_register_mad_snoop()
523 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp; in ib_register_mad_snoop()
528 &port_priv->qp_info[qpn], in ib_register_mad_snoop()
569 port_priv = mad_agent_priv->qp_info->port_priv; in unregister_mad_agent()
589 struct ib_mad_qp_info *qp_info; in unregister_mad_snoop() local
592 qp_info = mad_snoop_priv->qp_info; in unregister_mad_snoop()
593 spin_lock_irqsave(&qp_info->snoop_lock, flags); in unregister_mad_snoop()
594 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL; in unregister_mad_snoop()
595 atomic_dec(&qp_info->snoop_count); in unregister_mad_snoop()
596 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in unregister_mad_snoop()
641 static void snoop_send(struct ib_mad_qp_info *qp_info, in snoop_send() argument
650 spin_lock_irqsave(&qp_info->snoop_lock, flags); in snoop_send()
651 for (i = 0; i < qp_info->snoop_table_size; i++) { in snoop_send()
652 mad_snoop_priv = qp_info->snoop_table[i]; in snoop_send()
658 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in snoop_send()
662 spin_lock_irqsave(&qp_info->snoop_lock, flags); in snoop_send()
664 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in snoop_send()
667 static void snoop_recv(struct ib_mad_qp_info *qp_info, in snoop_recv() argument
675 spin_lock_irqsave(&qp_info->snoop_lock, flags); in snoop_recv()
676 for (i = 0; i < qp_info->snoop_table_size; i++) { in snoop_recv()
677 mad_snoop_priv = qp_info->snoop_table[i]; in snoop_recv()
683 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in snoop_recv()
687 spin_lock_irqsave(&qp_info->snoop_lock, flags); in snoop_recv()
689 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in snoop_recv()
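
snoop_send() (641-664) and snoop_recv() (667-689) dispatch with the same shape: scan the table under snoop_lock, but release the lock around each callback so the handler can block, then re-acquire it before moving to the next slot; the kernel pins each entry with a reference count across the unlocked window. A rough user-space model of that lock-drop iteration (illustrative types; C11 atomics play the role of the kernel refcount and deref_snoop_agent()):

#include <pthread.h>
#include <stdatomic.h>

struct agent {
    atomic_int refcount;
    void (*handler)(struct agent *a, const void *mad);
};

struct snoop_table {
    pthread_mutex_t lock;
    struct agent **slots;
    int size;
};

static void dispatch(struct snoop_table *t, const void *mad)
{
    pthread_mutex_lock(&t->lock);
    for (int i = 0; i < t->size; i++) {
        struct agent *a = t->slots[i];
        if (!a)                             /* kernel also filters on snoop_mask */
            continue;
        atomic_fetch_add(&a->refcount, 1);  /* pin the entry across the unlock */
        pthread_mutex_unlock(&t->lock);     /* handler may sleep or re-register */
        a->handler(a, mad);
        atomic_fetch_sub(&a->refcount, 1);  /* kernel: deref_snoop_agent() */
        pthread_mutex_lock(&t->lock);       /* resume the scan under the lock */
    }
    pthread_mutex_unlock(&t->lock);
}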
756 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv); in handle_outgoing_dr_smp()
759 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, in handle_outgoing_dr_smp()
760 mad_agent_priv->qp_info->port_priv->port_num); in handle_outgoing_dr_smp()
906 queue_work(mad_agent_priv->qp_info->port_priv->wq, in handle_outgoing_dr_smp()
1144 struct ib_mad_qp_info *qp_info; in ib_send_mad() local
1153 qp_info = mad_send_wr->mad_agent_priv->qp_info; in ib_send_mad()
1155 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue; in ib_send_mad()
1180 spin_lock_irqsave(&qp_info->send_queue.lock, flags); in ib_send_mad()
1181 if (qp_info->send_queue.count < qp_info->send_queue.max_active) { in ib_send_mad()
1184 list = &qp_info->send_queue.list; in ib_send_mad()
1187 list = &qp_info->overflow_list; in ib_send_mad()
1191 qp_info->send_queue.count++; in ib_send_mad()
1194 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); in ib_send_mad()
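
ib_send_mad() (1144-1194) applies software flow control under send_queue.lock: while fewer than max_active sends are outstanding the WR is posted to the QP and tracked on send_queue.list, otherwise it is parked on overflow_list, and in both branches count is incremented (line 1191); the completion handler (2432-2465) later drains the overflow list as posted sends complete. A simplified model of the enqueue decision, where post_to_hw() and park() are placeholders for ib_post_send() and the overflow_list insertion:

#include <pthread.h>
#include <stdbool.h>

/* Placeholder for ib_post_send(); pretend it queues 'wr' on the hardware QP. */
static int post_to_hw(void *qp, void *wr) { (void)qp; (void)wr; return 0; }

struct mad_queue {
    pthread_mutex_t lock;
    int count;          /* sends posted plus sends parked on overflow */
    int max_active;     /* mirrors qp_info->send_queue.max_active */
};

/* Returns true if the WR reached hardware, false if it was parked. */
static bool send_mad(struct mad_queue *q, void *qp, void *wr,
                     void (*park)(void *wr))
{
    bool posted = false;
    int ret = 0;

    pthread_mutex_lock(&q->lock);
    if (q->count < q->max_active) {
        ret = post_to_hw(qp, wr);   /* kernel: ib_post_send(), line 1181 */
        posted = true;
    } else {
        park(wr);                   /* kernel: list_add_tail on overflow_list */
    }
    if (!ret)
        q->count++;                 /* line 1191: counted in either branch */
    pthread_mutex_unlock(&q->lock);
    return posted && !ret;
}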
1454 port_priv = agent_priv->qp_info->port_priv; in add_nonoui_reg_req()
1520 port_priv = agent_priv->qp_info->port_priv; in add_oui_reg_req()
1626 port_priv = agent_priv->qp_info->port_priv; in remove_mad_reg_req()
1794 const struct ib_mad_qp_info *qp_info, in validate_mad() argument
1798 u32 qp_num = qp_info->qp->qp_num; in validate_mad()
2019 const struct ib_mad_qp_info *qp_info, in handle_ib_smi() argument
2058 qp_info->qp->qp_num, in handle_ib_smi()
2104 struct ib_mad_qp_info *qp_info, in handle_opa_smi() argument
2146 qp_info->qp->qp_num, in handle_opa_smi()
2158 struct ib_mad_qp_info *qp_info, in handle_smi() argument
2169 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv, in handle_smi()
2172 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response); in handle_smi()
2178 struct ib_mad_qp_info *qp_info; in ib_mad_recv_done_handler() local
2190 qp_info = mad_list->mad_queue->qp_info; in ib_mad_recv_done_handler()
2193 opa = rdma_cap_opa_mad(qp_info->port_priv->device, in ib_mad_recv_done_handler()
2194 qp_info->port_priv->port_num); in ib_mad_recv_done_handler()
2219 if (atomic_read(&qp_info->snoop_count)) in ib_mad_recv_done_handler()
2220 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS); in ib_mad_recv_done_handler()
2223 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa)) in ib_mad_recv_done_handler()
2241 if (handle_smi(port_priv, qp_info, wc, port_num, recv, in ib_mad_recv_done_handler()
2268 qp_info->qp->qp_num, in ib_mad_recv_done_handler()
2287 qp_info->qp->qp_num, mad_size, opa); in ib_mad_recv_done_handler()
2293 ib_mad_post_receive_mads(qp_info, response); in ib_mad_recv_done_handler()
2296 ib_mad_post_receive_mads(qp_info, recv); in ib_mad_recv_done_handler()
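
Taken together, the ib_mad_recv_done_handler() hits (2178-2296) outline the receive path: recover qp_info from the completed WR's mad_list (2190), snoop the MAD only if snoop agents exist (2219-2220), validate the header against the receiving QP number (2223), route directed-route SMPs through handle_smi() (2241), and on every exit repost a buffer so the receive ring stays full (2293-2296). A skeleton of that control flow, with trivial stubs in place of the real helpers:

#include <stdbool.h>

struct qp_info { int snoop_count; };
struct mad     { int is_dr_smp; };

/* Stubs standing in for snoop_recv(), validate_mad(), handle_smi(),
 * and ib_mad_post_receive_mads(); the real logic is omitted. */
static void snoop(struct qp_info *q, struct mad *m)        { (void)q; (void)m; }
static bool valid(struct mad *m, struct qp_info *q)        { (void)m; (void)q; return true; }
static bool smi_consumed(struct qp_info *q, struct mad *m) { (void)q; (void)m; return false; }
static void repost(struct qp_info *q, struct mad *m)       { (void)q; (void)m; }

static void recv_done(struct qp_info *q, struct mad *recv, struct mad *response)
{
    if (q->snoop_count)              /* line 2219: skip the work when unused */
        snoop(q, recv);
    if (!valid(recv, q))             /* line 2223: malformed MAD, drop it */
        goto out;
    if (recv->is_dr_smp && smi_consumed(q, recv))
        goto out;                    /* line 2241: SMI layer handled the SMP */
    /* ...agent lookup and reply generation happen here... */
    repost(q, response);             /* line 2293: recycle the response buffer */
    return;
out:
    repost(q, recv);                 /* line 2296: recycle the original buffer */
}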
2317 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, in adjust_timeout()
2352 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, in wait_for_response()
2421 struct ib_mad_qp_info *qp_info; in ib_mad_send_done_handler() local
2432 qp_info = send_queue->qp_info; in ib_mad_send_done_handler()
2447 mad_list = container_of(qp_info->overflow_list.next, in ib_mad_send_done_handler()
2459 if (atomic_read(&qp_info->snoop_count)) in ib_mad_send_done_handler()
2460 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc, in ib_mad_send_done_handler()
2465 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr, in ib_mad_send_done_handler()
2477 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info) in mark_sends_for_retry() argument
2483 spin_lock_irqsave(&qp_info->send_queue.lock, flags); in mark_sends_for_retry()
2484 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) { in mark_sends_for_retry()
2490 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); in mark_sends_for_retry()
2497 struct ib_mad_qp_info *qp_info; in mad_error_handler() local
2503 qp_info = mad_list->mad_queue->qp_info; in mad_error_handler()
2504 if (mad_list->mad_queue == &qp_info->recv_queue) in mad_error_handler()
2523 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr, in mad_error_handler()
2537 ret = ib_modify_qp(qp_info->qp, attr, in mad_error_handler()
2545 mark_sends_for_retry(qp_info); in mad_error_handler()
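
mad_error_handler() (2497-2545) first checks whether the failed WR belongs to the receive queue (2504); for sends, a flushed WR whose retry flag is set is re-posted directly (2523), while anything else drives the QP back to a usable state with ib_modify_qp() (2537) and then calls mark_sends_for_retry() (2477-2490) to flag every WR still on the send list. A compact model of the retry-marking walk, with a user-space mutex and singly linked list in place of the kernel primitives:

#include <pthread.h>

struct wr {
    struct wr *next;
    int retry;    /* mirrors mad_send_wr->retry: completion re-posts it */
};

struct send_list {
    pthread_mutex_t lock;
    struct wr *head;
};

/* Every WR still queued when the QP failed gets one more attempt. */
static void mark_sends_for_retry(struct send_list *q)
{
    pthread_mutex_lock(&q->lock);
    for (struct wr *w = q->head; w; w = w->next)
        w->retry = 1;
    pthread_mutex_unlock(&q->lock);
}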
2693 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, in local_completions()
2694 mad_agent_priv->qp_info->port_priv->port_num); in local_completions()
2741 if (atomic_read(&recv_mad_agent->qp_info->snoop_count)) in local_completions()
2742 snoop_recv(recv_mad_agent->qp_info, in local_completions()
2758 if (atomic_read(&mad_agent_priv->qp_info->snoop_count)) in local_completions()
2759 snoop_send(mad_agent_priv->qp_info, in local_completions()
2831 queue_delayed_work(mad_agent_priv->qp_info-> in timeout_sends()
2872 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, in ib_mad_post_receive_mads() argument
2880 struct ib_mad_queue *recv_queue = &qp_info->recv_queue; in ib_mad_post_receive_mads()
2883 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey; in ib_mad_post_receive_mads()
2896 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv), in ib_mad_post_receive_mads()
2899 dev_err(&qp_info->port_priv->device->dev, in ib_mad_post_receive_mads()
2906 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2910 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2924 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr); in ib_mad_post_receive_mads()
2930 ib_dma_unmap_single(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2935 dev_err(&qp_info->port_priv->device->dev, in ib_mad_post_receive_mads()
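
ib_mad_post_receive_mads() (2872-2935) refills the receive ring: each iteration allocates a private MAD buffer (2896), DMA-maps it (2906-2911), and posts it with ib_post_recv() (2924); on failure the buffer is unwound in reverse order, unmapped and freed, and the loop stops (2930-2935). A stripped-down model of that allocate/map/post/unwind loop, where dma_map(), post_recv(), and dma_unmap() are placeholders for the ib_dma_* and ib_post_recv() calls:

#include <stdlib.h>

/* Placeholders for ib_dma_map_single(), ib_post_recv(), ib_dma_unmap_single(). */
static int  dma_map(void *buf)   { (void)buf; return 0; }   /* 0 = ok */
static int  post_recv(void *buf) { (void)buf; return 0; }   /* 0 = ok */
static void dma_unmap(void *buf) { (void)buf; }

/* Post up to 'want' receive buffers; returns 0 or the first error. */
static int post_receive_mads(int want, size_t mad_size)
{
    for (int i = 0; i < want; i++) {
        void *buf = malloc(mad_size);   /* kernel: alloc_mad_private() */
        if (!buf)
            return -1;
        if (dma_map(buf)) {
            free(buf);
            return -1;
        }
        if (post_recv(buf)) {           /* kernel: ib_post_recv(), line 2924 */
            dma_unmap(buf);             /* unwind in reverse order, 2930-2935 */
            free(buf);
            return -1;
        }
    }
    return 0;
}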
2947 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info) in cleanup_recv_queue() argument
2953 if (!qp_info->qp) in cleanup_recv_queue()
2956 while (!list_empty(&qp_info->recv_queue.list)) { in cleanup_recv_queue()
2958 mad_list = list_entry(qp_info->recv_queue.list.next, in cleanup_recv_queue()
2969 ib_dma_unmap_single(qp_info->port_priv->device, in cleanup_recv_queue()
2976 qp_info->recv_queue.count = 0; in cleanup_recv_queue()
3002 qp = port_priv->qp_info[i].qp; in ib_mad_port_start()
3051 if (!port_priv->qp_info[i].qp) in ib_mad_port_start()
3054 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); in ib_mad_port_start()
3068 struct ib_mad_qp_info *qp_info = qp_context; in qp_event_handler() local
3071 dev_err(&qp_info->port_priv->device->dev, in qp_event_handler()
3073 event->event, qp_info->qp->qp_num); in qp_event_handler()
3076 static void init_mad_queue(struct ib_mad_qp_info *qp_info, in init_mad_queue() argument
3079 mad_queue->qp_info = qp_info; in init_mad_queue()
3086 struct ib_mad_qp_info *qp_info) in init_mad_qp() argument
3088 qp_info->port_priv = port_priv; in init_mad_qp()
3089 init_mad_queue(qp_info, &qp_info->send_queue); in init_mad_qp()
3090 init_mad_queue(qp_info, &qp_info->recv_queue); in init_mad_qp()
3091 INIT_LIST_HEAD(&qp_info->overflow_list); in init_mad_qp()
3092 spin_lock_init(&qp_info->snoop_lock); in init_mad_qp()
3093 qp_info->snoop_table = NULL; in init_mad_qp()
3094 qp_info->snoop_table_size = 0; in init_mad_qp()
3095 atomic_set(&qp_info->snoop_count, 0); in init_mad_qp()
3098 static int create_mad_qp(struct ib_mad_qp_info *qp_info, in create_mad_qp() argument
3105 qp_init_attr.send_cq = qp_info->port_priv->cq; in create_mad_qp()
3106 qp_init_attr.recv_cq = qp_info->port_priv->cq; in create_mad_qp()
3113 qp_init_attr.port_num = qp_info->port_priv->port_num; in create_mad_qp()
3114 qp_init_attr.qp_context = qp_info; in create_mad_qp()
3116 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); in create_mad_qp()
3117 if (IS_ERR(qp_info->qp)) { in create_mad_qp()
3118 dev_err(&qp_info->port_priv->device->dev, in create_mad_qp()
3121 ret = PTR_ERR(qp_info->qp); in create_mad_qp()
3125 qp_info->send_queue.max_active = mad_sendq_size; in create_mad_qp()
3126 qp_info->recv_queue.max_active = mad_recvq_size; in create_mad_qp()
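
create_mad_qp() (3098-3126) builds a struct ib_qp_init_attr so that both MAD QPs share the port's CQ, carry qp_info as the context later handed to qp_event_handler(), and size their software queues from the mad_sendq_size/mad_recvq_size module parameters. A hedged reconstruction from the matched lines, assuming kernel context; fields not visible above (signaling mode, SGE caps, the event handler) are omitted or marked as assumptions:

static int create_mad_qp(struct ib_mad_qp_info *qp_info,
                         enum ib_qp_type qp_type)
{
    struct ib_qp_init_attr qp_init_attr;

    memset(&qp_init_attr, 0, sizeof(qp_init_attr));
    qp_init_attr.send_cq    = qp_info->port_priv->cq;  /* line 3105 */
    qp_init_attr.recv_cq    = qp_info->port_priv->cq;  /* line 3106: shared CQ */
    qp_init_attr.qp_type    = qp_type;                 /* IB_QPT_SMI or IB_QPT_GSI */
    qp_init_attr.port_num   = qp_info->port_priv->port_num;  /* line 3113 */
    qp_init_attr.qp_context = qp_info;  /* line 3114: handed to qp_event_handler() */
    qp_init_attr.cap.max_send_wr = mad_sendq_size;     /* assumed from line 3125 */
    qp_init_attr.cap.max_recv_wr = mad_recvq_size;     /* assumed from line 3126 */

    qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); /* 3116 */
    if (IS_ERR(qp_info->qp))
        return PTR_ERR(qp_info->qp);                   /* lines 3117-3121 */

    qp_info->send_queue.max_active = mad_sendq_size;   /* line 3125 */
    qp_info->recv_queue.max_active = mad_recvq_size;   /* line 3126 */
    return 0;
}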
3133 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info) in destroy_mad_qp() argument
3135 if (!qp_info->qp) in destroy_mad_qp()
3138 ib_destroy_qp(qp_info->qp); in destroy_mad_qp()
3139 kfree(qp_info->snoop_table); in destroy_mad_qp()
3174 init_mad_qp(port_priv, &port_priv->qp_info[0]); in ib_mad_port_open()
3175 init_mad_qp(port_priv, &port_priv->qp_info[1]); in ib_mad_port_open()
3200 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); in ib_mad_port_open()
3204 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); in ib_mad_port_open()
3235 destroy_mad_qp(&port_priv->qp_info[1]); in ib_mad_port_open()
3237 destroy_mad_qp(&port_priv->qp_info[0]); in ib_mad_port_open()
3242 cleanup_recv_queue(&port_priv->qp_info[1]); in ib_mad_port_open()
3243 cleanup_recv_queue(&port_priv->qp_info[0]); in ib_mad_port_open()
3271 destroy_mad_qp(&port_priv->qp_info[1]); in ib_mad_port_close()
3272 destroy_mad_qp(&port_priv->qp_info[0]); in ib_mad_port_close()
3275 cleanup_recv_queue(&port_priv->qp_info[1]); in ib_mad_port_close()
3276 cleanup_recv_queue(&port_priv->qp_info[0]); in ib_mad_port_close()
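
Finally, ib_mad_port_open()'s error path (3235-3243) and ib_mad_port_close() (3271-3276) unwind in the same order: destroy the GSI QP, then the SMI QP, and only then drain the receive queues, so no completion can touch a buffer after cleanup_recv_queue() frees it. Schematically, with stubbed types mirroring the call order above:

struct qp_info_model { int qp_alive; };

static void destroy_qp(struct qp_info_model *q)  { q->qp_alive = 0; }
static void drain_recv(struct qp_info_model *q)  { (void)q; /* free buffers */ }

static void port_teardown(struct qp_info_model qp_info[2])
{
    destroy_qp(&qp_info[1]);    /* GSI QP first (lines 3235/3271) */
    destroy_qp(&qp_info[0]);    /* then the SMI QP */
    drain_recv(&qp_info[1]);    /* safe now: no more completions arrive */
    drain_recv(&qp_info[0]);
}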