Lines matching refs:qp_info. Each entry shows the source line number, the matching line, and the enclosing function ("argument" or "local" marks how qp_info is declared there). A reconstructed sketch of the structures involved follows the listing.
77 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
327 if (!port_priv->qp_info[qpn].qp) { in ib_register_mad_agent()
341 mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd, in ib_register_mad_agent()
357 mad_agent_priv->qp_info = &port_priv->qp_info[qpn]; in ib_register_mad_agent()
364 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp; in ib_register_mad_agent()
456 static int register_snoop_agent(struct ib_mad_qp_info *qp_info, in register_snoop_agent() argument
463 spin_lock_irqsave(&qp_info->snoop_lock, flags); in register_snoop_agent()
465 for (i = 0; i < qp_info->snoop_table_size; i++) in register_snoop_agent()
466 if (!qp_info->snoop_table[i]) in register_snoop_agent()
469 if (i == qp_info->snoop_table_size) { in register_snoop_agent()
471 new_snoop_table = krealloc(qp_info->snoop_table, in register_snoop_agent()
473 (qp_info->snoop_table_size + 1), in register_snoop_agent()
480 qp_info->snoop_table = new_snoop_table; in register_snoop_agent()
481 qp_info->snoop_table_size++; in register_snoop_agent()
483 qp_info->snoop_table[i] = mad_snoop_priv; in register_snoop_agent()
484 atomic_inc(&qp_info->snoop_count); in register_snoop_agent()
486 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in register_snoop_agent()
527 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn]; in ib_register_mad_snoop()
532 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp; in ib_register_mad_snoop()
537 &port_priv->qp_info[qpn], in ib_register_mad_snoop()
578 port_priv = mad_agent_priv->qp_info->port_priv; in unregister_mad_agent()
599 struct ib_mad_qp_info *qp_info; in unregister_mad_snoop() local
602 qp_info = mad_snoop_priv->qp_info; in unregister_mad_snoop()
603 spin_lock_irqsave(&qp_info->snoop_lock, flags); in unregister_mad_snoop()
604 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL; in unregister_mad_snoop()
605 atomic_dec(&qp_info->snoop_count); in unregister_mad_snoop()
606 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in unregister_mad_snoop()
651 static void snoop_send(struct ib_mad_qp_info *qp_info, in snoop_send() argument
660 spin_lock_irqsave(&qp_info->snoop_lock, flags); in snoop_send()
661 for (i = 0; i < qp_info->snoop_table_size; i++) { in snoop_send()
662 mad_snoop_priv = qp_info->snoop_table[i]; in snoop_send()
668 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in snoop_send()
672 spin_lock_irqsave(&qp_info->snoop_lock, flags); in snoop_send()
674 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in snoop_send()
677 static void snoop_recv(struct ib_mad_qp_info *qp_info, in snoop_recv() argument
685 spin_lock_irqsave(&qp_info->snoop_lock, flags); in snoop_recv()
686 for (i = 0; i < qp_info->snoop_table_size; i++) { in snoop_recv()
687 mad_snoop_priv = qp_info->snoop_table[i]; in snoop_recv()
693 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in snoop_recv()
697 spin_lock_irqsave(&qp_info->snoop_lock, flags); in snoop_recv()
699 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in snoop_recv()
843 queue_work(mad_agent_priv->qp_info->port_priv->wq, in handle_outgoing_dr_smp()
1062 struct ib_mad_qp_info *qp_info; in ib_send_mad() local
1071 qp_info = mad_send_wr->mad_agent_priv->qp_info; in ib_send_mad()
1073 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue; in ib_send_mad()
1098 spin_lock_irqsave(&qp_info->send_queue.lock, flags); in ib_send_mad()
1099 if (qp_info->send_queue.count < qp_info->send_queue.max_active) { in ib_send_mad()
1102 list = &qp_info->send_queue.list; in ib_send_mad()
1105 list = &qp_info->overflow_list; in ib_send_mad()
1109 qp_info->send_queue.count++; in ib_send_mad()
1112 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); in ib_send_mad()
1372 port_priv = agent_priv->qp_info->port_priv; in add_nonoui_reg_req()
1438 port_priv = agent_priv->qp_info->port_priv; in add_oui_reg_req()
1544 port_priv = agent_priv->qp_info->port_priv; in remove_mad_reg_req()
1950 struct ib_mad_qp_info *qp_info; in ib_mad_recv_done_handler() local
1959 qp_info = mad_list->mad_queue->qp_info; in ib_mad_recv_done_handler()
1978 if (atomic_read(&qp_info->snoop_count)) in ib_mad_recv_done_handler()
1979 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS); in ib_mad_recv_done_handler()
1982 if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num)) in ib_mad_recv_done_handler()
2031 qp_info->qp->qp_num); in ib_mad_recv_done_handler()
2053 qp_info->qp->qp_num); in ib_mad_recv_done_handler()
2070 port_priv->device, port_num, qp_info->qp->qp_num); in ib_mad_recv_done_handler()
2076 ib_mad_post_receive_mads(qp_info, response); in ib_mad_recv_done_handler()
2080 ib_mad_post_receive_mads(qp_info, recv); in ib_mad_recv_done_handler()
2101 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, in adjust_timeout()
2136 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, in wait_for_response()
2205 struct ib_mad_qp_info *qp_info; in ib_mad_send_done_handler() local
2216 qp_info = send_queue->qp_info; in ib_mad_send_done_handler()
2231 mad_list = container_of(qp_info->overflow_list.next, in ib_mad_send_done_handler()
2243 if (atomic_read(&qp_info->snoop_count)) in ib_mad_send_done_handler()
2244 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc, in ib_mad_send_done_handler()
2249 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr, in ib_mad_send_done_handler()
2261 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info) in mark_sends_for_retry() argument
2267 spin_lock_irqsave(&qp_info->send_queue.lock, flags); in mark_sends_for_retry()
2268 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) { in mark_sends_for_retry()
2274 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); in mark_sends_for_retry()
2281 struct ib_mad_qp_info *qp_info; in mad_error_handler() local
2287 qp_info = mad_list->mad_queue->qp_info; in mad_error_handler()
2288 if (mad_list->mad_queue == &qp_info->recv_queue) in mad_error_handler()
2307 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr, in mad_error_handler()
2321 ret = ib_modify_qp(qp_info->qp, attr, in mad_error_handler()
2329 mark_sends_for_retry(qp_info); in mad_error_handler()
2510 if (atomic_read(&recv_mad_agent->qp_info->snoop_count)) in local_completions()
2511 snoop_recv(recv_mad_agent->qp_info, in local_completions()
2527 if (atomic_read(&mad_agent_priv->qp_info->snoop_count)) in local_completions()
2528 snoop_send(mad_agent_priv->qp_info, in local_completions()
2600 queue_delayed_work(mad_agent_priv->qp_info-> in timeout_sends()
2641 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, in ib_mad_post_receive_mads() argument
2649 struct ib_mad_queue *recv_queue = &qp_info->recv_queue; in ib_mad_post_receive_mads()
2653 sg_list.lkey = (*qp_info->port_priv->mr).lkey; in ib_mad_post_receive_mads()
2668 dev_err(&qp_info->port_priv->device->dev, in ib_mad_post_receive_mads()
2674 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2679 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2693 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr); in ib_mad_post_receive_mads()
2699 ib_dma_unmap_single(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2705 dev_err(&qp_info->port_priv->device->dev, in ib_mad_post_receive_mads()
2717 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info) in cleanup_recv_queue() argument
2723 if (!qp_info->qp) in cleanup_recv_queue()
2726 while (!list_empty(&qp_info->recv_queue.list)) { in cleanup_recv_queue()
2728 mad_list = list_entry(qp_info->recv_queue.list.next, in cleanup_recv_queue()
2739 ib_dma_unmap_single(qp_info->port_priv->device, in cleanup_recv_queue()
2747 qp_info->recv_queue.count = 0; in cleanup_recv_queue()
2773 qp = port_priv->qp_info[i].qp; in ib_mad_port_start()
2822 if (!port_priv->qp_info[i].qp) in ib_mad_port_start()
2825 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); in ib_mad_port_start()
2839 struct ib_mad_qp_info *qp_info = qp_context; in qp_event_handler() local
2842 dev_err(&qp_info->port_priv->device->dev, in qp_event_handler()
2844 event->event, qp_info->qp->qp_num); in qp_event_handler()
2847 static void init_mad_queue(struct ib_mad_qp_info *qp_info, in init_mad_queue() argument
2850 mad_queue->qp_info = qp_info; in init_mad_queue()
2857 struct ib_mad_qp_info *qp_info) in init_mad_qp() argument
2859 qp_info->port_priv = port_priv; in init_mad_qp()
2860 init_mad_queue(qp_info, &qp_info->send_queue); in init_mad_qp()
2861 init_mad_queue(qp_info, &qp_info->recv_queue); in init_mad_qp()
2862 INIT_LIST_HEAD(&qp_info->overflow_list); in init_mad_qp()
2863 spin_lock_init(&qp_info->snoop_lock); in init_mad_qp()
2864 qp_info->snoop_table = NULL; in init_mad_qp()
2865 qp_info->snoop_table_size = 0; in init_mad_qp()
2866 atomic_set(&qp_info->snoop_count, 0); in init_mad_qp()
2869 static int create_mad_qp(struct ib_mad_qp_info *qp_info, in create_mad_qp() argument
2876 qp_init_attr.send_cq = qp_info->port_priv->cq; in create_mad_qp()
2877 qp_init_attr.recv_cq = qp_info->port_priv->cq; in create_mad_qp()
2884 qp_init_attr.port_num = qp_info->port_priv->port_num; in create_mad_qp()
2885 qp_init_attr.qp_context = qp_info; in create_mad_qp()
2887 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); in create_mad_qp()
2888 if (IS_ERR(qp_info->qp)) { in create_mad_qp()
2889 dev_err(&qp_info->port_priv->device->dev, in create_mad_qp()
2892 ret = PTR_ERR(qp_info->qp); in create_mad_qp()
2896 qp_info->send_queue.max_active = mad_sendq_size; in create_mad_qp()
2897 qp_info->recv_queue.max_active = mad_recvq_size; in create_mad_qp()
2904 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info) in destroy_mad_qp() argument
2906 if (!qp_info->qp) in destroy_mad_qp()
2909 ib_destroy_qp(qp_info->qp); in destroy_mad_qp()
2910 kfree(qp_info->snoop_table); in destroy_mad_qp()
2937 init_mad_qp(port_priv, &port_priv->qp_info[0]); in ib_mad_port_open()
2938 init_mad_qp(port_priv, &port_priv->qp_info[1]); in ib_mad_port_open()
2969 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); in ib_mad_port_open()
2973 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); in ib_mad_port_open()
3004 destroy_mad_qp(&port_priv->qp_info[1]); in ib_mad_port_open()
3006 destroy_mad_qp(&port_priv->qp_info[0]); in ib_mad_port_open()
3013 cleanup_recv_queue(&port_priv->qp_info[1]); in ib_mad_port_open()
3014 cleanup_recv_queue(&port_priv->qp_info[0]); in ib_mad_port_open()
3042 destroy_mad_qp(&port_priv->qp_info[1]); in ib_mad_port_close()
3043 destroy_mad_qp(&port_priv->qp_info[0]); in ib_mad_port_close()
3047 cleanup_recv_queue(&port_priv->qp_info[1]); in ib_mad_port_close()
3048 cleanup_recv_queue(&port_priv->qp_info[0]); in ib_mad_port_close()
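
The call sites above touch every field of struct ib_mad_qp_info, so its shape can be reconstructed from the listing alone. The sketch below is exactly that: an inferred reconstruction, not a copy of the authoritative definition (which lives in drivers/infiniband/core/mad_priv.h). Field types and the helper struct ib_mad_queue layout are assumptions based on how the listed lines use them.

/*
 * Reconstruction sketch only: types are inferred from how the lines above
 * use each field (spin_lock_irqsave, atomic_read, list_for_each_entry, ...).
 * The authoritative definitions live in drivers/infiniband/core/mad_priv.h.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <rdma/ib_verbs.h>

struct ib_mad_port_private;	/* per-port state: device, pd, cq, mr, wq and the qp_info array */
struct ib_mad_snoop_private;	/* one registered snoop agent */
struct ib_mad_qp_info;

/* Shared shape of send_queue/recv_queue; each queue carries a back-pointer so a
 * completion handler can recover its qp_info (lines 1959, 2216, 2287). */
struct ib_mad_queue {
	spinlock_t		lock;		/* taken around count/list updates (line 1098) */
	struct list_head	list;		/* posted work requests */
	int			count;		/* compared against max_active (line 1099) */
	int			max_active;	/* mad_sendq_size/mad_recvq_size (lines 2896-2897) */
	struct ib_mad_qp_info	*qp_info;	/* set in init_mad_queue() (line 2850) */
};

/* One per MAD QP; port_priv->qp_info[0] serves SMI (QP0) and qp_info[1] serves
 * GSI (QP1) (lines 2937-2938 and 2969-2973). */
struct ib_mad_qp_info {
	struct ib_mad_port_private	*port_priv;	/* owning port (line 2859) */
	struct ib_qp			*qp;		/* created in create_mad_qp() (line 2887) */
	struct ib_mad_queue		send_queue;
	struct ib_mad_queue		recv_queue;
	struct list_head		overflow_list;	/* sends beyond max_active (line 1105) */
	spinlock_t			snoop_lock;	/* protects the snoop table */
	struct ib_mad_snoop_private	**snoop_table;	/* grown with krealloc() (line 471) */
	int				snoop_table_size;
	atomic_t			snoop_count;	/* read lock-free before snoop_send()/snoop_recv() */
};

Read against the listing, the lifecycle is: ib_mad_port_open() runs init_mad_qp() for qp_info[0] and qp_info[1], create_mad_qp() attaches the actual struct ib_qp, ib_mad_port_start() posts the initial receives via ib_mad_post_receive_mads(), and ib_mad_port_close() tears everything down through destroy_mad_qp() and cleanup_recv_queue().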