Lines matching refs:priv, an identifier cross-reference over the Linux IPoIB connected-mode driver (drivers/infiniband/ulp/ipoib/ipoib_cm.c). The number opening each fragment is its line in that file.

80 static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,  in ipoib_cm_dma_unmap_rx()  argument
85 ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE); in ipoib_cm_dma_unmap_rx()
88 ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE); in ipoib_cm_dma_unmap_rx()
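
The three fragments above cover the whole helper. A minimal reconstruction, assuming the mapping[] layout they imply (linear head buffer at index 0, one full page per fragment after it):

static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
                                  u64 mapping[IPOIB_CM_RX_SG])
{
        int i;

        /* entry 0 maps the linear head buffer ... */
        ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE,
                            DMA_FROM_DEVICE);

        /* ... entries 1..frags map the page fragments */
        for (i = 0; i < frags; ++i)
                ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE,
                                  DMA_FROM_DEVICE);
}
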
93 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_post_receive_srq() local
97 priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV; in ipoib_cm_post_receive_srq()
99 for (i = 0; i < priv->cm.num_frags; ++i) in ipoib_cm_post_receive_srq()
100 priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i]; in ipoib_cm_post_receive_srq()
102 ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr); in ipoib_cm_post_receive_srq()
104 ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret); in ipoib_cm_post_receive_srq()
105 ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1, in ipoib_cm_post_receive_srq()
106 priv->cm.srq_ring[id].mapping); in ipoib_cm_post_receive_srq()
107 dev_kfree_skb_any(priv->cm.srq_ring[id].skb); in ipoib_cm_post_receive_srq()
108 priv->cm.srq_ring[id].skb = NULL; in ipoib_cm_post_receive_srq()
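
Stitched together, the matched lines give nearly the whole function; only the error-path guard and the final return are filled in here, as assumptions, from the usual shape of a post helper:

static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_recv_wr *bad_wr;
        int i, ret;

        priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

        /* point the prebuilt sge list at this ring entry's buffers */
        for (i = 0; i < priv->cm.num_frags; ++i)
                priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];

        ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
        if (unlikely(ret)) {
                ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
                ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
                                      priv->cm.srq_ring[id].mapping);
                dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
                priv->cm.srq_ring[id].skb = NULL;
        }

        return ret;
}
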
119 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_post_receive_nonsrq() local
130 ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret); in ipoib_cm_post_receive_nonsrq()
131 ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1, in ipoib_cm_post_receive_nonsrq()
146 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_alloc_rx_skb() local
160 mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE, in ipoib_cm_alloc_rx_skb()
162 if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) { in ipoib_cm_alloc_rx_skb()
174 mapping[i + 1] = ib_dma_map_page(priv->ca, page, in ipoib_cm_alloc_rx_skb()
176 if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1]))) in ipoib_cm_alloc_rx_skb()
185 ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE); in ipoib_cm_alloc_rx_skb()
188 ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE); in ipoib_cm_alloc_rx_skb()
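
The allocation side pairs with the unmap helper: map the linear head first, then one page per fragment, and on a partial failure unwind everything mapped so far. A sketch under those assumptions (the 12-byte reserve aligns the IP header, since IPoIB prepends a 4-byte encapsulation header):

static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
                                             struct ipoib_cm_rx_buf *rx_ring,
                                             int id, int frags,
                                             u64 mapping[IPOIB_CM_RX_SG],
                                             gfp_t gfp)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct sk_buff *skb;
        int i;

        skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
        if (unlikely(!skb))
                return NULL;

        /* IPoIB adds a 4-byte header; 12 bytes of headroom align the
         * IP header that follows it to a 16-byte boundary. */
        skb_reserve(skb, 12);

        mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
                                       DMA_FROM_DEVICE);
        if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
                dev_kfree_skb_any(skb);
                return NULL;
        }

        for (i = 0; i < frags; i++) {
                struct page *page = alloc_page(gfp);

                if (!page)
                        goto partial_error;
                skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);

                mapping[i + 1] = ib_dma_map_page(priv->ca, page,
                                                 0, PAGE_SIZE, DMA_FROM_DEVICE);
                if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
                        goto partial_error;
        }

        rx_ring[id].skb = skb;
        return skb;

partial_error:
        ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE,
                            DMA_FROM_DEVICE);

        /* in both failure modes, mapping[1..i] hold valid page mappings */
        for (; i > 0; --i)
                ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE,
                                  DMA_FROM_DEVICE);

        dev_kfree_skb_any(skb);
        return NULL;
}
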
197 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_free_rx_ring() local
202 ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1, in ipoib_cm_free_rx_ring()
210 static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv) in ipoib_cm_start_rx_drain() argument
217 if (list_empty(&priv->cm.rx_flush_list) || in ipoib_cm_start_rx_drain()
218 !list_empty(&priv->cm.rx_drain_list)) in ipoib_cm_start_rx_drain()
225 p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list); in ipoib_cm_start_rx_drain()
227 ipoib_warn(priv, "failed to post drain wr\n"); in ipoib_cm_start_rx_drain()
229 list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list); in ipoib_cm_start_rx_drain()
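
What the drain machinery is doing, sketched: QPs on rx_flush_list are already in the error state, so one posted send WR completes immediately as a flush error on the shared recv_cq, and seeing its reserved wr_id means every receive posted before it has drained. Here ipoib_cm_rx_drain_wr is assumed to be a file-scope ib_send_wr carrying that reserved wr_id:

static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
        struct ib_send_wr *bad_wr;
        struct ipoib_cm_rx *p;

        /* Only one CQ slot is reserved for the drain WR, so drain one
         * batch at a time: rx_flush_list waits, rx_drain_list is in flight. */
        if (list_empty(&priv->cm.rx_flush_list) ||
            !list_empty(&priv->cm.rx_drain_list))
                return;

        p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
        if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
                ipoib_warn(priv, "failed to post drain wr\n");

        list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}
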
235 struct ipoib_dev_priv *priv = netdev_priv(p->dev); in ipoib_cm_rx_event_handler() local
241 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_rx_event_handler()
242 list_move(&p->list, &priv->cm.rx_flush_list); in ipoib_cm_rx_event_handler()
244 ipoib_cm_start_rx_drain(priv); in ipoib_cm_rx_event_handler()
245 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_rx_event_handler()
251 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_create_rx_qp() local
254 .send_cq = priv->recv_cq, /* For drain WR */ in ipoib_cm_create_rx_qp()
255 .recv_cq = priv->recv_cq, in ipoib_cm_create_rx_qp()
256 .srq = priv->cm.srq, in ipoib_cm_create_rx_qp()
269 return ib_create_qp(priv->pd, &attr); in ipoib_cm_create_rx_qp()
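
A plausible reconstruction of the init-attr block this return belongs to: a passive RC QP that points both CQ hooks at the device's receive CQ, with a one-slot send queue that exists only to carry the drain WR. The non-SRQ branch is an assumption modeled on the per-connection rx rings seen elsewhere in this listing:

static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
                                           struct ipoib_cm_rx *p)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_init_attr attr = {
                .event_handler = ipoib_cm_rx_event_handler,
                .send_cq = priv->recv_cq, /* For drain WR */
                .recv_cq = priv->recv_cq,
                .srq = priv->cm.srq,
                .cap.max_send_wr = 1,     /* For drain WR */
                .cap.max_send_sge = 1,
                .sq_sig_type = IB_SIGNAL_ALL_WR,
                .qp_type = IB_QPT_RC,
                .qp_context = p,
        };

        if (!ipoib_cm_has_srq(dev)) {
                /* no SRQ: each connection owns a private receive ring */
                attr.cap.max_recv_wr  = ipoib_recvq_size;
                attr.cap.max_recv_sge = IPOIB_CM_RX_SG;
        }

        return ib_create_qp(priv->pd, &attr);
}
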
276 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_modify_rx_qp() local
283 ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret); in ipoib_cm_modify_rx_qp()
288 ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret); in ipoib_cm_modify_rx_qp()
294 ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret); in ipoib_cm_modify_rx_qp()
300 ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret); in ipoib_cm_modify_rx_qp()
315 ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret); in ipoib_cm_modify_rx_qp()
320 ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret); in ipoib_cm_modify_rx_qp()
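
All six warnings come from one pattern repeated per state: ib_cm_init_qp_attr() asks the CM to fill in the attributes a transition requires, then ib_modify_qp() applies them. A hypothetical helper capturing one step (the driver inlines this sequence for INIT, RTR, and RTS rather than factoring it out):

/* hypothetical helper illustrating one state transition */
static int ipoib_cm_move_qp_to(struct ipoib_dev_priv *priv,
                               struct ib_cm_id *cm_id, struct ib_qp *qp,
                               enum ib_qp_state state)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        qp_attr.qp_state = state;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr: %d\n", ret);
                return ret;
        }

        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret)
                ipoib_warn(priv, "failed to modify QP: %d\n", ret);
        return ret;
}
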
331 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_init_rx_wr() local
334 for (i = 0; i < priv->cm.num_frags; ++i) in ipoib_cm_init_rx_wr()
335 sge[i].lkey = priv->mr->lkey; in ipoib_cm_init_rx_wr()
338 for (i = 1; i < priv->cm.num_frags; ++i) in ipoib_cm_init_rx_wr()
343 wr->num_sge = priv->cm.num_frags; in ipoib_cm_init_rx_wr()
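
Reconstructed in full, assuming the buffer layout from ipoib_cm_dma_unmap_rx() above (a head-sized first element, then page-sized fragments):

static void ipoib_cm_init_rx_wr(struct net_device *dev,
                                struct ib_recv_wr *wr,
                                struct ib_sge *sge)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < priv->cm.num_frags; ++i)
                sge[i].lkey = priv->mr->lkey;

        sge[0].length = IPOIB_CM_HEAD_SIZE;
        for (i = 1; i < priv->cm.num_frags; ++i)
                sge[i].length = PAGE_SIZE;

        wr->next    = NULL;
        wr->sg_list = sge;
        wr->num_sge = priv->cm.num_frags;
}
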
349 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_nonsrq_init_rx() local
360 priv->ca->name, ipoib_recvq_size); in ipoib_cm_nonsrq_init_rx()
372 spin_lock_irq(&priv->lock); in ipoib_cm_nonsrq_init_rx()
374 if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) { in ipoib_cm_nonsrq_init_rx()
375 spin_unlock_irq(&priv->lock); in ipoib_cm_nonsrq_init_rx()
380 ++priv->cm.nonsrq_conn_qp; in ipoib_cm_nonsrq_init_rx()
382 spin_unlock_irq(&priv->lock); in ipoib_cm_nonsrq_init_rx()
388 ipoib_warn(priv, "failed to allocate receive buffer %d\n", i); in ipoib_cm_nonsrq_init_rx()
394 ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq " in ipoib_cm_nonsrq_init_rx()
408 spin_lock_irq(&priv->lock); in ipoib_cm_nonsrq_init_rx()
409 --priv->cm.nonsrq_conn_qp; in ipoib_cm_nonsrq_init_rx()
410 spin_unlock_irq(&priv->lock); in ipoib_cm_nonsrq_init_rx()
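
The lock/unlock pairs in this function bracket an admission check: at most ipoib_max_conn_qp connections may hold a private (non-SRQ) receive ring. Just that step, with the error code and unwind label as assumptions:

        spin_lock_irq(&priv->lock);
        if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
                spin_unlock_irq(&priv->lock);
                ret = -EINVAL;          /* assumed error code */
                goto err_free;          /* hypothetical unwind label */
        }
        ++priv->cm.nonsrq_conn_qp;      /* undone on any later failure */
        spin_unlock_irq(&priv->lock);
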
423 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_send_rep() local
427 data.qpn = cpu_to_be32(priv->qp->qp_num); in ipoib_cm_send_rep()
443 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_req_handler() local
448 ipoib_dbg(priv, "REQ arrived\n"); in ipoib_cm_req_handler()
476 spin_lock_irq(&priv->lock); in ipoib_cm_req_handler()
477 queue_delayed_work(priv->wq, in ipoib_cm_req_handler()
478 &priv->cm.stale_task, IPOIB_CM_RX_DELAY); in ipoib_cm_req_handler()
483 list_move(&p->list, &priv->cm.passive_ids); in ipoib_cm_req_handler()
484 spin_unlock_irq(&priv->lock); in ipoib_cm_req_handler()
488 ipoib_warn(priv, "failed to send REP: %d\n", ret); in ipoib_cm_req_handler()
490 ipoib_warn(priv, "unable to move qp to error state\n"); in ipoib_cm_req_handler()
505 struct ipoib_dev_priv *priv; in ipoib_cm_rx_handler() local
516 priv = netdev_priv(p->dev); in ipoib_cm_rx_handler()
518 ipoib_warn(priv, "unable to move qp to error state\n"); in ipoib_cm_rx_handler()
560 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_handle_rx_wc() local
571 ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n", in ipoib_cm_handle_rx_wc()
576 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_handle_rx_wc()
577 list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list); in ipoib_cm_handle_rx_wc()
578 ipoib_cm_start_rx_drain(priv); in ipoib_cm_handle_rx_wc()
579 queue_work(priv->wq, &priv->cm.rx_reap_task); in ipoib_cm_handle_rx_wc()
580 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_handle_rx_wc()
582 ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n", in ipoib_cm_handle_rx_wc()
590 rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring; in ipoib_cm_handle_rx_wc()
595 ipoib_dbg(priv, "cm recv error " in ipoib_cm_handle_rx_wc()
603 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_handle_rx_wc()
604 list_move(&p->list, &priv->cm.rx_reap_list); in ipoib_cm_handle_rx_wc()
605 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_handle_rx_wc()
606 queue_work(priv->wq, &priv->cm.rx_reap_task); in ipoib_cm_handle_rx_wc()
614 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_handle_rx_wc()
619 list_move(&p->list, &priv->cm.passive_ids); in ipoib_cm_handle_rx_wc()
620 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_handle_rx_wc()
630 ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0], in ipoib_cm_handle_rx_wc()
633 ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0], in ipoib_cm_handle_rx_wc()
651 ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id); in ipoib_cm_handle_rx_wc()
656 ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping); in ipoib_cm_handle_rx_wc()
659 ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n", in ipoib_cm_handle_rx_wc()
680 ipoib_warn(priv, "ipoib_cm_post_receive_srq failed " in ipoib_cm_handle_rx_wc()
684 &priv->cm.rx_wr, in ipoib_cm_handle_rx_wc()
685 priv->cm.rx_sge, in ipoib_cm_handle_rx_wc()
688 ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed " in ipoib_cm_handle_rx_wc()
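
The sync pair at lines 630/633 brackets a copybreak: packets under a threshold are copied into a small fresh skb so the large ring buffer never leaves the ring. A sketch using the handler's locals (wc, rx_ring, wr_id, skb); IPOIB_CM_COPYBREAK names the assumed threshold:

        if (wc->byte_len < IPOIB_CM_COPYBREAK) {
                struct sk_buff *small_skb = dev_alloc_skb(wc->byte_len + 12);

                if (small_skb) {
                        skb_reserve(small_skb, 12);
                        /* take the buffer back from the device, copy, return it */
                        ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
                                                   wc->byte_len, DMA_FROM_DEVICE);
                        skb_copy_from_linear_data(rx_ring[wr_id].skb,
                                                  small_skb->data, wc->byte_len);
                        ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
                                                      wc->byte_len, DMA_FROM_DEVICE);
                        skb_put(small_skb, wc->byte_len);
                        skb = small_skb;
                        /* hand-off to the netif receive path elided */
                }
        }
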
694 static inline int post_send(struct ipoib_dev_priv *priv, in post_send() argument
701 priv->tx_sge[0].addr = addr; in post_send()
702 priv->tx_sge[0].length = len; in post_send()
704 priv->tx_wr.num_sge = 1; in post_send()
705 priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM; in post_send()
707 return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr); in post_send()
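
The helper is fully recoverable from the fragments; only the bad_wr declaration is filled in:

static inline int post_send(struct ipoib_dev_priv *priv,
                            struct ipoib_cm_tx *tx,
                            unsigned int wr_id,
                            u64 addr, int len)
{
        struct ib_send_wr *bad_wr;

        priv->tx_sge[0].addr   = addr;
        priv->tx_sge[0].length = len;

        priv->tx_wr.num_sge = 1;
        priv->tx_wr.wr_id   = wr_id | IPOIB_OP_CM;

        return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
}
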
712 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_send() local
718 ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n", in ipoib_cm_send()
726 ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n", in ipoib_cm_send()
738 addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE); in ipoib_cm_send()
739 if (unlikely(ib_dma_mapping_error(priv->ca, addr))) { in ipoib_cm_send()
750 rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), in ipoib_cm_send()
753 ipoib_warn(priv, "post_send failed, error %d\n", rc); in ipoib_cm_send()
755 ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE); in ipoib_cm_send()
761 if (++priv->tx_outstanding == ipoib_sendq_size) { in ipoib_cm_send()
762 ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n", in ipoib_cm_send()
765 rc = ib_req_notify_cq(priv->send_cq, in ipoib_cm_send()
768 ipoib_warn(priv, "request notify on send CQ failed\n"); in ipoib_cm_send()
770 ipoib_send_comp_handler(priv->send_cq, dev); in ipoib_cm_send()
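
The stop side of the flow control at lines 761-770, sketched: when outstanding sends fill the ring, stop the net queue and re-arm the send CQ; with IB_CQ_REPORT_MISSED_EVENTS a positive return says completions already slipped past the arm, so the completion handler is called directly to avoid stalling the queue:

        if (++priv->tx_outstanding == ipoib_sendq_size) {
                ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
                          tx->qp->qp_num);
                netif_stop_queue(dev);
                rc = ib_req_notify_cq(priv->send_cq,
                                      IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
                if (rc < 0)
                        ipoib_warn(priv, "request notify on send CQ failed\n");
                else if (rc)
                        ipoib_send_comp_handler(priv->send_cq, dev);
        }
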
777 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_handle_tx_wc() local
783 ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n", in ipoib_cm_handle_tx_wc()
787 ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n", in ipoib_cm_handle_tx_wc()
794 ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE); in ipoib_cm_handle_tx_wc()
805 if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) && in ipoib_cm_handle_tx_wc()
807 test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) in ipoib_cm_handle_tx_wc()
814 ipoib_dbg(priv, "failed cm send event " in ipoib_cm_handle_tx_wc()
818 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_handle_tx_wc()
829 list_move(&tx->list, &priv->cm.reap_list); in ipoib_cm_handle_tx_wc()
830 queue_work(priv->wq, &priv->cm.reap_task); in ipoib_cm_handle_tx_wc()
835 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_handle_tx_wc()
843 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_dev_open() local
849 priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev); in ipoib_cm_dev_open()
850 if (IS_ERR(priv->cm.id)) { in ipoib_cm_dev_open()
851 printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name); in ipoib_cm_dev_open()
852 ret = PTR_ERR(priv->cm.id); in ipoib_cm_dev_open()
856 ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num), in ipoib_cm_dev_open()
859 printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name, in ipoib_cm_dev_open()
860 IPOIB_CM_IETF_ID | priv->qp->qp_num); in ipoib_cm_dev_open()
867 ib_destroy_cm_id(priv->cm.id); in ipoib_cm_dev_open()
869 priv->cm.id = NULL; in ipoib_cm_dev_open()
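
Assembled from the fragments, with the error-unwind labels assumed; note that the service id encodes the UD QPN, so the passive side can tie incoming REQs back to the datagram interface. The trailing ib_cm_listen() arguments (a zero service mask here) vary across kernel versions:

int ipoib_cm_dev_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int ret;

        if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
                return 0;

        priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
        if (IS_ERR(priv->cm.id)) {
                printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
                ret = PTR_ERR(priv->cm.id);
                goto err_cm;
        }

        ret = ib_cm_listen(priv->cm.id,
                           cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
                           0 /* service mask; arity varies by kernel version */);
        if (ret) {
                printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n",
                       priv->ca->name, IPOIB_CM_IETF_ID | priv->qp->qp_num);
                goto err_listen;
        }

        return 0;

err_listen:
        ib_destroy_cm_id(priv->cm.id);
err_cm:
        priv->cm.id = NULL;
        return ret;
}
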
875 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_free_rx_reap_list() local
879 spin_lock_irq(&priv->lock); in ipoib_cm_free_rx_reap_list()
880 list_splice_init(&priv->cm.rx_reap_list, &list); in ipoib_cm_free_rx_reap_list()
881 spin_unlock_irq(&priv->lock); in ipoib_cm_free_rx_reap_list()
887 ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring); in ipoib_cm_free_rx_reap_list()
888 spin_lock_irq(&priv->lock); in ipoib_cm_free_rx_reap_list()
889 --priv->cm.nonsrq_conn_qp; in ipoib_cm_free_rx_reap_list()
890 spin_unlock_irq(&priv->lock); in ipoib_cm_free_rx_reap_list()
898 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_dev_stop() local
903 if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id) in ipoib_cm_dev_stop()
906 ib_destroy_cm_id(priv->cm.id); in ipoib_cm_dev_stop()
907 priv->cm.id = NULL; in ipoib_cm_dev_stop()
909 spin_lock_irq(&priv->lock); in ipoib_cm_dev_stop()
910 while (!list_empty(&priv->cm.passive_ids)) { in ipoib_cm_dev_stop()
911 p = list_entry(priv->cm.passive_ids.next, typeof(*p), list); in ipoib_cm_dev_stop()
912 list_move(&p->list, &priv->cm.rx_error_list); in ipoib_cm_dev_stop()
914 spin_unlock_irq(&priv->lock); in ipoib_cm_dev_stop()
917 ipoib_warn(priv, "unable to move qp to error state: %d\n", ret); in ipoib_cm_dev_stop()
918 spin_lock_irq(&priv->lock); in ipoib_cm_dev_stop()
924 while (!list_empty(&priv->cm.rx_error_list) || in ipoib_cm_dev_stop()
925 !list_empty(&priv->cm.rx_flush_list) || in ipoib_cm_dev_stop()
926 !list_empty(&priv->cm.rx_drain_list)) { in ipoib_cm_dev_stop()
928 ipoib_warn(priv, "RX drain timing out\n"); in ipoib_cm_dev_stop()
933 list_splice_init(&priv->cm.rx_flush_list, in ipoib_cm_dev_stop()
934 &priv->cm.rx_reap_list); in ipoib_cm_dev_stop()
935 list_splice_init(&priv->cm.rx_error_list, in ipoib_cm_dev_stop()
936 &priv->cm.rx_reap_list); in ipoib_cm_dev_stop()
937 list_splice_init(&priv->cm.rx_drain_list, in ipoib_cm_dev_stop()
938 &priv->cm.rx_reap_list); in ipoib_cm_dev_stop()
941 spin_unlock_irq(&priv->lock); in ipoib_cm_dev_stop()
944 spin_lock_irq(&priv->lock); in ipoib_cm_dev_stop()
947 spin_unlock_irq(&priv->lock); in ipoib_cm_dev_stop()
951 cancel_delayed_work(&priv->cm.stale_task); in ipoib_cm_dev_stop()
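
The wait loop these splices belong to, sketched whole: poll with the lock dropped each pass, draining the CQ by hand, and after a bound give up and shunt every list straight to the reap list. The 5-second bound and the msleep()/ipoib_drain_cq() cadence are assumptions about details the fragments do not show:

        /* inside ipoib_cm_dev_stop(), called with priv->lock held */
        unsigned long begin = jiffies;

        while (!list_empty(&priv->cm.rx_error_list) ||
               !list_empty(&priv->cm.rx_flush_list) ||
               !list_empty(&priv->cm.rx_drain_list)) {
                if (time_after(jiffies, begin + 5 * HZ)) {
                        ipoib_warn(priv, "RX drain timing out\n");
                        /* assume the HW is wedged and just free everything */
                        list_splice_init(&priv->cm.rx_flush_list,
                                         &priv->cm.rx_reap_list);
                        list_splice_init(&priv->cm.rx_error_list,
                                         &priv->cm.rx_reap_list);
                        list_splice_init(&priv->cm.rx_drain_list,
                                         &priv->cm.rx_reap_list);
                        break;
                }
                spin_unlock_irq(&priv->lock);
                msleep(1);
                ipoib_drain_cq(dev);
                spin_lock_irq(&priv->lock);
        }
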
957 struct ipoib_dev_priv *priv = netdev_priv(p->dev); in ipoib_cm_rep_handler() local
967 ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n", in ipoib_cm_rep_handler()
975 ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret); in ipoib_cm_rep_handler()
982 ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret); in ipoib_cm_rep_handler()
989 ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret); in ipoib_cm_rep_handler()
994 ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret); in ipoib_cm_rep_handler()
1000 spin_lock_irq(&priv->lock); in ipoib_cm_rep_handler()
1005 spin_unlock_irq(&priv->lock); in ipoib_cm_rep_handler()
1010 ipoib_warn(priv, "dev_queue_xmit failed " in ipoib_cm_rep_handler()
1016 ipoib_warn(priv, "failed to send RTU: %d\n", ret); in ipoib_cm_rep_handler()
1024 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_create_tx_qp() local
1026 .send_cq = priv->recv_cq, in ipoib_cm_create_tx_qp()
1027 .recv_cq = priv->recv_cq, in ipoib_cm_create_tx_qp()
1028 .srq = priv->cm.srq, in ipoib_cm_create_tx_qp()
1039 tx_qp = ib_create_qp(priv->pd, &attr); in ipoib_cm_create_tx_qp()
1041 ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n", in ipoib_cm_create_tx_qp()
1042 priv->ca->name); in ipoib_cm_create_tx_qp()
1044 tx_qp = ib_create_qp(priv->pd, &attr); in ipoib_cm_create_tx_qp()
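
Reconstruction of the fallback these lines show: the QP is first requested with IB_QP_CREATE_USE_GFP_NOIO, so that allocation cannot recurse into I/O when IPoIB is itself the block-storage transport; a device that rejects the flag returns -EINVAL and the call is retried without it:

static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev,
                                           struct ipoib_cm_tx *tx)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_init_attr attr = {
                .send_cq         = priv->recv_cq,
                .recv_cq         = priv->recv_cq,
                .srq             = priv->cm.srq,
                .cap.max_send_wr = ipoib_sendq_size,
                .cap.max_send_sge = 1,
                .sq_sig_type     = IB_SIGNAL_ALL_WR,
                .qp_type         = IB_QPT_RC,
                .qp_context      = tx,
                .create_flags    = IB_QP_CREATE_USE_GFP_NOIO
        };
        struct ib_qp *tx_qp;

        tx_qp = ib_create_qp(priv->pd, &attr);
        if (PTR_ERR(tx_qp) == -EINVAL) {
                ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n",
                           priv->ca->name);
                attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
                tx_qp = ib_create_qp(priv->pd, &attr);
        }
        return tx_qp;
}
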
1054 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_send_req() local
1058 data.qpn = cpu_to_be32(priv->qp->qp_num); in ipoib_cm_send_req()
1089 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_modify_tx_init() local
1092 ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index); in ipoib_cm_modify_tx_init()
1094 ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret); in ipoib_cm_modify_tx_init()
1100 qp_attr.port_num = priv->port; in ipoib_cm_modify_tx_init()
1105 ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret); in ipoib_cm_modify_tx_init()
1114 struct ipoib_dev_priv *priv = netdev_priv(p->dev); in ipoib_cm_tx_init() local
1120 ipoib_warn(priv, "failed to allocate tx ring\n"); in ipoib_cm_tx_init()
1129 ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret); in ipoib_cm_tx_init()
1133 p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p); in ipoib_cm_tx_init()
1136 ipoib_warn(priv, "failed to create tx cm id: %d\n", ret); in ipoib_cm_tx_init()
1142 ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret); in ipoib_cm_tx_init()
1148 ipoib_warn(priv, "failed to send cm req: %d\n", ret); in ipoib_cm_tx_init()
1152 ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n", in ipoib_cm_tx_init()
1172 struct ipoib_dev_priv *priv = netdev_priv(p->dev); in ipoib_cm_tx_destroy() local
1176 ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n", in ipoib_cm_tx_destroy()
1187 ipoib_warn(priv, "timing out; %d sends not completed\n", in ipoib_cm_tx_destroy()
1200 ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, in ipoib_cm_tx_destroy()
1205 if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) && in ipoib_cm_tx_destroy()
1207 test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) in ipoib_cm_tx_destroy()
1223 struct ipoib_dev_priv *priv = netdev_priv(tx->dev); in ipoib_cm_tx_handler() local
1224 struct net_device *dev = priv->dev; in ipoib_cm_tx_handler()
1231 ipoib_dbg(priv, "DREQ received.\n"); in ipoib_cm_tx_handler()
1235 ipoib_dbg(priv, "REP received.\n"); in ipoib_cm_tx_handler()
1244 ipoib_dbg(priv, "CM error %d.\n", event->event); in ipoib_cm_tx_handler()
1246 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_tx_handler()
1257 list_move(&tx->list, &priv->cm.reap_list); in ipoib_cm_tx_handler()
1258 queue_work(priv->wq, &priv->cm.reap_task); in ipoib_cm_tx_handler()
1261 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_tx_handler()
1274 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_create_tx() local
1285 list_add(&tx->list, &priv->cm.start_list); in ipoib_cm_create_tx()
1287 queue_work(priv->wq, &priv->cm.start_task); in ipoib_cm_create_tx()
1293 struct ipoib_dev_priv *priv = netdev_priv(tx->dev); in ipoib_cm_destroy_tx() local
1296 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_destroy_tx()
1297 list_move(&tx->list, &priv->cm.reap_list); in ipoib_cm_destroy_tx()
1298 queue_work(priv->wq, &priv->cm.reap_task); in ipoib_cm_destroy_tx()
1299 ipoib_dbg(priv, "Reap connection for gid %pI6\n", in ipoib_cm_destroy_tx()
1302 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_destroy_tx()
1308 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, in ipoib_cm_tx_start() local
1310 struct net_device *dev = priv->dev; in ipoib_cm_tx_start()
1320 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_tx_start()
1322 while (!list_empty(&priv->cm.start_list)) { in ipoib_cm_tx_start()
1323 p = list_entry(priv->cm.start_list.next, typeof(*p), list); in ipoib_cm_tx_start()
1329 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_tx_start()
1335 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_tx_start()
1348 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_tx_start()
1354 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, in ipoib_cm_tx_reap() local
1356 struct net_device *dev = priv->dev; in ipoib_cm_tx_reap()
1361 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_tx_reap()
1363 while (!list_empty(&priv->cm.reap_list)) { in ipoib_cm_tx_reap()
1364 p = list_entry(priv->cm.reap_list.next, typeof(*p), list); in ipoib_cm_tx_reap()
1366 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_tx_reap()
1370 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_tx_reap()
1373 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_tx_reap()
1379 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, in ipoib_cm_skb_reap() local
1381 struct net_device *dev = priv->dev; in ipoib_cm_skb_reap()
1384 unsigned mtu = priv->mcast_mtu; in ipoib_cm_skb_reap()
1387 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_skb_reap()
1389 while ((skb = skb_dequeue(&priv->cm.skb_queue))) { in ipoib_cm_skb_reap()
1390 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_skb_reap()
1402 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_skb_reap()
1405 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_skb_reap()
1412 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_skb_too_long() local
1413 int e = skb_queue_empty(&priv->cm.skb_queue); in ipoib_cm_skb_too_long()
1418 skb_queue_tail(&priv->cm.skb_queue, skb); in ipoib_cm_skb_too_long()
1420 queue_work(priv->wq, &priv->cm.skb_task); in ipoib_cm_skb_too_long()
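
The queue-then-kick idiom above, isolated: emptiness is sampled before the enqueue, and the worker is scheduled only on the empty-to-non-empty transition, because a non-empty queue means the work item is already pending or running:

        int e = skb_queue_empty(&priv->cm.skb_queue);

        /* (path-MTU update on the skb's dst elided) */
        skb_queue_tail(&priv->cm.skb_queue, skb);
        if (e)
                queue_work(priv->wq, &priv->cm.skb_task);
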
1431 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, in ipoib_cm_stale_task() local
1436 spin_lock_irq(&priv->lock); in ipoib_cm_stale_task()
1437 while (!list_empty(&priv->cm.passive_ids)) { in ipoib_cm_stale_task()
1440 p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list); in ipoib_cm_stale_task()
1443 list_move(&p->list, &priv->cm.rx_error_list); in ipoib_cm_stale_task()
1445 spin_unlock_irq(&priv->lock); in ipoib_cm_stale_task()
1448 ipoib_warn(priv, "unable to move qp to error state: %d\n", ret); in ipoib_cm_stale_task()
1449 spin_lock_irq(&priv->lock); in ipoib_cm_stale_task()
1452 if (!list_empty(&priv->cm.passive_ids)) in ipoib_cm_stale_task()
1453 queue_delayed_work(priv->wq, in ipoib_cm_stale_task()
1454 &priv->cm.stale_task, IPOIB_CM_RX_DELAY); in ipoib_cm_stale_task()
1455 spin_unlock_irq(&priv->lock); in ipoib_cm_stale_task()
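
Assembled from the fragments: passive_ids is kept LRU-sorted, so the scan walks from the tail and stops at the first recently used entry; stale connections are pushed onto rx_error_list and their QPs moved to the error state with the lock dropped. p->jiffies, IPOIB_CM_RX_TIMEOUT, and the file-scope ipoib_cm_err_attr are assumptions consistent with the rest of the listing:

static void ipoib_cm_stale_task(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.stale_task.work);
        struct ipoib_cm_rx *p;
        int ret;

        spin_lock_irq(&priv->lock);
        while (!list_empty(&priv->cm.passive_ids)) {
                /* LRU order: oldest at the tail, stop at a fresh entry */
                p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
                if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
                        break;
                list_move(&p->list, &priv->cm.rx_error_list);
                spin_unlock_irq(&priv->lock);
                ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
                if (ret)
                        ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
                spin_lock_irq(&priv->lock);
        }

        if (!list_empty(&priv->cm.passive_ids))
                queue_delayed_work(priv->wq,
                                   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
        spin_unlock_irq(&priv->lock);
}
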
1462 struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d)); in show_mode() local
1464 if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags)) in show_mode()
1498 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_create_srq() local
1507 priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr); in ipoib_cm_create_srq()
1508 if (IS_ERR(priv->cm.srq)) { in ipoib_cm_create_srq()
1509 if (PTR_ERR(priv->cm.srq) != -ENOSYS) in ipoib_cm_create_srq()
1511 priv->ca->name, PTR_ERR(priv->cm.srq)); in ipoib_cm_create_srq()
1512 priv->cm.srq = NULL; in ipoib_cm_create_srq()
1516 priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring); in ipoib_cm_create_srq()
1517 if (!priv->cm.srq_ring) { in ipoib_cm_create_srq()
1519 priv->ca->name, ipoib_recvq_size); in ipoib_cm_create_srq()
1520 ib_destroy_srq(priv->cm.srq); in ipoib_cm_create_srq()
1521 priv->cm.srq = NULL; in ipoib_cm_create_srq()
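
Reconstructed; the srq_init_attr sizing is an assumption. -ENOSYS is deliberately quiet: it means the HCA has no SRQ support, and leaving priv->cm.srq NULL makes the driver fall back to per-connection receive rings:

static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_srq_init_attr srq_init_attr = {
                .attr = {
                        .max_wr  = ipoib_recvq_size,
                        .max_sge = max_sge
                }
        };

        priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
        if (IS_ERR(priv->cm.srq)) {
                if (PTR_ERR(priv->cm.srq) != -ENOSYS)
                        printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
                               priv->ca->name, PTR_ERR(priv->cm.srq));
                priv->cm.srq = NULL;
                return;
        }

        priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
        if (!priv->cm.srq_ring) {
                printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
                       priv->ca->name, ipoib_recvq_size);
                ib_destroy_srq(priv->cm.srq);
                priv->cm.srq = NULL;
        }
}
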
1529 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_dev_init() local
1533 INIT_LIST_HEAD(&priv->cm.passive_ids); in ipoib_cm_dev_init()
1534 INIT_LIST_HEAD(&priv->cm.reap_list); in ipoib_cm_dev_init()
1535 INIT_LIST_HEAD(&priv->cm.start_list); in ipoib_cm_dev_init()
1536 INIT_LIST_HEAD(&priv->cm.rx_error_list); in ipoib_cm_dev_init()
1537 INIT_LIST_HEAD(&priv->cm.rx_flush_list); in ipoib_cm_dev_init()
1538 INIT_LIST_HEAD(&priv->cm.rx_drain_list); in ipoib_cm_dev_init()
1539 INIT_LIST_HEAD(&priv->cm.rx_reap_list); in ipoib_cm_dev_init()
1540 INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start); in ipoib_cm_dev_init()
1541 INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap); in ipoib_cm_dev_init()
1542 INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap); in ipoib_cm_dev_init()
1543 INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap); in ipoib_cm_dev_init()
1544 INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task); in ipoib_cm_dev_init()
1546 skb_queue_head_init(&priv->cm.skb_queue); in ipoib_cm_dev_init()
1548 ret = ib_query_device(priv->ca, &attr); in ipoib_cm_dev_init()
1554 ipoib_dbg(priv, "max_srq_sge=%d\n", attr.max_srq_sge); in ipoib_cm_dev_init()
1559 priv->cm.max_cm_mtu = attr.max_srq_sge * PAGE_SIZE - 0x10; in ipoib_cm_dev_init()
1560 priv->cm.num_frags = attr.max_srq_sge; in ipoib_cm_dev_init()
1561 ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n", in ipoib_cm_dev_init()
1562 priv->cm.max_cm_mtu, priv->cm.num_frags); in ipoib_cm_dev_init()
1564 priv->cm.max_cm_mtu = IPOIB_CM_MTU; in ipoib_cm_dev_init()
1565 priv->cm.num_frags = IPOIB_CM_RX_SG; in ipoib_cm_dev_init()
1568 ipoib_cm_init_rx_wr(dev, &priv->cm.rx_wr, priv->cm.rx_sge); in ipoib_cm_dev_init()
1572 if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i, in ipoib_cm_dev_init()
1573 priv->cm.num_frags - 1, in ipoib_cm_dev_init()
1574 priv->cm.srq_ring[i].mapping, in ipoib_cm_dev_init()
1576 ipoib_warn(priv, "failed to allocate " in ipoib_cm_dev_init()
1583 ipoib_warn(priv, "ipoib_cm_post_receive_srq " in ipoib_cm_dev_init()
1591 priv->dev->dev_addr[0] = IPOIB_FLAGS_RC; in ipoib_cm_dev_init()
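
On the sizing at lines 1559-1560: the connected-mode MTU is capped by the SRQ's scatter capacity. For example, with 4 KiB pages and max_srq_sge clamped to 16, the cap is 16 * 4096 - 0x10 = 65520 bytes, the same value as the IPOIB_CM_MTU fallback (0x10000 - 0x10). A sketch of that step, with the clamp to IPOIB_CM_RX_SG (which bounds the rx_sge array) shown first:

        attr.max_srq_sge = min_t(int, IPOIB_CM_RX_SG, attr.max_srq_sge);
        ipoib_cm_create_srq(dev, attr.max_srq_sge);
        if (ipoib_cm_has_srq(dev)) {
                priv->cm.max_cm_mtu = attr.max_srq_sge * PAGE_SIZE - 0x10;
                priv->cm.num_frags  = attr.max_srq_sge;
        } else {
                priv->cm.max_cm_mtu = IPOIB_CM_MTU;
                priv->cm.num_frags  = IPOIB_CM_RX_SG;
        }
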
1597 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_dev_cleanup() local
1600 if (!priv->cm.srq) in ipoib_cm_dev_cleanup()
1603 ipoib_dbg(priv, "Cleanup ipoib connected mode.\n"); in ipoib_cm_dev_cleanup()
1605 ret = ib_destroy_srq(priv->cm.srq); in ipoib_cm_dev_cleanup()
1607 ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret); in ipoib_cm_dev_cleanup()
1609 priv->cm.srq = NULL; in ipoib_cm_dev_cleanup()
1610 if (!priv->cm.srq_ring) in ipoib_cm_dev_cleanup()
1613 ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring); in ipoib_cm_dev_cleanup()
1614 priv->cm.srq_ring = NULL; in ipoib_cm_dev_cleanup()
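
The cleanup path, assembled; only the early-return structure is assumed:

void ipoib_cm_dev_cleanup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int ret;

        if (!priv->cm.srq)
                return;

        ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

        ret = ib_destroy_srq(priv->cm.srq);
        if (ret)
                ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

        priv->cm.srq = NULL;
        if (!priv->cm.srq_ring)
                return;

        ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring);
        priv->cm.srq_ring = NULL;
}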