Lines matching refs: id_priv
Identifier cross-reference for id_priv in the RDMA connection manager (drivers/infiniband/core/cma.c). Each entry gives the source line number, the matching text, and the containing function; "member", "argument" and "local" note how the symbol is used at that line.
152 struct rdma_id_private *id_priv; member
200 static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp) in cma_comp() argument
205 spin_lock_irqsave(&id_priv->lock, flags); in cma_comp()
206 ret = (id_priv->state == comp); in cma_comp()
207 spin_unlock_irqrestore(&id_priv->lock, flags); in cma_comp()
211 static int cma_comp_exch(struct rdma_id_private *id_priv, in cma_comp_exch() argument
217 spin_lock_irqsave(&id_priv->lock, flags); in cma_comp_exch()
218 if ((ret = (id_priv->state == comp))) in cma_comp_exch()
219 id_priv->state = exch; in cma_comp_exch()
220 spin_unlock_irqrestore(&id_priv->lock, flags); in cma_comp_exch()
224 static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv, in cma_exch() argument
230 spin_lock_irqsave(&id_priv->lock, flags); in cma_exch()
231 old = id_priv->state; in cma_exch()
232 id_priv->state = exch; in cma_exch()
233 spin_unlock_irqrestore(&id_priv->lock, flags); in cma_exch()
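
The three helpers above (cma_comp, cma_comp_exch, cma_exch) implement the locked state machine on id_priv->state. A minimal reconstruction from the matched lines, offered as a sketch rather than the verbatim source:

/* Move the id from 'comp' to 'exch' only if it is currently in 'comp',
 * under the id's spinlock so concurrent event handlers see a consistent
 * CM state; cma_exch() swaps unconditionally and returns the old state,
 * cma_comp() only tests. */
static int cma_comp_exch(struct rdma_id_private *id_priv,
                         enum rdma_cm_state comp, enum rdma_cm_state exch)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&id_priv->lock, flags);
        if ((ret = (id_priv->state == comp)))
                id_priv->state = exch;
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return ret;
}
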
247 static void cma_attach_to_dev(struct rdma_id_private *id_priv, in cma_attach_to_dev() argument
251 id_priv->cma_dev = cma_dev; in cma_attach_to_dev()
252 id_priv->id.device = cma_dev->device; in cma_attach_to_dev()
253 id_priv->id.route.addr.dev_addr.transport = in cma_attach_to_dev()
255 list_add_tail(&id_priv->list, &cma_dev->id_list); in cma_attach_to_dev()
272 static void cma_release_dev(struct rdma_id_private *id_priv) in cma_release_dev() argument
275 list_del(&id_priv->list); in cma_release_dev()
276 cma_deref_dev(id_priv->cma_dev); in cma_release_dev()
277 id_priv->cma_dev = NULL; in cma_release_dev()
281 static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv) in cma_src_addr() argument
283 return (struct sockaddr *) &id_priv->id.route.addr.src_addr; in cma_src_addr()
286 static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv) in cma_dst_addr() argument
288 return (struct sockaddr *) &id_priv->id.route.addr.dst_addr; in cma_dst_addr()
291 static inline unsigned short cma_family(struct rdma_id_private *id_priv) in cma_family() argument
293 return id_priv->id.route.addr.src_addr.ss_family; in cma_family()
296 static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey) in cma_set_qkey() argument
301 if (id_priv->qkey) { in cma_set_qkey()
302 if (qkey && id_priv->qkey != qkey) in cma_set_qkey()
308 id_priv->qkey = qkey; in cma_set_qkey()
312 switch (id_priv->id.ps) { in cma_set_qkey()
315 id_priv->qkey = RDMA_UDP_QKEY; in cma_set_qkey()
318 ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid); in cma_set_qkey()
319 ret = ib_sa_get_mcmember_rec(id_priv->id.device, in cma_set_qkey()
320 id_priv->id.port_num, &rec.mgid, in cma_set_qkey()
323 id_priv->qkey = be32_to_cpu(rec.qkey); in cma_set_qkey()
352 static int cma_acquire_dev(struct rdma_id_private *id_priv, in cma_acquire_dev() argument
355 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_acquire_dev()
364 id_priv->id.ps == RDMA_PS_IPOIB) in cma_acquire_dev()
368 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, in cma_acquire_dev()
387 id_priv->id.port_num = found_port; in cma_acquire_dev()
405 id_priv->id.port_num = found_port; in cma_acquire_dev()
414 cma_attach_to_dev(id_priv, cma_dev); in cma_acquire_dev()
423 static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) in cma_resolve_ib_dev() argument
433 addr = (struct sockaddr_ib *) cma_dst_addr(id_priv); in cma_resolve_ib_dev()
449 id_priv->id.port_num = p; in cma_resolve_ib_dev()
457 id_priv->id.port_num = p; in cma_resolve_ib_dev()
467 cma_attach_to_dev(id_priv, cma_dev); in cma_resolve_ib_dev()
468 addr = (struct sockaddr_ib *) cma_src_addr(id_priv); in cma_resolve_ib_dev()
470 cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr); in cma_resolve_ib_dev()
474 static void cma_deref_id(struct rdma_id_private *id_priv) in cma_deref_id() argument
476 if (atomic_dec_and_test(&id_priv->refcount)) in cma_deref_id()
477 complete(&id_priv->comp); in cma_deref_id()
480 static int cma_disable_callback(struct rdma_id_private *id_priv, in cma_disable_callback() argument
483 mutex_lock(&id_priv->handler_mutex); in cma_disable_callback()
484 if (id_priv->state != state) { in cma_disable_callback()
485 mutex_unlock(&id_priv->handler_mutex); in cma_disable_callback()
495 struct rdma_id_private *id_priv; in rdma_create_id() local
497 id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL); in rdma_create_id()
498 if (!id_priv) in rdma_create_id()
501 id_priv->owner = task_pid_nr(current); in rdma_create_id()
502 id_priv->state = RDMA_CM_IDLE; in rdma_create_id()
503 id_priv->id.context = context; in rdma_create_id()
504 id_priv->id.event_handler = event_handler; in rdma_create_id()
505 id_priv->id.ps = ps; in rdma_create_id()
506 id_priv->id.qp_type = qp_type; in rdma_create_id()
507 spin_lock_init(&id_priv->lock); in rdma_create_id()
508 mutex_init(&id_priv->qp_mutex); in rdma_create_id()
509 init_completion(&id_priv->comp); in rdma_create_id()
510 atomic_set(&id_priv->refcount, 1); in rdma_create_id()
511 mutex_init(&id_priv->handler_mutex); in rdma_create_id()
512 INIT_LIST_HEAD(&id_priv->listen_list); in rdma_create_id()
513 INIT_LIST_HEAD(&id_priv->mc_list); in rdma_create_id()
514 get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num); in rdma_create_id()
516 return &id_priv->id; in rdma_create_id()
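
rdma_create_id() above allocates the private structure, takes the initial reference and leaves the id in RDMA_CM_IDLE. A consumer-side sketch of how an id is created in kernels of this vintage (the four-argument form matches the internal call visible later in this listing; newer kernels add a network-namespace argument); the handler and wrapper names are illustrative:

#include <rdma/rdma_cm.h>

/* Illustrative consumer event handler: a non-zero return value asks the
 * CM to tear the id down (see the cma_exch(..., RDMA_CM_DESTROYING)
 * paths in the handlers further down this listing). */
static int my_cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
        pr_info("cm event %d status %d\n", event->event, event->status);
        return 0;
}

static struct rdma_cm_id *my_create_id(void *ctx)
{
        return rdma_create_id(my_cm_handler, ctx, RDMA_PS_TCP, IB_QPT_RC);
}
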
520 static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) in cma_init_ud_qp() argument
526 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); in cma_init_ud_qp()
546 static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) in cma_init_conn_qp() argument
552 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); in cma_init_conn_qp()
562 struct rdma_id_private *id_priv; in rdma_create_qp() local
566 id_priv = container_of(id, struct rdma_id_private, id); in rdma_create_qp()
575 ret = cma_init_ud_qp(id_priv, qp); in rdma_create_qp()
577 ret = cma_init_conn_qp(id_priv, qp); in rdma_create_qp()
582 id_priv->qp_num = qp->qp_num; in rdma_create_qp()
583 id_priv->srq = (qp->srq != NULL); in rdma_create_qp()
593 struct rdma_id_private *id_priv; in rdma_destroy_qp() local
595 id_priv = container_of(id, struct rdma_id_private, id); in rdma_destroy_qp()
596 mutex_lock(&id_priv->qp_mutex); in rdma_destroy_qp()
597 ib_destroy_qp(id_priv->id.qp); in rdma_destroy_qp()
598 id_priv->id.qp = NULL; in rdma_destroy_qp()
599 mutex_unlock(&id_priv->qp_mutex); in rdma_destroy_qp()
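
rdma_create_qp()/rdma_destroy_qp() above bind a QP to the cm_id and record qp_num/srq in the private structure. A consumer typically fills struct ib_qp_init_attr itself; a sketch, with PD and CQ allocation elided and names illustrative:

/* Assumes 'id' has resolved a device and 'pd'/'cq' were allocated by the
 * caller on id->device; illustrative sizes. */
static int my_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
                        struct ib_cq *cq)
{
        struct ib_qp_init_attr attr = {
                .send_cq = cq,
                .recv_cq = cq,
                .qp_type = IB_QPT_RC,
                .cap = {
                        .max_send_wr  = 16,
                        .max_recv_wr  = 16,
                        .max_send_sge = 1,
                        .max_recv_sge = 1,
                },
        };

        return rdma_create_qp(id, pd, &attr);  /* sets id->qp on success */
}
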
603 static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, in cma_modify_qp_rtr() argument
610 mutex_lock(&id_priv->qp_mutex); in cma_modify_qp_rtr()
611 if (!id_priv->id.qp) { in cma_modify_qp_rtr()
618 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); in cma_modify_qp_rtr()
622 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); in cma_modify_qp_rtr()
627 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); in cma_modify_qp_rtr()
631 ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num, in cma_modify_qp_rtr()
636 if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) in cma_modify_qp_rtr()
638 rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) in cma_modify_qp_rtr()
647 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); in cma_modify_qp_rtr()
649 mutex_unlock(&id_priv->qp_mutex); in cma_modify_qp_rtr()
653 static int cma_modify_qp_rts(struct rdma_id_private *id_priv, in cma_modify_qp_rts() argument
659 mutex_lock(&id_priv->qp_mutex); in cma_modify_qp_rts()
660 if (!id_priv->id.qp) { in cma_modify_qp_rts()
666 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); in cma_modify_qp_rts()
672 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); in cma_modify_qp_rts()
674 mutex_unlock(&id_priv->qp_mutex); in cma_modify_qp_rts()
678 static int cma_modify_qp_err(struct rdma_id_private *id_priv) in cma_modify_qp_err() argument
683 mutex_lock(&id_priv->qp_mutex); in cma_modify_qp_err()
684 if (!id_priv->id.qp) { in cma_modify_qp_err()
690 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE); in cma_modify_qp_err()
692 mutex_unlock(&id_priv->qp_mutex); in cma_modify_qp_err()
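
The three cma_modify_qp_*() helpers above share one pattern: take qp_mutex, let rdma_init_qp_attr() fill the attribute set for the target state, then call ib_modify_qp(). A condensed sketch of the RTS leg, reconstructed from the matched lines plus the usual conn_param handling of this era (treat as a sketch, not the verbatim source):

static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
                             struct rdma_conn_param *conn_param)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        mutex_lock(&id_priv->qp_mutex);
        if (!id_priv->id.qp) {          /* nothing to transition */
                ret = 0;
                goto out;
        }

        qp_attr.qp_state = IB_QPS_RTS;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                goto out;

        if (conn_param)
                qp_attr.max_rd_atomic = conn_param->initiator_depth;
        ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
        mutex_unlock(&id_priv->qp_mutex);
        return ret;
}
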
696 static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv, in cma_ib_init_qp_attr() argument
699 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_ib_init_qp_attr()
703 if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) == in cma_ib_init_qp_attr()
709 ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num, in cma_ib_init_qp_attr()
714 qp_attr->port_num = id_priv->id.port_num; in cma_ib_init_qp_attr()
717 if (id_priv->id.qp_type == IB_QPT_UD) { in cma_ib_init_qp_attr()
718 ret = cma_set_qkey(id_priv, 0); in cma_ib_init_qp_attr()
722 qp_attr->qkey = id_priv->qkey; in cma_ib_init_qp_attr()
734 struct rdma_id_private *id_priv; in rdma_init_qp_attr() local
737 id_priv = container_of(id, struct rdma_id_private, id); in rdma_init_qp_attr()
738 switch (rdma_node_get_transport(id_priv->id.device->node_type)) { in rdma_init_qp_attr()
740 if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD)) in rdma_init_qp_attr()
741 ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); in rdma_init_qp_attr()
743 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, in rdma_init_qp_attr()
747 qp_attr->rq_psn = id_priv->seq_num; in rdma_init_qp_attr()
750 if (!id_priv->cm_id.iw) { in rdma_init_qp_attr()
754 ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, in rdma_init_qp_attr()
941 static inline int cma_user_data_offset(struct rdma_id_private *id_priv) in cma_user_data_offset() argument
943 return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr); in cma_user_data_offset()
946 static void cma_cancel_route(struct rdma_id_private *id_priv) in cma_cancel_route() argument
948 switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) { in cma_cancel_route()
950 if (id_priv->query) in cma_cancel_route()
951 ib_sa_cancel_query(id_priv->query_id, id_priv->query); in cma_cancel_route()
958 static void cma_cancel_listens(struct rdma_id_private *id_priv) in cma_cancel_listens() argument
967 list_del(&id_priv->list); in cma_cancel_listens()
969 while (!list_empty(&id_priv->listen_list)) { in cma_cancel_listens()
970 dev_id_priv = list_entry(id_priv->listen_list.next, in cma_cancel_listens()
983 static void cma_cancel_operation(struct rdma_id_private *id_priv, in cma_cancel_operation() argument
988 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr); in cma_cancel_operation()
991 cma_cancel_route(id_priv); in cma_cancel_operation()
994 if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev) in cma_cancel_operation()
995 cma_cancel_listens(id_priv); in cma_cancel_operation()
1002 static void cma_release_port(struct rdma_id_private *id_priv) in cma_release_port() argument
1004 struct rdma_bind_list *bind_list = id_priv->bind_list; in cma_release_port()
1010 hlist_del(&id_priv->node); in cma_release_port()
1018 static void cma_leave_mc_groups(struct rdma_id_private *id_priv) in cma_leave_mc_groups() argument
1022 while (!list_empty(&id_priv->mc_list)) { in cma_leave_mc_groups()
1023 mc = container_of(id_priv->mc_list.next, in cma_leave_mc_groups()
1026 switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) { in cma_leave_mc_groups()
1042 struct rdma_id_private *id_priv; in rdma_destroy_id() local
1045 id_priv = container_of(id, struct rdma_id_private, id); in rdma_destroy_id()
1046 state = cma_exch(id_priv, RDMA_CM_DESTROYING); in rdma_destroy_id()
1047 cma_cancel_operation(id_priv, state); in rdma_destroy_id()
1053 mutex_lock(&id_priv->handler_mutex); in rdma_destroy_id()
1054 mutex_unlock(&id_priv->handler_mutex); in rdma_destroy_id()
1056 if (id_priv->cma_dev) { in rdma_destroy_id()
1057 switch (rdma_node_get_transport(id_priv->id.device->node_type)) { in rdma_destroy_id()
1059 if (id_priv->cm_id.ib) in rdma_destroy_id()
1060 ib_destroy_cm_id(id_priv->cm_id.ib); in rdma_destroy_id()
1063 if (id_priv->cm_id.iw) in rdma_destroy_id()
1064 iw_destroy_cm_id(id_priv->cm_id.iw); in rdma_destroy_id()
1069 cma_leave_mc_groups(id_priv); in rdma_destroy_id()
1070 cma_release_dev(id_priv); in rdma_destroy_id()
1073 cma_release_port(id_priv); in rdma_destroy_id()
1074 cma_deref_id(id_priv); in rdma_destroy_id()
1075 wait_for_completion(&id_priv->comp); in rdma_destroy_id()
1077 if (id_priv->internal_id) in rdma_destroy_id()
1078 cma_deref_id(id_priv->id.context); in rdma_destroy_id()
1080 kfree(id_priv->id.route.path_rec); in rdma_destroy_id()
1081 kfree(id_priv); in rdma_destroy_id()
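
cma_deref_id() and rdma_destroy_id() above implement the teardown handshake: every in-flight handler or query holds a reference, and destroy drops the creation reference and sleeps on id_priv->comp until the count reaches zero. Reconstructed from the matched lines:

static void cma_deref_id(struct rdma_id_private *id_priv)
{
        /* The last put wakes rdma_destroy_id(), which (per the lines above)
         * does:
         *         cma_deref_id(id_priv);
         *         wait_for_completion(&id_priv->comp);
         *         kfree(id_priv);
         */
        if (atomic_dec_and_test(&id_priv->refcount))
                complete(&id_priv->comp);
}
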
1085 static int cma_rep_recv(struct rdma_id_private *id_priv) in cma_rep_recv() argument
1089 ret = cma_modify_qp_rtr(id_priv, NULL); in cma_rep_recv()
1093 ret = cma_modify_qp_rts(id_priv, NULL); in cma_rep_recv()
1097 ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0); in cma_rep_recv()
1103 cma_modify_qp_err(id_priv); in cma_rep_recv()
1104 ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED, in cma_rep_recv()
1125 struct rdma_id_private *id_priv = cm_id->context; in cma_ib_handler() local
1130 cma_disable_callback(id_priv, RDMA_CM_CONNECT)) || in cma_ib_handler()
1132 cma_disable_callback(id_priv, RDMA_CM_DISCONNECT))) in cma_ib_handler()
1143 if (id_priv->id.qp) { in cma_ib_handler()
1144 event.status = cma_rep_recv(id_priv); in cma_ib_handler()
1161 if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT, in cma_ib_handler()
1173 cma_modify_qp_err(id_priv); in cma_ib_handler()
1185 ret = id_priv->id.event_handler(&id_priv->id, &event); in cma_ib_handler()
1188 id_priv->cm_id.ib = NULL; in cma_ib_handler()
1189 cma_exch(id_priv, RDMA_CM_DESTROYING); in cma_ib_handler()
1190 mutex_unlock(&id_priv->handler_mutex); in cma_ib_handler()
1191 rdma_destroy_id(&id_priv->id); in cma_ib_handler()
1195 mutex_unlock(&id_priv->handler_mutex); in cma_ib_handler()
1202 struct rdma_id_private *id_priv; in cma_new_conn_id() local
1212 id_priv = container_of(id, struct rdma_id_private, id); in cma_new_conn_id()
1227 if (cma_any_addr(cma_src_addr(id_priv))) { in cma_new_conn_id()
1232 ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr); in cma_new_conn_id()
1238 id_priv->state = RDMA_CM_CONNECT; in cma_new_conn_id()
1239 return id_priv; in cma_new_conn_id()
1249 struct rdma_id_private *id_priv; in cma_new_udp_id() local
1258 id_priv = container_of(id, struct rdma_id_private, id); in cma_new_udp_id()
1263 ret = cma_translate_addr(cma_src_addr(id_priv), &id->route.addr.dev_addr); in cma_new_udp_id()
1268 id_priv->state = RDMA_CM_CONNECT; in cma_new_udp_id()
1269 return id_priv; in cma_new_udp_id()
1422 struct rdma_id_private *id_priv = iw_id->context; in cma_iw_handler() local
1428 if (cma_disable_callback(id_priv, RDMA_CM_CONNECT)) in cma_iw_handler()
1437 memcpy(cma_src_addr(id_priv), laddr, in cma_iw_handler()
1439 memcpy(cma_dst_addr(id_priv), raddr, in cma_iw_handler()
1471 ret = id_priv->id.event_handler(&id_priv->id, &event); in cma_iw_handler()
1474 id_priv->cm_id.iw = NULL; in cma_iw_handler()
1475 cma_exch(id_priv, RDMA_CM_DESTROYING); in cma_iw_handler()
1476 mutex_unlock(&id_priv->handler_mutex); in cma_iw_handler()
1477 rdma_destroy_id(&id_priv->id); in cma_iw_handler()
1481 mutex_unlock(&id_priv->handler_mutex); in cma_iw_handler()
1571 static int cma_ib_listen(struct rdma_id_private *id_priv) in cma_ib_listen() argument
1579 id = ib_create_cm_id(id_priv->id.device, cma_req_handler, id_priv); in cma_ib_listen()
1583 id_priv->cm_id.ib = id; in cma_ib_listen()
1585 addr = cma_src_addr(id_priv); in cma_ib_listen()
1586 svc_id = rdma_get_service_id(&id_priv->id, addr); in cma_ib_listen()
1587 if (cma_any_addr(addr) && !id_priv->afonly) in cma_ib_listen()
1588 ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL); in cma_ib_listen()
1590 cma_set_compare_data(id_priv->id.ps, addr, &compare_data); in cma_ib_listen()
1591 ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data); in cma_ib_listen()
1595 ib_destroy_cm_id(id_priv->cm_id.ib); in cma_ib_listen()
1596 id_priv->cm_id.ib = NULL; in cma_ib_listen()
1602 static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog) in cma_iw_listen() argument
1607 id = iw_create_cm_id(id_priv->id.device, in cma_iw_listen()
1609 id_priv); in cma_iw_listen()
1613 id_priv->cm_id.iw = id; in cma_iw_listen()
1615 memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv), in cma_iw_listen()
1616 rdma_addr_size(cma_src_addr(id_priv))); in cma_iw_listen()
1618 ret = iw_cm_listen(id_priv->cm_id.iw, backlog); in cma_iw_listen()
1621 iw_destroy_cm_id(id_priv->cm_id.iw); in cma_iw_listen()
1622 id_priv->cm_id.iw = NULL; in cma_iw_listen()
1631 struct rdma_id_private *id_priv = id->context; in cma_listen_handler() local
1633 id->context = id_priv->id.context; in cma_listen_handler()
1634 id->event_handler = id_priv->id.event_handler; in cma_listen_handler()
1635 return id_priv->id.event_handler(id, event); in cma_listen_handler()
1638 static void cma_listen_on_dev(struct rdma_id_private *id_priv, in cma_listen_on_dev() argument
1645 if (cma_family(id_priv) == AF_IB && in cma_listen_on_dev()
1649 id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps, in cma_listen_on_dev()
1650 id_priv->id.qp_type); in cma_listen_on_dev()
1657 memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv), in cma_listen_on_dev()
1658 rdma_addr_size(cma_src_addr(id_priv))); in cma_listen_on_dev()
1661 list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list); in cma_listen_on_dev()
1662 atomic_inc(&id_priv->refcount); in cma_listen_on_dev()
1664 dev_id_priv->afonly = id_priv->afonly; in cma_listen_on_dev()
1666 ret = rdma_listen(id, id_priv->backlog); in cma_listen_on_dev()
1672 static void cma_listen_on_all(struct rdma_id_private *id_priv) in cma_listen_on_all() argument
1677 list_add_tail(&id_priv->list, &listen_any_list); in cma_listen_on_all()
1679 cma_listen_on_dev(id_priv, cma_dev); in cma_listen_on_all()
1685 struct rdma_id_private *id_priv; in rdma_set_service_type() local
1687 id_priv = container_of(id, struct rdma_id_private, id); in rdma_set_service_type()
1688 id_priv->tos = (u8) tos; in rdma_set_service_type()
1713 static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, in cma_query_ib_route() argument
1716 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_query_ib_route()
1728 path_rec.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); in cma_query_ib_route()
1734 switch (cma_family(id_priv)) { in cma_query_ib_route()
1736 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos); in cma_query_ib_route()
1740 sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); in cma_query_ib_route()
1745 sib = (struct sockaddr_ib *) cma_src_addr(id_priv); in cma_query_ib_route()
1751 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device, in cma_query_ib_route()
1752 id_priv->id.port_num, &path_rec, in cma_query_ib_route()
1755 work, &id_priv->query); in cma_query_ib_route()
1757 return (id_priv->query_id < 0) ? id_priv->query_id : 0; in cma_query_ib_route()
1763 struct rdma_id_private *id_priv = work->id; in cma_work_handler() local
1766 mutex_lock(&id_priv->handler_mutex); in cma_work_handler()
1767 if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) in cma_work_handler()
1770 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { in cma_work_handler()
1771 cma_exch(id_priv, RDMA_CM_DESTROYING); in cma_work_handler()
1775 mutex_unlock(&id_priv->handler_mutex); in cma_work_handler()
1776 cma_deref_id(id_priv); in cma_work_handler()
1778 rdma_destroy_id(&id_priv->id); in cma_work_handler()
1785 struct rdma_id_private *id_priv = work->id; in cma_ndev_work_handler() local
1788 mutex_lock(&id_priv->handler_mutex); in cma_ndev_work_handler()
1789 if (id_priv->state == RDMA_CM_DESTROYING || in cma_ndev_work_handler()
1790 id_priv->state == RDMA_CM_DEVICE_REMOVAL) in cma_ndev_work_handler()
1793 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { in cma_ndev_work_handler()
1794 cma_exch(id_priv, RDMA_CM_DESTROYING); in cma_ndev_work_handler()
1799 mutex_unlock(&id_priv->handler_mutex); in cma_ndev_work_handler()
1800 cma_deref_id(id_priv); in cma_ndev_work_handler()
1802 rdma_destroy_id(&id_priv->id); in cma_ndev_work_handler()
1806 static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms) in cma_resolve_ib_route() argument
1808 struct rdma_route *route = &id_priv->id.route; in cma_resolve_ib_route()
1816 work->id = id_priv; in cma_resolve_ib_route()
1828 ret = cma_query_ib_route(id_priv, timeout_ms, work); in cma_resolve_ib_route()
1844 struct rdma_id_private *id_priv; in rdma_set_ib_paths() local
1847 id_priv = container_of(id, struct rdma_id_private, id); in rdma_set_ib_paths()
1848 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, in rdma_set_ib_paths()
1862 cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED); in rdma_set_ib_paths()
1867 static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms) in cma_resolve_iw_route() argument
1875 work->id = id_priv; in cma_resolve_iw_route()
1904 static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) in cma_resolve_iboe_route() argument
1906 struct rdma_route *route = &id_priv->id.route; in cma_resolve_iboe_route()
1917 work->id = id_priv; in cma_resolve_iboe_route()
1939 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, in cma_resolve_iboe_route()
1941 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr, in cma_resolve_iboe_route()
1948 route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos); in cma_resolve_iboe_route()
1979 struct rdma_id_private *id_priv; in rdma_resolve_route() local
1982 id_priv = container_of(id, struct rdma_id_private, id); in rdma_resolve_route()
1983 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY)) in rdma_resolve_route()
1986 atomic_inc(&id_priv->refcount); in rdma_resolve_route()
1991 ret = cma_resolve_ib_route(id_priv, timeout_ms); in rdma_resolve_route()
1994 ret = cma_resolve_iboe_route(id_priv); in rdma_resolve_route()
2001 ret = cma_resolve_iw_route(id_priv, timeout_ms); in rdma_resolve_route()
2012 cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED); in rdma_resolve_route()
2013 cma_deref_id(id_priv); in rdma_resolve_route()
2035 static int cma_bind_loopback(struct rdma_id_private *id_priv) in cma_bind_loopback() argument
2047 if (cma_family(id_priv) == AF_IB && in cma_bind_loopback()
2079 id_priv->id.route.addr.dev_addr.dev_type = in cma_bind_loopback()
2083 rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); in cma_bind_loopback()
2084 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); in cma_bind_loopback()
2085 id_priv->id.port_num = p; in cma_bind_loopback()
2086 cma_attach_to_dev(id_priv, cma_dev); in cma_bind_loopback()
2087 cma_set_loopback(cma_src_addr(id_priv)); in cma_bind_loopback()
2096 struct rdma_id_private *id_priv = context; in addr_handler() local
2100 mutex_lock(&id_priv->handler_mutex); in addr_handler()
2101 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, in addr_handler()
2105 memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr)); in addr_handler()
2106 if (!status && !id_priv->cma_dev) in addr_handler()
2107 status = cma_acquire_dev(id_priv, NULL); in addr_handler()
2110 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, in addr_handler()
2118 if (id_priv->id.event_handler(&id_priv->id, &event)) { in addr_handler()
2119 cma_exch(id_priv, RDMA_CM_DESTROYING); in addr_handler()
2120 mutex_unlock(&id_priv->handler_mutex); in addr_handler()
2121 cma_deref_id(id_priv); in addr_handler()
2122 rdma_destroy_id(&id_priv->id); in addr_handler()
2126 mutex_unlock(&id_priv->handler_mutex); in addr_handler()
2127 cma_deref_id(id_priv); in addr_handler()
2130 static int cma_resolve_loopback(struct rdma_id_private *id_priv) in cma_resolve_loopback() argument
2140 if (!id_priv->cma_dev) { in cma_resolve_loopback()
2141 ret = cma_bind_loopback(id_priv); in cma_resolve_loopback()
2146 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); in cma_resolve_loopback()
2147 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); in cma_resolve_loopback()
2149 work->id = id_priv; in cma_resolve_loopback()
2161 static int cma_resolve_ib_addr(struct rdma_id_private *id_priv) in cma_resolve_ib_addr() argument
2170 if (!id_priv->cma_dev) { in cma_resolve_ib_addr()
2171 ret = cma_resolve_ib_dev(id_priv); in cma_resolve_ib_addr()
2176 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *) in cma_resolve_ib_addr()
2177 &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr)); in cma_resolve_ib_addr()
2179 work->id = id_priv; in cma_resolve_ib_addr()
2211 struct rdma_id_private *id_priv; in rdma_resolve_addr() local
2214 id_priv = container_of(id, struct rdma_id_private, id); in rdma_resolve_addr()
2215 if (id_priv->state == RDMA_CM_IDLE) { in rdma_resolve_addr()
2221 if (cma_family(id_priv) != dst_addr->sa_family) in rdma_resolve_addr()
2224 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) in rdma_resolve_addr()
2227 atomic_inc(&id_priv->refcount); in rdma_resolve_addr()
2228 memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); in rdma_resolve_addr()
2230 ret = cma_resolve_loopback(id_priv); in rdma_resolve_addr()
2233 ret = cma_resolve_ib_addr(id_priv); in rdma_resolve_addr()
2235 ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv), in rdma_resolve_addr()
2237 timeout_ms, addr_handler, id_priv); in rdma_resolve_addr()
2245 cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); in rdma_resolve_addr()
2246 cma_deref_id(id_priv); in rdma_resolve_addr()
2253 struct rdma_id_private *id_priv; in rdma_set_reuseaddr() local
2257 id_priv = container_of(id, struct rdma_id_private, id); in rdma_set_reuseaddr()
2258 spin_lock_irqsave(&id_priv->lock, flags); in rdma_set_reuseaddr()
2259 if (reuse || id_priv->state == RDMA_CM_IDLE) { in rdma_set_reuseaddr()
2260 id_priv->reuseaddr = reuse; in rdma_set_reuseaddr()
2265 spin_unlock_irqrestore(&id_priv->lock, flags); in rdma_set_reuseaddr()
2272 struct rdma_id_private *id_priv; in rdma_set_afonly() local
2276 id_priv = container_of(id, struct rdma_id_private, id); in rdma_set_afonly()
2277 spin_lock_irqsave(&id_priv->lock, flags); in rdma_set_afonly()
2278 if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) { in rdma_set_afonly()
2279 id_priv->options |= (1 << CMA_OPTION_AFONLY); in rdma_set_afonly()
2280 id_priv->afonly = afonly; in rdma_set_afonly()
2285 spin_unlock_irqrestore(&id_priv->lock, flags); in rdma_set_afonly()
2291 struct rdma_id_private *id_priv) in cma_bind_port() argument
2298 addr = cma_src_addr(id_priv); in cma_bind_port()
2316 id_priv->bind_list = bind_list; in cma_bind_port()
2317 hlist_add_head(&id_priv->node, &bind_list->owners); in cma_bind_port()
2320 static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv, in cma_alloc_port() argument
2336 cma_bind_port(bind_list, id_priv); in cma_alloc_port()
2343 static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv) in cma_alloc_any_port() argument
2355 int ret = cma_alloc_port(ps, id_priv, rover); in cma_alloc_any_port()
2381 struct rdma_id_private *id_priv, uint8_t reuseaddr) in cma_check_port() argument
2386 addr = cma_src_addr(id_priv); in cma_check_port()
2388 if (id_priv == cur_id) in cma_check_port()
2396 if (id_priv->afonly && cur_id->afonly && in cma_check_port()
2409 static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv) in cma_use_port() argument
2415 snum = ntohs(cma_port(cma_src_addr(id_priv))); in cma_use_port()
2421 ret = cma_alloc_port(ps, id_priv, snum); in cma_use_port()
2423 ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr); in cma_use_port()
2425 cma_bind_port(bind_list, id_priv); in cma_use_port()
2430 static int cma_bind_listen(struct rdma_id_private *id_priv) in cma_bind_listen() argument
2432 struct rdma_bind_list *bind_list = id_priv->bind_list; in cma_bind_listen()
2437 ret = cma_check_port(bind_list, id_priv, 0); in cma_bind_listen()
2442 static struct idr *cma_select_inet_ps(struct rdma_id_private *id_priv) in cma_select_inet_ps() argument
2444 switch (id_priv->id.ps) { in cma_select_inet_ps()
2458 static struct idr *cma_select_ib_ps(struct rdma_id_private *id_priv) in cma_select_ib_ps() argument
2464 sib = (struct sockaddr_ib *) cma_src_addr(id_priv); in cma_select_ib_ps()
2468 if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) { in cma_select_ib_ps()
2471 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) && in cma_select_ib_ps()
2475 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) && in cma_select_ib_ps()
2489 static int cma_get_port(struct rdma_id_private *id_priv) in cma_get_port() argument
2494 if (cma_family(id_priv) != AF_IB) in cma_get_port()
2495 ps = cma_select_inet_ps(id_priv); in cma_get_port()
2497 ps = cma_select_ib_ps(id_priv); in cma_get_port()
2502 if (cma_any_port(cma_src_addr(id_priv))) in cma_get_port()
2503 ret = cma_alloc_any_port(ps, id_priv); in cma_get_port()
2505 ret = cma_use_port(ps, id_priv); in cma_get_port()
2535 struct rdma_id_private *id_priv; in rdma_listen() local
2538 id_priv = container_of(id, struct rdma_id_private, id); in rdma_listen()
2539 if (id_priv->state == RDMA_CM_IDLE) { in rdma_listen()
2541 ret = rdma_bind_addr(id, cma_src_addr(id_priv)); in rdma_listen()
2546 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) in rdma_listen()
2549 if (id_priv->reuseaddr) { in rdma_listen()
2550 ret = cma_bind_listen(id_priv); in rdma_listen()
2555 id_priv->backlog = backlog; in rdma_listen()
2559 ret = cma_ib_listen(id_priv); in rdma_listen()
2564 ret = cma_iw_listen(id_priv, backlog); in rdma_listen()
2573 cma_listen_on_all(id_priv); in rdma_listen()
2577 id_priv->backlog = 0; in rdma_listen()
2578 cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND); in rdma_listen()
2585 struct rdma_id_private *id_priv; in rdma_bind_addr() local
2592 id_priv = container_of(id, struct rdma_id_private, id); in rdma_bind_addr()
2593 if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND)) in rdma_bind_addr()
2600 memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr)); in rdma_bind_addr()
2606 ret = cma_acquire_dev(id_priv, NULL); in rdma_bind_addr()
2611 if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) { in rdma_bind_addr()
2613 id_priv->afonly = 1; in rdma_bind_addr()
2616 id_priv->afonly = init_net.ipv6.sysctl.bindv6only; in rdma_bind_addr()
2619 ret = cma_get_port(id_priv); in rdma_bind_addr()
2625 if (id_priv->cma_dev) in rdma_bind_addr()
2626 cma_release_dev(id_priv); in rdma_bind_addr()
2628 cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE); in rdma_bind_addr()
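
rdma_bind_addr() and rdma_listen() above are the passive-side entry points: bind claims a port in the per-port-space table, listen then starts the IB or iWARP listener (or listens on all devices for a wildcard address). A consumer-side sketch with an arbitrary example port:

static int my_listen(struct rdma_cm_id *listen_id)
{
        struct sockaddr_in sin = {
                .sin_family      = AF_INET,
                .sin_addr.s_addr = htonl(INADDR_ANY),
                .sin_port        = htons(7471),   /* arbitrary example port */
        };
        int ret;

        ret = rdma_bind_addr(listen_id, (struct sockaddr *)&sin);
        if (ret)
                return ret;
        /* Connection requests arrive at the listener's event handler as
         * RDMA_CM_EVENT_CONNECT_REQUEST, carrying a new child cm_id. */
        return rdma_listen(listen_id, 16);
}
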
2633 static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv) in cma_format_hdr() argument
2639 if (cma_family(id_priv) == AF_INET) { in cma_format_hdr()
2642 src4 = (struct sockaddr_in *) cma_src_addr(id_priv); in cma_format_hdr()
2643 dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv); in cma_format_hdr()
2649 } else if (cma_family(id_priv) == AF_INET6) { in cma_format_hdr()
2652 src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); in cma_format_hdr()
2653 dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv); in cma_format_hdr()
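
cma_format_hdr() above writes the private-data header that IP-addressed connections prepend to the caller's payload; cma_user_data_offset() earlier in the listing returns 0 for AF_IB and sizeof(struct cma_hdr) otherwise, which is why the request paths reserve that offset. The header layout in cma.c of this era is approximately the following (sketch; consult the source for the authoritative definition):

union cma_ip_addr {
        struct in6_addr ip6;
        struct {
                __be32 pad[3];
                __be32 addr;
        } ip4;
};

struct cma_hdr {
        u8 cma_version;
        u8 ip_version;          /* IP version in bits 7:4 */
        __be16 port;
        union cma_ip_addr src_addr;
        union cma_ip_addr dst_addr;
};
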
2666 struct rdma_id_private *id_priv = cm_id->context; in cma_sidr_rep_handler() local
2671 if (cma_disable_callback(id_priv, RDMA_CM_CONNECT)) in cma_sidr_rep_handler()
2688 ret = cma_set_qkey(id_priv, rep->qkey); in cma_sidr_rep_handler()
2694 ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num, in cma_sidr_rep_handler()
2695 id_priv->id.route.path_rec, in cma_sidr_rep_handler()
2708 ret = id_priv->id.event_handler(&id_priv->id, &event); in cma_sidr_rep_handler()
2711 id_priv->cm_id.ib = NULL; in cma_sidr_rep_handler()
2712 cma_exch(id_priv, RDMA_CM_DESTROYING); in cma_sidr_rep_handler()
2713 mutex_unlock(&id_priv->handler_mutex); in cma_sidr_rep_handler()
2714 rdma_destroy_id(&id_priv->id); in cma_sidr_rep_handler()
2718 mutex_unlock(&id_priv->handler_mutex); in cma_sidr_rep_handler()
2722 static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, in cma_resolve_ib_udp() argument
2731 offset = cma_user_data_offset(id_priv); in cma_resolve_ib_udp()
2749 ret = cma_format_hdr(private_data, id_priv); in cma_resolve_ib_udp()
2755 id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler, in cma_resolve_ib_udp()
2756 id_priv); in cma_resolve_ib_udp()
2761 id_priv->cm_id.ib = id; in cma_resolve_ib_udp()
2763 req.path = id_priv->id.route.path_rec; in cma_resolve_ib_udp()
2764 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); in cma_resolve_ib_udp()
2768 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req); in cma_resolve_ib_udp()
2770 ib_destroy_cm_id(id_priv->cm_id.ib); in cma_resolve_ib_udp()
2771 id_priv->cm_id.ib = NULL; in cma_resolve_ib_udp()
2778 static int cma_connect_ib(struct rdma_id_private *id_priv, in cma_connect_ib() argument
2788 offset = cma_user_data_offset(id_priv); in cma_connect_ib()
2805 id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv); in cma_connect_ib()
2810 id_priv->cm_id.ib = id; in cma_connect_ib()
2812 route = &id_priv->id.route; in cma_connect_ib()
2814 ret = cma_format_hdr(private_data, id_priv); in cma_connect_ib()
2824 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); in cma_connect_ib()
2825 req.qp_num = id_priv->qp_num; in cma_connect_ib()
2826 req.qp_type = id_priv->id.qp_type; in cma_connect_ib()
2827 req.starting_psn = id_priv->seq_num; in cma_connect_ib()
2836 req.srq = id_priv->srq ? 1 : 0; in cma_connect_ib()
2838 ret = ib_send_cm_req(id_priv->cm_id.ib, &req); in cma_connect_ib()
2842 id_priv->cm_id.ib = NULL; in cma_connect_ib()
2849 static int cma_connect_iw(struct rdma_id_private *id_priv, in cma_connect_iw() argument
2856 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv); in cma_connect_iw()
2860 id_priv->cm_id.iw = cm_id; in cma_connect_iw()
2862 memcpy(&cm_id->local_addr, cma_src_addr(id_priv), in cma_connect_iw()
2863 rdma_addr_size(cma_src_addr(id_priv))); in cma_connect_iw()
2864 memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv), in cma_connect_iw()
2865 rdma_addr_size(cma_dst_addr(id_priv))); in cma_connect_iw()
2867 ret = cma_modify_qp_rtr(id_priv, conn_param); in cma_connect_iw()
2876 iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num; in cma_connect_iw()
2879 iw_param.qpn = id_priv->qp_num; in cma_connect_iw()
2885 id_priv->cm_id.iw = NULL; in cma_connect_iw()
2892 struct rdma_id_private *id_priv; in rdma_connect() local
2895 id_priv = container_of(id, struct rdma_id_private, id); in rdma_connect()
2896 if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) in rdma_connect()
2900 id_priv->qp_num = conn_param->qp_num; in rdma_connect()
2901 id_priv->srq = conn_param->srq; in rdma_connect()
2907 ret = cma_resolve_ib_udp(id_priv, conn_param); in rdma_connect()
2909 ret = cma_connect_ib(id_priv, conn_param); in rdma_connect()
2912 ret = cma_connect_iw(id_priv, conn_param); in rdma_connect()
2923 cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED); in rdma_connect()
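
rdma_connect() above is the final active-side step; address and route resolution are asynchronous, so the usual consumer pattern drives each stage from the previous stage's event. An illustrative handler (error paths trimmed; a non-zero return lets the CM destroy the id):

static int my_client_handler(struct rdma_cm_id *id, struct rdma_cm_event *ev)
{
        struct rdma_conn_param param = {
                .responder_resources = 1,
                .initiator_depth     = 1,
                .retry_count         = 7,
        };

        switch (ev->event) {
        case RDMA_CM_EVENT_ADDR_RESOLVED:       /* from rdma_resolve_addr() */
                return rdma_resolve_route(id, 2000);
        case RDMA_CM_EVENT_ROUTE_RESOLVED:
                /* allocate PD/CQ and call rdma_create_qp() here, then: */
                return rdma_connect(id, &param);
        case RDMA_CM_EVENT_ESTABLISHED:
                /* connection is usable */
                return 0;
        default:
                return 0;
        }
}
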
2928 static int cma_accept_ib(struct rdma_id_private *id_priv, in cma_accept_ib() argument
2934 ret = cma_modify_qp_rtr(id_priv, conn_param); in cma_accept_ib()
2938 ret = cma_modify_qp_rts(id_priv, conn_param); in cma_accept_ib()
2943 rep.qp_num = id_priv->qp_num; in cma_accept_ib()
2944 rep.starting_psn = id_priv->seq_num; in cma_accept_ib()
2952 rep.srq = id_priv->srq ? 1 : 0; in cma_accept_ib()
2954 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep); in cma_accept_ib()
2959 static int cma_accept_iw(struct rdma_id_private *id_priv, in cma_accept_iw() argument
2965 ret = cma_modify_qp_rtr(id_priv, conn_param); in cma_accept_iw()
2973 if (id_priv->id.qp) { in cma_accept_iw()
2974 iw_param.qpn = id_priv->qp_num; in cma_accept_iw()
2978 return iw_cm_accept(id_priv->cm_id.iw, &iw_param); in cma_accept_iw()
2981 static int cma_send_sidr_rep(struct rdma_id_private *id_priv, in cma_send_sidr_rep() argument
2991 ret = cma_set_qkey(id_priv, qkey); in cma_send_sidr_rep()
2994 rep.qp_num = id_priv->qp_num; in cma_send_sidr_rep()
2995 rep.qkey = id_priv->qkey; in cma_send_sidr_rep()
3000 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep); in cma_send_sidr_rep()
3005 struct rdma_id_private *id_priv; in rdma_accept() local
3008 id_priv = container_of(id, struct rdma_id_private, id); in rdma_accept()
3010 id_priv->owner = task_pid_nr(current); in rdma_accept()
3012 if (!cma_comp(id_priv, RDMA_CM_CONNECT)) in rdma_accept()
3016 id_priv->qp_num = conn_param->qp_num; in rdma_accept()
3017 id_priv->srq = conn_param->srq; in rdma_accept()
3024 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, in rdma_accept()
3029 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, in rdma_accept()
3033 ret = cma_accept_ib(id_priv, conn_param); in rdma_accept()
3035 ret = cma_rep_recv(id_priv); in rdma_accept()
3039 ret = cma_accept_iw(id_priv, conn_param); in rdma_accept()
3051 cma_modify_qp_err(id_priv); in rdma_accept()
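
rdma_accept() above finishes the passive side: for RC it runs the QP through RTR/RTS and sends the REP, for UD/SIDR it replies with the QPN and QKey. A consumer-side sketch, called on the child id delivered with the connect-request event (a QP is assumed to have been attached with rdma_create_qp() so the CM can fill qp_num itself):

static int my_accept(struct rdma_cm_id *child_id)
{
        struct rdma_conn_param param = {
                .responder_resources = 1,
                .initiator_depth     = 1,
                .rnr_retry_count     = 7,
        };

        return rdma_accept(child_id, &param);
}
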
3059 struct rdma_id_private *id_priv; in rdma_notify() local
3062 id_priv = container_of(id, struct rdma_id_private, id); in rdma_notify()
3063 if (!id_priv->cm_id.ib) in rdma_notify()
3068 ret = ib_cm_notify(id_priv->cm_id.ib, event); in rdma_notify()
3081 struct rdma_id_private *id_priv; in rdma_reject() local
3084 id_priv = container_of(id, struct rdma_id_private, id); in rdma_reject()
3085 if (!id_priv->cm_id.ib) in rdma_reject()
3091 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0, in rdma_reject()
3094 ret = ib_send_cm_rej(id_priv->cm_id.ib, in rdma_reject()
3099 ret = iw_cm_reject(id_priv->cm_id.iw, in rdma_reject()
3112 struct rdma_id_private *id_priv; in rdma_disconnect() local
3115 id_priv = container_of(id, struct rdma_id_private, id); in rdma_disconnect()
3116 if (!id_priv->cm_id.ib) in rdma_disconnect()
3121 ret = cma_modify_qp_err(id_priv); in rdma_disconnect()
3125 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) in rdma_disconnect()
3126 ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0); in rdma_disconnect()
3129 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); in rdma_disconnect()
3142 struct rdma_id_private *id_priv; in cma_ib_mc_handler() local
3147 id_priv = mc->id_priv; in cma_ib_mc_handler()
3148 if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) && in cma_ib_mc_handler()
3149 cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED)) in cma_ib_mc_handler()
3153 status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); in cma_ib_mc_handler()
3154 mutex_lock(&id_priv->qp_mutex); in cma_ib_mc_handler()
3155 if (!status && id_priv->id.qp) in cma_ib_mc_handler()
3156 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid, in cma_ib_mc_handler()
3158 mutex_unlock(&id_priv->qp_mutex); in cma_ib_mc_handler()
3165 ib_init_ah_from_mcmember(id_priv->id.device, in cma_ib_mc_handler()
3166 id_priv->id.port_num, &multicast->rec, in cma_ib_mc_handler()
3173 ret = id_priv->id.event_handler(&id_priv->id, &event); in cma_ib_mc_handler()
3175 cma_exch(id_priv, RDMA_CM_DESTROYING); in cma_ib_mc_handler()
3176 mutex_unlock(&id_priv->handler_mutex); in cma_ib_mc_handler()
3177 rdma_destroy_id(&id_priv->id); in cma_ib_mc_handler()
3181 mutex_unlock(&id_priv->handler_mutex); in cma_ib_mc_handler()
3185 static void cma_set_mgid(struct rdma_id_private *id_priv, in cma_set_mgid() argument
3189 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_set_mgid()
3204 if (id_priv->id.ps == RDMA_PS_UDP) in cma_set_mgid()
3209 if (id_priv->id.ps == RDMA_PS_UDP) in cma_set_mgid()
3215 static int cma_join_ib_multicast(struct rdma_id_private *id_priv, in cma_join_ib_multicast() argument
3219 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_join_ib_multicast()
3224 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num, in cma_join_ib_multicast()
3229 ret = cma_set_qkey(id_priv, 0); in cma_join_ib_multicast()
3233 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid); in cma_join_ib_multicast()
3234 rec.qkey = cpu_to_be32(id_priv->qkey); in cma_join_ib_multicast()
3245 if (id_priv->id.ps == RDMA_PS_IPOIB) in cma_join_ib_multicast()
3252 mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device, in cma_join_ib_multicast()
3253 id_priv->id.port_num, &rec, in cma_join_ib_multicast()
3297 static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, in cma_iboe_join_multicast() argument
3301 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_iboe_join_multicast()
3322 if (id_priv->id.ps == RDMA_PS_UDP) in cma_iboe_join_multicast()
3339 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, in cma_iboe_join_multicast()
3341 work->id = id_priv; in cma_iboe_join_multicast()
3359 struct rdma_id_private *id_priv; in rdma_join_multicast() local
3363 id_priv = container_of(id, struct rdma_id_private, id); in rdma_join_multicast()
3364 if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) && in rdma_join_multicast()
3365 !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED)) in rdma_join_multicast()
3374 mc->id_priv = id_priv; in rdma_join_multicast()
3376 spin_lock(&id_priv->lock); in rdma_join_multicast()
3377 list_add(&mc->list, &id_priv->mc_list); in rdma_join_multicast()
3378 spin_unlock(&id_priv->lock); in rdma_join_multicast()
3384 ret = cma_join_ib_multicast(id_priv, mc); in rdma_join_multicast()
3388 ret = cma_iboe_join_multicast(id_priv, mc); in rdma_join_multicast()
3400 spin_lock_irq(&id_priv->lock); in rdma_join_multicast()
3402 spin_unlock_irq(&id_priv->lock); in rdma_join_multicast()
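
rdma_join_multicast() above queues the mc entry on id_priv->mc_list and starts either the SA join (IB) or the IBoE join. The consumer-facing call in kernels of this vintage takes the multicast address and a context pointer (later kernels add a join-state argument); sketch:

/* Illustrative: join an IPv4 multicast group on a bound/resolved id.
 * The result is reported as RDMA_CM_EVENT_MULTICAST_JOIN or
 * RDMA_CM_EVENT_MULTICAST_ERROR. */
static int my_join(struct rdma_cm_id *id, __be32 maddr, void *ctx)
{
        struct sockaddr_in sin = {
                .sin_family      = AF_INET,
                .sin_addr.s_addr = maddr,
        };

        return rdma_join_multicast(id, (struct sockaddr *)&sin, ctx);
}
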
3411 struct rdma_id_private *id_priv; in rdma_leave_multicast() local
3414 id_priv = container_of(id, struct rdma_id_private, id); in rdma_leave_multicast()
3415 spin_lock_irq(&id_priv->lock); in rdma_leave_multicast()
3416 list_for_each_entry(mc, &id_priv->mc_list, list) { in rdma_leave_multicast()
3419 spin_unlock_irq(&id_priv->lock); in rdma_leave_multicast()
3425 if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) { in rdma_leave_multicast()
3441 spin_unlock_irq(&id_priv->lock); in rdma_leave_multicast()
3445 static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv) in cma_netdev_change() argument
3450 dev_addr = &id_priv->id.route.addr.dev_addr; in cma_netdev_change()
3455 ndev->name, &id_priv->id); in cma_netdev_change()
3461 work->id = id_priv; in cma_netdev_change()
3463 atomic_inc(&id_priv->refcount); in cma_netdev_change()
3475 struct rdma_id_private *id_priv; in cma_netdev_callback() local
3489 list_for_each_entry(id_priv, &cma_dev->id_list, list) { in cma_netdev_callback()
3490 ret = cma_netdev_change(ndev, id_priv); in cma_netdev_callback()
3507 struct rdma_id_private *id_priv; in cma_add_one() local
3522 list_for_each_entry(id_priv, &listen_any_list, list) in cma_add_one()
3523 cma_listen_on_dev(id_priv, cma_dev); in cma_add_one()
3527 static int cma_remove_id_dev(struct rdma_id_private *id_priv) in cma_remove_id_dev() argument
3534 state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL); in cma_remove_id_dev()
3538 cma_cancel_operation(id_priv, state); in cma_remove_id_dev()
3539 mutex_lock(&id_priv->handler_mutex); in cma_remove_id_dev()
3542 if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL)) in cma_remove_id_dev()
3547 ret = id_priv->id.event_handler(&id_priv->id, &event); in cma_remove_id_dev()
3549 mutex_unlock(&id_priv->handler_mutex); in cma_remove_id_dev()
3555 struct rdma_id_private *id_priv; in cma_process_remove() local
3560 id_priv = list_entry(cma_dev->id_list.next, in cma_process_remove()
3563 list_del(&id_priv->listen_list); in cma_process_remove()
3564 list_del_init(&id_priv->list); in cma_process_remove()
3565 atomic_inc(&id_priv->refcount); in cma_process_remove()
3568 ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv); in cma_process_remove()
3569 cma_deref_id(id_priv); in cma_process_remove()
3571 rdma_destroy_id(&id_priv->id); in cma_process_remove()
3601 struct rdma_id_private *id_priv; in cma_get_id_stats() local
3619 list_for_each_entry(id_priv, &cma_dev->id_list, list) { in cma_get_id_stats()
3633 id = &id_priv->id; in cma_get_id_stats()
3640 rdma_addr_size(cma_src_addr(id_priv)), in cma_get_id_stats()
3641 cma_src_addr(id_priv), in cma_get_id_stats()
3645 rdma_addr_size(cma_src_addr(id_priv)), in cma_get_id_stats()
3646 cma_dst_addr(id_priv), in cma_get_id_stats()
3650 id_stats->pid = id_priv->owner; in cma_get_id_stats()
3652 id_stats->cm_state = id_priv->state; in cma_get_id_stats()
3653 id_stats->qp_num = id_priv->qp_num; in cma_get_id_stats()