Lines Matching refs:conn (all hits are in the RDS send path, net/rds/send.c)
64 void rds_send_reset(struct rds_connection *conn) in rds_send_reset() argument
69 if (conn->c_xmit_rm) { in rds_send_reset()
70 rm = conn->c_xmit_rm; in rds_send_reset()
71 conn->c_xmit_rm = NULL; in rds_send_reset()
80 conn->c_xmit_sg = 0; in rds_send_reset()
81 conn->c_xmit_hdr_off = 0; in rds_send_reset()
82 conn->c_xmit_data_off = 0; in rds_send_reset()
83 conn->c_xmit_atomic_sent = 0; in rds_send_reset()
84 conn->c_xmit_rdma_sent = 0; in rds_send_reset()
85 conn->c_xmit_data_sent = 0; in rds_send_reset()
87 conn->c_map_queued = 0; in rds_send_reset()
89 conn->c_unacked_packets = rds_sysctl_max_unacked_packets; in rds_send_reset()
90 conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes; in rds_send_reset()
93 spin_lock_irqsave(&conn->c_lock, flags); in rds_send_reset()
94 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) { in rds_send_reset()
98 list_splice_init(&conn->c_retrans, &conn->c_send_queue); in rds_send_reset()
99 spin_unlock_irqrestore(&conn->c_lock, flags); in rds_send_reset()
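
rds_send_reset() above puts back any partially transmitted message, clears the per-connection transmit cursors (c_xmit_*), restores the unacked budgets, and, under c_lock, splices the whole retransmit list back onto the head of the send queue (line 98) so a reconnect restarts from a clean state. A minimal user-space sketch of that splice-back, assuming a toy intrusive list in place of <linux/list.h>; the names here are hypothetical, not the RDS API:

struct node { struct node *next, *prev; };

static void list_init(struct node *h) { h->next = h->prev = h; }
static int  list_empty(const struct node *h) { return h->next == h; }

/* Move every entry of src to the front of dst and leave src empty,
 * mirroring list_splice_init(&conn->c_retrans, &conn->c_send_queue). */
static void splice_init(struct node *src, struct node *dst)
{
    if (list_empty(src))
        return;
    src->next->prev = dst;           /* first entry now hangs off dst */
    src->prev->next = dst->next;     /* last entry precedes dst's old head */
    dst->next->prev = src->prev;
    dst->next = src->next;
    list_init(src);                  /* src is empty again */
}

Splicing to the head (not the tail) matters: retransmitted messages carry older sequence numbers and must go out before anything newly queued.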
102 static int acquire_in_xmit(struct rds_connection *conn) in acquire_in_xmit() argument
104 return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0; in acquire_in_xmit()
107 static void release_in_xmit(struct rds_connection *conn) in release_in_xmit() argument
109 clear_bit(RDS_IN_XMIT, &conn->c_flags); in release_in_xmit()
117 if (waitqueue_active(&conn->c_waitq)) in release_in_xmit()
118 wake_up_all(&conn->c_waitq); in release_in_xmit()
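
acquire_in_xmit()/release_in_xmit() form a lock-free "single transmitter" gate: whoever flips RDS_IN_XMIT from 0 to 1 owns the transmit path. A user-space sketch of the same handoff with C11 atomics, assuming a single flag word stands in for conn->c_flags:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint c_flags;
#define IN_XMIT 0x1u

static bool acquire_in_xmit(void)
{
    /* Old value had the bit clear => we now own the transmit path. */
    return (atomic_fetch_or(&c_flags, IN_XMIT) & IN_XMIT) == 0;
}

static void release_in_xmit(void)
{
    atomic_fetch_and(&c_flags, ~IN_XMIT);
    /* The kernel code then wakes c_waitq (lines 117-118) so shutdown,
     * which waits for this flag to clear, can make progress. */
}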
135 int rds_send_xmit(struct rds_connection *conn) in rds_send_xmit() argument
156 if (!acquire_in_xmit(conn)) { in rds_send_xmit()
170 conn->c_send_gen++; in rds_send_xmit()
171 send_gen = conn->c_send_gen; in rds_send_xmit()
177 if (!rds_conn_up(conn)) { in rds_send_xmit()
178 release_in_xmit(conn); in rds_send_xmit()
183 if (conn->c_trans->xmit_prepare) in rds_send_xmit()
184 conn->c_trans->xmit_prepare(conn); in rds_send_xmit()
192 rm = conn->c_xmit_rm; in rds_send_xmit()
198 if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) { in rds_send_xmit()
199 rm = rds_cong_update_alloc(conn); in rds_send_xmit()
206 conn->c_xmit_rm = rm; in rds_send_xmit()
229 spin_lock_irqsave(&conn->c_lock, flags); in rds_send_xmit()
231 if (!list_empty(&conn->c_send_queue)) { in rds_send_xmit()
232 rm = list_entry(conn->c_send_queue.next, in rds_send_xmit()
241 list_move_tail(&rm->m_conn_item, &conn->c_retrans); in rds_send_xmit()
244 spin_unlock_irqrestore(&conn->c_lock, flags); in rds_send_xmit()
258 spin_lock_irqsave(&conn->c_lock, flags); in rds_send_xmit()
261 spin_unlock_irqrestore(&conn->c_lock, flags); in rds_send_xmit()
267 if (conn->c_unacked_packets == 0 || in rds_send_xmit()
268 conn->c_unacked_bytes < len) { in rds_send_xmit()
271 conn->c_unacked_packets = rds_sysctl_max_unacked_packets; in rds_send_xmit()
272 conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes; in rds_send_xmit()
275 conn->c_unacked_bytes -= len; in rds_send_xmit()
276 conn->c_unacked_packets--; in rds_send_xmit()
279 conn->c_xmit_rm = rm; in rds_send_xmit()
283 if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) { in rds_send_xmit()
285 ret = conn->c_trans->xmit_rdma(conn, &rm->rdma); in rds_send_xmit()
288 conn->c_xmit_rdma_sent = 1; in rds_send_xmit()
295 if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) { in rds_send_xmit()
297 ret = conn->c_trans->xmit_atomic(conn, &rm->atomic); in rds_send_xmit()
300 conn->c_xmit_atomic_sent = 1; in rds_send_xmit()
329 if (rm->data.op_active && !conn->c_xmit_data_sent) { in rds_send_xmit()
331 ret = conn->c_trans->xmit(conn, rm, in rds_send_xmit()
332 conn->c_xmit_hdr_off, in rds_send_xmit()
333 conn->c_xmit_sg, in rds_send_xmit()
334 conn->c_xmit_data_off); in rds_send_xmit()
338 if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) { in rds_send_xmit()
341 conn->c_xmit_hdr_off); in rds_send_xmit()
342 conn->c_xmit_hdr_off += tmp; in rds_send_xmit()
346 sg = &rm->data.op_sg[conn->c_xmit_sg]; in rds_send_xmit()
349 conn->c_xmit_data_off); in rds_send_xmit()
350 conn->c_xmit_data_off += tmp; in rds_send_xmit()
352 if (conn->c_xmit_data_off == sg->length) { in rds_send_xmit()
353 conn->c_xmit_data_off = 0; in rds_send_xmit()
355 conn->c_xmit_sg++; in rds_send_xmit()
357 conn->c_xmit_sg == rm->data.op_nents); in rds_send_xmit()
361 if (conn->c_xmit_hdr_off == sizeof(struct rds_header) && in rds_send_xmit()
362 (conn->c_xmit_sg == rm->data.op_nents)) in rds_send_xmit()
363 conn->c_xmit_data_sent = 1; in rds_send_xmit()
371 if (!rm->data.op_active || conn->c_xmit_data_sent) { in rds_send_xmit()
372 conn->c_xmit_rm = NULL; in rds_send_xmit()
373 conn->c_xmit_sg = 0; in rds_send_xmit()
374 conn->c_xmit_hdr_off = 0; in rds_send_xmit()
375 conn->c_xmit_data_off = 0; in rds_send_xmit()
376 conn->c_xmit_rdma_sent = 0; in rds_send_xmit()
377 conn->c_xmit_atomic_sent = 0; in rds_send_xmit()
378 conn->c_xmit_data_sent = 0; in rds_send_xmit()
385 if (conn->c_trans->xmit_complete) in rds_send_xmit()
386 conn->c_trans->xmit_complete(conn); in rds_send_xmit()
387 release_in_xmit(conn); in rds_send_xmit()
414 if (!list_empty(&conn->c_send_queue) && in rds_send_xmit()
415 send_gen == conn->c_send_gen) { in rds_send_xmit()
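
rds_send_xmit() is the transmit pump: take the in-xmit gate, bump c_send_gen (lines 170-171), loop pulling messages off c_send_queue and pushing their RDMA, atomic, and data pieces through the transport, then release the gate. The re-check at lines 414-415 closes the race where a sender enqueues work after the drain but before the release. A compact, compilable sketch of that generation trick, with hypothetical names and a counter standing in for the real queue:

#include <errno.h>
#include <stdatomic.h>

struct conn_sketch {
    atomic_uint flags;               /* bit 0: the "in xmit" gate */
    unsigned int send_gen;
    int queued;                      /* stand-in for c_send_queue */
};

static int send_xmit(struct conn_sketch *c)
{
    unsigned int send_gen;

    if (atomic_fetch_or(&c->flags, 1u) & 1u)
        return -ENXIO;               /* another CPU holds the gate */

    send_gen = ++c->send_gen;        /* lines 170-171 */
    c->queued = 0;                   /* drain: transmit everything queued */
    atomic_fetch_and(&c->flags, ~1u);

    /* Lines 414-415: anything queued in the window above is restarted
     * here only if no newer pass bumped the generation, so exactly one
     * context re-enters the pump instead of all of them. */
    if (c->queued && send_gen == c->send_gen)
        return send_xmit(c);         /* kernel reschedules a worker */
    return 0;
}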
553 struct rds_message *rds_send_get_message(struct rds_connection *conn, in rds_send_get_message() argument
559 spin_lock_irqsave(&conn->c_lock, flags); in rds_send_get_message()
561 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) { in rds_send_get_message()
569 list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) { in rds_send_get_message()
578 spin_unlock_irqrestore(&conn->c_lock, flags); in rds_send_get_message()
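
rds_send_get_message() holds c_lock while it scans the retransmit list first and the send queue second for the message a transport completion refers to. A minimal lookup sketch under those assumptions, with a pthread mutex and a toy singly linked list in place of the kernel types:

#include <pthread.h>
#include <stdint.h>
#include <stddef.h>

struct msg { uint64_t seq; struct msg *next; };

struct queues {
    pthread_mutex_t lock;
    struct msg *retrans, *send_queue;
};

static struct msg *find_in(struct msg *head, uint64_t seq)
{
    for (; head; head = head->next)
        if (head->seq == seq)
            return head;
    return NULL;
}

static struct msg *send_get_message(struct queues *q, uint64_t seq)
{
    struct msg *rm;

    pthread_mutex_lock(&q->lock);
    rm = find_in(q->retrans, seq);      /* retransmit list first */
    if (!rm)
        rm = find_in(q->send_queue, seq);
    /* The kernel version also takes a reference on rm before unlocking. */
    pthread_mutex_unlock(&q->lock);
    return rm;
}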
674 void rds_send_drop_acked(struct rds_connection *conn, u64 ack, in rds_send_drop_acked() argument
681 spin_lock_irqsave(&conn->c_lock, flags); in rds_send_drop_acked()
683 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) { in rds_send_drop_acked()
695 spin_unlock_irqrestore(&conn->c_lock, flags); in rds_send_drop_acked()
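
rds_send_drop_acked() walks c_retrans under c_lock and releases every message whose sequence number is covered by the peer's cumulative ack. A self-contained sketch of that prune-up-to-ack pattern; the actual kernel code unlinks onto a local list and frees after dropping the lock:

#include <pthread.h>
#include <stdint.h>
#include <stddef.h>

struct rt_msg { uint64_t seq; struct rt_msg *next; };

static pthread_mutex_t rt_lock = PTHREAD_MUTEX_INITIALIZER;
static struct rt_msg *retrans;

static void drop_acked(uint64_t ack)
{
    struct rt_msg **pp = &retrans, *rm;

    pthread_mutex_lock(&rt_lock);
    while ((rm = *pp) != NULL) {
        if (rm->seq <= ack)
            *pp = rm->next;     /* acked: unlink (kernel frees later) */
        else
            pp = &rm->next;     /* still unacked: keep */
    }
    pthread_mutex_unlock(&rt_lock);
}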
705 struct rds_connection *conn; in rds_send_drop_to() local
733 conn = rm->m_inc.i_conn; in rds_send_drop_to()
735 spin_lock_irqsave(&conn->c_lock, flags); in rds_send_drop_to()
742 spin_unlock_irqrestore(&conn->c_lock, flags); in rds_send_drop_to()
749 spin_unlock_irqrestore(&conn->c_lock, flags); in rds_send_drop_to()
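
Note the locking shape in rds_send_drop_to(): it iterates a socket's messages and, for each one, locks the connection that message was queued on (conn = rm->m_inc.i_conn, line 733) rather than any global lock. A tiny sketch of that per-object locking pattern, with hypothetical types:

#include <pthread.h>

struct conn { pthread_mutex_t c_lock; };
struct msg  { struct conn *conn; struct msg *next; int queued; };

static void drop_all(struct msg *head)
{
    for (struct msg *rm = head; rm; rm = rm->next) {
        pthread_mutex_lock(&rm->conn->c_lock);   /* that message's conn */
        rm->queued = 0;                          /* unlink from its send queue */
        pthread_mutex_unlock(&rm->conn->c_lock);
    }
}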
783 static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn, in rds_send_queue_rm() argument
826 rm->m_inc.i_conn = conn; in rds_send_queue_rm()
829 spin_lock(&conn->c_lock); in rds_send_queue_rm()
830 rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++); in rds_send_queue_rm()
831 list_add_tail(&rm->m_conn_item, &conn->c_send_queue); in rds_send_queue_rm()
833 spin_unlock(&conn->c_lock); in rds_send_queue_rm()
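
In rds_send_queue_rm(), claiming the next sequence number (c_next_tx_seq++, line 830) and appending to c_send_queue (line 831) happen inside a single c_lock hold, so the queue is always in strict sequence order and no two messages can share a sequence number. A sketch of that invariant, assuming a pthread mutex and hypothetical names:

#include <pthread.h>
#include <stdint.h>

struct txq {
    pthread_mutex_t lock;
    uint64_t next_tx_seq;
};

static uint64_t queue_rm(struct txq *q /*, struct msg *rm */)
{
    uint64_t seq;

    pthread_mutex_lock(&q->lock);
    seq = q->next_tx_seq++;       /* becomes h_sequence (big-endian) */
    /* list_add_tail(&rm->m_conn_item, &send_queue) goes here,
     * under the same lock, so queue order matches sequence order. */
    pthread_mutex_unlock(&q->lock);
    return seq;
}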
960 struct rds_connection *conn; in rds_sendmsg() local
1026 conn = rs->rs_conn; in rds_sendmsg()
1028 conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr, in rds_sendmsg()
1031 if (IS_ERR(conn)) { in rds_sendmsg()
1032 ret = PTR_ERR(conn); in rds_sendmsg()
1035 rs->rs_conn = conn; in rds_sendmsg()
1043 if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) { in rds_sendmsg()
1045 &rm->rdma, conn->c_trans->xmit_rdma); in rds_sendmsg()
1050 if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) { in rds_sendmsg()
1052 &rm->atomic, conn->c_trans->xmit_atomic); in rds_sendmsg()
1057 rds_conn_connect_if_down(conn); in rds_sendmsg()
1059 ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); in rds_sendmsg()
1065 while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port, in rds_sendmsg()
1079 rds_send_queue_rm(rs, conn, rm, in rds_sendmsg()
1100 if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags)) in rds_sendmsg()
1101 rds_send_xmit(conn); in rds_sendmsg()
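
Before queueing, rds_sendmsg() rejects messages carrying RDMA or atomic operations when the bound transport has no matching handler (lines 1043-1052): a NULL xmit_rdma or xmit_atomic pointer means "unsupported". A compact sketch of that capability gate; the struct layout and names here are assumptions, only the NULL-pointer test mirrors the listing:

#include <errno.h>
#include <stdbool.h>

struct transport {
    int (*xmit_rdma)(void);     /* NULL if the transport can't do RDMA */
    int (*xmit_atomic)(void);
};

static int check_caps(const struct transport *t, bool has_rdma, bool has_atomic)
{
    if (has_rdma && !t->xmit_rdma)
        return -EOPNOTSUPP;
    if (has_atomic && !t->xmit_atomic)
        return -EOPNOTSUPP;
    return 0;
}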
1122 rds_send_pong(struct rds_connection *conn, __be16 dport) in rds_send_pong() argument
1134 rm->m_daddr = conn->c_faddr; in rds_send_pong()
1137 rds_conn_connect_if_down(conn); in rds_send_pong()
1139 ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL); in rds_send_pong()
1143 spin_lock_irqsave(&conn->c_lock, flags); in rds_send_pong()
1144 list_add_tail(&rm->m_conn_item, &conn->c_send_queue); in rds_send_pong()
1147 rm->m_inc.i_conn = conn; in rds_send_pong()
1150 conn->c_next_tx_seq); in rds_send_pong()
1151 conn->c_next_tx_seq++; in rds_send_pong()
1152 spin_unlock_irqrestore(&conn->c_lock, flags); in rds_send_pong()
1157 if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags)) in rds_send_pong()
1158 queue_delayed_work(rds_wq, &conn->c_send_w, 0); in rds_send_pong()
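
Both rds_sendmsg() (lines 1100-1101) and rds_send_pong() (lines 1157-1158) finish with the same conditional kick: start the send machinery only while RDS_LL_SEND_FULL is clear, i.e. while the last transmit attempt did not hit transport back-pressure. A user-space sketch of that pattern, assuming a flag word and a caller-supplied work hook:

#include <stdatomic.h>

#define LL_SEND_FULL 0x2u

static atomic_uint flags;

static void kick_transmit(void (*queue_work)(void))
{
    /* If the pipe was full, the transport's completion path will
     * re-kick the worker; kicking now would just spin uselessly. */
    if (!(atomic_load(&flags) & LL_SEND_FULL))
        queue_work();
}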