/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/ratelimit.h>
#include <linux/export.h>

#include "rds.h"

/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the softlockup watchdog
 * will kick our shin.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = 64;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");

static void rds_send_remove_from_sock(struct list_head *messages, int status);

/*
 * Reset the send state.  Callers must ensure that this doesn't race with
 * rds_send_xmit().
 */
void rds_send_reset(struct rds_connection *conn)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;

	if (conn->c_xmit_rm) {
		rm = conn->c_xmit_rm;
		conn->c_xmit_rm = NULL;
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport. This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		rds_message_unmapped(rm);
		rds_message_put(rm);
	}

	conn->c_xmit_sg = 0;
	conn->c_xmit_hdr_off = 0;
	conn->c_xmit_data_off = 0;
	conn->c_xmit_atomic_sent = 0;
	conn->c_xmit_rdma_sent = 0;
	conn->c_xmit_data_sent = 0;

	conn->c_map_queued = 0;

	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;

	/* Mark messages as retransmissions, and move them to the send q */
	spin_lock_irqsave(&conn->c_lock, flags);
	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
	spin_unlock_irqrestore(&conn->c_lock, flags);
}

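/*
 * RDS_IN_XMIT serializes transmit on a connection: only the caller that
 * wins the test_and_set_bit() below may run the rds_send_xmit() loop until
 * it drops the bit again in release_in_xmit().
 */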
static int acquire_in_xmit(struct rds_connection *conn)
{
	return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
}

static void release_in_xmit(struct rds_connection *conn)
{
	clear_bit(RDS_IN_XMIT, &conn->c_flags);
	smp_mb__after_atomic();
	/*
	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
	 * hot path and finding waiters is very rare.  We don't want to walk
	 * the system-wide hashed waitqueue buckets in the fast path only to
	 * almost never find waiters.
	 */
	if (waitqueue_active(&conn->c_waitq))
		wake_up_all(&conn->c_waitq);
}

/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 *   Pro:
 *      - tx queueing is a simple fifo list
 *      - reassembly is optional and easily done by transports per conn
 *      - no per flow rx lookup at all, straight to the socket
 *      - less per-frag memory and wire overhead
 *   Con:
 *      - queued acks can be delayed behind large messages
 *   Depends:
 *      - small message latency is higher behind queued large messages
 *      - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_connection *conn)
{
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	struct scatterlist *sg;
	int ret = 0;
	LIST_HEAD(to_be_dropped);
	int batch_count;
	unsigned long send_gen = 0;

restart:
	batch_count = 0;

	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue.  We only have one task feeding the connection at a time.  If
	 * another thread is already feeding the queue then we back off.  This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 */
	if (!acquire_in_xmit(conn)) {
		rds_stats_inc(s_send_lock_contention);
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * we record the send generation after doing the xmit acquire.
	 * if someone else manages to jump in and do some work, we'll use
	 * this to avoid a goto restart farther down.
	 *
	 * The acquire_in_xmit() check above ensures that only one
	 * caller can increment c_send_gen at any time.
	 */
	conn->c_send_gen++;
	send_gen = conn->c_send_gen;

	/*
	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
	 * we do the opposite to avoid races.
	 */
	if (!rds_conn_up(conn)) {
		release_in_xmit(conn);
		ret = 0;
		goto out;
	}

	if (conn->c_trans->xmit_prepare)
		conn->c_trans->xmit_prepare(conn);

	/*
	 * spin trying to push headers and data down the connection until
	 * the connection doesn't make forward progress.
	 */
	while (1) {

		rm = conn->c_xmit_rm;

		/*
		 * If between sending messages, we can send a pending congestion
		 * map update.
		 */
		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
			rm = rds_cong_update_alloc(conn);
			if (IS_ERR(rm)) {
				ret = PTR_ERR(rm);
				break;
			}
			rm->data.op_active = 1;

			conn->c_xmit_rm = rm;
		}

		/*
		 * If not already working on one, grab the next message.
		 *
		 * c_xmit_rm holds a ref while we're sending this message down
		 * the connection.  We can use this ref while holding the
		 * send_sem.  rds_send_reset() is serialized with it.
		 */
		if (!rm) {
			unsigned int len;

			batch_count++;

			/* we want to process as big a batch as we can, but
			 * we also want to avoid softlockups.  If we've been
			 * through a lot of messages, let's back off and see
			 * if anyone else jumps in
			 */
			if (batch_count >= 1024)
				goto over_batch;

			spin_lock_irqsave(&conn->c_lock, flags);

			if (!list_empty(&conn->c_send_queue)) {
				rm = list_entry(conn->c_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to the retransmit
				 * list right away.
				 */
				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);

			if (!rm)
				break;

			/* Unfortunately, the way Infiniband deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state. We could possibly
			 * recover from that, but right now we drop the
			 * connection.
			 * Therefore, we never retransmit messages with RDMA ops.
			 */
			if (rm->rdma.op_active &&
			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
				spin_lock_irqsave(&conn->c_lock, flags);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock_irqrestore(&conn->c_lock, flags);
				continue;
			}

			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (conn->c_unacked_packets == 0 ||
			    conn->c_unacked_bytes < len) {
				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				conn->c_unacked_bytes -= len;
				conn->c_unacked_packets--;
			}

			conn->c_xmit_rm = rm;
		}

		/* The transport either sends the whole rdma or none of it */
		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
			rm->m_final_op = &rm->rdma;
			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
			if (ret)
				break;
			conn->c_xmit_rdma_sent = 1;

			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
		}

		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
			rm->m_final_op = &rm->atomic;
			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
			if (ret)
				break;
			conn->c_xmit_atomic_sent = 1;

			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
		}

		/*
		 * A number of cases require an RDS header to be sent
		 * even if there is no data.
		 * We permit 0-byte sends; rds-ping depends on this.
		 * However, if there are exclusively attached silent ops,
		 * we skip the hdr/data send, to enable silent operation.
		 */
		if (rm->data.op_nents == 0) {
			int ops_present;
			int all_ops_are_silent = 1;

			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
			if (rm->atomic.op_active && !rm->atomic.op_silent)
				all_ops_are_silent = 0;
			if (rm->rdma.op_active && !rm->rdma.op_silent)
				all_ops_are_silent = 0;

			if (ops_present && all_ops_are_silent
			    && !rm->m_rdma_cookie)
				rm->data.op_active = 0;
		}

		if (rm->data.op_active && !conn->c_xmit_data_sent) {
			rm->m_final_op = &rm->data;
			ret = conn->c_trans->xmit(conn, rm,
						  conn->c_xmit_hdr_off,
						  conn->c_xmit_sg,
						  conn->c_xmit_data_off);
			if (ret <= 0)
				break;

			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    conn->c_xmit_hdr_off);
				conn->c_xmit_hdr_off += tmp;
				ret -= tmp;
			}

			sg = &rm->data.op_sg[conn->c_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      conn->c_xmit_data_off);
				conn->c_xmit_data_off += tmp;
				ret -= tmp;
				if (conn->c_xmit_data_off == sg->length) {
					conn->c_xmit_data_off = 0;
					sg++;
					conn->c_xmit_sg++;
					BUG_ON(ret != 0 &&
					       conn->c_xmit_sg == rm->data.op_nents);
				}
			}

			if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
			    (conn->c_xmit_sg == rm->data.op_nents))
				conn->c_xmit_data_sent = 1;
		}

		/*
		 * An rm will only take multiple trips through this loop
		 * if there is a data op. Thus, if the data is sent (or there was
		 * none), then we're done with the rm.
		 */
		if (!rm->data.op_active || conn->c_xmit_data_sent) {
			conn->c_xmit_rm = NULL;
			conn->c_xmit_sg = 0;
			conn->c_xmit_hdr_off = 0;
			conn->c_xmit_data_off = 0;
			conn->c_xmit_rdma_sent = 0;
			conn->c_xmit_atomic_sent = 0;
			conn->c_xmit_data_sent = 0;

			rds_message_put(rm);
		}
	}

over_batch:
	if (conn->c_trans->xmit_complete)
		conn->c_trans->xmit_complete(conn);
	release_in_xmit(conn);

	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped)) {
		/* irqs on here, so we can put(), unlike above */
		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
			rds_message_put(rm);
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
	}

	/*
	 * Other senders can queue a message after we last test the send queue
	 * but before we clear RDS_IN_XMIT.  In that case they'd back off and
	 * not try to send their newly queued message.  We need to check the
	 * send queue after having cleared RDS_IN_XMIT so that their message
	 * doesn't get stuck on the send queue.
	 *
	 * If the transport cannot continue (i.e. ret != 0), then it must
	 * call us when more room is available, such as from the tx
	 * completion handler.
	 *
	 * We have an extra generation check here so that if someone manages
	 * to jump in after our release_in_xmit, we'll see that they have done
	 * some work and we will skip our goto.
	 */
	if (ret == 0) {
		smp_mb();
		if (!list_empty(&conn->c_send_queue) &&
		    send_gen == conn->c_send_gen) {
			rds_stats_inc(s_send_lock_queue_raced);
			goto restart;
		}
	}
out:
	return ret;
}

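/*
 * Give the message's payload bytes back to the socket's send-buffer
 * accounting; the caller must hold rs_lock (see the assert below).
 */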
static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}

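/*
 * A transport may supply its own is_acked callback (e.g. tcp_is_acked);
 * otherwise we simply compare the message's header sequence number
 * against the acked sequence number.
 */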
static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}

/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_rdma_op *ro;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ro = &rm->rdma;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro->op_active && ro->op_notify && ro->op_notifier) {
		notifier = ro->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);

/*
 * Just like above, except looks at atomic op
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_atomic_op *ao;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ao = &rm->atomic;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
	    && ao->op_active && ao->op_notify && ao->op_notifier) {
		notifier = ao->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ao->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);

/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rm_rdma_op *ro;
	struct rm_atomic_op *ao;

	ro = &rm->rdma;
	if (ro->op_active && ro->op_notify && ro->op_notifier) {
		ro->op_notifier->n_status = status;
		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
		ro->op_notifier = NULL;
	}

	ao = &rm->atomic;
	if (ao->op_active && ao->op_notify && ao->op_notifier) {
		ao->op_notifier->n_status = status;
		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
		ao->op_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
}

/*
 * This is called from the IB send completion when we detect
 * an RDMA operation that failed with a remote access error.
 * So speed is not an issue here.
 */
struct rds_message *rds_send_get_message(struct rds_connection *conn,
					 struct rm_rdma_op *op)
{
	struct rds_message *rm, *tmp, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			goto out;
		}
	}

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			break;
		}
	}

out:
	spin_unlock_irqrestore(&conn->c_lock, flags);

	return found;
}
EXPORT_SYMBOL_GPL(rds_send_get_message);

/*
 * This removes messages from the socket's list if they're on it.  The list
 * argument must be private to the caller, we must be able to modify it
 * without locks.  The messages must have a reference held for their
 * position on the list.  This function will drop that reference after
 * removing the messages from the 'messages' list regardless of whether it
 * found the messages on the socket list or not.
 */
static void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags;
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {
		int was_on_sock = 0;

		rm = list_entry(messages->next, struct rds_message,
				m_conn_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock.  If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it. It does not prevent the
		 * message from being removed from the socket, though.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			if (rs)
				sock_hold(rds_rs_to_sk(rs));
		}
		if (!rs)
			goto unlock_and_drop;
		spin_lock(&rs->rs_lock);

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rm_rdma_op *ro = &rm->rdma;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro->op_active && ro->op_notifier &&
			       (ro->op_notify || (ro->op_recverr && status))) {
				notifier = ro->op_notifier;
				list_add_tail(&notifier->n_list,
						&rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->rdma.op_notifier = NULL;
			}
			was_on_sock = 1;
			rm->m_rs = NULL;
		}
		spin_unlock(&rs->rs_lock);

unlock_and_drop:
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
		rds_message_put(rm);
		if (was_on_sock)
			rds_message_put(rm);
	}

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}

/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number.  Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 */
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_atomic();

	spin_unlock_irqrestore(&conn->c_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);

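/*
 * Drop messages this socket has queued to 'dest' (or to any destination
 * when dest is NULL): pull them off the socket and connection queues and
 * complete their notifiers with RDS_RDMA_CANCELED.
 */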
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	unsigned long flags;
	LIST_HEAD(list);

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
			continue;

		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
	}

	/* order flag updates with the rs lock */
	smp_mb__after_atomic();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (list_empty(&list))
		return;

	/* Remove the messages from the conn */
	list_for_each_entry(rm, &list, m_sock_item) {

		conn = rm->m_inc.i_conn;

		spin_lock_irqsave(&conn->c_lock, flags);
		/*
		 * Maybe someone else beat us to removing rm from the conn.
		 * If we race with their flag update we'll get the lock and
		 * then really see that the flag has been cleared.
		 */
		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			spin_unlock_irqrestore(&conn->c_lock, flags);
			spin_lock_irqsave(&rm->m_rs_lock, flags);
			rm->m_rs = NULL;
			spin_unlock_irqrestore(&rm->m_rs_lock, flags);
			continue;
		}
		list_del_init(&rm->m_conn_item);
		spin_unlock_irqrestore(&conn->c_lock, flags);

		/*
		 * Couldn't grab m_rs_lock in top loop (lock ordering),
		 * but we can now.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}

	rds_wake_sk_sleep(rs);

	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);

		rds_message_wait(rm);
		rds_message_put(rm);
	}
}

/*
 * we only want this to fire once so we use the caller's 'queued'.  It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	unsigned long flags;
	u32 len;

	if (*queued)
		goto out;

	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	/*
	 * If there is a little space in sndbuf, we don't queue anything,
	 * and userspace gets -EAGAIN. But poll() indicates there's send
	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
	 * freed up by incoming acks. So we check the *old* value of
	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
	 * and poll() now knows no more data can be sent.
	 */
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);
		rm->m_rs = rs;

		/* The code ordering is a little weird, but we're
		   trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		rds_message_addref(rm);

		spin_lock(&conn->c_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&conn->c_lock);

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

		*queued = 1;
	}

	spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
	return *queued;
}

/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go. This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int data_len)
{
	struct cmsghdr *cmsg;
	int size = 0;
	int cmsg_groups = 0;
	int retval;

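	/*
	 * cmsg_groups bit 0 marks cmsgs that contribute ops (RDMA args and
	 * atomics); bit 1 marks MR cmsgs (RDMA_DEST, RDMA_MAP).  Mixing the
	 * two groups (cmsg_groups == 3) is rejected below.
	 */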
	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			cmsg_groups |= 1;
			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
			if (retval < 0)
				return retval;
			size += retval;

			break;

		case RDS_CMSG_RDMA_DEST:
		case RDS_CMSG_RDMA_MAP:
			cmsg_groups |= 2;
			/* these are valid but do not add any size */
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			cmsg_groups |= 1;
			size += sizeof(struct scatterlist);
			break;

		default:
			return -EINVAL;
		}

	}

	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);

	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
	if (cmsg_groups == 3)
		return -EINVAL;

	return size;
}

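/*
 * Walk the control messages attached to this send and translate each one
 * into the corresponding RDMA, MR or atomic op on the rds_message.
 */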
static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr)
{
	struct cmsghdr *cmsg;
	int ret = 0;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (!ret)
				*allocated_mr = 1;
			break;
		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			ret = rds_cmsg_atomic(rs, rm, cmsg);
			break;

		default:
			return -EINVAL;
		}

		if (ret)
			break;
	}

	return ret;
}

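/*
 * Build an rds_message from the caller's iovec and control messages, queue
 * it on the socket's and the connection's send queues, and kick
 * rds_send_xmit() unless RDS_LL_SEND_FULL is set on the connection.
 */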
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
	__be32 daddr;
	__be16 dport;
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int ret = 0;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);

	/* Mirror Linux UDP's mirroring of BSD error message compatibility */
	/* XXX: Perhaps MSG_MORE someday */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (msg->msg_namelen) {
		/* XXX fail non-unicast destination IPs? */
		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
			ret = -EINVAL;
			goto out;
		}
		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
	} else {
		/* We only care about consistency with ->connect() */
		lock_sock(sk);
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
		release_sock(sk);
	}

	lock_sock(sk);
	if (daddr == 0 || rs->rs_bound_addr == 0) {
		release_sock(sk);
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}
	release_sock(sk);

	/* size of rm including all sgs */
	ret = rds_rm_size(msg, payload_len);
	if (ret < 0)
		goto out;

	rm = rds_message_alloc(ret, GFP_KERNEL);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	/* Attach data to the rm */
	if (payload_len) {
		rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
		if (!rm->data.op_sg) {
			ret = -ENOMEM;
			goto out;
		}
		ret = rds_message_copy_from_user(rm, &msg->msg_iter);
		if (ret)
			goto out;
	}
	rm->data.op_active = 1;

	rm->m_daddr = daddr;

	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
		conn = rs->rs_conn;
	else {
		conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
					rs->rs_transport,
					sock->sk->sk_allocation);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}

	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
	if (ret)
		goto out;

	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
			       &rm->rdma, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
		printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
			       &rm->atomic, conn->c_trans->xmit_atomic);
		ret = -EOPNOTSUPP;
		goto out;
	}

	rds_conn_connect_if_down(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}

	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);
		/* XXX make sure this is reasonable */
		if (payload_len > rds_sk_sndbuf(rs)) {
			ret = -EMSGSIZE;
			goto out;
		}
		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}

		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;

		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * By now we've committed to the send.  We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		rds_send_xmit(conn);

	rds_message_put(rm);
	return payload_len;

out:
	/* If the user included an RDMA_MAP cmsg, we allocated an MR on the fly.
	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	if (rm)
		rds_message_put(rm);
	return ret;
}

/*
 * Reply to a ping packet.
 */
int
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;

	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	rm->m_daddr = conn->c_faddr;
	rm->data.op_active = 1;

	rds_conn_connect_if_down(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;

	spin_lock_irqsave(&conn->c_lock, flags);
	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = conn;

	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
				    conn->c_next_tx_seq);
	conn->c_next_tx_seq++;
	spin_unlock_irqrestore(&conn->c_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;
}