/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/ratelimit.h>

#include "rds.h"
#include "ib.h"

static char *rds_ib_wc_status_strings[] = {
#define RDS_IB_WC_STATUS_STR(foo) \
		[IB_WC_##foo] = __stringify(IB_WC_##foo)
	RDS_IB_WC_STATUS_STR(SUCCESS),
	RDS_IB_WC_STATUS_STR(LOC_LEN_ERR),
	RDS_IB_WC_STATUS_STR(LOC_QP_OP_ERR),
	RDS_IB_WC_STATUS_STR(LOC_EEC_OP_ERR),
	RDS_IB_WC_STATUS_STR(LOC_PROT_ERR),
	RDS_IB_WC_STATUS_STR(WR_FLUSH_ERR),
	RDS_IB_WC_STATUS_STR(MW_BIND_ERR),
	RDS_IB_WC_STATUS_STR(BAD_RESP_ERR),
	RDS_IB_WC_STATUS_STR(LOC_ACCESS_ERR),
	RDS_IB_WC_STATUS_STR(REM_INV_REQ_ERR),
	RDS_IB_WC_STATUS_STR(REM_ACCESS_ERR),
	RDS_IB_WC_STATUS_STR(REM_OP_ERR),
	RDS_IB_WC_STATUS_STR(RETRY_EXC_ERR),
	RDS_IB_WC_STATUS_STR(RNR_RETRY_EXC_ERR),
	RDS_IB_WC_STATUS_STR(LOC_RDD_VIOL_ERR),
	RDS_IB_WC_STATUS_STR(REM_INV_RD_REQ_ERR),
	RDS_IB_WC_STATUS_STR(REM_ABORT_ERR),
	RDS_IB_WC_STATUS_STR(INV_EECN_ERR),
	RDS_IB_WC_STATUS_STR(INV_EEC_STATE_ERR),
	RDS_IB_WC_STATUS_STR(FATAL_ERR),
	RDS_IB_WC_STATUS_STR(RESP_TIMEOUT_ERR),
	RDS_IB_WC_STATUS_STR(GENERAL_ERR),
#undef RDS_IB_WC_STATUS_STR
};

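/*
 * Map an ib_wc_status value to its symbolic name for debug output;
 * rds_str_array() presumably handles values that fall outside the table
 * by returning a fallback string rather than indexing past the array.
 */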
char *rds_ib_wc_status_str(enum ib_wc_status status)
{
	return rds_str_array(rds_ib_wc_status_strings,
			     ARRAY_SIZE(rds_ib_wc_status_strings), status);
}

/*
 * Convert an IB-specific completion status to an RDS notification status
 * and call the core completion handler.
 */
static void rds_ib_send_complete(struct rds_message *rm,
				 int wc_status,
				 void (*complete)(struct rds_message *rm, int status))
{
	int notify_status;

	switch (wc_status) {
	case IB_WC_WR_FLUSH_ERR:
		return;

	case IB_WC_SUCCESS:
		notify_status = RDS_RDMA_SUCCESS;
		break;

	case IB_WC_REM_ACCESS_ERR:
		notify_status = RDS_RDMA_REMOTE_ERROR;
		break;

	default:
		notify_status = RDS_RDMA_OTHER_ERROR;
		break;
	}
	complete(rm, notify_status);
}

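/* A data op only needs its scatterlist unmapped here. */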
static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
				   struct rm_data_op *op,
				   int wc_status)
{
	if (op->op_nents)
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->op_sg, op->op_nents,
				DMA_TO_DEVICE);
}

static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
				   struct rm_rdma_op *op,
				   int wc_status)
{
	if (op->op_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->op_sg, op->op_nents,
				op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		op->op_mapped = 0;
	}

	/* If the user asked for a completion notification on this
	 * message, we can implement three different semantics:
	 *  1.	Notify when we received the ACK on the RDS message
	 *	that was queued with the RDMA. This provides reliable
	 *	notification of RDMA status at the expense of a one-way
	 *	packet delay.
	 *  2.	Notify when the IB stack gives us the completion event for
	 *	the RDMA operation.
	 *  3.	Notify when the IB stack gives us the completion event for
	 *	the accompanying RDS messages.
	 * Here, we implement approach #3. To implement approach #2,
	 * we would need to take an event for the rdma WR. To implement #1,
	 * don't call rds_rdma_send_complete at all, and fall back to the notify
	 * handling in the ACK processing code.
	 *
	 * Note: There's no need to explicitly sync any RDMA buffers using
	 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
	 * operation itself unmapped the RDMA buffers, which takes care
	 * of synching.
	 */
	rds_ib_send_complete(container_of(op, struct rds_message, rdma),
			     wc_status, rds_rdma_send_complete);

	if (op->op_write)
		rds_stats_add(s_send_rdma_bytes, op->op_bytes);
	else
		rds_stats_add(s_recv_rdma_bytes, op->op_bytes);
}

static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
				     struct rm_atomic_op *op,
				     int wc_status)
{
	/* unmap atomic recvbuf */
	if (op->op_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
				DMA_FROM_DEVICE);
		op->op_mapped = 0;
	}

	rds_ib_send_complete(container_of(op, struct rds_message, atomic),
			     wc_status, rds_atomic_send_complete);

	if (op->op_type == RDS_ATOMIC_TYPE_CSWP)
		rds_ib_stats_inc(s_ib_atomic_cswp);
	else
		rds_ib_stats_inc(s_ib_atomic_fadd);
}

/*
 * Unmap the resources associated with a struct send_work.
 *
 * Returns the rm only because the caller (the completion handler) needs
 * it, and currently the only other way to recover it is by switching on
 * wr.opcode.
 */
static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
						struct rds_ib_send_work *send,
						int wc_status)
{
	struct rds_message *rm = NULL;

	/* In the error case, wc.opcode sometimes contains garbage */
	switch (send->s_wr.opcode) {
	case IB_WR_SEND:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, data);
			rds_ib_send_unmap_data(ic, send->s_op, wc_status);
		}
		break;
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_READ:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, rdma);
			rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
		}
		break;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
	case IB_WR_ATOMIC_CMP_AND_SWP:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, atomic);
			rds_ib_send_unmap_atomic(ic, send->s_op, wc_status);
		}
		break;
	default:
		printk_ratelimited(KERN_NOTICE
			       "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
			       __func__, send->s_wr.opcode);
		break;
	}

	send->s_wr.opcode = 0xdead;

	return rm;
}

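/*
 * Pre-fill the per-slot fields that never change: the wr_id that ties a
 * completion back to its ring slot, the sg_list pointer, and the header
 * SGE that always points at this slot's entry in the i_send_hdrs area.
 * The send paths below then only fill in the per-message pieces.
 */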
void rds_ib_send_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		struct ib_sge *sge;

		send->s_op = NULL;

		send->s_wr.wr_id = i;
		send->s_wr.sg_list = send->s_sge;
		send->s_wr.ex.imm_data = 0;

		sge = &send->s_sge[0];
		sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_mr->lkey;

		send->s_sge[1].lkey = ic->i_mr->lkey;
	}
}

void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		if (send->s_op && send->s_wr.opcode != 0xdead)
			rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR);
	}
}

/*
 * The only fast path caller always has a non-zero nr, so we don't
 * bother testing nr before performing the atomic sub.
 */
static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr)
{
	if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&
	    waitqueue_active(&rds_ib_ring_empty_wait))
		wake_up(&rds_ib_ring_empty_wait);
	BUG_ON(atomic_read(&ic->i_signaled_sends) < 0);
}

/*
 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 * operations performed in the send path.  As the sender allocs and potentially
 * unallocs the next free entry in the ring, it doesn't alter which entry is
 * the next to be freed, which is all this path is concerned with.
 */
void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_message *rm = NULL;
	struct ib_wc wc;
	struct rds_ib_send_work *send;
	u32 completed;
	u32 oldest;
	u32 i = 0;
	int ret;
	int nr_sig = 0;

	rdsdebug("cq %p conn %p\n", cq, conn);
	rds_ib_stats_inc(s_ib_tx_cq_call);
	ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	if (ret)
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status,
			 rds_ib_wc_status_str(wc.status), wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_ib_stats_inc(s_ib_tx_cq_event);

		if (wc.wr_id == RDS_IB_ACK_WR_ID) {
			if (time_after(jiffies, ic->i_ack_queued + HZ/2))
				rds_ib_stats_inc(s_ib_tx_stalled);
			rds_ib_ack_send_complete(ic);
			continue;
		}

		oldest = rds_ib_ring_oldest(&ic->i_send_ring);

		completed = rds_ib_ring_completed(&ic->i_send_ring, wc.wr_id, oldest);

		for (i = 0; i < completed; i++) {
			send = &ic->i_sends[oldest];
			if (send->s_wr.send_flags & IB_SEND_SIGNALED)
				nr_sig++;

			rm = rds_ib_send_unmap_op(ic, send, wc.status);

			if (time_after(jiffies, send->s_queued + HZ/2))
				rds_ib_stats_inc(s_ib_tx_stalled);

			if (send->s_op) {
				if (send->s_op == rm->m_final_op) {
					/* If anyone waited for this message to get flushed out, wake
					 * them up now */
					rds_message_unmapped(rm);
				}
				rds_message_put(rm);
				send->s_op = NULL;
			}

			oldest = (oldest + 1) % ic->i_send_ring.w_nr;
		}

		rds_ib_ring_free(&ic->i_send_ring, completed);
		rds_ib_sub_signaled(ic, nr_sig);
		nr_sig = 0;

		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
		    test_bit(0, &conn->c_map_queued))
			queue_delayed_work(rds_wq, &conn->c_send_w, 0);

		/* We expect errors as the qp is drained during shutdown */
		if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
			rds_ib_conn_error(conn, "send completion on %pI4 had status "
					  "%u (%s), disconnecting and reconnecting\n",
					  &conn->c_faddr, wc.status,
					  rds_ib_wc_status_str(wc.status));
		}
	}
}

/*
 * This is the main function for allocating credits when sending
 * messages.
 *
 * Conceptually, we have two counters:
 *  -	send credits: this tells us how many WRs we're allowed
 *	to submit without overrunning the receiver's queue. For
 *	each SEND WR we post, we decrement this by one.
 *
 *  -	posted credits: this tells us how many WRs we recently
 *	posted to the receive queue. This value is transferred
 *	to the peer as a "credit update" in an RDS header field.
 *	Every time we transmit credits to the peer, we subtract
 *	the amount of transferred credits from this counter.
 *
 * It is essential that we avoid situations where both sides have
 * exhausted their send credits, and are unable to send new credits
 * to the peer. We achieve this by requiring that we send at least
 * one credit update to the peer before exhausting our credits.
 * When new credits arrive, we subtract one credit that is withheld
 * until we've posted new buffers and are ready to transmit these
 * credits (see rds_ib_send_add_credits below).
 *
 * The RDS send code is essentially single-threaded; rds_send_xmit
 * sets RDS_IN_XMIT to ensure exclusive access to the send ring.
 * However, the ACK sending code is independent and can race with
 * message SENDs.
 *
 * In the send path, we need to update the counters for send credits
 * and the counter of posted buffers atomically - when we use the
 * last available credit, we cannot allow another thread to race us
 * and grab the posted credits counter.  Hence, we have to use a
 * spinlock to protect the credit counter, or use atomics.
 *
 * Spinlocks shared between the send and the receive path are bad,
 * because they create unnecessary delays. An early implementation
 * using a spinlock showed a 5% degradation in throughput at some
 * loads.
 *
 * This implementation avoids spinlocks completely, putting both
 * counters into a single atomic, and updating that atomic using
 * atomic_add (in the receive path, when receiving fresh credits),
 * and using atomic_cmpxchg when updating the two counters.
 */
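/*
 * A rough sketch of the packing this relies on (the IB_GET_ and IB_SET_
 * credit macros are presumably defined in ib.h; the exact field widths
 * are an assumption here, not taken from this file):
 *
 *	i_credits == IB_SET_SEND_CREDITS(send) | IB_SET_POST_CREDITS(posted)
 *
 * Worked example of the "withhold one credit" rule below: with
 * avail == 3 send credits and posted == 0, a caller that wants 3 WRs
 * only gets 2; the last credit stays reserved so that a credit update
 * can still be sent once new receive buffers have been posted.
 */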
int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
{
	unsigned int avail, posted, got = 0, advertise;
	long oldval, newval;

	*adv_credits = 0;
	if (!ic->i_flowctl)
		return wanted;

try_again:
	advertise = 0;
	oldval = newval = atomic_read(&ic->i_credits);
	posted = IB_GET_POST_CREDITS(oldval);
	avail = IB_GET_SEND_CREDITS(oldval);

	rdsdebug("wanted=%u credits=%u posted=%u\n",
			wanted, avail, posted);

	/* The last credit must be used to send a credit update. */
	if (avail && !posted)
		avail--;

	if (avail < wanted) {
		struct rds_connection *conn = ic->i_cm_id->context;

		/* Oops, there aren't that many credits left! */
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		got = avail;
	} else {
		/* Sometimes you get what you want, lalala. */
		got = wanted;
	}
	newval -= IB_SET_SEND_CREDITS(got);

	/*
	 * If need_posted is non-zero, then the caller wants the posted
	 * credits advertised regardless of whether any send credits are
	 * available.
	 */
	if (posted && (got || need_posted)) {
		advertise = min_t(unsigned int, posted, max_posted);
		newval -= IB_SET_POST_CREDITS(advertise);
	}

	/* Finally bill everything */
	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
		goto try_again;

	*adv_credits = advertise;
	return got;
}

void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (credits == 0)
		return;

	rdsdebug("credits=%u current=%u%s\n",
			credits,
			IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
			test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");

	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);

	rds_ib_stats_inc(s_ib_rx_credit_updates);
}

void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (posted == 0)
		return;

	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);

	/* Decide whether to send an update to the peer now.
	 * If we would send a credit update for every single buffer we
	 * post, we would end up with an ACK storm (ACK arrives,
	 * consumes buffer, we refill the ring, send ACK to remote
	 * advertising the newly posted buffer... ad infinitum)
	 *
	 * Performance pretty much depends on how often we send
	 * credit updates - too frequent updates mean lots of ACKs.
	 * Too infrequent updates, and the peer will run out of
	 * credits and have to throttle.
	 * For the time being, 16 seems to be a good compromise.
	 */
	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}

static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic,
					     struct rds_ib_send_work *send,
					     bool notify)
{
	/*
	 * We want to delay signaling completions just enough to get
	 * the batching benefits but not so much that we create dead time
	 * on the wire.
	 */
	if (ic->i_unsignaled_wrs-- == 0 || notify) {
		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
		send->s_wr.send_flags |= IB_SEND_SIGNALED;
		return 1;
	}
	return 0;
}
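
/*
 * The return value (1 when IB_SEND_SIGNALED was set) is accumulated by the
 * callers into nr_sig and added to ic->i_signaled_sends once the WRs have
 * been posted; rds_ib_sub_signaled() drops it again as the signaled
 * completions come back.
 */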

/*
 * This can be called multiple times for a given message.  The first time
 * we see a message we map its scatterlist into the IB device so that
 * we can provide that mapped address to the IB scatter gather entries
 * in the IB work requests.  We translate the scatterlist into a series
 * of work requests that fragment the message.  These work requests complete
 * in order so we pass ownership of the message to the completion handler
 * once we send the final fragment.
 *
 * The RDS core uses the c_send_lock to only enter this function once
 * per connection.  This makes sure that the tx ring alloc/unalloc pairs
 * don't get out of sync and confuse the ring.
 */
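/*
 * Per-fragment work request layout (see the do/while loop below):
 *	s_sge[0] - this slot's copy of the rds_header in i_send_hdrs
 *	s_sge[1] - up to RDS_FRAG_SIZE bytes of message payload, if any
 */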
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	u32 pos;
	u32 i;
	u32 work_alloc;
	u32 credit_alloc = 0;
	u32 posted;
	u32 adv_credits = 0;
	int send_flags = 0;
	int bytes_sent = 0;
	int ret;
	int flow_controlled = 0;
	int nr_sig = 0;

	BUG_ON(off % RDS_FRAG_SIZE);
	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));

	/* Do not send cong updates to IB loopback */
	if (conn->c_loopback
	    && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
		scat = &rm->data.op_sg[sg];
		ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
		return sizeof(struct rds_header) + ret;
	}

	/* FIXME we may overallocate here */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
		i = 1;
	else
		i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc == 0) {
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	if (ic->i_flowctl) {
		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
		adv_credits += posted;
		if (credit_alloc < work_alloc) {
			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
			work_alloc = credit_alloc;
			flow_controlled = 1;
		}
		if (work_alloc == 0) {
			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
			rds_ib_stats_inc(s_ib_tx_throttle);
			ret = -ENOMEM;
			goto out;
		}
	}

	/* map the message the first time we see it */
	if (!ic->i_data_op) {
		if (rm->data.op_nents) {
			rm->data.op_count = ib_dma_map_sg(dev,
							  rm->data.op_sg,
							  rm->data.op_nents,
							  DMA_TO_DEVICE);
			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
			if (rm->data.op_count == 0) {
				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
				rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
		} else {
			rm->data.op_count = 0;
		}

		rds_message_addref(rm);
		ic->i_data_op = &rm->data;

		/* Finalize the header */
		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;

		/* If it has an RDMA op, tell the peer we did it. This is
		 * used by the peer to release use-once RDMA MRs. */
		if (rm->rdma.op_active) {
			struct rds_ext_header_rdma ext_hdr;

			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
			rds_message_add_extension(&rm->m_inc.i_hdr,
					RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
		}
		if (rm->m_rdma_cookie) {
			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
					rds_rdma_cookie_key(rm->m_rdma_cookie),
					rds_rdma_cookie_offset(rm->m_rdma_cookie));
		}

		/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
		 * we should not do this unless we have a chance of at least
		 * sticking the header into the send ring. Which is why we
		 * should call rds_ib_ring_alloc first. */
		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
		rds_message_make_checksum(&rm->m_inc.i_hdr);

		/*
		 * Update adv_credits since we reset the ACK_REQUIRED bit.
		 */
		if (ic->i_flowctl) {
			rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
			adv_credits += posted;
			BUG_ON(adv_credits > 255);
		}
	}

	/* Sometimes you want to put a fence between an RDMA
	 * READ and the following SEND.
	 * We could either do this all the time
	 * or when requested by the user. Right now, we let
	 * the application choose.
	 */
	if (rm->rdma.op_active && rm->rdma.op_fence)
		send_flags = IB_SEND_FENCE;

	/* Each frag gets a header. Msgs may be 0 bytes */
	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &ic->i_data_op->op_sg[sg];
	i = 0;
	do {
		unsigned int len = 0;

		/* Set up the header */
		send->s_wr.send_flags = send_flags;
		send->s_wr.opcode = IB_WR_SEND;
		send->s_wr.num_sge = 1;
		send->s_wr.next = NULL;
		send->s_queued = jiffies;
		send->s_op = NULL;

		send->s_sge[0].addr = ic->i_send_hdrs_dma
			+ (pos * sizeof(struct rds_header));
		send->s_sge[0].length = sizeof(struct rds_header);

		memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));

		/* Set up the data, if present */
		if (i < work_alloc
		    && scat != &rm->data.op_sg[rm->data.op_count]) {
			len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
			send->s_wr.num_sge = 2;

			send->s_sge[1].addr = ib_sg_dma_address(dev, scat) + off;
			send->s_sge[1].length = len;

			bytes_sent += len;
			off += len;
			if (off == ib_sg_dma_len(dev, scat)) {
				scat++;
				off = 0;
			}
		}

		rds_ib_set_wr_signal_state(ic, send, 0);

		/*
		 * Always signal the last one if we're stopping due to flow control.
		 */
		if (ic->i_flowctl && flow_controlled && i == (work_alloc-1))
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;

		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
			nr_sig++;

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		if (ic->i_flowctl && adv_credits) {
			struct rds_header *hdr = &ic->i_send_hdrs[pos];

			/* add credit and redo the header checksum */
			hdr->h_credit = adv_credits;
			rds_message_make_checksum(hdr);
			adv_credits = 0;
			rds_ib_stats_inc(s_ib_tx_credit_updates);
		}

		if (prev)
			prev->s_wr.next = &send->s_wr;
		prev = send;

		pos = (pos + 1) % ic->i_send_ring.w_nr;
		send = &ic->i_sends[pos];
		i++;

	} while (i < work_alloc
		 && scat != &rm->data.op_sg[rm->data.op_count]);

	/* Account the RDS header in the number of bytes we sent, but just once.
	 * The caller has no concept of fragmentation. */
	if (hdr_off == 0)
		bytes_sent += sizeof(struct rds_header);

	/* if we finished the message then send completion owns it */
	if (scat == &rm->data.op_sg[rm->data.op_count]) {
		prev->s_op = ic->i_data_op;
		prev->s_wr.send_flags |= IB_SEND_SOLICITED;
		ic->i_data_op = NULL;
	}

	/* Put back wrs & credits we didn't use */
	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}
	if (ic->i_flowctl && i < credit_alloc)
		rds_ib_send_add_credits(conn, credit_alloc - i);

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	/* XXX need to worry about failed_wr and partial sends. */
	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		if (prev->s_op) {
			ic->i_data_op = prev->s_op;
			prev->s_op = NULL;
		}

		rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
		goto out;
	}

	ret = bytes_sent;
out:
	BUG_ON(adv_credits);
	return ret;
}

/*
 * Issue an atomic operation.
 * A simplified version of the rdma case: we always map a single SG entry
 * of just 8 bytes, which holds the return value of the atomic operation.
 */
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct ib_send_wr *failed_wr;
	struct rds_ib_device *rds_ibdev;
	u32 pos;
	u32 work_alloc;
	int ret;
	int nr_sig = 0;

	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
	if (work_alloc != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	/* address of send request in ring */
	send = &ic->i_sends[pos];
	send->s_queued = jiffies;

	if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
		send->s_wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
		send->s_wr.wr.atomic.compare_add = op->op_m_cswp.compare;
		send->s_wr.wr.atomic.swap = op->op_m_cswp.swap;
		send->s_wr.wr.atomic.compare_add_mask = op->op_m_cswp.compare_mask;
		send->s_wr.wr.atomic.swap_mask = op->op_m_cswp.swap_mask;
	} else { /* FADD */
		send->s_wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD;
		send->s_wr.wr.atomic.compare_add = op->op_m_fadd.add;
		send->s_wr.wr.atomic.swap = 0;
		send->s_wr.wr.atomic.compare_add_mask = op->op_m_fadd.nocarry_mask;
		send->s_wr.wr.atomic.swap_mask = 0;
	}
	nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
	send->s_wr.num_sge = 1;
	send->s_wr.next = NULL;
	send->s_wr.wr.atomic.remote_addr = op->op_remote_addr;
	send->s_wr.wr.atomic.rkey = op->op_rkey;
	send->s_op = op;
	rds_message_addref(container_of(send->s_op, struct rds_message, atomic));

	/* map 8 byte retval buffer to the device */
	ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
	rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
	if (ret != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
		ret = -ENOMEM; /* XXX ? */
		goto out;
	}

	/* Convert our struct scatterlist to struct ib_sge */
	send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg);
	send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg);
	send->s_sge[0].lkey = ic->i_mr->lkey;

	rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
		 send->s_sge[0].addr, send->s_sge[0].length);

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	failed_wr = &send->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &send->s_wr, &failed_wr);
	rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
		 send, &send->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &send->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		goto out;
	}

	if (unlikely(failed_wr != &send->s_wr)) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &send->s_wr);
	}

out:
	return ret;
}

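/*
 * Issue the RDMA READ/WRITE half of a message: the op's scatterlist is
 * split across as many work requests as needed, each carrying at most the
 * device's max_sge entries, and all chained together for one ib_post_send.
 */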
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	unsigned long len;
	u64 remote_addr = op->op_remote_addr;
	u32 max_sge = ic->rds_ibdev->max_sge;
	u32 pos;
	u32 work_alloc;
	u32 i;
	u32 j;
	int sent;
	int ret;
	int num_sge;
	int nr_sig = 0;

	/* map the op the first time we see it */
	if (!op->op_mapped) {
		op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
					     op->op_sg, op->op_nents, (op->op_write) ?
					     DMA_TO_DEVICE : DMA_FROM_DEVICE);
		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
		if (op->op_count == 0) {
			rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
			ret = -ENOMEM; /* XXX ? */
			goto out;
		}

		op->op_mapped = 1;
	}

	/*
	 * Instead of knowing how to return a partial rdma read/write we insist that there
	 * be enough work requests to send the entire message.
	 */
	i = ceil(op->op_count, max_sge);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc != i) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &op->op_sg[0];
	sent = 0;
	num_sge = op->op_count;

	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
		send->s_wr.send_flags = 0;
		send->s_queued = jiffies;
		send->s_op = NULL;

		nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify);

		send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
		send->s_wr.wr.rdma.remote_addr = remote_addr;
		send->s_wr.wr.rdma.rkey = op->op_rkey;

		if (num_sge > max_sge) {
			send->s_wr.num_sge = max_sge;
			num_sge -= max_sge;
		} else {
			send->s_wr.num_sge = num_sge;
		}

		send->s_wr.next = NULL;

		if (prev)
			prev->s_wr.next = &send->s_wr;

		for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) {
			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
			send->s_sge[j].addr =
				 ib_sg_dma_address(ic->i_cm_id->device, scat);
			send->s_sge[j].length = len;
			send->s_sge[j].lkey = ic->i_mr->lkey;

			sent += len;
			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);

			remote_addr += len;
			scat++;
		}

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			&send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		prev = send;
		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
			send = ic->i_sends;
	}

	/* give a reference to the last op */
	if (scat == &op->op_sg[op->op_count]) {
		prev->s_op = op;
		rds_message_addref(container_of(op, struct rds_message, rdma));
	}

	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		goto out;
	}

	if (unlikely(failed_wr != &first->s_wr)) {
		printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &first->s_wr);
	}

out:
	return ret;
}

void rds_ib_xmit_complete(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* We may have a pending ACK or window update we were unable
	 * to send previously (due to flow control). Try again. */
	rds_ib_attempt_ack(ic);
}