/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>

#include "qib.h"

/* Fast memory region */
struct qib_fmr {
	struct ib_fmr ibfmr;
	struct qib_mregion mr;        /* must be last */
};

static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct qib_fmr, ibfmr);
}

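/*
 * Set up the common qib_mregion fields: allocate enough map[] chunks to
 * hold "count" segments (QIB_SEGSZ entries per chunk), initialize the
 * reference count and the completion used for teardown, and record the PD.
 * Returns 0 on success or -ENOMEM if a map chunk cannot be allocated.
 */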
static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd,
	int count)
{
	int m, i = 0;
	int rval = 0;

	m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
	for (; i < m; i++) {
		mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
		if (!mr->map[i])
			goto bail;
	}
	mr->mapsz = m;
	init_completion(&mr->comp);
	/* count returning the ptr to user */
	atomic_set(&mr->refcount, 1);
	mr->pd = pd;
	mr->max_segs = count;
out:
	return rval;
bail:
	while (i)
		kfree(mr->map[--i]);
	rval = -ENOMEM;
	goto out;
}

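/* Undo init_qib_mregion(): free the per-region segment map chunks. */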
static void deinit_qib_mregion(struct qib_mregion *mr)
{
	int i = mr->mapsz;

	mr->mapsz = 0;
	while (i)
		kfree(mr->map[--i]);
}

/**
 * qib_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Returns the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the
 * struct ib_dma_mapping_ops functions (see qib_dma.c).
 */
struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct qib_mr *mr = NULL;
	struct ib_mr *ret;
	int rval;

	if (to_ipd(pd)->user) {
		ret = ERR_PTR(-EPERM);
		goto bail;
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	rval = init_qib_mregion(&mr->mr, pd, 0);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail;
	}

	rval = qib_alloc_lkey(&mr->mr, 1);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail_mregion;
	}

	mr->mr.access_flags = acc;
	ret = &mr->ibmr;
done:
	return ret;

bail_mregion:
	deinit_qib_mregion(&mr->mr);
bail:
	kfree(mr);
	goto done;
}

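/*
 * Allocate a qib_mr with room for "count" segments, initialize the
 * common qib_mregion fields, and install the region in the lkey table.
 * Returns the new qib_mr or an ERR_PTR().
 */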
static struct qib_mr *alloc_mr(int count, struct ib_pd *pd)
{
	struct qib_mr *mr;
	int rval = -ENOMEM;
	int m;

	/* Allocate struct plus pointers to first level page tables. */
	m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
	mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
	if (!mr)
		goto bail;

	rval = init_qib_mregion(&mr->mr, pd, count);
	if (rval)
		goto bail;
	/*
	 * ib_reg_phys_mr() will initialize mr->ibmr except for
	 * lkey and rkey.
	 */
	rval = qib_alloc_lkey(&mr->mr, 0);
	if (rval)
		goto bail_mregion;
	mr->ibmr.lkey = mr->mr.lkey;
	mr->ibmr.rkey = mr->mr.lkey;
done:
	return mr;

bail_mregion:
	deinit_qib_mregion(&mr->mr);
bail:
	kfree(mr);
	mr = ERR_PTR(rval);
	goto done;
}

/**
 * qib_reg_phys_mr - register a physical memory region
 * @pd: protection domain for this memory region
 * @buffer_list: pointer to the list of physical buffers to register
 * @num_phys_buf: the number of physical buffers to register
 * @acc: access flags for this memory region
 * @iova_start: the starting address passed over IB which maps to this MR
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
			      struct ib_phys_buf *buffer_list,
			      int num_phys_buf, int acc, u64 *iova_start)
{
	struct qib_mr *mr;
	int n, m, i;
	struct ib_mr *ret;

	mr = alloc_mr(num_phys_buf, pd);
	if (IS_ERR(mr)) {
		ret = (struct ib_mr *)mr;
		goto bail;
	}

	mr->mr.user_base = *iova_start;
	mr->mr.iova = *iova_start;
	mr->mr.access_flags = acc;

	m = 0;
	n = 0;
	for (i = 0; i < num_phys_buf; i++) {
		mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
		mr->mr.map[m]->segs[n].length = buffer_list[i].size;
		mr->mr.length += buffer_list[i].size;
		n++;
		if (n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}

	ret = &mr->ibmr;

bail:
	return ret;
}

/**
 * qib_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: the IB virtual address that maps to this region (used as the iova)
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the QLogic_IB driver
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata)
{
	struct qib_mr *mr;
	struct ib_umem *umem;
	struct scatterlist *sg;
	int n, m, entry;
	struct ib_mr *ret;

	if (length == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	umem = ib_umem_get(pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem))
		return ERR_CAST(umem);

	n = umem->nmap;

	mr = alloc_mr(n, pd);
	if (IS_ERR(mr)) {
		ret = (struct ib_mr *)mr;
		ib_umem_release(umem);
		goto bail;
	}

	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = ib_umem_offset(umem);
	mr->mr.access_flags = mr_access_flags;
	mr->umem = umem;

	if (is_power_of_2(umem->page_size))
		mr->mr.page_shift = ilog2(umem->page_size);
	m = 0;
	n = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		void *vaddr;

		vaddr = page_address(sg_page(sg));
		if (!vaddr) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		mr->mr.map[m]->segs[n].vaddr = vaddr;
		mr->mr.map[m]->segs[n].length = umem->page_size;
		n++;
		if (n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}
	ret = &mr->ibmr;

bail:
	return ret;
}

/**
 * qib_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Returns 0 on success.
 *
 * Note that this is called to free MRs created by qib_get_dma_mr()
 * or qib_reg_user_mr().
 */
int qib_dereg_mr(struct ib_mr *ibmr)
{
	struct qib_mr *mr = to_imr(ibmr);
	int ret = 0;
	unsigned long timeout;

	qib_free_lkey(&mr->mr);

	qib_put_mr(&mr->mr); /* will set completion if last */
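	/*
	 * Wait up to five seconds for any remaining references to drop;
	 * if they do not, restore our reference and report busy.
	 */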
	timeout = wait_for_completion_timeout(&mr->mr.comp,
		5 * HZ);
	if (!timeout) {
		qib_get_mr(&mr->mr);
		ret = -EBUSY;
		goto out;
	}
	deinit_qib_mregion(&mr->mr);
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);
out:
	return ret;
}

/*
 * Allocate a memory region usable with the
 * IB_WR_FAST_REG_MR send work request.
 *
 * Return the memory region on success, otherwise return an errno.
 */
struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
	struct qib_mr *mr;

	mr = alloc_mr(max_page_list_len, pd);
	if (IS_ERR(mr))
		return (struct ib_mr *)mr;

	return &mr->ibmr;
}

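/*
 * Allocate the page list used with a fast register work request; the
 * list is limited to one page worth of u64 addresses.
 */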
struct ib_fast_reg_page_list *
qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
{
	unsigned size = page_list_len * sizeof(u64);
	struct ib_fast_reg_page_list *pl;

	if (size > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	pl = kzalloc(sizeof(*pl), GFP_KERNEL);
	if (!pl)
		return ERR_PTR(-ENOMEM);

	pl->page_list = kzalloc(size, GFP_KERNEL);
	if (!pl->page_list)
		goto err_free;

	return pl;

err_free:
	kfree(pl);
	return ERR_PTR(-ENOMEM);
}

void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl)
{
	kfree(pl->page_list);
	kfree(pl);
}

/**
 * qib_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr)
{
	struct qib_fmr *fmr;
	int m;
	struct ib_fmr *ret;
	int rval = -ENOMEM;

	/* Allocate struct plus pointers to first level page tables. */
	m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
	fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
	if (!fmr)
		goto bail;

	rval = init_qib_mregion(&fmr->mr, pd, fmr_attr->max_pages);
	if (rval)
		goto bail;

	/*
	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
	 * rkey.
	 */
	rval = qib_alloc_lkey(&fmr->mr, 0);
	if (rval)
		goto bail_mregion;
	fmr->ibfmr.rkey = fmr->mr.lkey;
	fmr->ibfmr.lkey = fmr->mr.lkey;
	/*
	 * Resources are allocated but no valid mapping (RKEY can't be
	 * used).
	 */
	fmr->mr.access_flags = mr_access_flags;
	fmr->mr.max_segs = fmr_attr->max_pages;
	fmr->mr.page_shift = fmr_attr->page_shift;

	ret = &fmr->ibfmr;
done:
	return ret;

bail_mregion:
	deinit_qib_mregion(&fmr->mr);
bail:
	kfree(fmr);
	ret = ERR_PTR(rval);
	goto done;
}

/**
 * qib_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 */
int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	struct qib_lkey_table *rkt;
	unsigned long flags;
	int m, n, i;
	u32 ps;
	int ret;

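	/*
	 * An idle FMR is expected to hold two references (its allocation
	 * reference plus the lkey table's); a higher count means it is
	 * still in use, so refuse to remap it.
	 */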
	i = atomic_read(&fmr->mr.refcount);
	if (i > 2)
		return -EBUSY;

	if (list_len > fmr->mr.max_segs) {
		ret = -EINVAL;
		goto bail;
	}
	rkt = &to_idev(ibfmr->device)->lk_table;
	spin_lock_irqsave(&rkt->lock, flags);
	fmr->mr.user_base = iova;
	fmr->mr.iova = iova;
	ps = 1 << fmr->mr.page_shift;
	fmr->mr.length = list_len * ps;
	m = 0;
	n = 0;
	for (i = 0; i < list_len; i++) {
		fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
		fmr->mr.map[m]->segs[n].length = ps;
		if (++n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = 0;

bail:
	return ret;
}

/**
 * qib_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Returns 0 on success.
 */
int qib_unmap_fmr(struct list_head *fmr_list)
{
	struct qib_fmr *fmr;
	struct qib_lkey_table *rkt;
	unsigned long flags;

	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
		rkt = &to_idev(fmr->ibfmr.device)->lk_table;
		spin_lock_irqsave(&rkt->lock, flags);
		fmr->mr.user_base = 0;
		fmr->mr.iova = 0;
		fmr->mr.length = 0;
		spin_unlock_irqrestore(&rkt->lock, flags);
	}
	return 0;
}

/**
 * qib_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Returns 0 on success.
 */
int qib_dealloc_fmr(struct ib_fmr *ibfmr)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	int ret = 0;
	unsigned long timeout;

	qib_free_lkey(&fmr->mr);
	qib_put_mr(&fmr->mr); /* will set completion if last */
	timeout = wait_for_completion_timeout(&fmr->mr.comp,
		5 * HZ);
	if (!timeout) {
		qib_get_mr(&fmr->mr);
		ret = -EBUSY;
		goto out;
	}
	deinit_qib_mregion(&fmr->mr);
	kfree(fmr);
out:
	return ret;
}

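/*
 * RCU callback run once the last reference to a qib_mregion is dropped;
 * it signals the completion that qib_dereg_mr()/qib_dealloc_fmr() wait on.
 */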
void mr_rcu_callback(struct rcu_head *list)
{
	struct qib_mregion *mr = container_of(list, struct qib_mregion, list);

	complete(&mr->comp);
}