/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>

#include <rdma/ib_umem.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_smi.h>

#include "ipath_verbs.h"

/* Fast memory region */
struct ipath_fmr {
	struct ib_fmr ibfmr;
	u8 page_shift;
	struct ipath_mregion mr;        /* must be last */
};
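
/*
 * Why "must be last": struct ipath_mregion (see ipath_verbs.h) ends in
 * a flexible array of pointers to segment chunks (mr.map[]), so no
 * member may follow it.  ipath_alloc_fmr() below sizes its kmalloc()
 * to leave room for those trailing map[] pointers.
 */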

static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct ipath_fmr, ibfmr);
}

/**
 * ipath_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Returns the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the
 * struct ib_dma_mapping_ops functions (see ipath_dma.c).
 */
struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct ipath_mr *mr;
	struct ib_mr *ret;

	mr = kzalloc(sizeof *mr, GFP_KERNEL);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	mr->mr.access_flags = acc;
	ret = &mr->ibmr;

bail:
	return ret;
}
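
/*
 * Kernel ULPs reach this entry point through the core verbs API; a
 * minimal, hypothetical caller sketch (assuming a valid PD):
 *
 *	struct ib_mr *mr;
 *
 *	mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *
 * The returned lkey then covers DMA addresses produced by the
 * ib_dma_map_*() helpers mentioned above.
 */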

static struct ipath_mr *alloc_mr(int count,
				 struct ipath_lkey_table *lk_table)
{
	struct ipath_mr *mr;
	int m, i = 0;

	/* Allocate struct plus pointers to first level page tables. */
	m = (count + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
	mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
	if (!mr)
		goto done;

	/* Allocate first level page tables. */
	for (; i < m; i++) {
		mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
		if (!mr->mr.map[i])
			goto bail;
	}
	mr->mr.mapsz = m;

	/*
	 * ib_reg_phys_mr() will initialize mr->ibmr except for
	 * lkey and rkey.
	 */
	if (!ipath_alloc_lkey(lk_table, &mr->mr))
		goto bail;
	mr->ibmr.rkey = mr->ibmr.lkey = mr->mr.lkey;

	goto done;

bail:
	while (i) {
		i--;
		kfree(mr->mr.map[i]);
	}
	kfree(mr);
	mr = NULL;

done:
	return mr;
}

/**
 * ipath_reg_phys_mr - register a physical memory region
 * @pd: protection domain for this memory region
 * @buffer_list: pointer to the list of physical buffers to register
 * @num_phys_buf: the number of physical buffers to register
 * @acc: access flags for this memory region
 * @iova_start: the starting address passed over IB which maps to this MR
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
				struct ib_phys_buf *buffer_list,
				int num_phys_buf, int acc, u64 *iova_start)
{
	struct ipath_mr *mr;
	int n, m, i;
	struct ib_mr *ret;

	mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table);
	if (mr == NULL) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	mr->mr.pd = pd;
	mr->mr.user_base = *iova_start;
	mr->mr.iova = *iova_start;
	mr->mr.length = 0;
	mr->mr.offset = 0;
	mr->mr.access_flags = acc;
	mr->mr.max_segs = num_phys_buf;
	mr->umem = NULL;

	m = 0;
	n = 0;
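	/*
	 * Fill the two-level segment table: n indexes within the current
	 * segment chunk and m selects the chunk, so buffer i lands in
	 * map[i / IPATH_SEGSZ]->segs[i % IPATH_SEGSZ].  The same walk is
	 * repeated for user and fast memory regions below.
	 */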
	for (i = 0; i < num_phys_buf; i++) {
		mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
		mr->mr.map[m]->segs[n].length = buffer_list[i].size;
		mr->mr.length += buffer_list[i].size;
		n++;
		if (n == IPATH_SEGSZ) {
			m++;
			n = 0;
		}
	}

	ret = &mr->ibmr;

bail:
	return ret;
}

/**
 * ipath_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: virtual address to use (from HCA's point of view)
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the InfiniPath driver
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				u64 virt_addr, int mr_access_flags,
				struct ib_udata *udata)
{
	struct ipath_mr *mr;
	struct ib_umem *umem;
	int n, m, entry;
	struct scatterlist *sg;
	struct ib_mr *ret;

	if (length == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	umem = ib_umem_get(pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem))
		return ERR_CAST(umem);

	n = umem->nmap;
	mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		ib_umem_release(umem);
		goto bail;
	}

	mr->mr.pd = pd;
	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = ib_umem_offset(umem);
	mr->mr.access_flags = mr_access_flags;
	mr->mr.max_segs = n;
	mr->umem = umem;

	m = 0;
	n = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		void *vaddr;

		/* Highmem pages have no kernel mapping, so reject them. */
		vaddr = page_address(sg_page(sg));
		if (!vaddr) {
			/* Frees the lkey, segment tables and umem. */
			ipath_dereg_mr(&mr->ibmr);
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		mr->mr.map[m]->segs[n].vaddr = vaddr;
		mr->mr.map[m]->segs[n].length = umem->page_size;
		n++;
		if (n == IPATH_SEGSZ) {
			m++;
			n = 0;
		}
	}
	ret = &mr->ibmr;

bail:
	return ret;
}

/**
 * ipath_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Returns 0 on success.
 *
 * Note that this is called to free MRs created by ipath_get_dma_mr(),
 * ipath_reg_phys_mr() or ipath_reg_user_mr().
 */
int ipath_dereg_mr(struct ib_mr *ibmr)
{
	struct ipath_mr *mr = to_imr(ibmr);
	int i;

	ipath_free_lkey(&to_idev(ibmr->device)->lk_table, ibmr->lkey);
	i = mr->mr.mapsz;
	while (i) {
		i--;
		kfree(mr->mr.map[i]);
	}

	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr);
	return 0;
}

/**
 * ipath_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			       struct ib_fmr_attr *fmr_attr)
{
	struct ipath_fmr *fmr;
	int m, i = 0;
	struct ib_fmr *ret;

	/* Allocate struct plus pointers to first level page tables. */
	m = (fmr_attr->max_pages + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
	fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
	if (!fmr)
		goto bail;

	/* Allocate first level page tables. */
	for (; i < m; i++) {
		fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
					 GFP_KERNEL);
		if (!fmr->mr.map[i])
			goto bail;
	}
	fmr->mr.mapsz = m;

	/*
	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
	 * rkey.
	 */
	if (!ipath_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
		goto bail;
	fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mr.lkey;
	/*
	 * Resources are allocated but no valid mapping (RKEY can't be
	 * used).
	 */
	fmr->mr.pd = pd;
	fmr->mr.user_base = 0;
	fmr->mr.iova = 0;
	fmr->mr.length = 0;
	fmr->mr.offset = 0;
	fmr->mr.access_flags = mr_access_flags;
	fmr->mr.max_segs = fmr_attr->max_pages;
	fmr->page_shift = fmr_attr->page_shift;

	ret = &fmr->ibfmr;
	goto done;

bail:
	while (i)
		kfree(fmr->mr.map[--i]);
	kfree(fmr);
	ret = ERR_PTR(-ENOMEM);

done:
	return ret;
}
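
/*
 * For context, a hypothetical consumer drives the FMR entry points
 * below through the core verbs API, roughly:
 *
 *	struct ib_fmr_attr attr = {
 *		.max_pages  = 64,
 *		.max_maps   = 32,
 *		.page_shift = PAGE_SHIFT,
 *	};
 *	struct ib_fmr *fmr = ib_alloc_fmr(pd, IB_ACCESS_REMOTE_WRITE, &attr);
 *
 *	ib_map_phys_fmr(fmr, page_list, npages, iova);
 *	...
 *	ib_unmap_fmr(&fmr_list);
 *	ib_dealloc_fmr(fmr);
 *
 * The attribute values above are illustrative only.
 */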

/**
 * ipath_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 */
int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		       int list_len, u64 iova)
{
	struct ipath_fmr *fmr = to_ifmr(ibfmr);
	struct ipath_lkey_table *rkt;
	unsigned long flags;
	int m, n, i;
	u32 ps;
	int ret;

	if (list_len > fmr->mr.max_segs) {
		ret = -EINVAL;
		goto bail;
	}
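	/*
	 * The lkey table lock serializes this update against other users
	 * of the key table; the irqsave variant is needed because, as
	 * noted above, this can be called from interrupt context.
	 */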
	rkt = &to_idev(ibfmr->device)->lk_table;
	spin_lock_irqsave(&rkt->lock, flags);
	fmr->mr.user_base = iova;
	fmr->mr.iova = iova;
	ps = 1 << fmr->page_shift;
	fmr->mr.length = list_len * ps;
	m = 0;
	n = 0;
	for (i = 0; i < list_len; i++) {
		fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
		fmr->mr.map[m]->segs[n].length = ps;
		if (++n == IPATH_SEGSZ) {
			m++;
			n = 0;
		}
	}
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = 0;

bail:
	return ret;
}

/**
 * ipath_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Returns 0 on success.
 */
int ipath_unmap_fmr(struct list_head *fmr_list)
{
	struct ipath_fmr *fmr;
	struct ipath_lkey_table *rkt;
	unsigned long flags;

	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
		rkt = &to_idev(fmr->ibfmr.device)->lk_table;
		spin_lock_irqsave(&rkt->lock, flags);
		fmr->mr.user_base = 0;
		fmr->mr.iova = 0;
		fmr->mr.length = 0;
		spin_unlock_irqrestore(&rkt->lock, flags);
	}
	return 0;
}
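
/*
 * Zeroing the base, iova and length above is what revokes the mapping:
 * the bounds check in the key-validation path (ipath_rkey_ok()) then
 * rejects any nonzero-length access, so the RKEY is unusable until the
 * FMR is remapped.
 */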

/**
 * ipath_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Returns 0 on success.
 */
int ipath_dealloc_fmr(struct ib_fmr *ibfmr)
{
	struct ipath_fmr *fmr = to_ifmr(ibfmr);
	int i;

	ipath_free_lkey(&to_idev(ibfmr->device)->lk_table, ibfmr->lkey);
	i = fmr->mr.mapsz;
	while (i)
		kfree(fmr->mr.map[--i]);
	kfree(fmr);
	return 0;
}