/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright (c) 2012 David Airlie <airlied@linux.ie>
 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/**
 * DOC: vma offset manager
 *
 * The vma-manager is responsible for mapping arbitrary driver-dependent memory
 * regions into the linear user address-space. It provides offsets to the
 * caller which can then be used on the address_space of the drm-device. It
 * takes care that regions do not overlap, sizes them appropriately, and does
 * not confuse mm-core with inconsistent fake vm_pgoff fields.
 * Drivers shouldn't use this for object placement in VMEM. This manager should
 * only be used to manage mappings into linear user-space VMs.
 *
 * We use drm_mm as the backend to manage object allocations. But drm_mm is
 * highly optimized for alloc/free calls, not lookups. Hence, we use an rb-tree
 * to speed up offset lookups.
 *
 * You must not use multiple offset managers on a single address_space.
 * Otherwise, mm-core will be unable to tear down memory mappings as the VM will
 * no longer be linear.
 *
 * This offset manager works on page-based addresses. That is, every argument
 * and return code (with the exception of drm_vma_node_offset_addr()) is given
 * in number of pages, not number of bytes. That means, object sizes and offsets
 * must always be page-aligned (as usual).
 * If you want a valid byte-based user-space address for a given offset, please
 * see drm_vma_node_offset_addr().
 *
 * In addition to offset management, the vma offset manager also handles access
 * management. For every open-file context that is allowed to access a given
 * node, you must call drm_vma_node_allow(). Otherwise, an mmap() call on this
 * open-file with the offset of the node will fail with -EACCES. To revoke
 * access again, use drm_vma_node_revoke(). However, the caller is responsible
 * for destroying already existing mappings, if required.
 */
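
/*
 * A minimal usage sketch, assuming a hypothetical driver object "obj" that
 * embeds a struct drm_vma_offset_node as obj->vma_node and tracks its size
 * in bytes as obj->size (these names, and the offset/size values below, are
 * illustrative, not part of this API):
 *
 *	drm_vma_offset_manager_init(&mgr, 0, 1UL << 20);
 *
 *	ret = drm_vma_offset_add(&mgr, &obj->vma_node,
 *				 obj->size >> PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 *
 *	offset = drm_vma_node_offset_addr(&obj->vma_node);
 *
 *	drm_vma_offset_remove(&mgr, &obj->vma_node);
 *	drm_vma_offset_manager_destroy(&mgr);
 *
 * drm_vma_node_offset_addr() converts the page-based offset of the node into
 * the byte-based offset that user-space passes to mmap() on the drm device.
 */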

/**
 * drm_vma_offset_manager_init() - Initialize new offset-manager
 * @mgr: Manager object
 * @page_offset: Offset of available memory area (page-based)
 * @size: Size of available address space range (page-based)
 *
 * Initialize a new offset-manager. The offset and area size available for the
 * manager are given as @page_offset and @size. Both are interpreted as
 * page-numbers, not bytes.
 *
 * Adding/removing nodes from the manager is locked internally and protected
 * against concurrent access. However, node allocation and destruction is left
 * for the caller. While calling into the vma-manager, a given node must
 * always be guaranteed to be referenced.
 */
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
				 unsigned long page_offset, unsigned long size)
{
	rwlock_init(&mgr->vm_lock);
	mgr->vm_addr_space_rb = RB_ROOT;
	drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);

/**
 * drm_vma_offset_manager_destroy() - Destroy offset manager
 * @mgr: Manager object
 *
 * Destroy an object manager which was previously created via
 * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
 * before destroying the manager. Otherwise, drm_mm will refuse to free the
 * requested resources.
 *
 * The manager must not be accessed after this function is called.
 */
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
	/* take the lock to protect against buggy drivers */
	write_lock(&mgr->vm_lock);
	drm_mm_takedown(&mgr->vm_addr_space_mm);
	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);

/**
 * drm_vma_offset_lookup() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Find a node given a start address and object size. This returns the _best_
 * match for the given address. That is, @start may point somewhere into a
 * valid region and the node of that region will be returned, as long as the
 * node spans the whole requested area (given the size in number of pages as
 * @pages).
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned. It's the caller's responsibility to make sure the node doesn't
 * get destroyed before the caller can access it.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
						  unsigned long start,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;

	read_lock(&mgr->vm_lock);
	node = drm_vma_offset_lookup_locked(mgr, start, pages);
	read_unlock(&mgr->vm_lock);

	return node;
}
EXPORT_SYMBOL(drm_vma_offset_lookup);

/**
 * drm_vma_offset_lookup_locked() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Same as drm_vma_offset_lookup() but requires the caller to take the lookup
 * lock manually. See drm_vma_offset_lock_lookup() for an example.
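 *
 * A minimal sketch of the locked lookup pattern, assuming the node is
 * embedded in a hypothetical ref-counted driver object "struct obj" (the
 * object name and its "ref" member are illustrative, not part of this API):
 *
 *	drm_vma_offset_lock_lookup(mgr);
 *	node = drm_vma_offset_lookup_locked(mgr, start, pages);
 *	if (node)
 *		kref_get(&container_of(node, struct obj, vma_node)->ref);
 *	drm_vma_offset_unlock_lookup(mgr);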
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
							 unsigned long start,
							 unsigned long pages)
{
	struct drm_vma_offset_node *node, *best;
	struct rb_node *iter;
	unsigned long offset;

	iter = mgr->vm_addr_space_rb.rb_node;
	best = NULL;

	/* find the right-most node whose start address is <= @start */
	while (likely(iter)) {
		node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
		offset = node->vm_node.start;
		if (start >= offset) {
			iter = iter->rb_right;
			best = node;
			if (start == offset)
				break;
		} else {
			iter = iter->rb_left;
		}
	}

	/* verify that the node spans the requested area */
	if (best) {
		offset = best->vm_node.start + best->vm_node.size;
		if (offset < start + pages)
			best = NULL;
	}

	return best;
}
EXPORT_SYMBOL(drm_vma_offset_lookup_locked);

/* internal helper to link @node into the rb-tree */
static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
				   struct drm_vma_offset_node *node)
{
	struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_node *iter_node;

	while (likely(*iter)) {
		parent = *iter;
		iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);

		if (node->vm_node.start < iter_node->vm_node.start)
			iter = &(*iter)->rb_left;
		else if (node->vm_node.start > iter_node->vm_node.start)
			iter = &(*iter)->rb_right;
		else
			BUG();
	}

	rb_link_node(&node->vm_rb, parent, iter);
	rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
}

/**
 * drm_vma_offset_add() - Add offset node to manager
 * @mgr: Manager object
 * @node: Node to be added
 * @pages: Allocation size visible to user-space (in number of pages)
 *
 * Add a node to the offset-manager. If the node was already added, this does
 * nothing and returns 0. @pages is the size of the object given in number of
 * pages.
 * After this call succeeds, you can access the offset of the node until it
 * is removed again.
 *
 * If this call fails, it is safe to retry the operation or to call
 * drm_vma_offset_remove() anyway. However, no cleanup is required in that
 * case.
 *
 * @pages is not required to be the same size as the underlying memory object
 * that you want to map. It only limits the size that user-space can map into
 * its address space.
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
		       struct drm_vma_offset_node *node, unsigned long pages)
{
	int ret;

	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		ret = 0;
		goto out_unlock;
	}

	ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
				 pages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret)
		goto out_unlock;

	_drm_vma_offset_add_rb(mgr, node);

out_unlock:
	write_unlock(&mgr->vm_lock);
	return ret;
}
EXPORT_SYMBOL(drm_vma_offset_add);

/**
 * drm_vma_offset_remove() - Remove offset node from manager
 * @mgr: Manager object
 * @node: Node to be removed
 *
 * Remove a node from the offset manager. If the node wasn't added before, this
 * does nothing. After this call returns, the offset and size will be 0 until a
 * new offset is allocated via drm_vma_offset_add() again. Helper functions like
 * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
 * offset is allocated.
 */
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
			   struct drm_vma_offset_node *node)
{
	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
		drm_mm_remove_node(&node->vm_node);
		memset(&node->vm_node, 0, sizeof(node->vm_node));
	}

	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_remove);

/**
 * drm_vma_node_allow() - Add open-file to list of allowed users
 * @node: Node to modify
 * @filp: Open file to add
 *
 * Add @filp to the list of allowed open-files for this node. If @filp is
 * already on this list, the ref-count is incremented.
 *
 * The list of allowed-users is preserved across drm_vma_offset_add() and
 * drm_vma_offset_remove() calls. You may even call it if the node is currently
 * not added to any offset-manager.
 *
 * You must remove all open-files the same number of times as you added them
 * before destroying the node. Otherwise, you will leak memory.
 *
 * This is locked against concurrent access internally.
 *
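 * A sketch of the typical pairing, e.g. in a driver's per-object open/close
 * hooks (the hook and object names are illustrative, not part of this API):
 *
 *	int obj_open(struct obj *obj, struct file *filp)
 *	{
 *		return drm_vma_node_allow(&obj->vma_node, filp);
 *	}
 *
 *	void obj_close(struct obj *obj, struct file *filp)
 *	{
 *		drm_vma_node_revoke(&obj->vma_node, filp);
 *	}
 *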
 * RETURNS:
 * 0 on success, negative error code on internal failure (out-of-mem)
 */
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
{
	struct rb_node **iter;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_file *new, *entry;
	int ret = 0;

	/*
	 * Preallocate the entry to avoid atomic allocations below. It is
	 * quite unlikely that an open-file is added twice to a single node,
	 * so we don't optimize for this case. OOM is checked below only if
	 * the entry is actually used.
	 */
	new = kmalloc(sizeof(*new), GFP_KERNEL);

	write_lock(&node->vm_lock);

	iter = &node->vm_files.rb_node;

	while (likely(*iter)) {
		parent = *iter;
		entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);

		if (filp == entry->vm_filp) {
			entry->vm_count++;
			goto unlock;
		} else if (filp > entry->vm_filp) {
			iter = &(*iter)->rb_right;
		} else {
			iter = &(*iter)->rb_left;
		}
	}

	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}

	new->vm_filp = filp;
	new->vm_count = 1;
	rb_link_node(&new->vm_rb, parent, iter);
	rb_insert_color(&new->vm_rb, &node->vm_files);
	new = NULL;

unlock:
	write_unlock(&node->vm_lock);
	kfree(new);
	return ret;
}
EXPORT_SYMBOL(drm_vma_node_allow);

/**
 * drm_vma_node_revoke() - Remove open-file from list of allowed users
 * @node: Node to modify
 * @filp: Open file to remove
 *
 * Decrement the ref-count of @filp in the list of allowed open-files on @node.
 * If the ref-count drops to zero, remove @filp from the list. You must call
 * this once for every drm_vma_node_allow() on @filp.
 *
 * This is locked against concurrent access internally.
 *
 * If @filp is not on the list, nothing is done.
 */
void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	write_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (filp == entry->vm_filp) {
			if (!--entry->vm_count) {
				rb_erase(&entry->vm_rb, &node->vm_files);
				kfree(entry);
			}
			break;
		} else if (filp > entry->vm_filp) {
			iter = iter->rb_right;
		} else {
			iter = iter->rb_left;
		}
	}

	write_unlock(&node->vm_lock);
}
EXPORT_SYMBOL(drm_vma_node_revoke);

/**
 * drm_vma_node_is_allowed() - Check whether an open-file is granted access
 * @node: Node to check
 * @filp: Open-file to check for
 *
 * Search the list in @node to check whether @filp is currently on the list of
 * allowed open-files (see drm_vma_node_allow()).
 *
 * This is locked against concurrent access internally.
 *
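 * A sketch of the intended use in a driver's mmap path (the "obj" name is
 * illustrative, not part of this API):
 *
 *	if (!drm_vma_node_is_allowed(&obj->vma_node, filp))
 *		return -EACCES;
 *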
 * RETURNS:
 * true iff @filp is on the list
 */
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
			     struct file *filp)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	read_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (filp == entry->vm_filp)
			break;
		else if (filp > entry->vm_filp)
			iter = iter->rb_right;
		else
			iter = iter->rb_left;
	}

	read_unlock(&node->vm_lock);

	return iter;
}
EXPORT_SYMBOL(drm_vma_node_is_allowed);