/*
 * Copyright 2011 (c) Oracle Corp.

 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 */

/*
 * A simple DMA pool loosely based on dmapool.c. It has certain advantages
 * over the DMA pools:
 * - Pool collects recently freed pages for reuse (and hooks up to
 *   the shrinker).
 * - Tracks pages that are currently in use.
 * - Tracks whether a page is UC, WC or cached (and reverts it to WB
 *   when freed).
 */
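
/*
 * Rough call flow, as a sketch of how the pieces below fit together
 * (all the function names are the ones defined in this file; the entry
 * points are typically reached from a driver's populate/unpopulate hooks):
 *
 *	ttm_dma_populate()
 *	  ttm_dma_find_pool()             <- per-device lookup by pool_type
 *	  ttm_dma_pool_init()             <- only if no matching pool exists
 *	  ttm_dma_pool_get_pages()        <- one page per call
 *	    ttm_dma_page_pool_fill_locked()
 *	      ttm_dma_pool_alloc_new_pages()
 *
 *	ttm_dma_unpopulate()              <- returns pages to the free_list
 *	  ttm_dma_page_pool_free()        <- trims the pool past max_size
 */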

#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION		4
#define FREE_ALL_PAGES			(~0U)
/* Caching-state and address-restriction bits, combined into pool_type. */
#define IS_UNDEFINED			(0)
#define IS_WC				(1<<1)
#define IS_UC				(1<<2)
#define IS_CACHED			(1<<3)
#define IS_DMA32			(1<<4)
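
/*
 * Worked example (an assumption for illustration, not asserted anywhere in
 * this file): on a 64-bit build with 4 KiB pages, NUM_PAGES_TO_ALLOC is
 * 4096 / sizeof(struct page *) = 4096 / 8 = 512, so refills and frees are
 * batched 512 pages (2 MiB) at a time.
 */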

enum pool_type {
	POOL_IS_UNDEFINED,
	POOL_IS_WC = IS_WC,
	POOL_IS_UC = IS_UC,
	POOL_IS_CACHED = IS_CACHED,
	POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
	POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
	POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
};
/*
 * The pool structure. There are usually six pools:
 *  - generic (not restricted to DMA32):
 *      - write combined, uncached, cached.
 *  - dma32 (up to 2^32 - so up to 4GB):
 *      - write combined, uncached, cached.
 * for each 'struct device'. The 'cached' is for pages that are actively used.
 * The other ones can be shrunk by the shrinker API if necessary.
 * @pools: The 'struct device->dma_pools' link.
 * @type: Type of the pool
 * @lock: Protects the inuse_list and free_list from concurrent access. Must be
 * used with irqsave/irqrestore variants because the pool allocator may be
 * called from delayed work.
 * @inuse_list: Pool of pages that are in use. The order matters: it is the
 *   order in which the TTM pages are put back.
 * @free_list: Pool of pages that are free to be used. No order requirements.
 * @dev: The device that is associated with these pools.
 * @size: Size used during DMA allocation.
 * @npages_free: Count of available pages for re-use.
 * @npages_in_use: Count of pages that are in use.
 * @nfrees: Stats when pool is shrinking.
 * @nrefills: Stats when the pool is grown.
 * @gfp_flags: Flags to pass to alloc_page.
 * @name: Name of the pool.
 * @dev_name: Name derived from dev - similar to how dev_info works.
 *   Used during shutdown as the dev_info during release is unavailable.
 */
struct dma_pool {
	struct list_head pools; /* The 'struct device->dma_pools' link. */
	enum pool_type type;
	spinlock_t lock;
	struct list_head inuse_list;
	struct list_head free_list;
	struct device *dev;
	unsigned size;
	unsigned npages_free;
	unsigned npages_in_use;
	unsigned long nfrees; /* Stats when shrunk. */
	unsigned long nrefills; /* Stats when grown. */
	gfp_t gfp_flags;
	char name[13]; /* "cached dma32" */
	char dev_name[64]; /* Constructed from dev */
};

/*
 * Bookkeeping for an allocated page, tracking the page together with its
 * DMA address.
 * @page_list: The link to the 'page_list' in 'struct dma_pool'.
 * @vaddr: The virtual address of the page.
 * @p: The 'struct page' backing the allocation.
 * @dma: The bus address of the page. If the page is not allocated
 *   via the DMA API, it will be -1.
 */
struct dma_page {
	struct list_head page_list;
	void *vaddr;
	struct page *p;
	dma_addr_t dma;
};

/*
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is in a sysfs store. They won't have immediate effect
 * anyway, so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

/*
 * Contains the list of all of the 'struct device' and their corresponding
 * DMA pools. Guarded by _manager->lock.
 * @pools: The link to 'struct ttm_pool_manager->pools'
 * @dev: The 'struct device' associated with the 'pool'
 * @pool: The 'struct dma_pool' associated with the 'dev'
 */
struct device_pools {
	struct list_head pools;
	struct device *dev;
	struct dma_pool *pool;
};

/*
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * @lock: Lock used when adding/removing from pools
 * @pools: List of 'struct device' and 'struct dma_pool' tuples.
 * @options: Limits for the pool.
 * @npools: Total amount of pools in existence.
 * @mm_shrink: The structure passed to [un]register_shrinker.
 * @kobj: Sysfs object anchoring the pool_* attributes.
 */
struct ttm_pool_manager {
	struct mutex		lock;
	struct list_head	pools;
	struct ttm_pool_opts	options;
	unsigned		npools;
	struct shrinker		mm_shrink;
	struct kobject		kobj;
};

static struct ttm_pool_manager *_manager;

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
			      const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;

	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}
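
/*
 * Illustrative sysfs usage (the path is an assumption based on the kobject
 * name "dma_pool" registered in ttm_dma_page_alloc_init() below, not
 * verified here). Values are read and written in KiB and converted to
 * pages with PAGE_SIZE >> 10, so with 4 KiB pages:
 *
 *	echo 65536 > /sys/.../dma_pool/pool_max_size
 *
 * stores 65536 / 4 = 16384 pages in options.max_size, and reading the
 * file multiplies back: 16384 * 4 = 65536.
 */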

static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
			     char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif /* for !CONFIG_X86 */

static int ttm_set_pages_caching(struct dma_pool *pool,
				 struct page **pages, unsigned cpages)
{
	int r = 0;

	/* Set page caching */
	if (pool->type & IS_UC) {
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to uc!\n",
			       pool->dev_name, cpages);
	}
	if (pool->type & IS_WC) {
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to wc!\n",
			       pool->dev_name, cpages);
	}
	return r;
}

static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
{
	dma_addr_t dma = d_page->dma;

	dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
	kfree(d_page);
}

static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
{
	struct dma_page *d_page;

	d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
	if (!d_page)
		return NULL;

	d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
					   &d_page->dma,
					   pool->gfp_flags);
	if (d_page->vaddr)
		d_page->p = virt_to_page(d_page->vaddr);
	else {
		kfree(d_page);
		d_page = NULL;
	}
	return d_page;
}

static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
{
	enum pool_type type = IS_UNDEFINED;

	if (flags & TTM_PAGE_FLAG_DMA32)
		type |= IS_DMA32;
	if (cstate == tt_cached)
		type |= IS_CACHED;
	else if (cstate == tt_uncached)
		type |= IS_UC;
	else
		type |= IS_WC;

	return type;
}
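
/*
 * For example (just tracing the branches above): a TTM with
 * TTM_PAGE_FLAG_DMA32 set and caching_state == tt_uncached maps to
 * IS_DMA32 | IS_UC, i.e. POOL_IS_UC_DMA32, while a plain tt_cached TTM
 * maps to POOL_IS_CACHED.
 */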

static void ttm_pool_update_free_locked(struct dma_pool *pool,
					unsigned freed_pages)
{
	pool->npages_free -= freed_pages;
	pool->nfrees += freed_pages;
}

/* Set memory back to wb and free the pages. */
static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
			      struct page *pages[], unsigned npages)
{
	struct dma_page *d_page, *tmp;

	/* Don't set WB on WB page pool. */
	if (npages && !(pool->type & IS_CACHED) &&
	    set_pages_array_wb(pages, npages))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, npages);

	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
	}
}

static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
	/* Don't set WB on WB page pool. */
	if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, 1);

	list_del(&d_page->page_list);
	__ttm_dma_free_page(pool, d_page);
}

/*
 * Free pages from the pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: number of pages to free; FREE_ALL_PAGES frees the whole pool
 * @use_static: safe to use the static buffer
 *
 * Returns the number of requested pages that could not be freed.
 */
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
				       bool use_static)
{
	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
	unsigned long irq_flags;
	struct dma_page *dma_p, *tmp;
	struct page **pages_to_free;
	struct list_head d_pages;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;
#if 0
	if (nr_free > 1) {
		pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
			 pool->dev_name, pool->name, current->pid,
			 npages_to_free, nr_free);
	}
#endif
	if (use_static)
		pages_to_free = static_buf;
	else
		pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
					GFP_KERNEL);

	if (!pages_to_free) {
		pr_err("%s: Failed to allocate memory for pool free operation\n",
		       pool->dev_name);
		return 0;
	}
	INIT_LIST_HEAD(&d_pages);
restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	/* We pick the oldest ones off the list. */
	list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
					 page_list) {
		if (freed_pages >= npages_to_free)
			break;

		/* Move the dma_page from one list to another. */
		list_move(&dma_p->page_list, &d_pages);

		pages_to_free[freed_pages++] = dma_p->p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			ttm_pool_update_free_locked(pool, freed_pages);
			/*
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_dma_pages_put(pool, &d_pages, pages_to_free,
					  freed_pages);

			INIT_LIST_HEAD(&d_pages);

			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * the following context is inside the spinlock
			 * while we are outside of it here.
			 */
			goto out;
		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
out:
	if (pages_to_free != static_buf)
		kfree(pages_to_free);
	return nr_free;
}
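
/*
 * Worked example of the batching above (assuming 4 KiB pages on a 64-bit
 * build, so NUM_PAGES_TO_ALLOC == 512): a request to free 1200 pages
 * drops the lock and calls ttm_dma_pages_put() after 512 and then 1024
 * pages, and frees the remaining 176 in the tail path after the loop.
 */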

static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
{
	struct device_pools *p;
	struct dma_pool *pool;

	if (!dev)
		return;

	mutex_lock(&_manager->lock);
	list_for_each_entry_reverse(p, &_manager->pools, pools) {
		if (p->dev != dev)
			continue;
		pool = p->pool;
		if (pool->type != type)
			continue;

		list_del(&p->pools);
		kfree(p);
		_manager->npools--;
		break;
	}
	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;
		/* Takes a spinlock. */
		/* OK to use static buffer since global mutex is held. */
		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
		/* This code path is called after _all_ references to the
		 * struct device have been dropped - so nobody should be
		 * touching it. In case somebody is trying to _add_ we are
		 * guarded by the mutex. */
		list_del(&pool->pools);
		kfree(pool);
		break;
	}
	mutex_unlock(&_manager->lock);
}

/*
 * This destructor is run when the 'struct device' is freed, though
 * the pool might already have been freed earlier.
 */
static void ttm_dma_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	if (pool)
		ttm_dma_free_pool(dev, pool->type);
}

static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
					  enum pool_type type)
{
	char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
	struct device_pools *sec_pool = NULL;
	struct dma_pool *pool = NULL, **ptr;
	unsigned i;
	int ret = -ENODEV;
	char *p;

	if (!dev)
		return NULL;

	ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	ret = -ENOMEM;

	pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
			    dev_to_node(dev));
	if (!pool)
		goto err_mem;

	sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
				dev_to_node(dev));
	if (!sec_pool)
		goto err_mem;

	INIT_LIST_HEAD(&sec_pool->pools);
	sec_pool->dev = dev;
	sec_pool->pool = pool;

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->inuse_list);
	INIT_LIST_HEAD(&pool->pools);
	spin_lock_init(&pool->lock);
	pool->dev = dev;
	pool->npages_free = pool->npages_in_use = 0;
	pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->size = PAGE_SIZE;
	pool->type = type;
	pool->nrefills = 0;
	p = pool->name;
	for (i = 0; i < 5; i++) {
		if (type & t[i]) {
			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
				      "%s", n[i]);
		}
	}
	*p = 0;
	/* We copy the name for pr_ calls because by the time dma_pool_destroy
	 * is called, the kobj->name has already been deallocated. */
	snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
		 dev_driver_string(dev), dev_name(dev));
	mutex_lock(&_manager->lock);
	/* You can get the dma_pool from either the global: */
	list_add(&sec_pool->pools, &_manager->pools);
	_manager->npools++;
	/* or from 'struct device': */
	list_add(&pool->pools, &dev->dma_pools);
	mutex_unlock(&_manager->lock);

	*ptr = pool;
	devres_add(dev, ptr);

	return pool;
err_mem:
	devres_free(ptr);
	kfree(sec_pool);
	kfree(pool);
	return ERR_PTR(ret);
}
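
/*
 * Tracing the name-building loop above for type == POOL_IS_CACHED_DMA32
 * (IS_CACHED | IS_DMA32): the t[]/n[] tables match "cached" and then
 * " dma32" (note the leading space in n[3]), producing the pool name
 * "cached dma32" - exactly the longest name the 13-byte name[] field
 * is sized for.
 */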

static struct dma_pool *ttm_dma_find_pool(struct device *dev,
					  enum pool_type type)
{
	struct dma_pool *pool, *tmp, *found = NULL;

	if (type == IS_UNDEFINED)
		return found;

	/* NB: We iterate on the 'struct dev' which has no spinlock, but
	 * it does have a kref which we have taken. The kref is taken during
	 * graphics driver loading - in drm_pci_init it calls either
	 * pci_dev_get or pci_register_driver, which both end up taking a kref
	 * on 'struct device'.
	 *
	 * On teardown, the graphics drivers end up quiescing the TTM
	 * (put_pages) and calling the devres destructor, ttm_dma_pool_release.
	 * Conveniently, by that point in time there are no pages associated
	 * with the driver, so this function will not be called.
	 */
	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;
		found = pool;
		break;
	}
	return found;
}

/*
 * Free the pages that failed to change their caching state. Pages whose
 * caching state did change stay on 'd_pages' to be put back in the pool.
 */
static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
						 struct list_head *d_pages,
						 struct page **failed_pages,
						 unsigned cpages)
{
	struct dma_page *d_page, *tmp;
	struct page *p;
	unsigned i = 0;

	p = failed_pages[0];
	if (!p)
		return;
	/* Find the failed page. */
	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		if (d_page->p != p)
			continue;
		/* .. and then progress over the full list. */
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
		if (++i < cpages)
			p = failed_pages[i];
		else
			break;
	}
}

/*
 * Allocate 'count' pages, putting each on the 'd_pages' list, with the
 * caching state set according to the pool type.
 * We return zero for success, and negative numbers as errors.
 */
static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
					struct list_head *d_pages,
					unsigned count)
{
	struct page **caching_array;
	struct dma_page *dma_p;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		pr_err("%s: Unable to allocate table for new pages\n",
		       pool->dev_name);
		return -ENOMEM;
	}

	if (count > 1) {
		pr_debug("%s: (%s:%d) Getting %d pages\n",
			 pool->dev_name, pool->name, current->pid, count);
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		dma_p = __ttm_dma_alloc_page(pool);
		if (!dma_p) {
			pr_err("%s: Unable to get page %u\n",
			       pool->dev_name, i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r)
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
			}
			r = -ENOMEM;
			goto out;
		}
		p = dma_p->p;
#ifdef CONFIG_HIGHMEM
		/* The gfp flags of a highmem page should never be dma32, so
		 * we should be fine in that case.
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {
				/* Note: Cannot hold the spinlock */
				r = ttm_set_pages_caching(pool, caching_array,
						 cpages);
				if (r) {
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
					goto out;
				}
				cpages = 0;
			}
		}
		list_add(&dma_p->page_list, d_pages);
	}

	if (cpages) {
		r = ttm_set_pages_caching(pool, caching_array, cpages);
		if (r)
			ttm_dma_handle_caching_state_failure(pool, d_pages,
					caching_array, cpages);
	}
out:
	kfree(caching_array);
	return r;
}

/*
 * @return count of pages available in the pool after (re)filling it.
 */
static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
					 unsigned long *irq_flags)
{
	unsigned count = _manager->options.small;
	int r = pool->npages_free;

	if (count > pool->npages_free) {
		struct list_head d_pages;

		INIT_LIST_HEAD(&d_pages);

		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		/* Returns zero on success, or a negative error code. */
		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);

		spin_lock_irqsave(&pool->lock, *irq_flags);
		if (!r) {
			/* Add the fresh pages to the front of the free list. */
			list_splice(&d_pages, &pool->free_list);
			++pool->nrefills;
			pool->npages_free += count;
			r = count;
		} else {
			struct dma_page *d_page;
			unsigned cpages = 0;

			pr_err("%s: Failed to fill %s pool (r:%d)!\n",
			       pool->dev_name, pool->name, r);

			list_for_each_entry(d_page, &d_pages, page_list) {
				cpages++;
			}
			list_splice_tail(&d_pages, &pool->free_list);
			pool->npages_free += cpages;
			r = cpages;
		}
	}
	return r;
}

/*
 * @return 0 on success, or -ENOMEM if no page could be obtained.
 * The populate list is actually a stack (not that it matters, as TTM
 * allocates one page at a time).
 */
static int ttm_dma_pool_get_pages(struct dma_pool *pool,
				  struct ttm_dma_tt *ttm_dma,
				  unsigned index)
{
	struct dma_page *d_page;
	struct ttm_tt *ttm = &ttm_dma->ttm;
	unsigned long irq_flags;
	int count, r = -ENOMEM;

	spin_lock_irqsave(&pool->lock, irq_flags);
	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
	if (count) {
		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
		ttm->pages[index] = d_page->p;
		ttm_dma->cpu_address[index] = d_page->vaddr;
		ttm_dma->dma_address[index] = d_page->dma;
		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
		r = 0;
		pool->npages_in_use += 1;
		pool->npages_free -= 1;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return r;
}

/*
 * On success the pages list holds ttm->num_pages correctly cached pages;
 * on failure the negative return value (-ENOMEM, etc.) is propagated.
 */
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	struct dma_pool *pool;
	enum pool_type type;
	unsigned i;
	gfp_t gfp_flags;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags = GFP_USER | GFP_DMA32;
	else
		gfp_flags = GFP_HIGHUSER;
	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	pool = ttm_dma_find_pool(dev, type);
	if (!pool) {
		pool = ttm_dma_pool_init(dev, gfp_flags, type);
		if (IS_ERR_OR_NULL(pool))
			return -ENOMEM;
	}

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (ret != 0) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);
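
/*
 * Sketch of how a driver would hook these up (illustrative only; the
 * radeon_* names and the gtt/rdev variables are assumptions, not taken
 * from this file):
 *
 *	static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
 *	{
 *		struct radeon_ttm_tt *gtt = (void *)ttm;
 *
 *		return ttm_dma_populate(&gtt->ttm, gtt->rdev->dev);
 *	}
 *
 *	static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
 *	{
 *		struct radeon_ttm_tt *gtt = (void *)ttm;
 *
 *		ttm_dma_unpopulate(&gtt->ttm, gtt->rdev->dev);
 *	}
 */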

/* Put all pages on the pages list back in the correct pool to await reuse. */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct dma_pool *pool;
	struct dma_page *d_page, *next;
	enum pool_type type;
	bool is_cached = false;
	unsigned count = 0, i, npages = 0;
	unsigned long irq_flags;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
	pool = ttm_dma_find_pool(dev, type);
	if (!pool)
		return;

	is_cached = (ttm_dma_find_pool(pool->dev,
		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);

	/* Make sure the pages array matches the list and count the pages. */
	list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
		ttm->pages[count] = d_page->p;
		count++;
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	pool->npages_in_use -= count;
	if (is_cached) {
		pool->nfrees += count;
	} else {
		pool->npages_free += count;
		list_splice(&ttm_dma->pages_list, &pool->free_list);
		npages = count;
		if (pool->npages_free > _manager->options.max_size) {
			npages = pool->npages_free - _manager->options.max_size;
			/* free at least NUM_PAGES_TO_ALLOC pages
			 * to reduce calls to set_memory_wb */
			if (npages < NUM_PAGES_TO_ALLOC)
				npages = NUM_PAGES_TO_ALLOC;
		}
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (is_cached) {
		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 d_page->p);
			ttm_dma_page_put(pool, d_page);
		}
	} else {
		for (i = 0; i < count; i++) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
		}
	}

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; i++) {
		ttm->pages[i] = NULL;
		ttm_dma->cpu_address[i] = 0;
		ttm_dma->dma_address[i] = 0;
	}

	/* shrink the pool if necessary (only on !is_cached pools) */
	if (npages)
		ttm_dma_page_pool_free(pool, npages, false);
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);

/**
 * Callback for mm to request the pool to reduce the number of pages held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * I'm getting sadder as I hear more pathetic whimpers about needing per-pool
 * shrinkers.
 */
static unsigned long
ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	static unsigned start_pool;
	unsigned idx = 0;
	unsigned pool_offset;
	unsigned shrink_pages = sc->nr_to_scan;
	struct device_pools *p;
	unsigned long freed = 0;

	if (list_empty(&_manager->pools))
		return SHRINK_STOP;

	if (!mutex_trylock(&_manager->lock))
		return SHRINK_STOP;
	if (!_manager->npools)
		goto out;
	pool_offset = ++start_pool % _manager->npools;
	list_for_each_entry(p, &_manager->pools, pools) {
		unsigned nr_free;

		if (!p->dev)
			continue;
		if (shrink_pages == 0)
			break;
		/* Do it in round-robin fashion. */
		if (++idx < pool_offset)
			continue;
		nr_free = shrink_pages;
		/* OK to use static buffer since global mutex is held. */
		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
		freed += nr_free - shrink_pages;

		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
			 p->pool->dev_name, p->pool->name, current->pid,
			 nr_free, shrink_pages);
	}
out:
	mutex_unlock(&_manager->lock);
	return freed;
}

static unsigned long
ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct device_pools *p;
	unsigned long count = 0;

	if (!mutex_trylock(&_manager->lock))
		return 0;
	list_for_each_entry(p, &_manager->pools, pools)
		count += p->pool->npages_free;
	mutex_unlock(&_manager->lock);
	return count;
}

static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
	manager->mm_shrink.scan_objects = ttm_dma_pool_shrink_scan;
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
}

static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret = -ENOMEM;

	WARN_ON(_manager);

	pr_info("Initializing DMA pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		goto err;

	mutex_init(&_manager->lock);
	INIT_LIST_HEAD(&_manager->pools);

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	/* This takes care of auto-freeing the _manager. */
	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "dma_pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		goto err;
	}
	ttm_dma_pool_mm_shrink_init(_manager);
	return 0;
err:
	return ret;
}

void ttm_dma_page_alloc_fini(void)
{
	struct device_pools *p, *t;

	pr_info("Finalizing DMA pool allocator\n");
	ttm_dma_pool_mm_shrink_fini(_manager);

	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
			current->pid);
		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
			ttm_dma_pool_match, p->pool));
		ttm_dma_free_pool(p->dev, p->pool->type);
	}
	kobject_put(&_manager->kobj);
	_manager = NULL;
}

int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct device_pools *p;
	struct dma_pool *pool = NULL;
	char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
		     "name"};

	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
		   h[0], h[1], h[2], h[3], h[4], h[5]);
	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools) {
		struct device *dev = p->dev;

		if (!dev)
			continue;
		pool = p->pool;
		seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
				pool->name, pool->nrefills,
				pool->nfrees, pool->npages_in_use,
				pool->npages_free,
				pool->dev_name);
	}
	mutex_unlock(&_manager->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
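
/*
 * Example debugfs output (the numbers and device name are made up for
 * illustration; the column layout follows the seq_printf format strings
 * above):
 *
 *	         pool      refills   pages freed    inuse available     name
 *	 cached dma32           12          1024      256      512 radeon 0000:01:00.0
 */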

#endif