/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>

/* some sanity checks */
#if (PGTABLE_RANGE >> 43) > SLICE_MASK_SIZE
#error PGTABLE_RANGE exceeds slice_mask high_slices size
#endif
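
/*
 * On the arithmetic above (assuming 1TB high slices and
 * SLICE_MASK_SIZE counted in bytes): each byte of the high_slices
 * bitmap holds 8 slice bits covering 2^40 bytes each, i.e. 2^43
 * bytes of address space per mask byte, so PGTABLE_RANGE >> 43 is
 * the number of mask bytes required.
 */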

static DEFINE_SPINLOCK(slice_convert_lock);


#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, struct slice_mask mask)
{
	char	*p, buf[16 + 3 + 64 + 1];
	int	i;

	if (!_slice_debug)
		return;
	p = buf;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		*(p++) = (mask.low_slices & (1 << i)) ? '1' : '0';
	*(p++) = ' ';
	*(p++) = '-';
	*(p++) = ' ';
	for (i = 0; i < SLICE_NUM_HIGH; i++)
		*(p++) = (mask.high_slices & (1ul << i)) ? '1' : '0';
	*(p++) = 0;

	printk(KERN_DEBUG "%s:%s\n", label, buf);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_debug(fmt); } while (0)

#else

static void slice_print_mask(const char *label, struct slice_mask mask) {}
#define slice_dbg(fmt...)

#endif

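/*
 * Build the mask of slices covered by [start, start + len). As a
 * worked example (assuming the usual geometry of 16 x 256MB low
 * slices below 4GB, with 1TB high slices above): start = 0x10000000,
 * len = 0x20000000 covers low slices 1 and 2, giving
 * low_slices == 0x6 and high_slices == 0.
 */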
static struct slice_mask slice_range_to_mask(unsigned long start,
					     unsigned long len)
{
	unsigned long end = start + len - 1;
	struct slice_mask ret = { 0, 0 };

	if (start < SLICE_LOW_TOP) {
		unsigned long mend = min(end, SLICE_LOW_TOP);
		unsigned long mstart = min(start, SLICE_LOW_TOP);

		ret.low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(mstart));
	}

	if ((start + len) > SLICE_LOW_TOP)
		ret.high_slices = (1ul << (GET_HIGH_SLICE_INDEX(end) + 1))
			- (1ul << GET_HIGH_SLICE_INDEX(start));

	return ret;
}

static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm->task_size - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vma->vm_start);
}

static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

	/* Hack: so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not 0 */
	if (start == 0)
		start = SLICE_LOW_TOP;

	return !slice_area_is_free(mm, start, end - start);
}

static struct slice_mask slice_mask_for_free(struct mm_struct *mm)
{
	struct slice_mask ret = { 0, 0 };
	unsigned long i;

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret.low_slices |= 1u << i;

	if (mm->task_size <= SLICE_LOW_TOP)
		return ret;

	for (i = 0; i < SLICE_NUM_HIGH; i++)
		if (!slice_high_has_vma(mm, i))
			ret.high_slices |= 1ul << i;

	return ret;
}
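
/*
 * Page-size encoding, for reference: low_slices_psize is a u64
 * holding one 4-bit psize per low slice (slice i lives in bits
 * i * 4 .. i * 4 + 3), while high_slices_psize packs two 4-bit
 * psizes per byte (slice i in the low or high nibble of byte
 * i / 2). For example, the psize of high slice 5 is
 * (high_slices_psize[2] >> 4) & 0xf.
 */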
149
150static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize)
151{
152	unsigned char *hpsizes;
153	int index, mask_index;
154	struct slice_mask ret = { 0, 0 };
155	unsigned long i;
156	u64 lpsizes;
157
158	lpsizes = mm->context.low_slices_psize;
159	for (i = 0; i < SLICE_NUM_LOW; i++)
160		if (((lpsizes >> (i * 4)) & 0xf) == psize)
161			ret.low_slices |= 1u << i;
162
163	hpsizes = mm->context.high_slices_psize;
164	for (i = 0; i < SLICE_NUM_HIGH; i++) {
165		mask_index = i & 0x1;
166		index = i >> 1;
167		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
168			ret.high_slices |= 1ul << i;
169	}
170
171	return ret;
172}
173
174static int slice_check_fit(struct slice_mask mask, struct slice_mask available)
175{
176	return (mask.low_slices & available.low_slices) == mask.low_slices &&
177		(mask.high_slices & available.high_slices) == mask.high_slices;
178}

static void slice_flush_segments(void *parm)
{
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	/* update the paca copy of the context struct */
	get_paca()->context = current->active_mm->context;

	local_irq_save(flags);
	slb_flush_and_rebolt();
	local_irq_restore(flags);
}

static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
{
	int index, mask_index;
	/* Write the new slice psize bits */
	unsigned char *hpsizes;
	u64 lpsizes;
	unsigned long i, flags;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	/* We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (mask.low_slices & (1u << i))
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	/* Assign the value back */
	mm->context.low_slices_psize = lpsizes;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (mask.high_slices & (1ul << i))
			hpsizes[index] = (hpsizes[index] &
					  ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  mm->context.low_slices_psize,
		  mm->context.high_slices_psize);

	spin_unlock_irqrestore(&slice_convert_lock, flags);

	copro_flush_all_slbs(mm);
}

/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on the 'end' parameter);
 * return a boolean indicating whether the slice is marked as available
 * in the 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
				 struct slice_mask available,
				 int end,
				 unsigned long *boundary_addr)
{
	unsigned long slice;
	if (addr < SLICE_LOW_TOP) {
		slice = GET_LOW_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
		return !!(available.low_slices & (1u << slice));
	} else {
		slice = GET_HIGH_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) ?
			((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
		return !!(available.high_slices & (1ul << slice));
	}
}
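
/*
 * For example (again assuming 256MB low slices): addr = 0x30000000
 * lies in low slice 3, so a call with end == 1 sets *boundary_addr to
 * 0x40000000 (the end of that slice), while end == 0 sets it to
 * 0x30000000 (its start); the return value is bit 3 of
 * available.low_slices either way.
 */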

static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long len,
					      struct slice_mask available,
					      int psize)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, next_end;
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = TASK_UNMAPPED_BASE;
	while (addr < TASK_SIZE) {
		info.low_limit = addr;
		if (!slice_scan_available(addr, available, 1, &addr))
			continue;

 next_slice:
		/*
		 * At this point [info.low_limit; addr) covers
		 * available slices only and ends at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the next available slice.
		 */
		if (addr >= TASK_SIZE)
			addr = TASK_SIZE;
		else if (slice_scan_available(addr, available, 1, &next_end)) {
			addr = next_end;
			goto next_slice;
		}
		info.high_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	return -ENOMEM;
}

static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long len,
					     struct slice_mask available,
					     int psize)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, prev;
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = mm->mmap_base;
	while (addr > PAGE_SIZE) {
		info.high_limit = addr;
		if (!slice_scan_available(addr - 1, available, 0, &addr))
			continue;

 prev_slice:
		/*
		 * At this point [addr; info.high_limit) covers
		 * available slices only and starts at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the previous available slice.
		 */
		if (addr < PAGE_SIZE)
			addr = PAGE_SIZE;
		else if (slice_scan_available(addr - 1, available, 0, &prev)) {
			addr = prev;
			goto prev_slice;
		}
		info.low_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return slice_find_area_bottomup(mm, len, available, psize);
}

static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     struct slice_mask mask, int psize,
				     int topdown)
{
	if (topdown)
		return slice_find_area_topdown(mm, len, mask, psize);
	else
		return slice_find_area_bottomup(mm, len, mask, psize);
}

#define or_mask(dst, src)	do {			\
	(dst).low_slices |= (src).low_slices;		\
	(dst).high_slices |= (src).high_slices;		\
} while (0)

#define andnot_mask(dst, src)	do {			\
	(dst).low_slices &= ~(src).low_slices;		\
	(dst).high_slices &= ~(src).high_slices;	\
} while (0)

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif
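
/*
 * MMU_PAGE_BASE is the kernel's base page size. A conversion to a
 * psize above it (see the tail of slice_get_unmapped_area() below)
 * additionally runs slice_flush_segments() on each CPU, so that SLB
 * entries bolted with the old segment page size are dropped.
 */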

unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown)
{
	struct slice_mask mask = {0, 0};
	struct slice_mask good_mask;
	struct slice_mask potential_mask = {0, 0} /* silence stupid warning */;
	struct slice_mask compat_mask = {0, 0};
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
		  addr, len, flags, topdown);

	if (len > mm->task_size)
		return -ENOMEM;
	if (len & ((1ul << pshift) - 1))
		return -EINVAL;
	if (fixed && (addr & ((1ul << pshift) - 1)))
		return -EINVAL;
	if (fixed && addr > (mm->task_size - len))
		return -ENOMEM;

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = _ALIGN_UP(addr, 1ul << pshift);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > mm->task_size - len ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	good_mask = slice_mask_for_size(mm, psize);
	slice_print_mask(" good_mask", good_mask);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 *	else bad
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * Otherwise:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */
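
	/*
	 * A concrete instance of the MAP_FIXED rules above: a fixed
	 * request for 4K pages over slices currently marked 64K but
	 * containing no VMAs fails the good | compat check, fits in
	 * good | compat | free, and therefore reaches the convert
	 * step below, where those free slices are flipped to 4K.
	 */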

#ifdef CONFIG_PPC_64K_PAGES
	/* If we support combo pages, we can allow 64k pages in 4k slices */
	if (psize == MMU_PAGE_64K) {
		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
		if (fixed)
			or_mask(good_mask, compat_mask);
	}
#endif

	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Build a mask for the requested range */
		mask = slice_range_to_mask(addr, len);
		slice_print_mask(" mask", mask);

		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_fit(mask, good_mask)) {
			slice_dbg(" fits good !\n");
			return addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, good_mask, psize, topdown);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, we don't have to setup,
			 * we thus return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			return newaddr;
		}
	}

	/* We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	potential_mask = slice_mask_for_free(mm);
	or_mask(potential_mask, good_mask);
	slice_print_mask(" potential", potential_mask);

	if ((addr != 0 || fixed) && slice_check_fit(mask, potential_mask)) {
		slice_dbg(" fits potential !\n");
		goto convert;
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		addr = slice_find_area(mm, len, good_mask, psize, topdown);
		if (addr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", addr);
			return addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	addr = slice_find_area(mm, len, potential_mask, psize, topdown);

#ifdef CONFIG_PPC_64K_PAGES
	if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		or_mask(potential_mask, compat_mask);
		addr = slice_find_area(mm, len, potential_mask, psize,
				       topdown);
	}
#endif

	if (addr == -ENOMEM)
		return -ENOMEM;

	mask = slice_range_to_mask(addr, len);
	slice_dbg(" found potential area at 0x%lx\n", addr);
	slice_print_mask(" mask", mask);

 convert:
	andnot_mask(mask, good_mask);
	andnot_mask(mask, compat_mask);
	if (mask.low_slices || mask.high_slices) {
		slice_convert(mm, mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return addr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);
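
/*
 * Usage sketch (illustrative, not a definition): the
 * arch_get_unmapped_area*() wrappers below pass the context's base
 * user_psize, while a hugetlb caller would pass the huge page's
 * psize and usually request top-down placement, e.g.
 *
 *	slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
 */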

unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       current->mm->context.user_psize, 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	return slice_get_unmapped_area(addr0, len, flags,
				       current->mm->context.user_psize, 1);
}

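/*
 * For example (256MB low slices assumed): addr = 0x12345678 is below
 * 4GB and falls in low slice 1, so the result is bits 4..7 of
 * low_slices_psize; an address at 5TB falls in high slice 5, whose
 * psize sits in the high nibble of high_slices_psize[2].
 */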
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	unsigned char *hpsizes;
	int index, mask_index;

	if (addr < SLICE_LOW_TOP) {
		u64 lpsizes;
		lpsizes = mm->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
		return (lpsizes >> (index * 4)) & 0xf;
	}
	hpsizes = mm->context.high_slices_psize;
	index = GET_HIGH_SLICE_INDEX(addr);
	mask_index = index & 0x1;
	return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

/*
 * This is called by hash_page when it needs to do a lazy conversion of
 * an address space from real 64K pages to combo 4K pages (typically
 * when hitting a non-cacheable mapping on a processor or hypervisor
 * that won't allow them for 64K pages).
 *
 * This is also called in init_new_context() to change back the user
 * psize from whatever the parent context had it set to.
 * N.B. This may be called before mm->context.id has been set.
 *
 * This function will only change the content of the {low,high}_slices_psize
 * masks, it will not flush SLBs as this shall be handled lazily by the
 * caller.
 */
void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
{
	int index, mask_index;
	unsigned char *hpsizes;
	unsigned long flags, lpsizes;
	unsigned int old_psize;
	int i;

	slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);

	spin_lock_irqsave(&slice_convert_lock, flags);

	old_psize = mm->context.user_psize;
	slice_dbg(" old_psize=%d\n", old_psize);
	if (old_psize == psize)
		goto bail;

	mm->context.user_psize = psize;
	wmb();

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));
	/* Assign the value back */
	mm->context.low_slices_psize = lpsizes;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == old_psize)
			hpsizes[index] = (hpsizes[index] &
					  ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  mm->context.low_slices_psize,
		  mm->context.high_slices_psize);

 bail:
	spin_unlock_irqrestore(&slice_convert_lock, flags);
}

void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask = slice_range_to_mask(start, len);

	slice_convert(mm, mask, psize);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non-hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, a MAP_FIXED mapping cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is
 * ok for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len)
{
	struct slice_mask mask, available;
	unsigned int psize = mm->context.user_psize;

	mask = slice_range_to_mask(addr, len);
	available = slice_mask_for_size(mm, psize);
#ifdef CONFIG_PPC_64K_PAGES
	/* We need to account for 4k slices too */
	if (psize == MMU_PAGE_64K) {
		struct slice_mask compat_mask;
		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
		or_mask(available, compat_mask);
	}
#endif

#if 0 /* too verbose */
	slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
		 mm, addr, len);
	slice_print_mask(" mask", mask);
	slice_print_mask(" available", available);
#endif
	return !slice_check_fit(mask, available);
}
#endif