root/include/linux/highmem.h


DEFINITIONS

This source file includes the following definitions.
  1. flush_anon_page
  2. flush_kernel_dcache_page
  3. flush_kernel_vmap_range
  4. invalidate_kernel_vmap_range
  5. totalhigh_pages
  6. totalhigh_pages_inc
  7. totalhigh_pages_dec
  8. totalhigh_pages_add
  9. totalhigh_pages_set
  10. nr_free_highpages
  11. kmap_to_page
  12. totalhigh_pages
  13. kmap
  14. kunmap
  15. kmap_atomic
  16. __kunmap_atomic
  17. kmap_atomic_idx_push
  18. kmap_atomic_idx
  19. kmap_atomic_idx_pop
  20. clear_user_highpage
  21. __alloc_zeroed_user_highpage
  22. alloc_zeroed_user_highpage_movable
  23. clear_highpage
  24. zero_user_segments
  25. zero_user_segment
  26. zero_user
  27. copy_user_highpage
  28. copy_highpage

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>
/*
 * These cache-maintenance hooks are no-ops unless the architecture
 * (typically one with aliasing, virtually indexed caches) defines the
 * corresponding ARCH_HAS_* symbol and supplies real implementations.
 */
#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif
  31 
  32 #include <asm/kmap_types.h>
  33 
  34 #ifdef CONFIG_HIGHMEM
  35 #include <asm/highmem.h>
  36 
  37 /* declarations for linux/mm/highmem.c */
  38 unsigned int nr_free_highpages(void);
  39 extern atomic_long_t _totalhigh_pages;
  40 static inline unsigned long totalhigh_pages(void)
  41 {
  42         return (unsigned long)atomic_long_read(&_totalhigh_pages);
  43 }
  44 
  45 static inline void totalhigh_pages_inc(void)
  46 {
  47         atomic_long_inc(&_totalhigh_pages);
  48 }
  49 
  50 static inline void totalhigh_pages_dec(void)
  51 {
  52         atomic_long_dec(&_totalhigh_pages);
  53 }
  54 
  55 static inline void totalhigh_pages_add(long count)
  56 {
  57         atomic_long_add(count, &_totalhigh_pages);
  58 }
  59 
  60 static inline void totalhigh_pages_set(long val)
  61 {
  62         atomic_long_set(&_totalhigh_pages, val);
  63 }
  64 
  65 void kmap_flush_unused(void);
  66 
  67 struct page *kmap_to_page(void *addr);
  68 
#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

static inline struct page *kmap_to_page(void *addr)
{
        return virt_to_page(addr);
}

static inline unsigned long totalhigh_pages(void) { return 0UL; }

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
        might_sleep();
        return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *kmap_atomic(struct page *page)
{
        preempt_disable();
        pagefault_disable();
        return page_address(page);
}
#define kmap_atomic_prot(page, prot)    kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
        pagefault_enable();
        preempt_enable();
}

#define kmap_atomic_pfn(pfn)    kmap_atomic(pfn_to_page(pfn))

#define kmap_flush_unused()     do {} while (0)
#endif

#endif /* CONFIG_HIGHMEM */

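/*
 * Illustrative sketch (not part of this header): kmap() may sleep, so it
 * is only usable from process context, while kmap_atomic() disables
 * pagefaults and preemption and must be paired with kunmap_atomic().
 * A hypothetical helper that checksums a page from a context that may
 * not sleep would therefore use the atomic variant:
 *
 *	static u32 sum_page(struct page *page)
 *	{
 *		u32 *p = kmap_atomic(page);
 *		u32 i, sum = 0;
 *
 *		for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
 *			sum += p[i];
 *		kunmap_atomic(p);
 *		return sum;
 *	}
 */
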
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
        int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
        WARN_ON_ONCE(in_irq() && !irqs_disabled());
        BUG_ON(idx >= KM_TYPE_NR);
#endif
        return idx;
}

static inline int kmap_atomic_idx(void)
{
        return __this_cpu_read(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
        int idx = __this_cpu_dec_return(__kmap_atomic_idx);

        BUG_ON(idx < 0);
#else
        __this_cpu_dec(__kmap_atomic_idx);
#endif
}

#endif

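/*
 * Illustrative sketch of how an architecture's atomic-kmap implementation
 * typically consumes the per-CPU index stack above (modelled loosely on
 * the x86_32 code; kmap_atomic_high() is a placeholder name):
 *
 *	void *kmap_atomic_high(struct page *page)
 *	{
 *		unsigned long vaddr;
 *		int idx, type;
 *
 *		type = kmap_atomic_idx_push();
 *		idx = type + KM_TYPE_NR * smp_processor_id();
 *		vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 *		set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
 *
 *		return (void *)vaddr;
 *	}
 *
 * The matching unmap clears the fixmap PTE and calls kmap_atomic_idx_pop().
 */
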
/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap().
 * kunmap_atomic() should get the return value of kmap_atomic, not the page.
 */
#define kunmap_atomic(addr)                                     \
do {                                                            \
        BUILD_BUG_ON(__same_type((addr), struct page *));       \
        __kunmap_atomic(addr);                                  \
} while (0)

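/*
 * Illustrative sketch: the BUILD_BUG_ON above rejects the common mistake
 * of passing the struct page instead of the mapped address. Correct
 * pairing (copy_from_page() is a hypothetical helper for illustration):
 *
 *	static void copy_from_page(struct page *page, void *dst, size_t len)
 *	{
 *		void *src = kmap_atomic(page);
 *
 *		memcpy(dst, src, len);
 *		kunmap_atomic(src);	// pass the mapped address...
 *		// kunmap_atomic(page);	// ...not the page: build error
 *	}
 */
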
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *addr = kmap_atomic(page);
        clear_user_page(addr, vaddr, page);
        kunmap_atomic(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move, such as __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function allocates a page for a VMA, but the caller is expected
 * to specify via @movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
                        struct vm_area_struct *vma,
                        unsigned long vaddr)
{
        struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
                        vma, vaddr);

        if (page)
                clear_user_highpage(page, vaddr);

        return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function allocates a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
                                        unsigned long vaddr)
{
        return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}

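/*
 * Illustrative sketch: this is the allocation pattern used on the
 * anonymous fault path, where a freshly zeroed, movable page is mapped
 * at the faulting address (simplified, most error handling elided):
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 *	// ...set up the PTE and add the page to the rmap...
 */
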
static inline void clear_highpage(struct page *page)
{
        void *kaddr = kmap_atomic(page);
        clear_page(kaddr);
        kunmap_atomic(kaddr);
}

static inline void zero_user_segments(struct page *page,
        unsigned start1, unsigned end1,
        unsigned start2, unsigned end2)
{
        void *kaddr = kmap_atomic(page);

        BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

        if (end1 > start1)
                memset(kaddr + start1, 0, end1 - start1);

        if (end2 > start2)
                memset(kaddr + start2, 0, end2 - start2);

        kunmap_atomic(kaddr);
        flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
        unsigned start, unsigned end)
{
        zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
        unsigned start, unsigned size)
{
        zero_user_segments(page, start, start + size, 0, 0);
}

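/*
 * Illustrative sketch: filesystems use these helpers to clear the part
 * of a page that lies beyond end-of-file, e.g. when truncating within a
 * page (offsets below are hypothetical):
 *
 *	// zero from the new EOF offset to the end of the page
 *	zero_user_segment(page, offset_in_page(newsize), PAGE_SIZE);
 *
 *	// or zero an arbitrary byte range of a given length
 *	zero_user(page, offset, len);
 */
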
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        char *vfrom, *vto;

        vfrom = kmap_atomic(from);
        vto = kmap_atomic(to);
        copy_user_page(vto, vfrom, vaddr, to);
        /* atomic kmaps nest, so release them in LIFO order */
        kunmap_atomic(vto);
        kunmap_atomic(vfrom);
}

#endif

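/*
 * Illustrative sketch: copy_user_highpage() is what the copy-on-write
 * fault path uses to duplicate the old page into a newly allocated one
 * before mapping it writable (simplified):
 *
 *	copy_user_highpage(new_page, old_page, vmf->address, vma);
 */
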
#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
        char *vfrom, *vto;

        vfrom = kmap_atomic(from);
        vto = kmap_atomic(to);
        copy_page(vto, vfrom);
        kunmap_atomic(vto);
        kunmap_atomic(vfrom);
}

#endif

#endif /* _LINUX_HIGHMEM_H */
