root/arch/sparc/mm/highmem.c


DEFINITIONS

This source file includes the following definitions:
  1. kmap_init
  2. kmap_atomic
  3. __kunmap_atomic

// SPDX-License-Identifier: GPL-2.0
/*
 *  highmem.c: virtual kernel memory mappings for high memory
 *
 *  Provides kernel-static versions of the atomic kmap functions originally
 *  found as inlines in include/asm-sparc/highmem.h.  These became
 *  needed once kmap_atomic() and kunmap_atomic() started getting
 *  called from within modules.
 *  -- Tomas Szepe <szepe@pinerecords.com>, September 2002
 *
 *  But kmap_atomic() and kunmap_atomic() cannot be inlined in
 *  modules because they are loaded with btfixup-ped functions.
 */

/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface.  But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need it.
 *
 * XXX This is old text.  Actually, it is fine to use atomic kmaps,
 * provided you remember that they are atomic and do not try to sleep
 * with a kmap taken, much like a spinlock.  Non-atomic kmaps are
 * shared by CPUs, and so precious, and establishing them requires an IPI.
 * Atomic kmaps are lightweight and we may have NCPUS more of them.
 */
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/vaddrs.h>

/* protection bits used for atomic kmap ptes; set up in kmap_init() */
pgprot_t kmap_prot;

/* cached pte for fixmap slot FIX_KMAP_BEGIN, set up in kmap_init() */
static pte_t *kmap_pte;

void __init kmap_init(void)
{
        unsigned long address;
        pmd_t *dir;

        address = __fix_to_virt(FIX_KMAP_BEGIN);
        dir = pmd_offset(pgd_offset_k(address), address);

        /* cache the first kmap pte */
        kmap_pte = pte_offset_kernel(dir, address);
        kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE);
}
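
/*
 * Illustrative sketch (not part of the original file): kmap_init() caches
 * the pte for the first fixmap slot, and the atomic kmap code below reaches
 * the slot for index 'idx' at kmap_pte - idx, whose virtual address is
 * __fix_to_virt(FIX_KMAP_BEGIN + idx).  The helper name is hypothetical and
 * only makes that arithmetic explicit, assuming fixmap virtual addresses
 * shrink as the index grows, as the pointer arithmetic in this file implies.
 */
#if 0
static pte_t *example_kmap_slot_pte(long idx)
{
        /* pte backing fixmap slot FIX_KMAP_BEGIN + idx */
        return kmap_pte - idx;
}
#endif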

void *kmap_atomic(struct page *page)
{
        unsigned long vaddr;
        long idx, type;

        preempt_disable();
        pagefault_disable();
        /* lowmem pages are permanently mapped; no fixmap slot needed */
        if (!PageHighMem(page))
                return page_address(page);

        /* reserve the next per-CPU atomic kmap slot */
        type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

/* XXX Fix - Anton */
#if 0
        __flush_cache_one(vaddr);
#else
        flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
        /* install the page in the reserved fixmap slot */
        set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
/* XXX Fix - Anton */
#if 0
        __flush_tlb_one(vaddr);
#else
        flush_tlb_all();
#endif

        return (void *) vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
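
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * pairs kmap_atomic() with kunmap_atomic() around a short, non-sleeping
 * access to a highmem page, much like a spinlocked section.  The function
 * name below is hypothetical; memset() and PAGE_SIZE are the usual kernel
 * definitions.
 */
#if 0
static void example_zero_highmem_page(struct page *page)
{
        void *vaddr;

        vaddr = kmap_atomic(page);      /* no sleeping until unmapped */
        memset(vaddr, 0, PAGE_SIZE);    /* short, atomic-context access */
        kunmap_atomic(vaddr);           /* releases the per-CPU slot */
}
#endif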

void __kunmap_atomic(void *kvaddr)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        int type;

        /* lowmem addresses came from page_address(); nothing to undo */
        if (vaddr < FIXADDR_START) { // FIXME
                pagefault_enable();
                preempt_enable();
                return;
        }

        type = kmap_atomic_idx();

#ifdef CONFIG_DEBUG_HIGHMEM
        {
                unsigned long idx;

                idx = type + KM_TYPE_NR * smp_processor_id();
                BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));

                /* XXX Fix - Anton */
#if 0
                __flush_cache_one(vaddr);
#else
                flush_cache_all();
#endif

                /*
                 * force other mappings to Oops if they try to access
                 * this pte without first remapping it
                 */
                pte_clear(&init_mm, vaddr, kmap_pte-idx);
                /* XXX Fix - Anton */
#if 0
                __flush_tlb_one(vaddr);
#else
                flush_tlb_all();
#endif
        }
#endif

        kmap_atomic_idx_pop();
        pagefault_enable();
        preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
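
/*
 * Illustrative sketch (not part of the original file): atomic kmaps nest,
 * and because kmap_atomic_idx_push()/kmap_atomic_idx_pop() maintain a
 * per-CPU stack of fixmap slots, nested mappings must be released in
 * reverse order.  The function name below is hypothetical.
 */
#if 0
static void example_copy_highmem_page(struct page *dst, struct page *src)
{
        void *vdst, *vsrc;

        vdst = kmap_atomic(dst);
        vsrc = kmap_atomic(src);
        memcpy(vdst, vsrc, PAGE_SIZE);
        kunmap_atomic(vsrc);            /* unmap in reverse (stack) order */
        kunmap_atomic(vdst);
}
#endif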
