root/arch/xtensa/mm/highmem.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. kmap_waitqueues_init
  2. kmap_waitqueues_init
  3. kmap_idx
  4. kmap_atomic
  5. __kunmap_atomic
  6. kmap_init

   1 /*
   2  * High memory support for Xtensa architecture
   3  *
   4  * This file is subject to the terms and conditions of the GNU General
   5  * Public License.  See the file "COPYING" in the main directory of
   6  * this archive for more details.
   7  *
   8  * Copyright (C) 2014 Cadence Design Systems Inc.
   9  */
  10 
  11 #include <linux/export.h>
  12 #include <linux/highmem.h>
  13 #include <asm/tlbflush.h>
  14 
/* Cached pte of the first kmap fixmap slot; set once in kmap_init(). */
static pte_t *kmap_pte;

#if DCACHE_WAY_SIZE > PAGE_SIZE
/*
 * When a D-cache way is larger than a page, pages of different cache
 * colors alias.  Keep one pkmap allocation cursor and one wait queue
 * per color; these are non-static, so presumably they are consumed by
 * the generic pkmap code via an arch header -- verify against callers.
 */
unsigned int last_pkmap_nr_arr[DCACHE_N_COLORS];
wait_queue_head_t pkmap_map_wait_arr[DCACHE_N_COLORS];

/* Initialize one pkmap wait queue per data-cache color. */
static void __init kmap_waitqueues_init(void)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(pkmap_map_wait_arr); ++i)
                init_waitqueue_head(pkmap_map_wait_arr + i);
}
#else
/* Single cache color: no per-color wait queues to set up. */
static inline void kmap_waitqueues_init(void)
{
}
#endif
  33 
  34 static inline enum fixed_addresses kmap_idx(int type, unsigned long color)
  35 {
  36         return (type + KM_TYPE_NR * smp_processor_id()) * DCACHE_N_COLORS +
  37                 color;
  38 }
  39 
/*
 * Atomically map a (possibly highmem) page into a per-cpu fixmap slot.
 *
 * Disables preemption and pagefaults for the lifetime of the mapping;
 * the caller must balance this with __kunmap_atomic().  Lowmem pages
 * are returned via their permanent kernel address without consuming a
 * fixmap slot (the disables are still balanced by __kunmap_atomic()).
 */
void *kmap_atomic(struct page *page)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

        /*
         * Pick a slot whose cache color tracks the page's physical
         * color, so the temporary mapping does not alias in the D-cache.
         */
        idx = kmap_idx(kmap_atomic_idx_push(),
                       DCACHE_ALIAS(page_to_phys(page)));
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        /* The slot must be free, i.e. its pte already cleared. */
        BUG_ON(!pte_none(*(kmap_pte + idx)));
#endif
        /*
         * NOTE(review): maps with PAGE_KERNEL_EXEC -- an executable
         * mapping for arbitrary highmem data pages looks stronger than
         * necessary; confirm whether PAGE_KERNEL would suffice.
         */
        set_pte(kmap_pte + idx, mk_pte(page, PAGE_KERNEL_EXEC));

        return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
  61 
/*
 * Tear down a mapping established by kmap_atomic() and re-enable
 * pagefaults and preemption.
 *
 * Lowmem addresses (what kmap_atomic() returns for non-highmem pages)
 * lie outside the fixmap range and only get the enable calls.
 */
void __kunmap_atomic(void *kvaddr)
{
        if (kvaddr >= (void *)FIXADDR_START &&
            kvaddr < (void *)FIXADDR_TOP) {
                /*
                 * Recompute the fixmap index from the topmost atomic
                 * kmap type and the vaddr's cache color -- mirrors the
                 * computation done in kmap_atomic().
                 */
                int idx = kmap_idx(kmap_atomic_idx(),
                                   DCACHE_ALIAS((unsigned long)kvaddr));

                /*
                 * Force other mappings to Oops if they'll try to access this
                 * pte without first remap it.  Keeping stale mappings around
                 * is a bad idea also, in case the page changes cacheability
                 * attributes or becomes a protected page in a hypervisor.
                 */
                pte_clear(&init_mm, kvaddr, kmap_pte + idx);
                local_flush_tlb_kernel_range((unsigned long)kvaddr,
                                             (unsigned long)kvaddr + PAGE_SIZE);

                kmap_atomic_idx_pop();
        }

        pagefault_enable();
        preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
  86 
  87 void __init kmap_init(void)
  88 {
  89         unsigned long kmap_vstart;
  90 
  91         /* cache the first kmap pte */
  92         kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
  93         kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
  94         kmap_waitqueues_init();
  95 }

/* [<][>][^][v][top][bottom][index][help] */