root/arch/sh/mm/cache-sh4.c


DEFINITIONS

This source file includes the following definitions:
  1. sh4_flush_icache_range
  2. flush_cache_one
  3. sh4_flush_dcache_page
  4. flush_icache_all
  5. flush_dcache_all
  6. sh4_flush_cache_all
  7. sh4_flush_cache_mm
  8. sh4_flush_cache_page
  9. sh4_flush_cache_range
  10. __flush_cache_one
  11. sh4_cache_init

/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001 - 2009  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/cache_insns.h>
#include <asm/cacheflush.h>

/*
 * The maximum number of pages we support when doing ranged I-cache
 * flushing (sh4_flush_icache_range). Anything exceeding this simply
 * flushes the caches in their entirety instead.
 */
#define MAX_ICACHE_PAGES	32

static void __flush_cache_one(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);

/*
 * Write back the given range of the D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module, the a.out loader,
 * signal handler setup, and the kprobes code.
 */
static void sh4_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long start, end;
	unsigned long flags, v;
	int i;

	start = data->addr1;
	end = data->addr2;

	/* If there are too many pages then just blow away the caches */
	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
		local_flush_cache_all(NULL);
		return;
	}

	/*
	 * Selectively flush d-cache then invalidate the i-cache.
	 * This is inefficient, so only use this for small ranges.
	 */
	start &= ~(L1_CACHE_BYTES-1);
	end += L1_CACHE_BYTES-1;
	end &= ~(L1_CACHE_BYTES-1);

	local_irq_save(flags);
	jump_to_uncached();

	for (v = start; v < end; v += L1_CACHE_BYTES) {
		unsigned long icacheaddr;
		int j, n;

		__ocbwb(v);

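		/*
		 * Locate this line's slot in the memory-mapped I-cache
		 * address array; entry_mask extracts the set-index bits
		 * of the virtual address.
		 */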
		icacheaddr = CACHE_IC_ADDRESS_ARRAY | (v &
				cpu_data->icache.entry_mask);

		/* Clear the i-cache line valid-bit in each way and alias */
		n = boot_cpu_data.icache.n_aliases;
		for (i = 0; i < cpu_data->icache.ways; i++) {
			for (j = 0; j < n; j++)
				__raw_writel(0, icacheaddr + (j * PAGE_SIZE));
			icacheaddr += cpu_data->icache.way_incr;
		}
	}

	back_to_cached();
	local_irq_restore(flags);
}

static inline void flush_cache_one(unsigned long start, unsigned long phys)
{
	unsigned long flags, exec_offset = 0;

	/*
	 * All types of SH-4 require PC to be uncached to operate on the I-cache.
	 * Some types of SH-4 require PC to be uncached to operate on the D-cache.
	 */
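	/*
	 * cached_to_uncached is the offset from the cached (P1) to the
	 * uncached (P2) view of memory; a non-zero exec_offset makes
	 * __flush_cache_one() execute from the uncached mirror.
	 */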
	if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = cached_to_uncached;

	local_irq_save(flags);
	__flush_cache_one(start, phys, exec_offset);
	local_irq_restore(flags);
}

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh4_flush_dcache_page(void *arg)
{
	struct page *page = arg;
	unsigned long addr = (unsigned long)page_address(page);
#ifndef CONFIG_SMP
	struct address_space *mapping = page_mapping_file(page);

	if (mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &page->flags);
	else
#endif
		flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
				(addr & shm_align_mask), page_to_phys(page));

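	/* Order the address-array writes before any subsequent stores. */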
	wmb();
}

/* TODO: Selective icache invalidation through IC address array.. */
static void flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_uncached();

	/* Flush I-cache: setting the ICI bit in CCR invalidates it entirely */
	ccr = __raw_readl(SH_CCR);
	ccr |= CCR_CACHE_ICI;
	__raw_writel(ccr, SH_CCR);

	/*
	 * back_to_cached() will take care of the barrier for us, don't add
	 * another one!
	 */

	back_to_cached();
	local_irq_restore(flags);
}

static void flush_dcache_all(void)
{
	unsigned long addr, end_addr, entry_offset;

	end_addr = CACHE_OC_ADDRESS_ARRAY +
		(current_cpu_data.dcache.sets <<
		 current_cpu_data.dcache.entry_shift) *
			current_cpu_data.dcache.ways;

	entry_offset = 1 << current_cpu_data.dcache.entry_shift;

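	/*
	 * Writing zero to an OC address-array entry clears its valid and
	 * dirty bits; the hardware writes the line back first if it was
	 * dirty.  The writes are unrolled eight deep to cut loop overhead.
	 */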
	for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; ) {
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
	}
}

static void sh4_flush_cache_all(void *unused)
{
	flush_dcache_all();
	flush_icache_all();
}

/*
 * Note: (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache.  The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag).  It's no different here.
 *
 * Caller takes mm->mmap_sem.
 */
static void sh4_flush_cache_mm(void *arg)
{
	struct mm_struct *mm = arg;

	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
		return;

	flush_dcache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
static void sh4_flush_cache_page(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	struct page *page;
	unsigned long address, pfn, phys;
	int map_coherent = 0;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	void *vaddr;

	vma = data->vma;
	address = data->addr1 & PAGE_MASK;
	pfn = data->addr2;
	phys = pfn << PAGE_SHIFT;
	page = pfn_to_page(pfn);

	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;

	pgd = pgd_offset(vma->vm_mm, address);
	pud = pud_offset(pgd, address);
	pmd = pmd_offset(pud, address);
	pte = pte_offset_kernel(pmd, address);

	/* If the page isn't present, there is nothing to do here. */
	if (!(pte_val(*pte) & _PAGE_PRESENT))
		return;

	if (vma->vm_mm == current->active_mm)
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		map_coherent = (current_cpu_data.dcache.n_aliases &&
			test_bit(PG_dcache_clean, &page->flags) &&
			page_mapcount(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, address);
		else
			vaddr = kmap_atomic(page);

		address = (unsigned long)vaddr;
	}

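	/*
	 * The low bits of the address select the cache colour: the OC
	 * address-array slot chosen here covers the same sets as the
	 * user mapping, so any aliased lines are written back and purged.
	 */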
	flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
			(address & shm_align_mask), phys);

	if (vma->vm_flags & VM_EXEC)
		flush_icache_all();

	if (vaddr) {
		if (map_coherent)
			kunmap_coherent(vaddr);
		else
			kunmap_atomic(vaddr);
	}
}

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
static void sh4_flush_cache_range(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	unsigned long start, end;

	vma = data->vma;
	start = data->addr1;
	end = data->addr2;

	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;

	/*
	 * If the cache is only 4k-per-way, there are never any 'aliases'.
	 * Since the cache is physically tagged, the data can just be left
	 * in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	flush_dcache_all();

	if (vma->vm_flags & VM_EXEC)
		flush_icache_all();
}

/**
 * __flush_cache_one
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed.  The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_one(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &boot_cpu_data.dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required.).
	 *
	 * FIXME:
	 *
	 *	If I write "=r" for the (temp_pc), it puts this in r6 hence
	 *	trashing exec_offset before it's been added on - why?  Hence
	 *	"=&r" as a 'workaround'
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));
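
	/*
	 * From here on we may be executing from the P2 (uncached) mirror
	 * of this code; the final rts still returns to the cached caller.
	 */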

	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * a pointless head-of-loop check for 0 iterations.
	 */
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}

extern void __weak sh4__flush_region_init(void);

/*
 * SH-4 has a virtually indexed, physically tagged cache.
 */
void __init sh4_cache_init(void)
{
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		__raw_readl(CCN_PVR),
		__raw_readl(CCN_CVR),
		__raw_readl(CCN_PRR));

	local_flush_icache_range	= sh4_flush_icache_range;
	local_flush_dcache_page		= sh4_flush_dcache_page;
	local_flush_cache_all		= sh4_flush_cache_all;
	local_flush_cache_mm		= sh4_flush_cache_mm;
	local_flush_cache_dup_mm	= sh4_flush_cache_mm;
	local_flush_cache_page		= sh4_flush_cache_page;
	local_flush_cache_range		= sh4_flush_cache_range;

	sh4__flush_region_init();
}
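
/*
 * Usage sketch (illustrative, not part of this file): the generic code
 * in arch/sh/mm/cache.c packs its arguments into a struct flusher_data
 * and fires the per-CPU hooks installed above, roughly:
 *
 *	struct flusher_data data;
 *
 *	data.vma   = NULL;
 *	data.addr1 = start;
 *	data.addr2 = end;
 *	cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
 */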
