/*
 * safe read and write memory routines callable while atomic
 *
 * Copyright 2012 Imagination Technologies
 */

#include <linux/uaccess.h>
#include <asm/io.h>

/*
 * The generic probe_kernel_write() uses the user copy code, which can split
 * the writes if the source is unaligned, and repeats writes to make exceptions
 * precise. We override it here to avoid these things happening to memory
 * mapped IO memory where they could have undesired effects.
 * Due to the use of the CACHERD instruction this only works on Meta2 onwards.
 */
#ifdef CONFIG_METAG_META21
long probe_kernel_write(void *dst, const void *src, size_t size)
{
	unsigned long ldst = (unsigned long)dst;
	void __iomem *iodst = (void __iomem *)dst;
	unsigned long lsrc = (unsigned long)src;
	const u8 *psrc = (u8 *)src;
	unsigned int pte, i;
	u8 bounce[8] __aligned(8);

	if (!size)
		return 0;

	/* Use the write combine bit to decide if the destination is MMIO. */
	pte = __builtin_meta2_cacherd(dst);

	/* Check the mapping is valid and writeable. */
	if ((pte & (MMCU_ENTRY_WR_BIT | MMCU_ENTRY_VAL_BIT))
	    != (MMCU_ENTRY_WR_BIT | MMCU_ENTRY_VAL_BIT))
		return -EFAULT;

	/* Fall back to generic version for cases we're not interested in. */
	if (pte & MMCU_ENTRY_WRC_BIT	|| /* write combined memory */
	    (ldst & (size - 1))		|| /* destination unaligned */
	    size > 8			|| /* more than max write size */
	    (size & (size - 1)))	   /* non power of 2 size */
		return __probe_kernel_write(dst, src, size);

	/* If src is unaligned, copy to the aligned bounce buffer first. */
	if (lsrc & (size - 1)) {
		for (i = 0; i < size; ++i)
			bounce[i] = psrc[i];
		psrc = bounce;
	}

	/* Single aligned MMIO write of the exact requested size. */
	switch (size) {
	case 1:
		writeb(*psrc, iodst);
		break;
	case 2:
		writew(*(const u16 *)psrc, iodst);
		break;
	case 4:
		writel(*(const u32 *)psrc, iodst);
		break;
	case 8:
		writeq(*(const u64 *)psrc, iodst);
		break;
	}
	return 0;
}
#endif