root/arch/alpha/include/asm/io.h

DEFINITIONS

This source file includes the following definitions.
  1. __set_hae
  2. set_hae
  3. virt_to_phys
  4. phys_to_virt
  5. virt_to_phys
  6. phys_to_virt
  7. virt_to_bus
  8. bus_to_virt
  9. REMAP1
  10. generic_ioremap
  11. generic_iounmap
  12. generic_is_ioaddr
  13. generic_is_mmio
  14. ioport_map
  15. ioport_unmap
  16. ioremap
  17. __ioremap
  18. ioremap_nocache
  19. iounmap
  20. __is_ioaddr
  21. __is_mmio
  22. ioread8
  23. ioread16
  24. iowrite8
  25. iowrite16
  26. inb
  27. inw
  28. outb
  29. outw
  30. ioread32
  31. iowrite32
  32. outl
  33. __raw_readb
  34. __raw_readw
  35. __raw_writeb
  36. __raw_writew
  37. readb
  38. readw
  39. writeb
  40. writew
  41. __raw_readl
  42. __raw_readq
  43. __raw_writel
  44. __raw_writeq
  45. readl
  46. readq
  47. writel
  48. writeq
  49. memset_io
  50. memsetw_io

   1 /* SPDX-License-Identifier: GPL-2.0 */
   2 #ifndef __ALPHA_IO_H
   3 #define __ALPHA_IO_H
   4 
   5 #ifdef __KERNEL__
   6 
   7 #include <linux/kernel.h>
   8 #include <linux/mm.h>
   9 #include <asm/compiler.h>
  10 #include <asm/pgtable.h>
  11 #include <asm/machvec.h>
  12 #include <asm/hwrpb.h>
  13 
  14 /* The generic header contains only prototypes.  Including it ensures that
  15    the implementation we have here matches that interface.  */
  16 #include <asm-generic/iomap.h>
  17 
  18 /* We don't use IO slowdowns on the Alpha, but.. */
  19 #define __SLOW_DOWN_IO  do { } while (0)
  20 #define SLOW_DOWN_IO    do { } while (0)
  21 
  22 /*
  23  * Virtual -> physical identity mapping starts at this offset
  24  */
  25 #ifdef USE_48_BIT_KSEG
  26 #define IDENT_ADDR     0xffff800000000000UL
  27 #else
  28 #define IDENT_ADDR     0xfffffc0000000000UL
  29 #endif
  30 
  31 /*
  32  * We try to avoid hae updates (thus the cache), but when we
  33  * do need to update the hae, we need to do it atomically, so
  34  * that any interrupts wouldn't get confused with the hae
  35  * register not being up-to-date with respect to the hardware
  36  * value.
  37  */
  38 extern inline void __set_hae(unsigned long new_hae)
  39 {
  40         unsigned long flags = swpipl(IPL_MAX);
  41 
  42         barrier();
  43 
  44         alpha_mv.hae_cache = new_hae;
  45         *alpha_mv.hae_register = new_hae;
  46         mb();
  47         /* Re-read to make sure it was written.  */
  48         new_hae = *alpha_mv.hae_register;
  49 
  50         setipl(flags);
  51         barrier();
  52 }
  53 
  54 extern inline void set_hae(unsigned long new_hae)
  55 {
  56         if (new_hae != alpha_mv.hae_cache)
  57                 __set_hae(new_hae);
  58 }
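/*
 * Illustrative sketch (editor addition, not part of the original header):
 * chipset sparse-space accessors split an address into an HAE window and
 * an offset within it, and call set_hae() so the register is rewritten
 * only when the window actually changes.  The window size and the helper
 * name below are hypothetical; each chipset header defines its own layout.
 */
#if 0
static inline void example_select_hae_window(unsigned long addr)
{
        set_hae(addr & ~0x07ffffffUL);  /* cached: no-op if unchanged */
}
#endif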
  59 
  60 /*
  61  * Change virtual addresses to physical addresses and vice versa.
  62  */
  63 #ifdef USE_48_BIT_KSEG
  64 static inline unsigned long virt_to_phys(void *address)
  65 {
  66         return (unsigned long)address - IDENT_ADDR;
  67 }
  68 
  69 static inline void * phys_to_virt(unsigned long address)
  70 {
  71         return (void *) (address + IDENT_ADDR);
  72 }
  73 #else
  74 static inline unsigned long virt_to_phys(void *address)
  75 {
  76         unsigned long phys = (unsigned long)address;
  77 
  78         /* Sign-extend from bit 41.  */
  79         phys <<= (64 - 41);
  80         phys = (long)phys >> (64 - 41);
  81 
  82         /* Crop to the physical address width of the processor.  */
  83         phys &= (1ul << hwrpb->pa_bits) - 1;
  84 
  85         return phys;
  86 }
  87 
  88 static inline void * phys_to_virt(unsigned long address)
  89 {
  90         return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1)));
  91 }
  92 #endif
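/*
 * Worked example (editor addition): with IDENT_ADDR == 0xfffffc0000000000,
 * virt_to_phys((void *)0xfffffc0000345000UL) keeps and sign-extends the
 * low 41 bits (0x345000), then crops the result to hwrpb->pa_bits,
 * yielding physical address 0x345000, i.e. the offset of the pointer
 * within the identity-mapped KSEG region.
 */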
  93 
  94 #define page_to_phys(page)      page_to_pa(page)
  95 
  96 /* Maximum PIO space address supported?  */
  97 #define IO_SPACE_LIMIT 0xffff
  98 
  99 /*
 100  * Change addresses as seen by the kernel (virtual) to addresses as
 101  * seen by a device (bus), and vice versa.
 102  *
 103  * Note that this only works for a limited range of kernel addresses,
 104  * and very well may not span all memory.  Consider this interface 
 105  * deprecated in favour of the DMA-mapping API.
 106  */
 107 extern unsigned long __direct_map_base;
 108 extern unsigned long __direct_map_size;
 109 
 110 static inline unsigned long __deprecated virt_to_bus(void *address)
 111 {
 112         unsigned long phys = virt_to_phys(address);
 113         unsigned long bus = phys + __direct_map_base;
 114         return phys <= __direct_map_size ? bus : 0;
 115 }
 116 #define isa_virt_to_bus virt_to_bus
 117 
 118 static inline void * __deprecated bus_to_virt(unsigned long address)
 119 {
 120         void *virt;
 121 
 122         /* This check is a sanity check but also ensures that bus address 0
 123            maps to virtual address 0 which is useful to detect null pointers
 124            (the NCR driver is much simpler if NULL pointers are preserved).  */
 125         address -= __direct_map_base;
 126         virt = phys_to_virt(address);
 127         return (long)address <= 0 ? NULL : virt;
 128 }
 129 #define isa_bus_to_virt bus_to_virt
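/*
 * Illustrative sketch (editor addition): assuming a hypothetical
 * __direct_map_base of 1GB, virt_to_bus(phys_to_virt(0x2000)) returns
 * bus address __direct_map_base + 0x2000, and bus_to_virt() of that
 * value maps back to phys_to_virt(0x2000).  Bus addresses at or below
 * __direct_map_base (including 0) come back as NULL, which is what
 * preserves NULL pointers for drivers such as the NCR one mentioned
 * above.  New code should use the DMA-mapping API instead of these
 * deprecated helpers.
 */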
 130 
 131 /*
 132  * There are different chipsets to interface the Alpha CPUs to the world.
 133  */
 134 
 135 #define IO_CONCAT(a,b)  _IO_CONCAT(a,b)
 136 #define _IO_CONCAT(a,b) a ## _ ## b
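/*
 * Example (editor addition): the two-level paste makes sure __IO_PREFIX
 * is macro-expanded before concatenation, so with __IO_PREFIX defined
 * as generic (the CONFIG_ALPHA_GENERIC case below),
 *
 *      IO_CONCAT(__IO_PREFIX,ioread8)(addr)
 *
 * expands to generic_ioread8(addr); a chipset-specific kernel gets the
 * prefix chosen by its core_*.h header instead.
 */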
 137 
 138 #ifdef CONFIG_ALPHA_GENERIC
 139 
 140 /* In a generic kernel, we always go through the machine vector.  */
 141 
 142 #define REMAP1(TYPE, NAME, QUAL)                                        \
 143 static inline TYPE generic_##NAME(QUAL void __iomem *addr)              \
 144 {                                                                       \
 145         return alpha_mv.mv_##NAME(addr);                                \
 146 }
 147 
 148 #define REMAP2(TYPE, NAME, QUAL)                                        \
 149 static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr)      \
 150 {                                                                       \
 151         alpha_mv.mv_##NAME(b, addr);                                    \
 152 }
 153 
 154 REMAP1(unsigned int, ioread8, /**/)
 155 REMAP1(unsigned int, ioread16, /**/)
 156 REMAP1(unsigned int, ioread32, /**/)
 157 REMAP1(u8, readb, const volatile)
 158 REMAP1(u16, readw, const volatile)
 159 REMAP1(u32, readl, const volatile)
 160 REMAP1(u64, readq, const volatile)
 161 
 162 REMAP2(u8, iowrite8, /**/)
 163 REMAP2(u16, iowrite16, /**/)
 164 REMAP2(u32, iowrite32, /**/)
 165 REMAP2(u8, writeb, volatile)
 166 REMAP2(u16, writew, volatile)
 167 REMAP2(u32, writel, volatile)
 168 REMAP2(u64, writeq, volatile)
 169 
 170 #undef REMAP1
 171 #undef REMAP2
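/*
 * Example (editor addition): REMAP1(u8, readb, const volatile) above
 * expands to
 *
 *      static inline u8 generic_readb(const volatile void __iomem *addr)
 *      {
 *              return alpha_mv.mv_readb(addr);
 *      }
 *
 * i.e. every generic_* accessor is an indirect call through the machine
 * vector selected at boot.
 */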
 172 
 173 extern inline void __iomem *generic_ioportmap(unsigned long a)
 174 {
 175         return alpha_mv.mv_ioportmap(a);
 176 }
 177 
 178 static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s)
 179 {
 180         return alpha_mv.mv_ioremap(a, s);
 181 }
 182 
 183 static inline void generic_iounmap(volatile void __iomem *a)
 184 {
 185         return alpha_mv.mv_iounmap(a);
 186 }
 187 
 188 static inline int generic_is_ioaddr(unsigned long a)
 189 {
 190         return alpha_mv.mv_is_ioaddr(a);
 191 }
 192 
 193 static inline int generic_is_mmio(const volatile void __iomem *a)
 194 {
 195         return alpha_mv.mv_is_mmio(a);
 196 }
 197 
 198 #define __IO_PREFIX             generic
 199 #define generic_trivial_rw_bw   0
 200 #define generic_trivial_rw_lq   0
 201 #define generic_trivial_io_bw   0
 202 #define generic_trivial_io_lq   0
 203 #define generic_trivial_iounmap 0
 204 
 205 #else
 206 
 207 #if defined(CONFIG_ALPHA_APECS)
 208 # include <asm/core_apecs.h>
 209 #elif defined(CONFIG_ALPHA_CIA)
 210 # include <asm/core_cia.h>
 211 #elif defined(CONFIG_ALPHA_IRONGATE)
 212 # include <asm/core_irongate.h>
 213 #elif defined(CONFIG_ALPHA_JENSEN)
 214 # include <asm/jensen.h>
 215 #elif defined(CONFIG_ALPHA_LCA)
 216 # include <asm/core_lca.h>
 217 #elif defined(CONFIG_ALPHA_MARVEL)
 218 # include <asm/core_marvel.h>
 219 #elif defined(CONFIG_ALPHA_MCPCIA)
 220 # include <asm/core_mcpcia.h>
 221 #elif defined(CONFIG_ALPHA_POLARIS)
 222 # include <asm/core_polaris.h>
 223 #elif defined(CONFIG_ALPHA_T2)
 224 # include <asm/core_t2.h>
 225 #elif defined(CONFIG_ALPHA_TSUNAMI)
 226 # include <asm/core_tsunami.h>
 227 #elif defined(CONFIG_ALPHA_TITAN)
 228 # include <asm/core_titan.h>
 229 #elif defined(CONFIG_ALPHA_WILDFIRE)
 230 # include <asm/core_wildfire.h>
 231 #else
 232 #error "What system is this?"
 233 #endif
 234 
 235 #endif /* GENERIC */
 236 
 237 /*
 238  * We always have external versions of these routines.
 239  */
 240 extern u8               inb(unsigned long port);
 241 extern u16              inw(unsigned long port);
 242 extern u32              inl(unsigned long port);
 243 extern void             outb(u8 b, unsigned long port);
 244 extern void             outw(u16 b, unsigned long port);
 245 extern void             outl(u32 b, unsigned long port);
 246 
 247 extern u8               readb(const volatile void __iomem *addr);
 248 extern u16              readw(const volatile void __iomem *addr);
 249 extern u32              readl(const volatile void __iomem *addr);
 250 extern u64              readq(const volatile void __iomem *addr);
 251 extern void             writeb(u8 b, volatile void __iomem *addr);
 252 extern void             writew(u16 b, volatile void __iomem *addr);
 253 extern void             writel(u32 b, volatile void __iomem *addr);
 254 extern void             writeq(u64 b, volatile void __iomem *addr);
 255 
 256 extern u8               __raw_readb(const volatile void __iomem *addr);
 257 extern u16              __raw_readw(const volatile void __iomem *addr);
 258 extern u32              __raw_readl(const volatile void __iomem *addr);
 259 extern u64              __raw_readq(const volatile void __iomem *addr);
 260 extern void             __raw_writeb(u8 b, volatile void __iomem *addr);
 261 extern void             __raw_writew(u16 b, volatile void __iomem *addr);
 262 extern void             __raw_writel(u32 b, volatile void __iomem *addr);
 263 extern void             __raw_writeq(u64 b, volatile void __iomem *addr);
 264 
 265 /*
 266  * Mapping from port numbers to __iomem space is pretty easy.
 267  */
 268 
 269 /* These two have to be extern inline because of the extern prototype from
 270    <asm-generic/iomap.h>.  It is not legal to mix "extern" and "static" for
 271    the same declaration.  */
 272 extern inline void __iomem *ioport_map(unsigned long port, unsigned int size)
 273 {
 274         return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
 275 }
 276 
 277 extern inline void ioport_unmap(void __iomem *addr)
 278 {
 279 }
 280 
 281 static inline void __iomem *ioremap(unsigned long port, unsigned long size)
 282 {
 283         return IO_CONCAT(__IO_PREFIX,ioremap) (port, size);
 284 }
 285 
 286 static inline void __iomem *__ioremap(unsigned long port, unsigned long size,
 287                                       unsigned long flags)
 288 {
 289         return ioremap(port, size);
 290 }
 291 
 292 static inline void __iomem * ioremap_nocache(unsigned long offset,
 293                                              unsigned long size)
 294 {
 295         return ioremap(offset, size);
 296 }
 297 
 298 #define ioremap_wc ioremap_nocache
 299 #define ioremap_uc ioremap_nocache
 300 
 301 static inline void iounmap(volatile void __iomem *addr)
 302 {
 303         IO_CONCAT(__IO_PREFIX,iounmap)(addr);
 304 }
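/*
 * Illustrative sketch (editor addition): typical driver-style use of the
 * mapping helpers above.  The physical base address, the register offset
 * and the function name are hypothetical.
 */
#if 0
static inline u32 example_read_reg(unsigned long bar_phys)
{
        void __iomem *regs = ioremap(bar_phys, 0x1000);
        u32 val;

        if (!regs)
                return 0;
        val = readl(regs + 0x10);       /* hypothetical register offset */
        iounmap(regs);
        return val;
}
#endif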
 305 
 306 static inline int __is_ioaddr(unsigned long addr)
 307 {
 308         return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr);
 309 }
 310 #define __is_ioaddr(a)          __is_ioaddr((unsigned long)(a))
 311 
 312 static inline int __is_mmio(const volatile void __iomem *addr)
 313 {
 314         return IO_CONCAT(__IO_PREFIX,is_mmio)(addr);
 315 }
 316 
 317 
 318 /*
 319  * If the actual I/O bits are sufficiently trivial, then expand inline.
 320  */
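/*
 * Note (editor addition): on a generic kernel the trivial_* flags above
 * are all 0, so none of the inline blocks below are compiled and the
 * extern accessors declared earlier (implemented out of line elsewhere
 * in arch/alpha) are used.  A chipset header that instead defines, say,
 * its trivial_io_bw flag as 1 gets inb()/outb() and the byte/word
 * ioread/iowrite routines expanded inline around its own accessors,
 * with the memory barriers added here.
 */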
 321 
 322 #if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
 323 extern inline unsigned int ioread8(void __iomem *addr)
 324 {
 325         unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
 326         mb();
 327         return ret;
 328 }
 329 
 330 extern inline unsigned int ioread16(void __iomem *addr)
 331 {
 332         unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
 333         mb();
 334         return ret;
 335 }
 336 
 337 extern inline void iowrite8(u8 b, void __iomem *addr)
 338 {
 339         mb();
 340         IO_CONCAT(__IO_PREFIX, iowrite8)(b, addr);
 341 }
 342 
 343 extern inline void iowrite16(u16 b, void __iomem *addr)
 344 {
 345         mb();
 346         IO_CONCAT(__IO_PREFIX, iowrite16)(b, addr);
 347 }
 348 
 349 extern inline u8 inb(unsigned long port)
 350 {
 351         return ioread8(ioport_map(port, 1));
 352 }
 353 
 354 extern inline u16 inw(unsigned long port)
 355 {
 356         return ioread16(ioport_map(port, 2));
 357 }
 358 
 359 extern inline void outb(u8 b, unsigned long port)
 360 {
 361         iowrite8(b, ioport_map(port, 1));
 362 }
 363 
 364 extern inline void outw(u16 b, unsigned long port)
 365 {
 366         iowrite16(b, ioport_map(port, 2));
 367 }
 368 #endif
 369 
 370 #if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
 371 extern inline unsigned int ioread32(void __iomem *addr)
 372 {
 373         unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
 374         mb();
 375         return ret;
 376 }
 377 
 378 extern inline void iowrite32(u32 b, void __iomem *addr)
 379 {
 380         mb();
 381         IO_CONCAT(__IO_PREFIX, iowrite32)(b, addr);
 382 }
 383 
 384 extern inline u32 inl(unsigned long port)
 385 {
 386         return ioread32(ioport_map(port, 4));
 387 }
 388 
 389 extern inline void outl(u32 b, unsigned long port)
 390 {
 391         iowrite32(b, ioport_map(port, 4));
 392 }
 393 #endif
 394 
 395 #if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
 396 extern inline u8 __raw_readb(const volatile void __iomem *addr)
 397 {
 398         return IO_CONCAT(__IO_PREFIX,readb)(addr);
 399 }
 400 
 401 extern inline u16 __raw_readw(const volatile void __iomem *addr)
 402 {
 403         return IO_CONCAT(__IO_PREFIX,readw)(addr);
 404 }
 405 
 406 extern inline void __raw_writeb(u8 b, volatile void __iomem *addr)
 407 {
 408         IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
 409 }
 410 
 411 extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
 412 {
 413         IO_CONCAT(__IO_PREFIX,writew)(b, addr);
 414 }
 415 
 416 extern inline u8 readb(const volatile void __iomem *addr)
 417 {
 418         u8 ret = __raw_readb(addr);
 419         mb();
 420         return ret;
 421 }
 422 
 423 extern inline u16 readw(const volatile void __iomem *addr)
 424 {
 425         u16 ret = __raw_readw(addr);
 426         mb();
 427         return ret;
 428 }
 429 
 430 extern inline void writeb(u8 b, volatile void __iomem *addr)
 431 {
 432         mb();
 433         __raw_writeb(b, addr);
 434 }
 435 
 436 extern inline void writew(u16 b, volatile void __iomem *addr)
 437 {
 438         mb();
 439         __raw_writew(b, addr);
 440 }
 441 #endif
 442 
 443 #if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
 444 extern inline u32 __raw_readl(const volatile void __iomem *addr)
 445 {
 446         return IO_CONCAT(__IO_PREFIX,readl)(addr);
 447 }
 448 
 449 extern inline u64 __raw_readq(const volatile void __iomem *addr)
 450 {
 451         return IO_CONCAT(__IO_PREFIX,readq)(addr);
 452 }
 453 
 454 extern inline void __raw_writel(u32 b, volatile void __iomem *addr)
 455 {
 456         IO_CONCAT(__IO_PREFIX,writel)(b, addr);
 457 }
 458 
 459 extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
 460 {
 461         IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
 462 }
 463 
 464 extern inline u32 readl(const volatile void __iomem *addr)
 465 {
 466         u32 ret = __raw_readl(addr);
 467         mb();
 468         return ret;
 469 }
 470 
 471 extern inline u64 readq(const volatile void __iomem *addr)
 472 {
 473         u64 ret = __raw_readq(addr);
 474         mb();
 475         return ret;
 476 }
 477 
 478 extern inline void writel(u32 b, volatile void __iomem *addr)
 479 {
 480         mb();
 481         __raw_writel(b, addr);
 482 }
 483 
 484 extern inline void writeq(u64 b, volatile void __iomem *addr)
 485 {
 486         mb();
 487         __raw_writeq(b, addr);
 488 }
 489 #endif
 490 
 491 #define ioread16be(p) be16_to_cpu(ioread16(p))
 492 #define ioread32be(p) be32_to_cpu(ioread32(p))
 493 #define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p))
 494 #define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p))
 495 
 496 #define inb_p           inb
 497 #define inw_p           inw
 498 #define inl_p           inl
 499 #define outb_p          outb
 500 #define outw_p          outw
 501 #define outl_p          outl
 502 #define readb_relaxed(addr)     __raw_readb(addr)
 503 #define readw_relaxed(addr)     __raw_readw(addr)
 504 #define readl_relaxed(addr)     __raw_readl(addr)
 505 #define readq_relaxed(addr)     __raw_readq(addr)
 506 #define writeb_relaxed(b, addr) __raw_writeb(b, addr)
 507 #define writew_relaxed(b, addr) __raw_writew(b, addr)
 508 #define writel_relaxed(b, addr) __raw_writel(b, addr)
 509 #define writeq_relaxed(b, addr) __raw_writeq(b, addr)
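/*
 * Note (editor addition): the _relaxed accessors above map straight to
 * the __raw_* routines, so unlike readl()/writel() they omit the mb()
 * that orders MMIO against normal memory (and thus DMA) accesses.  A
 * driver issuing a burst of writel_relaxed() calls is expected to
 * provide its own barrier (for example a final writel() or an explicit
 * mb()) before telling the device to consume the data.
 */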
 510 
 511 /*
 512  * String version of IO memory access ops:
 513  */
 514 extern void memcpy_fromio(void *, const volatile void __iomem *, long);
 515 extern void memcpy_toio(volatile void __iomem *, const void *, long);
 516 extern void _memset_c_io(volatile void __iomem *, unsigned long, long);
 517 
 518 static inline void memset_io(volatile void __iomem *addr, u8 c, long len)
 519 {
 520         _memset_c_io(addr, 0x0101010101010101UL * c, len);
 521 }
 522 
 523 #define __HAVE_ARCH_MEMSETW_IO
 524 static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len)
 525 {
 526         _memset_c_io(addr, 0x0001000100010001UL * c, len);
 527 }
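/*
 * Worked example (editor addition): the multiplications replicate the
 * value across the long that _memset_c_io() stores: memset_io(addr,
 * 0xab, len) passes 0xabababababababab, and memsetw_io(addr, 0x1234,
 * len) passes 0x1234123412341234.
 */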
 528 
 529 /*
 530  * String versions of in/out ops:
 531  */
 532 extern void insb (unsigned long port, void *dst, unsigned long count);
 533 extern void insw (unsigned long port, void *dst, unsigned long count);
 534 extern void insl (unsigned long port, void *dst, unsigned long count);
 535 extern void outsb (unsigned long port, const void *src, unsigned long count);
 536 extern void outsw (unsigned long port, const void *src, unsigned long count);
 537 extern void outsl (unsigned long port, const void *src, unsigned long count);
 538 
 539 /*
 540  * The Alpha Jensen hardware for some rather strange reason puts
 541  * the RTC clock at 0x170 instead of 0x70. Probably due to some
 542  * misguided idea about using 0x70 for NMI stuff.
 543  *
 544  * These defines will override the defaults when doing RTC queries
 545  */
 546 
 547 #ifdef CONFIG_ALPHA_GENERIC
 548 # define RTC_PORT(x)    ((x) + alpha_mv.rtc_port)
 549 #else
 550 # ifdef CONFIG_ALPHA_JENSEN
 551 #  define RTC_PORT(x)   (0x170+(x))
 552 # else
 553 #  define RTC_PORT(x)   (0x70 + (x))
 554 # endif
 555 #endif
 556 #define RTC_ALWAYS_BCD  0
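/*
 * Example (editor addition): RTC_PORT(0) is the CMOS/RTC index port and
 * RTC_PORT(1) the data port, so they evaluate to 0x70/0x71 on most
 * platforms, 0x170/0x171 on Jensen, and alpha_mv.rtc_port plus the
 * offset on a generic kernel, where the value is chosen at boot.
 */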
 557 
 558 /*
 559  * Some mucking forons use if[n]def writeq to check if platform has it.
 560  * It's a bloody bad idea and we probably want ARCH_HAS_WRITEQ for them
 561  * to play with; for now just use cpp anti-recursion logics and make sure
 562  * that damn thing is defined and expands to itself.
 563  */
 564 
 565 #define writeq writeq
 566 #define readq readq
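/*
 * Example (editor addition): the self-referencing defines above keep a
 * portable-driver construct like the following (a hypothetical 32-bit
 * fallback) from kicking in on Alpha, because "#ifndef writeq" sees
 * that writeq is already defined:
 *
 *      #ifndef writeq
 *      #define writeq(v, a)                               \
 *              do {                                       \
 *                      writel((u32)(v), (a));             \
 *                      writel((u32)((v) >> 32), (a) + 4); \
 *              } while (0)
 *      #endif
 */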
 567 
 568 /*
 569  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 570  * access
 571  */
 572 #define xlate_dev_mem_ptr(p)    __va(p)
 573 
 574 /*
 575  * Convert a virtual cached pointer to an uncached pointer
 576  */
 577 #define xlate_dev_kmem_ptr(p)   p
 578 
 579 #endif /* __KERNEL__ */
 580 
 581 #endif /* __ALPHA_IO_H */
