#ifndef __ASM_SH_IO_H
#define __ASM_SH_IO_H

/*
 * Convention:
 *	read{b,w,l}/write{b,w,l} are for PCI,
 *	while in{b,w,l}/out{b,w,l} are for ISA
 * These may (will) be platform specific functions.
 * In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p
 * and 'string' versions: ins{b,w,l}/outs{b,w,l}
 * For read{b,w,l} and write{b,w,l} there are also __raw versions, which
 * do not have a memory barrier after them.
 *
 * In addition, we have
 *	ctrl_in{b,w,l}/ctrl_out{b,w,l} for SuperH specific I/O,
 * which are processor specific.
 */

/*
 * We follow the Alpha convention here:
 *	__inb expands to an inline function call (which calls via the mv)
 *	_inb  is a real function call (note ___raw fns are _ version of __raw)
 *	inb   by default expands to _inb, but the machine specific code may
 *	      define it to __inb if it chooses.
 */
#include <asm/cache.h>
#include <asm/system.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm-generic/iomap.h>

#ifdef __KERNEL__

/*
 * Depending on which platform we are running on, we need different
 * I/O functions.
 */
#define __IO_PREFIX	generic
#include <asm/io_generic.h>

#define maybebadio(port) \
	printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \
	       __FUNCTION__, __LINE__, (port), (u32)__builtin_return_address(0))

/*
 * Since boards are able to define their own set of I/O routines through
 * their respective machine vector, we always wrap through the mv.
 *
 * Also, in the event that a board hasn't provided its own definition for
 * a given routine, it will be wrapped to generic code at run-time.
 */

#define __inb(p)	sh_mv.mv_inb((p))
#define __inw(p)	sh_mv.mv_inw((p))
#define __inl(p)	sh_mv.mv_inl((p))
#define __outb(x,p)	sh_mv.mv_outb((x),(p))
#define __outw(x,p)	sh_mv.mv_outw((x),(p))
#define __outl(x,p)	sh_mv.mv_outl((x),(p))

#define __inb_p(p)	sh_mv.mv_inb_p((p))
#define __inw_p(p)	sh_mv.mv_inw_p((p))
#define __inl_p(p)	sh_mv.mv_inl_p((p))
#define __outb_p(x,p)	sh_mv.mv_outb_p((x),(p))
#define __outw_p(x,p)	sh_mv.mv_outw_p((x),(p))
#define __outl_p(x,p)	sh_mv.mv_outl_p((x),(p))

#define __insb(p,b,c)	sh_mv.mv_insb((p), (b), (c))
#define __insw(p,b,c)	sh_mv.mv_insw((p), (b), (c))
#define __insl(p,b,c)	sh_mv.mv_insl((p), (b), (c))
#define __outsb(p,b,c)	sh_mv.mv_outsb((p), (b), (c))
#define __outsw(p,b,c)	sh_mv.mv_outsw((p), (b), (c))
#define __outsl(p,b,c)	sh_mv.mv_outsl((p), (b), (c))

#define __readb(a)	sh_mv.mv_readb((a))
#define __readw(a)	sh_mv.mv_readw((a))
#define __readl(a)	sh_mv.mv_readl((a))
#define __writeb(v,a)	sh_mv.mv_writeb((v),(a))
#define __writew(v,a)	sh_mv.mv_writew((v),(a))
#define __writel(v,a)	sh_mv.mv_writel((v),(a))

#define inb		__inb
#define inw		__inw
#define inl		__inl
#define outb		__outb
#define outw		__outw
#define outl		__outl

#define inb_p		__inb_p
#define inw_p		__inw_p
#define inl_p		__inl_p
#define outb_p		__outb_p
#define outw_p		__outw_p
#define outl_p		__outl_p

#define insb		__insb
#define insw		__insw
#define insl		__insl
#define outsb		__outsb
#define outsw		__outsw
#define outsl		__outsl
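
/*
 * Usage sketch (illustrative only, never built; not part of the original
 * header): a board points the machine vector hooks above at its own port
 * I/O routines, and the wrappers dispatch through sh_mv at run time.
 * "myboard" and MYBOARD_PIO_BASE are made-up names.
 */
#if 0
static unsigned char myboard_inb(unsigned long port)
{
	/* apply a hypothetical board-specific offset before the access */
	return *(volatile unsigned char *)(port + MYBOARD_PIO_BASE);
}

struct sh_machine_vector mv_myboard = {
	.mv_inb	= myboard_inb,
	/* ... remaining mv_* hooks; unset ones fall back to generic code ... */
};
#endif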

#define __raw_readb(a)		__readb((void __iomem *)(a))
#define __raw_readw(a)		__readw((void __iomem *)(a))
#define __raw_readl(a)		__readl((void __iomem *)(a))
#define __raw_writeb(v, a)	__writeb(v, (void __iomem *)(a))
#define __raw_writew(v, a)	__writew(v, (void __iomem *)(a))
#define __raw_writel(v, a)	__writel(v, (void __iomem *)(a))

void __raw_writesl(unsigned long addr, const void *data, int longlen);
void __raw_readsl(unsigned long addr, void *data, int longlen);

/*
 * The platform header files may define some of these macros to use
 * the inlined versions where appropriate. These macros may also be
 * redefined by userlevel programs.
 */
#ifdef __raw_readb
# define readb(a)	({ unsigned int r_ = __raw_readb(a); mb(); r_; })
#endif
#ifdef __raw_readw
# define readw(a)	({ unsigned int r_ = __raw_readw(a); mb(); r_; })
#endif
#ifdef __raw_readl
# define readl(a)	({ unsigned int r_ = __raw_readl(a); mb(); r_; })
#endif

#ifdef __raw_writeb
# define writeb(v,a)	({ __raw_writeb((v),(a)); mb(); })
#endif
#ifdef __raw_writew
# define writew(v,a)	({ __raw_writew((v),(a)); mb(); })
#endif
#ifdef __raw_writel
# define writel(v,a)	({ __raw_writel((v),(a)); mb(); })
#endif

#define writesl		__raw_writesl
#define readsl		__raw_readsl

#define readb_relaxed(a)	readb(a)
#define readw_relaxed(a)	readw(a)
#define readl_relaxed(a)	readl(a)

/* Simple MMIO */
#define ioread8(a)		readb(a)
#define ioread16(a)		readw(a)
#define ioread16be(a)		be16_to_cpu(__raw_readw((a)))
#define ioread32(a)		readl(a)
#define ioread32be(a)		be32_to_cpu(__raw_readl((a)))

#define iowrite8(v,a)		writeb((v),(a))
#define iowrite16(v,a)		writew((v),(a))
#define iowrite16be(v,a)	__raw_writew(cpu_to_be16((v)),(a))
#define iowrite32(v,a)		writel((v),(a))
#define iowrite32be(v,a)	__raw_writel(cpu_to_be32((v)),(a))

#define ioread8_rep(a,d,c)	insb((a),(d),(c))
#define ioread16_rep(a,d,c)	insw((a),(d),(c))
#define ioread32_rep(a,d,c)	insl((a),(d),(c))

#define iowrite8_rep(a,s,c)	outsb((a),(s),(c))
#define iowrite16_rep(a,s,c)	outsw((a),(s),(c))
#define iowrite32_rep(a,s,c)	outsl((a),(s),(c))

#define mmiowb()	wmb()	/* synco on SH-4A, otherwise a nop */
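
/*
 * Usage sketch (illustrative only, never built; not part of the original
 * header): the barrier-ful readl()/writel() pair above is what a driver
 * would normally use for a read-modify-write of a memory-mapped register.
 * The register offset and the function name are made up.
 */
#if 0
static void example_set_enable_bit(void __iomem *regs)
{
	unsigned int v;

	v = readl(regs + 0x10);		/* raw read, then mb() */
	writel(v | 0x1, regs + 0x10);	/* raw write, then mb() */
	mmiowb();			/* order MMIO before a spin_unlock() */
}
#endif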

/*
 * This function provides a method for the generic case where a board-specific
 * ioport_map simply needs to return the port + some arbitrary port base.
 *
 * We use this at board setup time to implicitly set the port base, and
 * as a result, we can use the generic ioport_map.
 */
static inline void __set_io_port_base(unsigned long pbase)
{
	extern unsigned long generic_io_base;

	generic_io_base = pbase;
}

/* We really want to try and get these to memcpy etc */
extern void memcpy_fromio(void *, volatile void __iomem *, unsigned long);
extern void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
extern void memset_io(volatile void __iomem *, int, unsigned long);

/* SuperH on-chip I/O functions */
static inline unsigned char ctrl_inb(unsigned long addr)
{
	return *(volatile unsigned char*)addr;
}

static inline unsigned short ctrl_inw(unsigned long addr)
{
	return *(volatile unsigned short*)addr;
}

static inline unsigned int ctrl_inl(unsigned long addr)
{
	return *(volatile unsigned long*)addr;
}

static inline void ctrl_outb(unsigned char b, unsigned long addr)
{
	*(volatile unsigned char*)addr = b;
}

static inline void ctrl_outw(unsigned short b, unsigned long addr)
{
	*(volatile unsigned short*)addr = b;
}

static inline void ctrl_outl(unsigned int b, unsigned long addr)
{
	*(volatile unsigned long*)addr = b;
}

static inline void ctrl_delay(void)
{
	ctrl_inw(P2SEG);
}

#define IO_SPACE_LIMIT 0xffffffff

#ifdef CONFIG_MMU
/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are trivial on the 1:1 Linux/SuperH mapping.
 */
static inline unsigned long virt_to_phys(volatile void *address)
{
	return PHYSADDR(address);
}

static inline void *phys_to_virt(unsigned long address)
{
	return (void *)P1SEGADDR(address);
}
#else
#define phys_to_virt(address)	((void *)(address))
#define virt_to_phys(address)	((unsigned long)(address))
#endif

#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
#define page_to_bus page_to_phys

/*
 * readX/writeX() are used to access memory mapped devices. On some
 * architectures the memory mapped IO stuff needs to be accessed
 * differently. On the x86 architecture, we just read/write the
 * memory location directly.
 *
 * On SH, we traditionally have the whole physical address space mapped
 * at all times (as MIPS does), so "ioremap()" and "iounmap()" do not
 * need to do anything but place the address in the proper segment. This
 * is true for P1 and P2 addresses, as well as some P3 ones. However,
 * most of the P3 addresses and newer cores using extended addressing
 * need to map through page tables, so the ioremap() implementation
 * becomes a bit more complicated. See arch/sh/mm/ioremap.c for
 * additional notes on this.
 *
 * We cheat a bit and always return uncachable areas until we've fixed
 * the drivers to handle caching properly.
 */
#ifdef CONFIG_MMU
void __iomem *__ioremap(unsigned long offset, unsigned long size,
			unsigned long flags);
void __iounmap(void __iomem *addr);
#else
#define __ioremap(offset, size, flags)	((void __iomem *)(offset))
#define __iounmap(addr)			do { } while (0)
#endif /* CONFIG_MMU */

static inline void __iomem *
__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
{
	unsigned long last_addr = offset + size - 1;

	/*
	 * For P1 and P2 space this is trivial, as everything is already
	 * mapped. Uncached access for P1 addresses is done through P2.
	 * In the P3 case or for addresses outside of the 29-bit space,
	 * mapping must be done by the PMB or by using page tables.
	 */
	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
		if (unlikely(flags & _PAGE_CACHABLE))
			return (void __iomem *)P1SEGADDR(offset);

		return (void __iomem *)P2SEGADDR(offset);
	}

	return __ioremap(offset, size, flags);
}

#define ioremap(offset, size)		\
	__ioremap_mode((offset), (size), 0)
#define ioremap_nocache(offset, size)	\
	__ioremap_mode((offset), (size), 0)
#define ioremap_cache(offset, size)	\
	__ioremap_mode((offset), (size), _PAGE_CACHABLE)
#define p3_ioremap(offset, size, flags)	\
	__ioremap((offset), (size), (flags))
#define iounmap(addr)			\
	__iounmap((addr))
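
/*
 * Usage sketch (illustrative only, never built; not part of the original
 * header): for a physical address inside the 29-bit space, ioremap()
 * resolves to a simple P1/P2 segment translation with no page table work;
 * only P3/extended addresses fall through to __ioremap(). The physical
 * address, size, and function name below are made up.
 */
#if 0
static void example_probe(void)
{
	void __iomem *regs = ioremap(0x1f000000, 0x100); /* 29-bit, so P2/uncached */
	unsigned int id = readl(regs);

	/* ... use id ... */
	iounmap(regs);	/* effectively a no-op for segment-translated addresses */
}
#endif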

/*
 * The caches on some architectures aren't dma-coherent and need to
 * handle this in software. There are three types of operations that
 * can be applied to dma buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes caches and RAM coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_inv(start, size) invalidates the affected parts of the
 *    caches. Dirty lines of the caches may be written back or simply
 *    be discarded. This operation is necessary before dma operations
 *    to the memory.
 *  - dma_cache_wback(start, size) writes back any dirty lines but does
 *    not invalidate the cache. This can be used before DMA reads from
 *    memory.
 */

#define dma_cache_wback_inv(_start,_size) \
	__flush_purge_region(_start,_size)
#define dma_cache_inv(_start,_size) \
	__flush_invalidate_region(_start,_size)
#define dma_cache_wback(_start,_size) \
	__flush_wback_region(_start,_size)

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* __KERNEL__ */

#endif /* __ASM_SH_IO_H */
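
/*
 * Usage sketch (illustrative only; not part of the original header): ahead
 * of a DMA transfer a driver maintains the buffer's cache lines with the
 * macros above, e.g.
 *
 *	dma_cache_wback(buf, len);	(device will read buf out of RAM)
 *	dma_cache_inv(buf, len);	(device will write into buf)
 *
 * "buf" and "len" are made-up names; most drivers should go through the
 * DMA mapping API rather than calling these directly.
 */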