/*
 * linux/arch/alpha/kernel/core_apecs.c
 *
 * Rewritten for Apecs from the lca.c from:
 *
 * Written by David Mosberger (davidm@cs.arizona.edu) with some code
 * taken from Dave Rusling's (david.rusling@reo.mts.dec.com) 32-bit
 * bios code.
 *
 * Code common to all APECS core logic chips.
 */

#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_apecs.h>
#undef __EXTERN_INLINE

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>

#include <asm/ptrace.h>
#include <asm/smp.h>

#include "proto.h"
#include "pci_impl.h"

/*
 * NOTE: Herein lie back-to-back mb instructions.  They are magic.
 * One plausible explanation is that the i/o controller does not properly
 * handle the system transaction.  Another involves timing.  Ho hum.
 */

/*
 * BIOS32-style PCI interface:
 */

#define DEBUG_CONFIG 0

#if DEBUG_CONFIG
# define DBGC(args)	printk args
#else
# define DBGC(args)
#endif

/* Shorthand cast for volatile 32-bit accesses to chipset CSRs. */
#define vuip	volatile unsigned int *

/*
 * Given a bus, device, and function number, compute resulting
 * configuration space address and setup the APECS_HAXR2 register
 * accordingly.  It is therefore not safe to have concurrent
 * invocations to configuration space access routines, but there
 * really shouldn't be any need for this.
 *
 * Type 0:
 *
 *  3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
 *  3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * | | | | | | | | | | | | | | | | | | | | | | | |F|F|F|R|R|R|R|R|R|0|0|
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 *	31:11	Device select bit.
 * 	10:8	Function number
 * 	 7:2	Register number
 *
 * Type 1:
 *
 *  3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
 *  3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 *	31:24	reserved
 *	23:16	bus number (8 bits = 256 possible buses)
 *	15:11	Device number (5 bits)
 *	10:8	function number
 *	 7:2	register number
 *
 * Notes:
 *	The function number selects which function of a multi-function device
 *	(e.g., SCSI and Ethernet).
 *
 *	The register selects a DWORD (32 bit) register offset.  Hence it
 *	doesn't get shifted by 2 bits as we want to "drop" the bottom two
 *	bits.
 */

/*
 * Translate (bus, devfn, where) into a raw PCI configuration address
 * and report whether a type 0 (local bus) or type 1 (behind a bridge)
 * cycle is needed.  Returns 0 on success, -1 if the device cannot be
 * addressed.  The address is returned through *pci_addr; the cycle
 * type through *type1 (0 or 1).
 */
static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
	     unsigned long *pci_addr, unsigned char *type1)
{
	unsigned long addr;
	u8 bus = pbus->number;

	DBGC(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x,"
	      " pci_addr=0x%p, type1=0x%p)\n",
	      bus, device_fn, where, pci_addr, type1));

	if (bus == 0) {
		int device = device_fn >> 3;

		/* type 0 configuration cycle: */

		/*
		 * Devices above slot 20 are rejected — presumably there
		 * is no IDSEL wiring for them on APECS boards (the
		 * device-select bit would fall outside the addressable
		 * range).  NOTE(review): rationale inferred; confirm
		 * against the 21071/21072 data sheet.
		 */
		if (device > 20) {
			DBGC(("mk_conf_addr: device (%d) > 20, returning -1\n",
			      device));
			return -1;
		}

		*type1 = 0;
		addr = (device_fn << 8) | (where);
	} else {
		/* type 1 configuration cycle: */
		*type1 = 1;
		addr = (bus << 16) | (device_fn << 8) | (where);
	}
	*pci_addr = addr;
	DBGC(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
	return 0;
}

/*
 * Perform one 32-bit read from PCI configuration space at the sparse
 * address ADDR.  TYPE1 selects a type 1 cycle via bit 0 of HAXR2.
 * A master-abort (no device present) is expected and turns into a
 * return value of 0xffffffff; machine checks taken during the access
 * are absorbed rather than propagated.  Runs with local interrupts
 * disabled; not safe against concurrent config-space access (see
 * mk_conf_addr comment above).
 */
static unsigned int
conf_read(unsigned long addr, unsigned char type1)
{
	unsigned long flags;
	unsigned int stat0, value;
	unsigned int haxr2 = 0;

	local_irq_save(flags);	/* avoid getting hit by machine check */

	DBGC(("conf_read(addr=0x%lx, type1=%d)\n", addr, type1));

	/* Reset status register to avoid losing errors.  (DCSR error
	   bits are write-one-to-clear: writing back the value read
	   clears whatever was set.) */
	stat0 = *(vuip)APECS_IOC_DCSR;
	*(vuip)APECS_IOC_DCSR = stat0;
	mb();
	DBGC(("conf_read: APECS DCSR was 0x%x\n", stat0));

	/* If Type1 access, must set HAE #2.  Bit 0 of HAXR2 selects
	   type 1 configuration cycles. */
	if (type1) {
		haxr2 = *(vuip)APECS_IOC_HAXR2;
		mb();
		*(vuip)APECS_IOC_HAXR2 = haxr2 | 1;
		DBGC(("conf_read: TYPE1 access\n"));
	}

	/* Drain outstanding memory transactions, then arm the
	   machine-check handler to silently absorb the expected
	   master-abort on a probe of an empty slot. */
	draina();
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();

	/* Access configuration space. */

	/* Some SRMs step on these registers during a machine check,
	   hence $9-$14 are listed as clobbers of the load. */
	asm volatile("ldl %0,%1; mb; mb" : "=r"(value) : "m"(*(vuip)addr)
		     : "$9", "$10", "$11", "$12", "$13", "$14", "memory");

	if (mcheck_taken(0)) {
		/* The access machine-checked: no device there. */
		mcheck_taken(0) = 0;
		value = 0xffffffffU;
		mb();
	}
	mcheck_expected(0) = 0;
	mb();

	/*
	 * david.rusling@reo.mts.dec.com.  This code is needed for the
	 * EB64+ as it does not generate a machine check (why I don't
	 * know).  When we build kernels for one particular platform
	 * then we can make this conditional on the type.
	 */
	draina();

	/* Now look for any errors. */
	stat0 = *(vuip)APECS_IOC_DCSR;
	DBGC(("conf_read: APECS DCSR after read 0x%x\n", stat0));

	/* Is any error bit set?  (DCSR bits 15:5 are the error
	   summary; bit 11 = NDEV, i.e. no device responded.) */
	if (stat0 & 0xffe0U) {
		/* If not NDEV, print status. */
		if (!(stat0 & 0x0800)) {
			printk("apecs.c:conf_read: got stat0=%x\n", stat0);
		}

		/* Reset error status. */
		*(vuip)APECS_IOC_DCSR = stat0;
		mb();
		wrmces(0x7);			/* reset machine check */
		value = 0xffffffff;
	}

	/* If Type1 access, must reset HAE #2 so normal IO space ops work. */
	if (type1) {
		*(vuip)APECS_IOC_HAXR2 = haxr2 & ~1;
		mb();
	}
	local_irq_restore(flags);

	return value;
}

/*
 * Perform one write to PCI configuration space at the sparse address
 * ADDR.  Mirrors conf_read: same DCSR reset/check, same HAXR2 type 1
 * handling, same expected-machine-check dance.  Runs with local
 * interrupts disabled.
 */
static void
conf_write(unsigned long addr, unsigned int value, unsigned char type1)
{
	unsigned long flags;
	unsigned int stat0;
	unsigned int haxr2 = 0;

	local_irq_save(flags);	/* avoid getting hit by machine check */

	/* Reset status register to avoid losing errors. */
	stat0 = *(vuip)APECS_IOC_DCSR;
	*(vuip)APECS_IOC_DCSR = stat0;
	mb();

	/* If Type1 access, must set HAE #2. */
	if (type1) {
		haxr2 = *(vuip)APECS_IOC_HAXR2;
		mb();
		*(vuip)APECS_IOC_HAXR2 = haxr2 | 1;
	}

	draina();
	mcheck_expected(0) = 1;
	mb();

	/* Access configuration space. */
	*(vuip)addr = value;
	mb();
	mb();  /* magic */
	mcheck_expected(0) = 0;
	mb();

	/*
	 * david.rusling@reo.mts.dec.com.  This code is needed for the
	 * EB64+ as it does not generate a machine check (why I don't
	 * know).  When we build kernels for one particular platform
	 * then we can make this conditional on the type.
	 */
	draina();

	/* Now look for any errors. */
	stat0 = *(vuip)APECS_IOC_DCSR;

	/* Is any error bit set? */
	if (stat0 & 0xffe0U) {
		/* If not NDEV, print status. */
		if (!(stat0 & 0x0800)) {
			printk("apecs.c:conf_write: got stat0=%x\n", stat0);
		}

		/* Reset error status. */
		*(vuip)APECS_IOC_DCSR = stat0;
		mb();
		wrmces(0x7);			/* reset machine check */
	}

	/* If Type1 access, must reset HAE #2 so normal IO space ops work. */
	if (type1) {
		*(vuip)APECS_IOC_HAXR2 = haxr2 & ~1;
		mb();
	}
	local_irq_restore(flags);
}

/*
 * pci_ops read hook: read SIZE (1, 2 or 4) bytes at WHERE in the
 * config space of devfn on BUS, returning the value through *VALUE.
 *
 * Sparse-space encoding: the PCI address is shifted left 5 bits, the
 * low address bits (mask = (size-1)*8, i.e. 0x00/0x08/0x18) encode
 * the transfer length, and the byte of interest is extracted by
 * shifting by the byte lane ((where & 3) * 8).
 */
static int
apecs_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		  int size, u32 *value)
{
	unsigned long addr, pci_addr;
	unsigned char type1;
	long mask;
	int shift;

	if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	mask = (size - 1) * 8;
	shift = (where & 3) * 8;
	addr = (pci_addr << 5) + mask + APECS_CONF;
	*value = conf_read(addr, type1) >> (shift);
	return PCIBIOS_SUCCESSFUL;
}

/*
 * pci_ops write hook: write SIZE (1, 2 or 4) bytes of VALUE at WHERE
 * in the config space of devfn on BUS.  Same sparse-space address
 * encoding as apecs_read_config; the value is pre-shifted into the
 * correct byte lane.
 */
static int
apecs_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		   int size, u32 value)
{
	unsigned long addr, pci_addr;
	unsigned char type1;
	long mask;

	if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	mask = (size - 1) * 8;
	addr = (pci_addr << 5) + mask + APECS_CONF;
	conf_write(addr, value << ((where & 3) * 8), type1);
	return PCIBIOS_SUCCESSFUL;
}

/* Config-space accessors handed to the generic PCI layer. */
struct pci_ops apecs_pci_ops =
{
	.read =		apecs_read_config,
	.write =	apecs_write_config,
};

/*
 * Invalidate the scatter-gather TLB.  APECS has no selective
 * invalidate, so the start/end arguments are ignored and the whole
 * translation buffer is flushed (TBIA = "translation buffer
 * invalidate all").
 */
void
apecs_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	wmb();
	*(vip)APECS_IOC_TBIA = 0;
	mb();
}

/*
 * One-time chipset init: create the single PCI hose, program the two
 * PCI-to-memory DMA windows, and clear HAXR2 so config cycles start
 * from a known state.
 */
void __init
apecs_init_arch(void)
{
	struct pci_controller *hose;

	/*
	 * Create our single hose.
	 */

	pci_isa_hose = hose = alloc_pci_controller();
	hose->io_space = &ioport_resource;
	hose->mem_space = &iomem_resource;
	hose->index = 0;

	hose->sparse_mem_base = APECS_SPARSE_MEM - IDENT_ADDR;
	hose->dense_mem_base = APECS_DENSE_MEM - IDENT_ADDR;
	hose->sparse_io_base = APECS_IO - IDENT_ADDR;
	hose->dense_io_base = 0;

	/*
	 * Set up the PCI to main memory translation windows.
	 *
	 * Window 1 is direct access 1GB at 1GB
	 * Window 2 is scatter-gather 8MB at 8MB (for isa)
	 */
	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
	hose->sg_pci = NULL;
	__direct_map_base = 0x40000000;
	__direct_map_size = 0x40000000;

	/* Window 1: base/mask/translated-base registers.  0x00080000
	   in PB1R presumably enables the window; NOTE(review) confirm
	   bit meanings against the 21071/21072 register spec. */
	*(vuip)APECS_IOC_PB1R = __direct_map_base | 0x00080000;
	*(vuip)APECS_IOC_PM1R = (__direct_map_size - 1) & 0xfff00000U;
	*(vuip)APECS_IOC_TB1R = 0;

	/* Window 2: scatter-gather window backed by the ISA arena's
	   page tables. */
	*(vuip)APECS_IOC_PB2R = hose->sg_isa->dma_base | 0x000c0000;
	*(vuip)APECS_IOC_PM2R = (hose->sg_isa->size - 1) & 0xfff00000;
	*(vuip)APECS_IOC_TB2R = virt_to_phys(hose->sg_isa->ptes) >> 1;

	apecs_pci_tbi(hose, 0, -1);

	/*
	 * Finally, clear the HAXR2 register, which gets used
	 * for PCI Config Space accesses. That is the way
	 * we want to use it, and we do not want to depend on
	 * what ARC or SRM might have left behind...
	 */
	*(vuip)APECS_IOC_HAXR2 = 0;
	mb();
}

/*
 * Clear any pending chipset error state: read SEAR to unlatch the
 * error address, write-one-to-clear the DCSR error bits, and flush
 * the translation buffer.  Called from the machine-check handler.
 */
void
apecs_pci_clr_err(void)
{
	unsigned int jd;

	jd = *(vuip)APECS_IOC_DCSR;
	if (jd & 0xffe0L) {
		/* Read SEAR (system error address) to unlatch it;
		   value itself is discarded. */
		*(vuip)APECS_IOC_SEAR;
		*(vuip)APECS_IOC_DCSR = jd | 0xffe1L;
		mb();
		*(vuip)APECS_IOC_DCSR;
	}
	/* The written value appears irrelevant for TBIA; the write
	   itself triggers the invalidate.  Read back to flush. */
	*(vuip)APECS_IOC_TBIA = (unsigned int)APECS_IOC_TBIA;
	mb();
	*(vuip)APECS_IOC_TBIA;
}

/*
 * Machine-check handler.  LA_PTR points at the logout frame built by
 * PALcode; the proc/sys areas are located via the offsets in its
 * common header.  Errors are cleared first, then reporting is
 * delegated to process_mcheck_info(), which suppresses expected
 * checks (config-space probes) unless the EPIC DCSR shows a real
 * error (bits 0x0c00).
 */
void
apecs_machine_check(unsigned long vector, unsigned long la_ptr)
{
	struct el_common *mchk_header;
	struct el_apecs_procdata *mchk_procdata;
	struct el_apecs_sysdata_mcheck *mchk_sysdata;

	mchk_header = (struct el_common *)la_ptr;

	/* proc_offset points past the paltemp save area; back up so
	   the struct lines up with it. */
	mchk_procdata = (struct el_apecs_procdata *)
		(la_ptr + mchk_header->proc_offset
		 - sizeof(mchk_procdata->paltemp));

	mchk_sysdata = (struct el_apecs_sysdata_mcheck *)
		(la_ptr + mchk_header->sys_offset);


	/* Clear the error before any reporting. */
	mb();
	mb();  /* magic */
	draina();
	apecs_pci_clr_err();
	wrmces(0x7);		/* reset machine check pending flag */
	mb();

	process_mcheck_info(vector, la_ptr, "APECS",
			    (mcheck_expected(0)
			     && (mchk_sysdata->epic_dcsr & 0x0c00UL)));
}