/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines for PCI DMA mapping.  See Documentation/DMA-API.txt for
 * a description of how these routines should be used.
 */

#include <linux/module.h>
#include <asm/dma.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>

#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))

/**
 * sn_dma_supported - test a DMA mask
 * @dev: device to test
 * @mask: DMA mask to test
 *
 * Return whether the given PCI device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.  Of course, SN only supports devices that have 32 or more
 * address bits when using the PMU.
 */
int sn_dma_supported(struct device *dev, u64 mask)
{
        BUG_ON(dev->bus != &pci_bus_type);

        if (mask < 0x7fffffff)
                return 0;
        return 1;
}
EXPORT_SYMBOL(sn_dma_supported);

/**
 * sn_dma_set_mask - set the DMA mask
 * @dev: device to set
 * @dma_mask: new mask
 *
 * Set @dev's DMA mask if the hw supports it.
 */
int sn_dma_set_mask(struct device *dev, u64 dma_mask)
{
        BUG_ON(dev->bus != &pci_bus_type);

        if (!sn_dma_supported(dev, dma_mask))
                return 0;

        *dev->dma_mask = dma_mask;
        return 1;
}
EXPORT_SYMBOL(sn_dma_set_mask);
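/*
 * Usage sketch (illustrative, not part of the original file): drivers
 * reach the two routines above through the generic DMA API rather than
 * calling them directly.  In a hypothetical PCI probe routine the usual
 * pattern would be:
 *
 *      if (dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *              return -EIO;    (masks below 0x7fffffff are rejected)
 *
 * "pdev" and the error value are assumptions made for illustration.
 */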
/**
 * sn_dma_alloc_coherent - allocate memory for coherent DMA
 * @dev: device to allocate for
 * @size: size of the region
 * @dma_handle: DMA (bus) address
 * @flags: memory allocation flags
 *
 * dma_alloc_coherent() returns a pointer to a memory region suitable for
 * coherent DMA traffic to/from a PCI device.  On SN platforms, this means
 * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
 *
 * This interface is usually used for "command" streams (e.g. the command
 * queue for a SCSI controller).  See Documentation/DMA-API.txt for
 * more information.
 */
void *sn_dma_alloc_coherent(struct device *dev, size_t size,
                            dma_addr_t *dma_handle, gfp_t flags)
{
        void *cpuaddr;
        unsigned long phys_addr;
        int node;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

        BUG_ON(dev->bus != &pci_bus_type);

        /*
         * Allocate the memory, preferably on the node the bus hangs off.
         */
        node = pcibus_to_node(pdev->bus);
        if (likely(node >= 0)) {
                struct page *p = alloc_pages_node(node, flags, get_order(size));

                if (likely(p))
                        cpuaddr = page_address(p);
                else
                        return NULL;
        } else
                cpuaddr = (void *)__get_free_pages(flags, get_order(size));

        if (unlikely(!cpuaddr))
                return NULL;

        memset(cpuaddr, 0x0, size);

        /* physical addr. of the memory we just got */
        phys_addr = __pa(cpuaddr);

        /*
         * 64 bit address translations should never fail.
         * 32 bit translations can fail if there are insufficient mapping
         * resources.
         */

        *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
                                                   SN_DMA_ADDR_PHYS);
        if (!*dma_handle) {
                printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
                free_pages((unsigned long)cpuaddr, get_order(size));
                return NULL;
        }

        return cpuaddr;
}
EXPORT_SYMBOL(sn_dma_alloc_coherent);

/**
 * sn_dma_free_coherent - free memory associated with coherent DMAable region
 * @dev: device to free for
 * @size: size to free
 * @cpu_addr: kernel virtual address to free
 * @dma_handle: DMA address associated with this region
 *
 * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
 * any associated IOMMU mappings.
 */
void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
                          dma_addr_t dma_handle)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

        BUG_ON(dev->bus != &pci_bus_type);

        provider->dma_unmap(pdev, dma_handle, 0);
        free_pages((unsigned long)cpu_addr, get_order(size));
}
EXPORT_SYMBOL(sn_dma_free_coherent);
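/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * allocates and releases a coherent region through the generic DMA API,
 * which dispatches to the two routines above on SN.  The buffer size and
 * variable names are assumptions:
 *
 *      dma_addr_t ring_dma;
 *      void *ring = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
 *                                      &ring_dma, GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(&pdev->dev, PAGE_SIZE, ring, ring_dma);
 */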
/**
 * sn_dma_map_single - map a single page for DMA
 * @dev: device to map for
 * @cpu_addr: kernel virtual address of the region to map
 * @size: size of the region
 * @direction: DMA direction
 *
 * Map the region pointed to by @cpu_addr for DMA and return the
 * DMA address.
 *
 * We map this to the one step pcibr_dmamap_trans interface rather than
 * the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
 * no way of saving the dmamap handle from the alloc to later free
 * (which is pretty much unacceptable).
 *
 * TODO: simplify our interface;
 *       figure out how to save the dmamap handle so we can use the
 *       two step interface.
 */
dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
                             int direction)
{
        dma_addr_t dma_addr;
        unsigned long phys_addr;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

        BUG_ON(dev->bus != &pci_bus_type);

        phys_addr = __pa(cpu_addr);
        dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS);
        if (!dma_addr) {
                printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
                return 0;
        }
        return dma_addr;
}
EXPORT_SYMBOL(sn_dma_map_single);

/**
 * sn_dma_unmap_single - unmap a DMA mapped page
 * @dev: device to unmap for
 * @dma_addr: DMA address to unmap
 * @size: size of region
 * @direction: DMA direction
 *
 * This routine unmaps the region specified by @dma_addr.  On SN, memory
 * is always cache coherent, so there is nothing to sync; we just need to
 * free any ATEs associated with this mapping.
 */
void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                         int direction)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

        BUG_ON(dev->bus != &pci_bus_type);

        provider->dma_unmap(pdev, dma_addr, direction);
}
EXPORT_SYMBOL(sn_dma_unmap_single);

/**
 * sn_dma_unmap_sg - unmap a DMA scatterlist
 * @dev: device to unmap
 * @sg: scatterlist to unmap
 * @nhwentries: number of scatterlist entries
 * @direction: DMA direction
 *
 * Unmap a set of streaming mode DMA translations.
 */
void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                     int nhwentries, int direction)
{
        int i;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

        BUG_ON(dev->bus != &pci_bus_type);

        for (i = 0; i < nhwentries; i++, sg++) {
                provider->dma_unmap(pdev, sg->dma_address, direction);
                sg->dma_address = (dma_addr_t) NULL;
                sg->dma_length = 0;
        }
}
EXPORT_SYMBOL(sn_dma_unmap_sg);

/**
 * sn_dma_map_sg - map a scatterlist for DMA
 * @dev: device to map for
 * @sg: scatterlist to map
 * @nhwentries: number of entries
 * @direction: direction of the DMA transaction
 *
 * Maps each entry of @sg for DMA.
 */
int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
                  int direction)
{
        unsigned long phys_addr;
        struct scatterlist *saved_sg = sg;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
        int i;

        BUG_ON(dev->bus != &pci_bus_type);

        /*
         * Setup a DMA address for each entry in the scatterlist.
         */
        for (i = 0; i < nhwentries; i++, sg++) {
                phys_addr = SG_ENT_PHYS_ADDRESS(sg);
                sg->dma_address = provider->dma_map(pdev,
                                                    phys_addr, sg->length,
                                                    SN_DMA_ADDR_PHYS);

                if (!sg->dma_address) {
                        printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);

                        /*
                         * Free any successfully allocated entries.
                         */
                        if (i > 0)
                                sn_dma_unmap_sg(dev, saved_sg, i, direction);
                        return 0;
                }

                sg->dma_length = sg->length;
        }

        return nhwentries;
}
EXPORT_SYMBOL(sn_dma_map_sg);

/*
 * SN is fully cache coherent, so the sync operations are no-ops.
 */
void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                size_t size, int direction)
{
        BUG_ON(dev->bus != &pci_bus_type);
}
EXPORT_SYMBOL(sn_dma_sync_single_for_cpu);

void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
                                   size_t size, int direction)
{
        BUG_ON(dev->bus != &pci_bus_type);
}
EXPORT_SYMBOL(sn_dma_sync_single_for_device);

void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                            int nelems, int direction)
{
        BUG_ON(dev->bus != &pci_bus_type);
}
EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu);

void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                               int nelems, int direction)
{
        BUG_ON(dev->bus != &pci_bus_type);
}
EXPORT_SYMBOL(sn_dma_sync_sg_for_device);

/*
 * Mapping failures are reported by a zero DMA address at map time, so
 * there is no deferred error to report here.
 */
int sn_dma_mapping_error(dma_addr_t dma_addr)
{
        return 0;
}
EXPORT_SYMBOL(sn_dma_mapping_error);
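/*
 * Usage sketch (illustrative, not part of the original file): streaming
 * mappings reach sn_dma_map_sg()/sn_dma_unmap_sg() through the generic
 * scatterlist DMA API.  "sglist" and "nents" are assumptions:
 *
 *      int n = dma_map_sg(&pdev->dev, sglist, nents, DMA_TO_DEVICE);
 *      if (n == 0)
 *              return -EIO;    (sn_dma_map_sg ran out of ATEs)
 *      ...
 *      dma_unmap_sg(&pdev->dev, sglist, nents, DMA_TO_DEVICE);
 */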
/* Return an uncached mapping of the legacy memory space of @bus. */
char *sn_pci_get_legacy_mem(struct pci_bus *bus)
{
        if (!SN_PCIBUS_BUSSOFT(bus))
                return ERR_PTR(-ENODEV);

        return (char *)(SN_PCIBUS_BUSSOFT(bus)->bs_legacy_mem |
                        __IA64_UNCACHED_OFFSET);
}

/* Read @size bytes from legacy I/O port @port on @bus into @val. */
int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
        unsigned long addr;
        int ret;
        struct ia64_sal_retval isrv;

        /*
         * First, try the SN_SAL_IOIF_PCI_SAFE SAL call, which can work
         * around hw issues at the PCI bus level.  SGI proms older than
         * 4.10 don't implement this.
         */

        SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
                 pci_domain_nr(bus), bus->number,
                 0, /* io */
                 0, /* read */
                 port, size, __pa(val));

        if (isrv.status == 0)
                return size;

        /*
         * If the above failed, retry using the SAL_PROBE call, which should
         * be present in all proms (but which cannot work around PCI chipset
         * bugs).  This code is retained for compatibility with old
         * pre-4.10 proms and should be removed at some point in the future.
         */

        if (!SN_PCIBUS_BUSSOFT(bus))
                return -ENODEV;

        addr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
        addr += port;

        ret = ia64_sn_probe_mem(addr, (long)size, (void *)val);

        if (ret == 2)
                return -EINVAL;

        if (ret == 1)
                *val = -1;

        return size;
}

/* Write the low @size bytes of @val to legacy I/O port @port on @bus. */
int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
        int ret = size;
        unsigned long paddr;
        unsigned long *addr;
        struct ia64_sal_retval isrv;

        /*
         * First, try the SN_SAL_IOIF_PCI_SAFE SAL call, which can work
         * around hw issues at the PCI bus level.  SGI proms older than
         * 4.10 don't implement this.
         */

        SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
                 pci_domain_nr(bus), bus->number,
                 0, /* io */
                 1, /* write */
                 port, size, __pa(&val));

        if (isrv.status == 0)
                return size;

        /*
         * If the above failed, retry using the SAL_PROBE call, which should
         * be present in all proms (but which cannot work around PCI chipset
         * bugs).  This code is retained for compatibility with old
         * pre-4.10 proms and should be removed at some point in the future.
         */

        if (!SN_PCIBUS_BUSSOFT(bus)) {
                ret = -ENODEV;
                goto out;
        }

        /* Put the phys addr in uncached space */
        paddr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
        paddr += port;
        addr = (unsigned long *)paddr;

        switch (size) {
        case 1:
                *(volatile u8 *)(addr) = (u8)(val);
                break;
        case 2:
                *(volatile u16 *)(addr) = (u16)(val);
                break;
        case 4:
                *(volatile u32 *)(addr) = (u32)(val);
                break;
        default:
                ret = -EINVAL;
                break;
        }
out:
        return ret;
}
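/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * in the platform legacy I/O path would read one byte from a port on a
 * given bus as follows; the port number and consumer are assumptions:
 *
 *      u32 v;
 *      if (sn_pci_legacy_read(bus, 0x3cc, &v, 1) == 1)
 *              handle((u8)v);          (hypothetical consumer)
 */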