Source directory: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/arch/powerpc/kernel/

Lines Matching refs:viodev

All matches below are uses of the variable or parameter viodev (a struct vio_dev *) in arch/powerpc/kernel/vio.c, the PowerPC pSeries virtual I/O (VIO) bus driver; most of the hits fall in its Cooperative Memory Overcommitment (CMO) entitlement accounting.

76 	struct vio_dev *viodev;
140 * @viodev: VIO device requesting IO memory
151 static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
161 if (viodev->cmo.entitled > viodev->cmo.allocated)
162 reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;
173 viodev->cmo.allocated += size;
185 * @viodev: VIO device freeing IO memory
195 static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
208 if (viodev->cmo.allocated > viodev->cmo.entitled) {
209 excess_freed = min(reserve_freed, (viodev->cmo.allocated -
210 viodev->cmo.entitled));
215 viodev->cmo.allocated -= (reserve_freed + excess_freed);
242 (viodev->cmo.entitled -
246 viodev->cmo.entitled -= tmp;
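
Lines 76-246 above are from vio_cmo_alloc() and vio_cmo_dealloc(), which charge and refund a device's CMO entitlement: an allocation spends the device's unused entitlement first and spills the remainder into a shared excess pool, while a free returns the spilled part to that pool. Below is a minimal user-space model of that accounting, with locking, the spare-pool replenishment (lines 242-246) and the global counters omitted; struct cmo_dev and the pool layout are simplifications, not the kernel's types.

    #include <stddef.h>

    /* Simplified per-device and pool state; field names follow the
     * fragments above, the pool layout is an assumption. */
    struct cmo_dev  { size_t entitled, allocated; };
    struct cmo_pool { size_t size, free; };

    static struct cmo_pool excess;              /* shared overflow pool */

    /* Charge 'size' bytes: spend the device's unused entitlement first,
     * spilling anything beyond it into the excess pool. */
    static int cmo_alloc(struct cmo_dev *dev, size_t size)
    {
        size_t reserve_free = 0;

        if (dev->entitled > dev->allocated)
            reserve_free = dev->entitled - dev->allocated;
        if (reserve_free + excess.free < size)
            return -1;                          /* cannot be satisfied */

        dev->allocated += size;
        if (size > reserve_free)                /* spilled past the reserve */
            excess.free -= size - reserve_free;
        return 0;
    }

    /* Refund 'size' bytes: the part the device had taken beyond its
     * entitlement goes back to the excess pool, the rest to its reserve. */
    static void cmo_dealloc(struct cmo_dev *dev, size_t size)
    {
        size_t excess_freed = 0;

        if (dev->allocated > dev->entitled) {
            excess_freed = dev->allocated - dev->entitled;
            if (excess_freed > size)
                excess_freed = size;
        }
        dev->allocated -= size;
        excess.free += excess_freed;
    }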
288 struct vio_dev *viodev;
327 viodev = dev_ent->viodev;
328 if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
329 (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
330 avail += viodev->cmo.entitled -
331 max_t(size_t, viodev->cmo.allocated,
352 viodev = dev_ent->viodev;
354 if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
355 (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
356 tmp = viodev->cmo.entitled -
357 max_t(size_t, viodev->cmo.allocated,
359 viodev->cmo.entitled -= min(tmp, delta);
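
Lines 288-359 are from vio_cmo_entitlement_update(), which copes with the platform shrinking the bus's total entitlement: a first pass sums the slack each device holds above both its current allocation and the VIO_CMO_MIN_ENT floor, and a second pass strips that slack until the shortfall is covered. A compact model of the two passes follows; the array stands in for the kernel's device list and the floor value is illustrative (vio.c defines its own).

    #include <stddef.h>

    #define VIO_CMO_MIN_ENT (64 * 1024)     /* illustrative floor */

    struct cmo_dev { size_t entitled, allocated; };  /* as in the first sketch */

    static size_t max_sz(size_t a, size_t b) { return a > b ? a : b; }

    /* Reclaim 'delta' bytes of entitlement; returns the uncovered part. */
    static size_t cmo_reclaim(struct cmo_dev *devs, int n, size_t delta)
    {
        size_t avail = 0, tmp;
        int i;

        /* Pass 1: how much slack sits above allocation and the floor? */
        for (i = 0; i < n; i++)
            if (devs[i].entitled > devs[i].allocated &&
                devs[i].entitled > VIO_CMO_MIN_ENT)
                avail += devs[i].entitled -
                         max_sz(devs[i].allocated, VIO_CMO_MIN_ENT);

        if (avail < delta)
            return delta - avail;           /* shortfall cannot be covered */

        /* Pass 2: strip the slack device by device until covered. */
        for (i = 0; i < n && delta; i++) {
            if (devs[i].entitled <= devs[i].allocated ||
                devs[i].entitled <= VIO_CMO_MIN_ENT)
                continue;
            tmp = devs[i].entitled -
                  max_sz(devs[i].allocated, VIO_CMO_MIN_ENT);
            if (tmp > delta)
                tmp = delta;
            devs[i].entitled -= tmp;
            delta -= tmp;
        }
        return 0;
    }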
397 struct vio_dev *viodev;
420 viodev = dev_ent->viodev;
422 viodev->cmo.entitled = VIO_CMO_MIN_ENT;
423 cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
424 avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
436 viodev = dev_ent->viodev;
438 if (viodev->cmo.desired <= level) {
449 chunk = min(chunk, (viodev->cmo.desired -
450 viodev->cmo.entitled));
451 viodev->cmo.entitled += chunk;
458 need = max(viodev->cmo.allocated, viodev->cmo.entitled)-
459 max(viodev->cmo.allocated, level);
474 viodev = dev_ent->viodev;
476 if (viodev->cmo.entitled)
477 cmo->reserve.size += (viodev->cmo.entitled -
480 if (viodev->cmo.allocated > viodev->cmo.entitled)
481 need += viodev->cmo.allocated - viodev->cmo.entitled;
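
Lines 397-481 are from vio_cmo_balance(), which redistributes entitlement across the bus: every device is reset to the VIO_CMO_MIN_ENT floor, and the remaining entitlement is then handed out round-robin in fixed-size chunks, raising a common level until each device reaches its desired value or the pool runs dry (the final fragments, lines 474-481, recompute the reserve and excess pool sizes afterwards). The condensed model below keeps only the leveling loop; the chunk size is illustrative and the allocated-above-entitlement adjustments (lines 458-459) are left out.

    #include <stddef.h>

    #define VIO_CMO_MIN_ENT       (64 * 1024)    /* illustrative floor */
    #define VIO_CMO_BALANCE_CHUNK (128 * 1024)   /* illustrative chunk */

    struct cmo_dev { size_t entitled, allocated, desired; };

    static void cmo_balance(struct cmo_dev *devs, int n, size_t avail)
    {
        size_t level = VIO_CMO_MIN_ENT;
        int i, fulfilled;

        /* Start every device at the floor. */
        for (i = 0; i < n; i++)
            devs[i].entitled = VIO_CMO_MIN_ENT;

        /* Raise the water level one chunk per pass while entitlement
         * and unfulfilled desire remain. */
        while (avail) {
            fulfilled = 0;
            for (i = 0; i < n; i++) {
                struct cmo_dev *d = &devs[i];
                size_t chunk;

                if (d->desired <= level) {       /* already satisfied */
                    fulfilled++;
                    continue;
                }
                chunk = VIO_CMO_BALANCE_CHUNK;
                if (chunk > avail)
                    chunk = avail;
                if (chunk > d->desired - d->entitled)
                    chunk = d->desired - d->entitled;
                d->entitled += chunk;
                avail -= chunk;
                if (!avail)
                    break;
            }
            if (fulfilled == n)
                break;
            level += VIO_CMO_BALANCE_CHUNK;
        }
    }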
493 struct vio_dev *viodev = to_vio_dev(dev);
496 if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
497 atomic_inc(&viodev->cmo.allocs_failed);
503 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
504 atomic_inc(&viodev->cmo.allocs_failed);
513 struct vio_dev *viodev = to_vio_dev(dev);
517 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
525 struct vio_dev *viodev = to_vio_dev(dev);
528 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
529 atomic_inc(&viodev->cmo.allocs_failed);
535 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
536 atomic_inc(&viodev->cmo.allocs_failed);
547 struct vio_dev *viodev = to_vio_dev(dev);
551 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
558 struct vio_dev *viodev = to_vio_dev(dev);
566 if (vio_cmo_alloc(viodev, alloc_size)) {
567 atomic_inc(&viodev->cmo.allocs_failed);
574 vio_cmo_dealloc(viodev, alloc_size);
575 atomic_inc(&viodev->cmo.allocs_failed);
582 vio_cmo_dealloc(viodev, alloc_size);
592 struct vio_dev *viodev = to_vio_dev(dev);
602 vio_cmo_dealloc(viodev, alloc_size);
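
Lines 493-602 come from the CMO-aware DMA callbacks (vio_dma_iommu_alloc_coherent/free_coherent, map_page, map_sg and their unmap counterparts). They all share one pattern: round the size up (PAGE_SIZE for coherent allocations, IOMMU_PAGE_SIZE for mappings), charge it against the device's entitlement, delegate to the underlying dma_iommu_ops, and on failure refund the charge and bump the allocs_failed counter. A sketch of that pattern, reusing cmo_alloc()/cmo_dealloc() from the first sketch; real_map() is a hypothetical stand-in for the delegated operation.

    #include <stddef.h>

    #define IOMMU_PAGE_SIZE 4096U
    #define ROUND_UP(x, a)  ((((x) + (a) - 1) / (a)) * (a))

    /* First sketch's struct, extended with the failure counter. */
    struct cmo_dev { size_t entitled, allocated; int allocs_failed; };

    extern int   cmo_alloc(struct cmo_dev *dev, size_t size);
    extern void  cmo_dealloc(struct cmo_dev *dev, size_t size);
    extern void *real_map(void *ptr, size_t size);   /* hypothetical delegate */

    /* Charge first, map second, refund on failure. */
    static void *cmo_map(struct cmo_dev *dev, void *ptr, size_t size)
    {
        void *ret;

        if (cmo_alloc(dev, ROUND_UP(size, IOMMU_PAGE_SIZE))) {
            dev->allocs_failed++;            /* entitlement exhausted */
            return NULL;
        }
        ret = real_map(ptr, size);
        if (!ret) {
            cmo_dealloc(dev, ROUND_UP(size, IOMMU_PAGE_SIZE));
            dev->allocs_failed++;            /* mapping itself failed */
        }
        return ret;
    }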
618 * @viodev: struct vio_dev for device to alter
625 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
644 if (viodev == dev_ent->viodev) {
654 if (desired >= viodev->cmo.desired) {
656 vio_cmo.desired += desired - viodev->cmo.desired;
657 viodev->cmo.desired = desired;
660 vio_cmo.desired -= viodev->cmo.desired - desired;
661 viodev->cmo.desired = desired;
666 if (viodev->cmo.entitled > desired) {
667 vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
668 vio_cmo.excess.size += viodev->cmo.entitled - desired;
674 if (viodev->cmo.allocated < viodev->cmo.entitled)
675 vio_cmo.excess.free += viodev->cmo.entitled -
676 max(viodev->cmo.allocated, desired);
677 viodev->cmo.entitled = desired;
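
Lines 618-677 are vio_cmo_set_dev_desired(), the runtime retuning hook (driven from sysfs, see lines 945-953 below): the bus-wide desired total is adjusted by the difference, and if the device currently holds more entitlement than the new target, the surplus moves from the reserve pool to the excess pool, counting any unused part as immediately free. A simplified model, with the globals standing in for the kernel's vio_cmo state:

    #include <stddef.h>

    #define VIO_CMO_MIN_ENT (64 * 1024)     /* illustrative floor */

    struct cmo_dev { size_t entitled, allocated, desired; };

    static size_t total_desired, reserve_size, excess_size, excess_free;

    static size_t max_sz(size_t a, size_t b) { return a > b ? a : b; }

    static void cmo_set_desired(struct cmo_dev *dev, size_t desired)
    {
        if (desired < VIO_CMO_MIN_ENT)
            desired = VIO_CMO_MIN_ENT;      /* never drop below the floor */

        /* Track the bus-wide desired total. */
        if (desired >= dev->desired)
            total_desired += desired - dev->desired;
        else
            total_desired -= dev->desired - desired;
        dev->desired = desired;

        /* Surplus entitlement above the new target changes pools. */
        if (dev->entitled > desired) {
            reserve_size -= dev->entitled - desired;
            excess_size  += dev->entitled - desired;
            if (dev->allocated < dev->entitled)
                excess_free += dev->entitled -
                               max_sz(dev->allocated, desired);
            dev->entitled = desired;
        }
    }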
687 * @viodev - Pointer to struct vio_dev for device
698 static int vio_cmo_bus_probe(struct vio_dev *viodev)
701 struct device *dev = &viodev->dev;
710 if (of_get_property(viodev->dev.of_node,
719 viodev->cmo.desired = IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev));
720 if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
721 viodev->cmo.desired = VIO_CMO_MIN_ENT;
729 dev_ent->viodev = viodev;
733 viodev->cmo.desired = 0;
748 vio_cmo.desired += (viodev->cmo.desired -
774 vio_cmo.desired += viodev->cmo.desired;
783 * @viodev - Pointer to struct vio_dev for device
790 static void vio_cmo_bus_remove(struct vio_dev *viodev)
797 if (viodev->cmo.allocated) {
798 dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
800 __func__, viodev->cmo.allocated);
809 if (viodev == dev_ent->viodev) {
820 if (viodev->cmo.entitled) {
826 vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);
833 viodev->cmo.entitled -= VIO_CMO_MIN_ENT;
836 if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
837 tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
840 viodev->cmo.entitled -= tmp;
844 vio_cmo.excess.size += viodev->cmo.entitled;
845 vio_cmo.excess.free += viodev->cmo.entitled;
846 vio_cmo.reserve.size -= viodev->cmo.entitled;
853 viodev->cmo.entitled = VIO_CMO_MIN_ENT;
854 viodev->cmo.desired = VIO_CMO_MIN_ENT;
855 atomic_set(&viodev->cmo.allocs_failed, 0);
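
Lines 687-855 cover the CMO side of device lifetime. On probe (687-774), the driver's get_desired_dma() hint is aligned up to the IOMMU page size and floored at VIO_CMO_MIN_ENT, and the device joins the CMO device list. On remove (783-855), a warning is printed if the device still has IO memory outstanding, its entitlement is handed back (topping up the global spare first, crediting the rest to the excess pool), and its fields are reset to the floor. A condensed model of the remove path, using the same simplified globals as the previous sketch:

    #include <stddef.h>

    #define VIO_CMO_MIN_ENT (64 * 1024)     /* illustrative floor */

    struct cmo_dev { size_t entitled, allocated, desired; int allocs_failed; };

    static size_t total_desired, reserve_size, excess_size, excess_free, spare;

    static void cmo_bus_remove(struct cmo_dev *dev)
    {
        size_t tmp;

        if (dev->entitled) {
            /* The bus-wide desired total falls back to the floor. */
            total_desired -= dev->desired - VIO_CMO_MIN_ENT;

            /* The floor portion is accounted for separately. */
            dev->entitled -= VIO_CMO_MIN_ENT;

            /* Top up the global spare from what the device held. */
            if (dev->entitled && spare < VIO_CMO_MIN_ENT) {
                tmp = dev->entitled;
                if (tmp > VIO_CMO_MIN_ENT - spare)
                    tmp = VIO_CMO_MIN_ENT - spare;
                spare += tmp;
                dev->entitled -= tmp;
            }

            /* Whatever remains is credited to the excess pool. */
            excess_size  += dev->entitled;
            excess_free  += dev->entitled;
            reserve_size -= dev->entitled;
        }

        /* Reset so a future probe starts from a clean floor. */
        dev->entitled = VIO_CMO_MIN_ENT;
        dev->desired  = VIO_CMO_MIN_ENT;
        dev->allocs_failed = 0;
    }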
861 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
864 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
930 struct vio_dev *viodev = to_vio_dev(dev);
931 return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
937 struct vio_dev *viodev = to_vio_dev(dev);
938 atomic_set(&viodev->cmo.allocs_failed, 0);
945 struct vio_dev *viodev = to_vio_dev(dev);
953 vio_cmo_set_dev_desired(viodev, new_desired);
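
Lines 930-953 implement the per-device sysfs attributes: reading cmo_allocs_failed reports the atomic failure counter, writing it resets the counter, and writing cmo_desired feeds the parsed value into vio_cmo_set_dev_desired(). A user-space analogue (plain int instead of atomic_t, cmo_set_desired() from the earlier sketch):

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct cmo_dev { size_t entitled, allocated, desired; int allocs_failed; };

    extern void cmo_set_desired(struct cmo_dev *dev, size_t desired);

    static int show_allocs_failed(struct cmo_dev *dev, char *buf)
    {
        return sprintf(buf, "%d\n", dev->allocs_failed);
    }

    static void reset_allocs_failed(struct cmo_dev *dev)
    {
        dev->allocs_failed = 0;
    }

    static void store_desired(struct cmo_dev *dev, const char *buf)
    {
        size_t new_desired = strtoul(buf, NULL, 10);

        cmo_set_desired(dev, new_desired);
    }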
1038 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
1039 static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
1040 static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
1041 static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
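
Lines 1038-1041 are the stubs compiled when CMO support (CONFIG_PPC_SMLPAR) is disabled: each hook collapses to a no-op with the same signature, so the callers in the generic bus code need no #ifdefs. The pattern in miniature:

    struct vio_dev;                          /* opaque here */

    #ifdef CONFIG_PPC_SMLPAR
    int  vio_cmo_bus_probe(struct vio_dev *viodev);
    void vio_cmo_bus_remove(struct vio_dev *viodev);
    #else
    /* No-op stand-ins: call sites stay unconditional. */
    static inline int  vio_cmo_bus_probe(struct vio_dev *viodev)  { return 0; }
    static inline void vio_cmo_bus_remove(struct vio_dev *viodev) { }
    #endif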
1110 struct vio_dev *viodev = to_vio_dev(dev);
1118 id = vio_match_device(viodrv->id_table, viodev);
1120 memset(&viodev->cmo, 0, sizeof(viodev->cmo));
1122 error = vio_cmo_bus_probe(viodev);
1126 error = viodrv->probe(viodev, id);
1128 vio_cmo_bus_remove(viodev);
1137 struct vio_dev *viodev = to_vio_dev(dev);
1149 ret = viodrv->remove(viodev);
1152 vio_cmo_bus_remove(viodev);
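
Lines 1110-1152 are the bus-level probe and remove: probe matches the driver's id_table, zeroes the device's cmo state, reserves entitlement via vio_cmo_bus_probe() before calling the driver's probe, and unwinds the reservation if the driver fails; remove runs in the opposite order. A sketch of that ordering (stand-in types; the kernel versions take a struct device):

    struct vio_dev;

    struct vio_driver_ops {
        int (*probe)(struct vio_dev *viodev);
        int (*remove)(struct vio_dev *viodev);
    };

    extern int  vio_cmo_bus_probe(struct vio_dev *viodev);
    extern void vio_cmo_bus_remove(struct vio_dev *viodev);

    static int bus_probe(const struct vio_driver_ops *drv, struct vio_dev *viodev)
    {
        int error = vio_cmo_bus_probe(viodev);  /* reserve entitlement first */

        if (error)
            return error;
        error = drv->probe(viodev);
        if (error)
            vio_cmo_bus_remove(viodev);         /* unwind on driver failure */
        return error;
    }

    static int bus_remove(const struct vio_driver_ops *drv, struct vio_dev *viodev)
    {
        int ret = drv->remove(viodev);          /* driver teardown first */

        vio_cmo_bus_remove(viodev);             /* then release entitlement */
        return ret;
    }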
1202 struct vio_dev *viodev;
1222 viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
1223 if (viodev == NULL)
1226 viodev->irq = irq_of_parse_and_map(of_node, 0);
1228 dev_set_name(&viodev->dev, "%x", *unit_address);
1229 viodev->name = of_node->name;
1230 viodev->type = of_node->type;
1231 viodev->unit_address = *unit_address;
1236 viodev->unit_address = *unit_address;
1238 viodev->dev.of_node = of_node_get(of_node);
1241 vio_cmo_set_dma_ops(viodev);
1243 viodev->dev.archdata.dma_ops = &dma_iommu_ops;
1244 set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev));
1245 set_dev_node(&viodev->dev, of_node_to_nid(of_node));
1248 viodev->dev.parent = &vio_bus_device.dev;
1249 viodev->dev.bus = &vio_bus_type;
1250 viodev->dev.release = vio_dev_release;
1253 if (device_register(&viodev->dev)) {
1255 __func__, dev_name(&viodev->dev));
1256 kfree(viodev);
1260 return viodev;
1351 void __devinit vio_unregister_device(struct vio_dev *viodev)
1353 device_unregister(&viodev->dev);
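
Lines 1202-1260 are vio_register_device_node(): allocate a zeroed vio_dev, parse the interrupt from the device-tree node, name the device after its unit address (in hex), pick CMO-aware or plain IOMMU DMA ops, hook up parent/bus/release, and register it, freeing the allocation if registration fails; vio_unregister_device() (1351-1353) simply unregisters the struct device. The creation-and-unwind shape, in a minimal user-space analogue (do_register() is hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    struct vio_dev_model {
        char     name[16];
        unsigned unit_address;
    };

    extern int do_register(struct vio_dev_model *viodev);  /* hypothetical */

    static struct vio_dev_model *register_node(unsigned unit_address)
    {
        struct vio_dev_model *viodev = calloc(1, sizeof(*viodev));  /* kzalloc analogue */

        if (!viodev)
            return NULL;

        /* Device name is the unit address rendered in hex, as in vio.c. */
        snprintf(viodev->name, sizeof(viodev->name), "%x", unit_address);
        viodev->unit_address = unit_address;

        if (do_register(viodev)) {
            free(viodev);                   /* undo the allocation on failure */
            return NULL;
        }
        return viodev;
    }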