Searched refs:pdev (Results 1 - 22 of 22) sorted by relevance

/barrelfish-master/lib/devif/backends/net/mlx4/drivers/infiniband/hw/mthca/
mthca_reset.c:77 while ((bridge = pci_get_device(mdev->pdev->vendor,
78 mdev->pdev->device + 2,
81 bridge->subordinate == mdev->pdev->bus) {
95 pci_name(mdev->pdev));
116 if (pci_read_config_dword(mdev->pdev, i * 4, hca_header + i)) {
124 hca_pcix_cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX);
125 hca_pcie_cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_EXP);
159 void __iomem *reset = ioremap(pci_resource_start(mdev->pdev, 0) +
182 if (pci_read_config_dword(bridge ? bridge : mdev->pdev, 0, &v)) {
246 if (pci_write_config_dword(mdev->pdev, hca_pcix_cap
[all...]
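The mthca_reset.c hits above trace the driver's save/reset/restore sequence for PCI config space. A minimal sketch of that idiom, assuming the caller does the actual reset write in between (the real driver also skips reserved dwords and restores the command register last):

    #include <linux/errno.h>
    #include <linux/pci.h>

    /* Sketch: save all 64 config dwords, reset the device, write them back. */
    static int reset_and_restore_config(struct pci_dev *pdev)
    {
            u32 hca_header[64];
            int i;

            for (i = 0; i < 64; ++i)
                    if (pci_read_config_dword(pdev, i * 4, hca_header + i))
                            return -ENODEV;

            /* ... poke the device's reset register via an ioremap()ed BAR ... */

            for (i = 0; i < 64; ++i)
                    if (pci_write_config_dword(pdev, i * 4, hca_header[i]))
                            return -ENODEV;
            return 0;
    }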
mthca_catas.c:71 struct pci_dev *pdev = dev->pdev; local
72 ret = __mthca_restart_one(dev->pdev);
76 pci_name(pdev), ret);
78 struct mthca_dev *d = pci_get_drvdata(pdev);
154 addr = pci_resource_start(dev->pdev, 0) +
155 ((pci_resource_len(dev->pdev, 0) - 1) &
mthca_main.c:141 if (pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX)) {
142 if (pcix_set_mmrbc(mdev->pdev, pcix_get_max_mmrbc(mdev->pdev))) {
150 if (pci_find_capability(mdev->pdev, PCI_CAP_ID_EXP)) {
151 if (pcie_set_readrq(mdev->pdev, 4096)) {
192 if (dev_lim->uar_size > pci_resource_len(mdev->pdev, 2)) {
196 (unsigned long long)pci_resource_len(mdev->pdev, 2));
844 dev->pdev->irq);
937 err = pci_enable_msix(mdev->pdev, entries, ARRAY_SIZE(entries));
980 static int __mthca_init_one(struct pci_dev *pdev, in argument
1161 __mthca_remove_one(struct pci_dev *pdev) argument
1203 __mthca_restart_one(struct pci_dev *pdev) argument
1216 mthca_init_one(struct pci_dev *pdev, const struct pci_device_id *id) argument
1243 mthca_remove_one(struct pci_dev *pdev) argument
[all...]
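The mthca_main.c hits at 141-151 show the PCI-X vs. PCI Express split when tuning read burst sizes. A hedged sketch of that capability-probe pattern (tune_pci() is an illustrative name):

    #include <linux/pci.h>

    /* Sketch: choose the tuning call by which capability the device exposes. */
    static void tune_pci(struct pci_dev *pdev)
    {
            if (pci_find_capability(pdev, PCI_CAP_ID_PCIX)) {
                    if (pcix_set_mmrbc(pdev, pcix_get_max_mmrbc(pdev)))
                            dev_warn(&pdev->dev, "cannot set PCI-X max read byte count\n");
            } else if (pci_find_capability(pdev, PCI_CAP_ID_EXP)) {
                    if (pcie_set_readrq(pdev, 4096))
                            dev_warn(&pdev->dev, "cannot set PCIe read request size\n");
            }
    }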
mthca_uar.c:44 uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
mthca_allocator.c:208 buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
247 dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
290 dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
294 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
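Every mthca_allocator.c hit passes &dev->pdev->dev because the generic DMA API takes a struct device rather than a pci_dev. A minimal alloc/free sketch under that convention (helper names are illustrative):

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    /* Sketch: coherent DMA buffer keyed off the PCI device's embedded
     * struct device, as the mthca buffer allocator does. */
    static void *alloc_coherent_buf(struct pci_dev *pdev, size_t size,
                                    dma_addr_t *dma_handle)
    {
            return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_KERNEL);
    }

    static void free_coherent_buf(struct pci_dev *pdev, size_t size,
                                  void *buf, dma_addr_t dma_handle)
    {
            dma_free_coherent(&pdev->dev, size, buf, dma_handle);
    }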
mthca_memfree.c:68 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
81 dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
173 ret = mthca_alloc_icm_coherent(&dev->pdev->dev,
186 chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
206 chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
498 ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
509 pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
584 ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
593 pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
664 pci_unmap_sg(dev->pdev,
[all...]
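The mthca_memfree.c hits use the older pci_map_sg()/pci_unmap_sg() wrappers (modern code would call dma_map_sg() on &pdev->dev). A sketch of the map-check-unmap cycle, with a hypothetical map_chunk() helper:

    #include <linux/errno.h>
    #include <linux/pci.h>
    #include <linux/scatterlist.h>

    /* Sketch: map a scatterlist for DMA, treat zero mapped entries as
     * failure, and unmap with the same direction when done. */
    static int map_chunk(struct pci_dev *pdev, struct scatterlist *sg,
                         int npages)
    {
            int nsg = pci_map_sg(pdev, sg, npages, PCI_DMA_BIDIRECTIONAL);
            if (nsg <= 0)
                    return -ENOMEM;

            /* ... hand the nsg coalesced entries to the hardware ... */

            pci_unmap_sg(pdev, sg, npages, PCI_DMA_BIDIRECTIONAL);
            return 0;
    }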
mthca_eq.c:500 eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
579 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
629 pci_free_consistent(dev->pdev, PAGE_SIZE,
642 free_irq(dev->pdev->irq, dev);
655 unsigned long base = pci_resource_start(dev->pdev, 0);
674 if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
686 if (mthca_map_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
694 if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
752 dev->eq_table.icm_dma = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
754 if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma))
[all...]
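mthca_eq.c computes register addresses from BAR 0 via pci_resource_start()/pci_resource_len() before ioremap()ing them. A sketch of mapping a register window out of a BAR (map_reg_window() is an illustrative name):

    #include <linux/io.h>
    #include <linux/pci.h>

    /* Sketch: map a small register window at a fixed offset inside BAR 0,
     * bounds-checked against the BAR's length. */
    static void __iomem *map_reg_window(struct pci_dev *pdev,
                                        unsigned long offset, size_t size)
    {
            if (offset + size > pci_resource_len(pdev, 0))
                    return NULL;
            return ioremap(pci_resource_start(pdev, 0) + offset, size);
    }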
mthca_dev.h:291 struct pci_dev *pdev; member in struct:mthca_dev
369 dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ## arg); \
379 dev_err(&mdev->pdev->dev, format, ## arg)
381 dev_info(&mdev->pdev->dev, format, ## arg)
383 dev_warn(&mdev->pdev->dev, format, ## arg)
455 int __mthca_restart_one(struct pci_dev *pdev);
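The mthca_dev.h hits at 369-383 are logging macros that tag every message with the underlying PCI device. A sketch of that wrapper pattern (the mydrv_* names are illustrative):

    #include <linux/device.h>

    /* Sketch: per-driver logging that routes through the pci_dev's
     * struct device, as mthca_err()/mthca_warn() do. */
    #define mydrv_err(mdev, format, arg...) \
            dev_err(&(mdev)->pdev->dev, format, ## arg)
    #define mydrv_warn(mdev, format, arg...) \
            dev_warn(&(mdev)->pdev->dev, format, ## arg)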
mthca_av.c:338 dev->av_table.pool = pci_pool_create("mthca_av", dev->pdev,
345 dev->av_table.av_map = ioremap(pci_resource_start(dev->pdev, 4) +
mthca_mr.c:358 dma_sync_single(&dev->pdev->dev, dma_handle, list_len * sizeof (u64), DMA_TO_DEVICE);
810 dma_sync_single(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
893 addr = pci_resource_start(dev->pdev, 4) +
894 ((pci_resource_len(dev->pdev, 4) - 1) &
906 addr = pci_resource_start(dev->pdev, 4) +
907 ((pci_resource_len(dev->pdev, 4) - 1) &
mthca_cmd.c:482 dev->hcr = ioremap(pci_resource_start(dev->pdev, 0) + MTHCA_HCR_BASE,
489 dev->cmd.pool = pci_pool_create("mthca_cmd", dev->pdev,
721 addr = pci_resource_start(dev->pdev, 2) +
722 ((pci_resource_len(dev->pdev, 2) - 1) & base);
mthca_qp.c:1373 sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
1415 dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
1481 dma_free_coherent(&dev->pdev->dev,
mthca_provider.c:1216 switch (dev->pdev->device) {
1334 dev->ib_dev.dma_device = &dev->pdev->dev;
mthca_cq.c:950 synchronize_irq(dev->pdev->irq);
/barrelfish-master/lib/devif/backends/net/mlx4/include/linux/mlx4/
driver.h:112 int mlx4_get_val(struct mlx4_dbdf2val *tbl, struct pci_dev *pdev, int idx,
device.h:786 struct pci_dev *pdev; member in struct:mlx4_dev
/barrelfish-master/lib/devif/backends/net/mlx4/drivers/net/mlx4/
fw.h:221 int mlx4_RUN_FW(struct mlx4_priv *pdev);
222 int mlx4_QUERY_FW(struct mlx4_priv *pdev);
fw.c:1322 int mlx4_RUN_FW(struct mlx4_priv *pdev) { argument
1323 return mlx4_cmd(&pdev->dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A,
1327 int mlx4_QUERY_FW(struct mlx4_priv *pdev) { argument
1328 struct mlx4_fw *fw = &pdev->fw;
1329 struct mlx4_cmd *cmd = &pdev->cmd;
1364 err = mlx4_cmd_box(&pdev->dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
1377 pdev->dev.caps.fw_ver = (fw_ver & 0xffff00000000ull)
1381 /*MLX4_DEBUG("0x%lx\n", pdev->dev.caps.fw_ver);*/
1384 pdev->dev.caps.function = lg;
1386 /*MLX4_DEBUG("0x%x\n", pdev
[all...]
cmd.c:559 static int mlx4_cmd_post(struct mlx4_priv *pdev, struct timespec *ts1, argument
562 struct mlx4_cmd *cmd = &pdev->cmd;
587 while (cmd_pending(pdev)) {
742 if (pci_channel_offline(dev->pdev)) {
917 if (pci_channel_offline(dev->pdev))
2253 ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
2257 ioremap(pci_resource_start(dev->pdev, 2) +
2366 dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
2386 /*priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE,
2395 priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
[all...]
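cmd.c checks pci_channel_offline() (lines 742 and 917 above) so commands fail fast while EEH/AER recovery has the device offline. A sketch of that guard, with a hypothetical issue_cmd():

    #include <linux/errno.h>
    #include <linux/pci.h>

    /* Sketch: refuse to touch the device while its PCI channel is offline. */
    static int issue_cmd(struct pci_dev *pdev)
    {
            if (pci_channel_offline(pdev))
                    return -EIO;

            /* ... post the command to the command register ... */
            return 0;
    }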
mlx4_en.h:435 struct pci_dev *pdev; member in struct:mlx4_en_dev
917 DRV_NAME, dev_name(&priv->mdev->pdev->dev), \
936 dev_name(&mdev->pdev->dev), ##arg)
939 dev_name(&mdev->pdev->dev), ##arg)
942 dev_name(&mdev->pdev->dev), ##arg)
mlx4_devif_queue.c:369 pci_physfn(priv->dev.pdev), i - 1,
459 if (priv->dev.pdev->device == 0x1003)
505 /*mlx4_get_val(num_vfs.dbdf2val.tbl, pci_physfn(priv->dev.pdev), 0, &nvfs);*/
1011 priv->dev.pdev = calloc(1, sizeof(struct pci_dev));
1038 retry: err = pci_enable_msix(priv->dev.pdev, entries, nreq);
1080 priv->eq_table.eq[i].irq = priv->dev.pdev->irq;
1376 if (device_create_file(&priv->dev.pdev->dev, &info->firmware_attr))
1381 if (device_create_file(&priv->dev.pdev->dev, &info->hca_attr))
1386 if (device_create_file(&priv->dev.pdev->dev, &info->board_attr))
1414 err = device_create_file(&dev->pdev->dev,
[all...]
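The retry label at mlx4_devif_queue.c:1038 is the old pci_enable_msix() negotiation: a positive return value is the number of vectors actually available, so the caller retries with that smaller count. A sketch of the idiom (modern kernels use pci_alloc_irq_vectors() instead):

    #include <linux/pci.h>

    /* Sketch: old-style MSI-X negotiation loop. */
    static int enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
                           int nreq)
    {
            int err;

    retry:
            err = pci_enable_msix(pdev, entries, nreq);
            if (err > 0) {           /* only err vectors available: retry */
                    nreq = err;
                    goto retry;
            }
            return err ? err : nreq; /* 0 on success: nreq vectors enabled */
    }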
/barrelfish-master/lib/devif/backends/net/mlx4/drivers/infiniband/hw/mlx4/
sysfs.c:378 strlcpy(name, pci_name(dev->dev->pdev), max);

Completed in 194 milliseconds