Searched refs:dev (Results 51 - 75 of 400) sorted by relevance


/barrelfish-2018-10-04/usr/arrakismon/
pci_host.c  19 #include <dev/pci_hdr0_mem_dev.h>
36 static void confspace_write(struct pci_device *dev, argument
42 static void confspace_read(struct pci_device *dev, argument
46 struct pci_host *h = dev->state;
68 struct pci_device *dev = calloc(1, sizeof(struct pci_device)); local
72 dev->confspace_write = confspace_write;
73 dev->confspace_read = confspace_read;
74 dev->state = host;
78 int r = pci_attach_device(pci, 0, 1, dev);
pci_hostbridge.c  19 #include <dev/pci_hdr0_mem_dev.h>
28 static void confspace_write(struct pci_device *dev, argument
34 static void confspace_read(struct pci_device *dev, argument
38 struct pci_hostbridge *h = dev->state;
54 struct pci_device *dev = calloc(1, sizeof(struct pci_device)); local
58 dev->confspace_write = confspace_write;
59 dev->confspace_read = confspace_read;
60 dev->state = host;
69 return dev;
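
The arrakismon pci_host.c/pci_hostbridge.c hits above (the vmkitmon copies further down are identical) share one pattern: allocate a struct pci_device, install confspace_read/confspace_write callbacks, point dev->state back at the owning emulation state, and hand the device to the emulated bus with pci_attach_device. A minimal sketch of that wiring follows; the my_* names, the struct pci bus-handle type, the callback parameters beyond *dev (the listing truncates them) and the nonzero-means-failure check are assumptions, not the actual vmkitmon signatures.

    #include <stdlib.h>

    /* Sketch only: my_* names are hypothetical; callback parameters after
     * *dev are omitted because the search listing truncates them. */
    static void my_confspace_read(struct pci_device *dev /* , addr, size, val */)
    {
        struct my_emu_state *st = dev->state;    /* backpointer installed below */
        (void) st;                               /* decode addr, fill in *val */
    }

    static void my_confspace_write(struct pci_device *dev /* , addr, size, val */)
    {
        struct my_emu_state *st = dev->state;
        (void) st;                               /* latch the written value */
    }

    static struct pci_device *my_device_create(struct pci *pci, struct my_emu_state *st)
    {
        struct pci_device *dev = calloc(1, sizeof(struct pci_device));
        if (dev == NULL) {
            return NULL;
        }
        dev->confspace_write = my_confspace_write;
        dev->confspace_read  = my_confspace_read;
        dev->state           = st;

        /* bus 0, device 1, as in the pci_host.c hit above */
        if (pci_attach_device(pci, 0, 1, dev) != 0) {
            free(dev);
            return NULL;
        }
        return dev;
    }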
/barrelfish-2018-10-04/lib/devif/backends/net/mlx4/drivers/infiniband/hw/mlx4/
alias_GUID.c  54 struct mlx4_ib_dev *dev ; member in struct:mlx4_alias_guid_work_context
69 void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num, argument
77 if (!mlx4_is_master(dev->dev))
80 guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
90 if (slave_id >= dev->dev->num_slaves) {
96 memcpy(&dev->sriov.demux[port_index].guid_cache[slave_id],
105 static __be64 get_cached_alias_guid(struct mlx4_ib_dev *dev, int port, int index) argument
111 return *(__be64 *)&dev
129 mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev, int block_num, u8 port_num, u8 *p_data) argument
202 struct mlx4_ib_dev *dev; local
312 invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index) argument
350 struct mlx4_ib_dev *dev = to_mdev(ibdev); local
436 mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port) argument
466 get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port, struct mlx4_next_alias_guid_work *rec) argument
491 set_administratively_guid_record(struct mlx4_ib_dev *dev, int port, int rec_index, struct mlx4_sriov_alias_guid_info_rec_det *rec_det) argument
503 set_all_slaves_guids(struct mlx4_ib_dev *dev, int port) argument
532 struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov); local
555 mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port) argument
571 mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev) argument
607 mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev) argument
[all...]
/barrelfish-2018-10-04/usr/vmkitmon/
pci_host.c  19 #include <dev/pci_hdr0_mem_dev.h>
36 static void confspace_write(struct pci_device *dev, argument
42 static void confspace_read(struct pci_device *dev, argument
46 struct pci_host *h = dev->state;
68 struct pci_device *dev = calloc(1, sizeof(struct pci_device)); local
72 dev->confspace_write = confspace_write;
73 dev->confspace_read = confspace_read;
74 dev->state = host;
78 int r = pci_attach_device(pci, 0, 1, dev);
pci_hostbridge.c  19 #include <dev/pci_hdr0_mem_dev.h>
28 static void confspace_write(struct pci_device *dev, argument
34 static void confspace_read(struct pci_device *dev, argument
38 struct pci_hostbridge *h = dev->state;
54 struct pci_device *dev = calloc(1, sizeof(struct pci_device)); local
58 dev->confspace_write = confspace_write;
59 dev->confspace_read = confspace_read;
60 dev->state = host;
69 return dev;
/barrelfish-2018-10-04/lib/devif/backends/net/mlx4/drivers/net/mlx4/
mlx4_devif_queue.c  157 err = mlx4_cmd(&priv->dev, mailbox->dma, 1, 0, MLX4_CMD_MAP_FA,
184 owner = priv->dev.bar_info->vaddr + MLX4_OWNER_BASE;
226 /*unmap_flag = mlx4_UNMAP_FA(&priv->dev);
260 if (dev_cap->uar_size > priv->dev.bar_info[1].bytes) {
263 (unsigned long long ) priv->dev.bar_info[1].bytes);
267 priv->dev.caps.num_ports = dev_cap->num_ports;
268 priv->dev.phys_caps.num_phys_eqs = MLX4_MAX_EQ_NUM;
269 for (i = 1; i <= priv->dev.caps.num_ports; ++i) {
270 priv->dev.caps.vl_cap[i] = dev_cap->max_vl[i];
271 priv->dev
500 choose_steering_mode(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) argument
837 mlx4_set_port_mask(struct mlx4_dev *dev) argument
1636 __mlx4_counter_alloc(struct mlx4_dev *dev, int slave, int port, int *idx) argument
1708 mlx4_counter_alloc(struct mlx4_dev *dev, u8 port, int *idx) argument
[all...]
pd.c  49 int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn) { argument
50 struct mlx4_priv *priv = mlx4_priv(dev);
61 void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn)
63 mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn, MLX4_USE_RR);
67 int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn)
69 struct mlx4_dev *dev = mlx4_priv(dev);
78 int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn)
83 if (mlx4_is_mfunc(dev)) {
84 err = mlx4_cmd_imm(dev,
141 mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar) argument
174 mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node) argument
[all...]
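
The pd.c entry points keep their full signatures in the listing: mlx4_pd_alloc(dev, &pdn) hands out a protection-domain number from the driver's bitmap and mlx4_pd_free(dev, pdn) returns it. A usage sketch, assuming an already-initialized struct mlx4_dev *dev and with the surrounding error handling kept illustrative:

    /* Sketch: allocate a protection domain, use it, release it. */
    u32 pdn;
    int err = mlx4_pd_alloc(dev, &pdn);
    if (err)
        return err;
    /* ... create MRs/QPs against pdn ... */
    mlx4_pd_free(dev, pdn);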
/barrelfish-2018-10-04/lib/devif/backends/net/mlx4/drivers/infiniband/hw/mthca/
mthca_memfree.c  63 static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk) argument
68 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
76 static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk) argument
81 dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
87 void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent) argument
96 mthca_free_icm_coherent(dev, chunk);
98 mthca_free_icm_pages(dev, chunk);
122 static int mthca_alloc_icm_coherent(struct device *dev, struct scatterlist *mem, argument
125 void *buf = dma_alloc_coherent(dev, PAGE_SIZ
136 mthca_alloc_icm(struct mthca_dev *dev, int npages, gfp_t gfp_mask, int coherent) argument
221 mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj) argument
257 mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj) argument
323 mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table, int start, int end) argument
346 mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table, int start, int end) argument
358 mthca_alloc_icm_table(struct mthca_dev *dev, u64 virt, int obj_size, int nobj, int reserved, int use_lowmem, int use_coherent) argument
428 mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table) argument
444 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int page) argument
459 mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar, struct mthca_user_db_table *db_tab, int index, u64 uaddr) argument
610 mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar, struct mthca_user_db_table *db_tab, int index) argument
628 mthca_init_user_db_tab(struct mthca_dev *dev) argument
652 mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar, struct mthca_user_db_table *db_tab) argument
681 mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type, u32 qn, __be32 **db) argument
782 mthca_free_db(struct mthca_dev *dev, int type, int db_index) argument
819 mthca_init_db_tab(struct mthca_dev *dev) argument
850 mthca_cleanup_db_tab(struct mthca_dev *dev) argument
[all...]
mthca_cmd.c  186 static inline int go_bit(struct mthca_dev *dev) argument
188 return readl(dev->hcr + HCR_STATUS_OFFSET) &
192 static void mthca_cmd_post_dbell(struct mthca_dev *dev, argument
200 void __iomem *ptr = dev->cmd.dbell_map;
201 u16 *offs = dev->cmd.dbell_offsets;
224 static int mthca_cmd_post_hcr(struct mthca_dev *dev, argument
236 while (go_bit(dev) && time_before(jiffies, end))
240 if (go_bit(dev))
249 __raw_writel((__force u32) cpu_to_be32(in_param >> 32), dev->hcr + 0 * 4);
250 __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), dev
267 mthca_cmd_post(struct mthca_dev *dev, u64 in_param, u64 out_param, u32 in_modifier, u8 op_modifier, u16 op, u16 token, int event) argument
297 mthca_cmd_poll(struct mthca_dev *dev, u64 in_param, u64 *out_param, int out_is_imm, u32 in_modifier, u8 op_modifier, u16 op, unsigned long timeout, u8 *status) argument
342 mthca_cmd_event(struct mthca_dev *dev, u16 token, u8 status, u64 out_param) argument
361 mthca_cmd_wait(struct mthca_dev *dev, u64 in_param, u64 *out_param, int out_is_imm, u32 in_modifier, u8 op_modifier, u16 op, unsigned long timeout, u8 *status) argument
420 mthca_cmd_box(struct mthca_dev *dev, u64 in_param, u64 out_param, u32 in_modifier, u8 op_modifier, u16 op, unsigned long timeout, u8 *status) argument
440 mthca_cmd(struct mthca_dev *dev, u64 in_param, u32 in_modifier, u8 op_modifier, u16 op, unsigned long timeout, u8 *status) argument
457 mthca_cmd_imm(struct mthca_dev *dev, u64 in_param, u64 *out_param, u32 in_modifier, u8 op_modifier, u16 op, unsigned long timeout, u8 *status) argument
476 mthca_cmd_init(struct mthca_dev *dev) argument
500 mthca_cmd_cleanup(struct mthca_dev *dev) argument
512 mthca_cmd_use_events(struct mthca_dev *dev) argument
549 mthca_cmd_use_polling(struct mthca_dev *dev) argument
563 mthca_alloc_mailbox(struct mthca_dev *dev, gfp_t gfp_mask) argument
581 mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox) argument
590 mthca_SYS_EN(struct mthca_dev *dev, u8 *status) argument
606 mthca_SYS_DIS(struct mthca_dev *dev, u8 *status) argument
611 mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm, u64 virt, u8 *status) argument
690 mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status) argument
695 mthca_UNMAP_FA(struct mthca_dev *dev, u8 *status) argument
700 mthca_RUN_FW(struct mthca_dev *dev, u8 *status) argument
705 mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base) argument
731 mthca_QUERY_FW(struct mthca_dev *dev, u8 *status) argument
837 mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status) argument
890 mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status) argument
895 mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status) argument
945 mthca_QUERY_DEV_LIM(struct mthca_dev *dev, struct mthca_dev_lim *dev_lim, u8 *status) argument
1225 mthca_QUERY_ADAPTER(struct mthca_dev *dev, struct mthca_adapter *adapter, u8 *status) argument
1268 mthca_INIT_HCA(struct mthca_dev *dev, struct mthca_init_hca_param *param, u8 *status) argument
1393 mthca_INIT_IB(struct mthca_dev *dev, struct mthca_init_ib_param *param, int port, u8 *status) argument
1445 mthca_CLOSE_IB(struct mthca_dev *dev, int port, u8 *status) argument
1450 mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status) argument
1455 mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param, int port, u8 *status) argument
1491 mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *status) argument
1496 mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status) argument
1522 mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count, u8 *status) argument
1530 mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status) argument
1535 mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status) argument
1540 mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages, u8 *status) argument
1559 mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int mpt_index, u8 *status) argument
1566 mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int mpt_index, u8 *status) argument
1574 mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int num_mtt, u8 *status) argument
1581 mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status) argument
1586 mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap, int eq_num, u8 *status) argument
1596 mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int eq_num, u8 *status) argument
1603 mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int eq_num, u8 *status) argument
1611 mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int cq_num, u8 *status) argument
1618 mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int cq_num, u8 *status) argument
1626 mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size, u8 *status) argument
1657 mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int srq_num, u8 *status) argument
1664 mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int srq_num, u8 *status) argument
1672 mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num, struct mthca_mailbox *mailbox, u8 *status) argument
1679 mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status) argument
1685 mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur, enum ib_qp_state next, u32 num, int is_ee, struct mthca_mailbox *mailbox, u32 optmask, u8 *status) argument
1789 mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee, struct mthca_mailbox *mailbox, u8 *status) argument
1796 mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn, u8 *status) argument
1822 mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, int port, struct ib_wc *in_wc, struct ib_grh *in_grh, void *in_mad, void *response_mad, u8 *status) argument
1901 mthca_READ_MGM(struct mthca_dev *dev, int index, struct mthca_mailbox *mailbox, u8 *status) argument
1908 mthca_WRITE_MGM(struct mthca_dev *dev, int index, struct mthca_mailbox *mailbox, u8 *status) argument
1915 mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox, u16 *hash, u8 *status) argument
1928 mthca_NOP(struct mthca_dev *dev, u8 *status) argument
[all...]
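
Every wrapper listed for mthca_cmd.c takes the mthca_dev plus an out-parameter u8 *status: the int return reports a posting or mailbox failure, while *status carries the firmware's own completion code, so callers check both. A sketch using the NOP wrapper above (the errno mapping for a bad status is illustrative):

    /* Sketch: issue a firmware NOP through the wrapper listed above. */
    u8 status;
    int err = mthca_NOP(dev, &status);
    if (err)
        return err;          /* command could not be posted */
    if (status)
        return -EINVAL;      /* firmware rejected it; mapping is illustrative */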
mthca_profile.c  64 s64 mthca_make_profile(struct mthca_dev *dev, argument
97 profile[MTHCA_RES_MTT].size = dev->limits.mtt_seg_size;
119 if (mthca_is_memfree(dev))
123 if (mthca_is_memfree(dev)) {
127 mem_base = dev->ddr_start;
128 mem_avail = dev->fw.tavor.fw_start - dev->ddr_start;
152 mthca_err(dev, "Profile requires 0x%llx bytes; "
161 mthca_dbg(dev, "profile[%2d]--%2d/%2d @ 0x%16llx "
168 if (mthca_is_memfree(dev))
[all...]
mthca_mad.c  50 static int mthca_update_rate(struct mthca_dev *dev, u8 port_num) argument
59 ret = ib_query_port(&dev->ib_dev, port_num, tprops);
62 ret, dev->ib_dev.name, port_num);
66 dev->rate[port_num - 1] = tprops->active_speed *
74 static void update_sm_ah(struct mthca_dev *dev, argument
81 if (!dev->send_agent[port_num - 1][0])
89 new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
94 spin_lock_irqsave(&dev->sm_lock, flags);
95 if (dev->sm_ah[port_num - 1])
96 ib_destroy_ah(dev
148 node_desc_override(struct ib_device *dev, struct ib_mad *mad) argument
161 forward_trap(struct mthca_dev *dev, u8 port_num, struct ib_mad *mad) argument
289 mthca_create_agents(struct mthca_dev *dev) argument
331 mthca_free_agents(struct mthca_dev *dev) argument
[all...]
mthca_cq.c  188 static void dump_cqe(struct mthca_dev *dev, void *cqe_ptr) argument
193 mthca_dbg(dev, "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
203 static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq, argument
206 if (mthca_is_memfree(dev)) {
211 dev->kar + MTHCA_CQ_DOORBELL,
212 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
221 void mthca_cq_completion(struct mthca_dev *dev, u32 cqn) argument
225 cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
228 mthca_warn(dev, "Completio
237 mthca_cq_event(struct mthca_dev *dev, u32 cqn, enum ib_event_type event_type) argument
277 mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn, struct mthca_srq *srq) argument
352 mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent) argument
370 mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe) argument
376 handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq, struct mthca_qp *qp, int wqe_index, int is_send, struct mthca_err_cqe *cqe, struct ib_wc *entry, int *free_cqe) argument
482 mthca_poll_one(struct mthca_dev *dev, struct mthca_cq *cq, struct mthca_qp **cur_qp, int *freed, struct ib_wc *entry) argument
662 struct mthca_dev *dev = to_mdev(ibcq->device); local
773 mthca_init_cq(struct mthca_dev *dev, int nent, struct mthca_ucontext *ctx, u32 pdn, struct mthca_cq *cq) argument
900 get_cq_refcount(struct mthca_dev *dev, struct mthca_cq *cq) argument
911 mthca_free_cq(struct mthca_dev *dev, struct mthca_cq *cq) argument
967 mthca_init_cq_table(struct mthca_dev *dev) argument
988 mthca_cleanup_cq_table(struct mthca_dev *dev) argument
[all...]
mthca_dev.h  369 dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ## arg); \
379 dev_err(&mdev->pdev->dev, format, ## arg)
381 dev_info(&mdev->pdev->dev, format, ## arg)
383 dev_warn(&mdev->pdev->dev, format, ## arg)
424 int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
427 void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
430 int mthca_init_uar_table(struct mthca_dev *dev);
431 int mthca_init_pd_table(struct mthca_dev *dev);
432 int mthca_init_mr_table(struct mthca_dev *dev);
433 int mthca_init_eq_table(struct mthca_dev *dev);
591 mthca_is_memfree(struct mthca_dev *dev) argument
[all...]
/barrelfish-2018-10-04/lib/dma/xeon_phi/
xeon_phi_dma_device.c  13 #include <dev/xeon_phi/xeon_phi_dma_dev.h>
48 * \param dev Xeon Phi DMA device
51 void xeon_phi_dma_device_get_dstat_addr(struct xeon_phi_dma_device *dev, argument
54 assert(dev->dstat.vaddr);
56 *mem = dev->dstat;
58 mem->paddr += (XEON_PHI_DMA_CHANNEL_DSTAT_SIZE * dev->common.channels.next);
61 mem->vaddr += (XEON_PHI_DMA_CHANNEL_DSTAT_SIZE * dev->common.channels.next++);
67 * \param dev Xeon Phi DMA device
70 errval_t xeon_phi_dma_device_irq_setup(struct xeon_phi_dma_device *dev, argument
82 * \param dev Xeo
87 xeon_phi_dma_device_get_channel_vbase(struct xeon_phi_dma_device *dev, uint8_t idx) argument
102 xeon_phi_dma_device_set_channel_owner(struct xeon_phi_dma_device *dev, uint8_t idx, xeon_phi_dma_owner_t owner) argument
154 xeon_phi_dma_device_set_channel_state(struct xeon_phi_dma_device *dev, uint8_t idx, uint8_t enabled) argument
228 xeon_phi_dma_device_init(void *mmio_base, struct xeon_phi_dma_device **dev) argument
303 xeon_phi_dma_device_shutdown(struct xeon_phi_dma_device *dev) argument
323 xeon_phi_dma_device_intr_enable(struct xeon_phi_dma_device *dev, dma_irq_t type, dma_irq_fn_t fn, void *arg) argument
337 xeon_phi_dma_device_intr_disable(struct xeon_phi_dma_device *dev) argument
357 xeon_phi_dma_device_poll_channels(struct dma_device *dev) argument
[all...]
/barrelfish-2018-10-04/lib/openssl-1.0.0d/util/
pod2mantest  34 if "$pod2man" --section=1 --center=OpenSSL --release=dev pod2mantest.pod | fgrep OpenSSL >/dev/null; then
41 if "$pod2man" --section=1 --center=OpenSSL --release=dev pod2mantest.pod | grep '^MARKER - ' >/dev/null; then
/barrelfish-2018-10-04/lib/virtio/backends/
virtio_device_io.c  22 errval_t virtio_device_pci_alloc(struct virtio_device **dev) argument
27 errval_t virtio_device_pci_free(struct virtio_device **dev) argument
virtio_device_pci.c  22 errval_t virtio_device_pci_alloc(struct virtio_device **dev) argument
27 errval_t virtio_device_pci_free(struct virtio_device **dev) argument
/barrelfish-2018-10-04/lib/virtio/
device.h  63 errval_t (*reset)(struct virtio_device *dev);
64 errval_t (*set_status)(struct virtio_device *dev, uint8_t status);
65 errval_t (*get_status)(struct virtio_device *dev, uint32_t *status);
66 errval_t (*negotiate_features)(struct virtio_device *dev, uint64_t driv_features);
67 errval_t (*get_queue_num_max)(struct virtio_device *dev, uint16_t queue_index, uint16_t *num_max);
68 errval_t (*set_virtq)(struct virtio_device *dev, struct virtqueue *vq);
70 errval_t (*set_config)(struct virtio_device *dev,void *config,size_t offset, size_t length);
71 errval_t (*notify)(struct virtio_device *dev, uint16_t vq_id);
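
The device.h hits enumerate the function pointers a VirtIO transport backend (the I/O, PCI and MMIO variants under lib/virtio/backends/) has to supply. A sketch of a backend filling in a few of them follows; the container struct name virtio_device_fn and the my_mmio_* helpers are assumptions, only the pointer signatures come from the listing.

    /* Sketch only: struct name and my_mmio_* helpers are assumed. */
    static errval_t my_mmio_reset(struct virtio_device *dev)
    {
        /* write 0 to the device status register */
        return SYS_ERR_OK;
    }

    static errval_t my_mmio_set_status(struct virtio_device *dev, uint8_t status)
    {
        /* OR `status` into the device status register */
        return SYS_ERR_OK;
    }

    static errval_t my_mmio_notify(struct virtio_device *dev, uint16_t vq_id)
    {
        /* kick virtqueue vq_id via the notification register */
        return SYS_ERR_OK;
    }

    static struct virtio_device_fn my_mmio_ops = {
        .reset      = my_mmio_reset,
        .set_status = my_mmio_set_status,
        .notify     = my_mmio_notify,
        /* .get_status, .negotiate_features, .get_queue_num_max,
           .set_virtq, .set_config omitted from the sketch */
    };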
device.c  32 * \param dev device structure to initialize
36 errval_t virtio_device_open(struct virtio_device **dev, argument
56 err = virtio_device_mmio_init(dev, setup);
75 struct virtio_device *vdev = *dev;
152 * \param dev device structure to initialize
156 errval_t virtio_device_open_with_cap(struct virtio_device **dev, argument
199 err = virtio_device_open(dev, setup);
210 * \param dev the device to query for the feature
216 bool virtio_device_has_feature(struct virtio_device *dev, argument
222 if (dev
229 virtio_device_specific_setup(struct virtio_device *dev, void *arg) argument
246 virtio_device_reset(struct virtio_device *dev) argument
262 virtio_device_get_status(struct virtio_device *dev, uint32_t *ret_status) argument
276 virtio_device_set_status(struct virtio_device *dev, uint8_t status) argument
344 virtio_device_config_write(struct virtio_device *dev, void *config, size_t offset, size_t length) argument
356 virtio_device_set_driver_features(struct virtio_device *dev, uint64_t features) argument
363 virtio_device_get_device_features(struct virtio_device *dev, uint64_t *ret_features) argument
370 virtio_device_feature_negotiate(struct virtio_device *dev, uint64_t driver_features) argument
403 virtio_device_set_virtq(struct virtio_device *dev, struct virtqueue *vq) argument
[all...]
/barrelfish-2018-10-04/usr/acpi/arch/x86/
ioapic.c  42 lpc_ioapic_initialize(&a->dev, (void *)base);
48 lpc_ioapic_id_wr(&a->dev, (lpc_ioapic_id_t) { .id = id });
51 a->nintis = lpc_ioapic_ver_rd(&a->dev).mre + 1;
70 lpc_ioapic_redir_tbl_t tbl = lpc_ioapic_redirtbl_rd(&a->dev, inti);
72 lpc_ioapic_redirtbl_wr(&a->dev, inti, tbl);
78 lpc_ioapic_redirtbl_wr(&a->dev, inti, entry);
84 lpc_ioapic_redir_tbl_t tbl = lpc_ioapic_redirtbl_rd(&a->dev, inti);
87 lpc_ioapic_redirtbl_wr(&a->dev, inti, tbl);
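
The ioapic.c hits use the Mackerel-generated lpc_ioapic accessors: initialize the device over its mapped MMIO base, then read, edit and write back redirection-table entries. The listing elides the line that edits the entry, so in the sketch below the .mask field and the lpc_ioapic_t handle type are assumptions; only redirtbl_rd/redirtbl_wr come from the hits.

    /* Sketch: mask interrupt input `inti` on an initialized I/O APIC.
     * The `.mask` field name and the lpc_ioapic_t type name are assumed. */
    static void my_ioapic_mask_inti(lpc_ioapic_t *dev, int inti)
    {
        lpc_ioapic_redir_tbl_t tbl = lpc_ioapic_redirtbl_rd(dev, inti);
        tbl.mask = 1;
        lpc_ioapic_redirtbl_wr(dev, inti, tbl);
    }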
/barrelfish-2018-10-04/usr/drivers/omap44xx/twl6030/
twl6030.h  29 #define ti_twl6030_id1_read_8(dev, off) _ti_twl6030_id1_read_8(dev, off)
30 #define ti_twl6030_id1_write_8(dev, off, regval) _ti_twl6030_id1_write_8(dev, off, regval)
31 #include <dev/ti_twl6030_dev.h>
/barrelfish-2018-10-04/tools/usbboot/
usb-linux.c  32 struct libusb_device_handle *dev; local
35 dev = libusb_open_device_with_vid_pid(ctx, vendor, device);
36 if (dev) {
37 r= libusb_set_auto_detach_kernel_driver(dev, 1);
39 r = libusb_set_configuration(dev, 1);
41 r = libusb_claim_interface(dev, 0);
44 *result = dev;
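
The usb-linux.c hit is the usbboot tool's libusb open path: find the device by VID/PID, let libusb auto-detach any kernel driver, select configuration 1, and claim interface 0. The same sequence as a standalone sketch (error handling and the helper name are illustrative, not the tool's own code):

    #include <stdint.h>
    #include <libusb.h>   /* libusb-1.0 */

    /* Open a device by VID/PID and claim interface 0, mirroring usb-linux.c
     * above. Returns NULL on failure. */
    static libusb_device_handle *open_and_claim(libusb_context *ctx,
                                                uint16_t vendor, uint16_t product)
    {
        libusb_device_handle *dev =
            libusb_open_device_with_vid_pid(ctx, vendor, product);
        if (dev == NULL) {
            return NULL;
        }
        /* Have libusb detach/re-attach the kernel driver around claim/release. */
        libusb_set_auto_detach_kernel_driver(dev, 1);
        if (libusb_set_configuration(dev, 1) < 0 ||
            libusb_claim_interface(dev, 0) < 0) {
            libusb_close(dev);
            return NULL;
        }
        return dev;
    }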
/barrelfish-2018-10-04/include/dma/client/
dma_client_request.h  47 * \param dev DMA client device
52 errval_t dma_client_register_memory(struct dma_device *dev,
69 * \param dev DMA client device
74 errval_t dma_client_deregister_memory(struct dma_device *dev,
99 * \param dev DMA client device
105 errval_t dma_client_request_memcpy(struct dma_device *dev,
/barrelfish-2018-10-04/include/dma/
dma_bench.h  42 errval_t dma_bench_run_default(struct dma_device *dev);
44 errval_t dma_bench_run_default_xphi(struct dma_device *dev);
46 errval_t dma_bench_run(struct dma_device *dev, lpaddr_t src, lpaddr_t dst);
/barrelfish-2018-10-04/include/dma/ioat/
ioat_dma_request.h  50 * \param dev IOAT DMA device
57 errval_t ioat_dma_request_memcpy(struct dma_device *dev,
77 * \param dev IOAT DMA device
84 errval_t ioat_dma_request_memset(struct dma_device *dev,
98 * \param dev IOAT DMA device
104 void ioat_dma_request_nop(struct ioat_dma_device *dev);

