Searched refs:dev (Results 101 - 125 of 400) sorted by relevance


/barrelfish-2018-10-04/lib/devif/backends/net/mlx4/drivers/net/mlx4/
mr.c
186 if (mlx4_is_mfunc(&priv->dev)) {
188 err = mlx4_cmd_imm(&priv->dev, in_param, &out_param, RES_MTT,
199 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, argument
201 struct mlx4_priv *priv = mlx4_priv(dev);
230 struct mlx4_mr_table *mr_table = &mlx4_priv(&priv->dev)->mr_table;
236 mlx4_table_put_range(&priv->dev, &mr_table->mtt_table, offset,
245 if (mlx4_is_mfunc(&priv->dev)) {
248 err = mlx4_cmd(&priv->dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
257 __mlx4_free_mtt_range(&priv->dev, offset, order);
265 mlx4_free_mtt_range(&priv->dev, mt
269 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt) argument
283 mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int mpt_index) argument
355 __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index) argument
360 mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index) argument
394 mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, int npages, int page_shift, struct mlx4_mr *mr) argument
451 mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) argument
568 mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) argument
612 mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, struct mlx4_buf *buf) argument
[all...]
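
Taken together, these mr.c hits outline the usual memory-registration path: mlx4_mr_alloc() reserves an MPT entry and an MTT range (calling mlx4_mtt_init() internally), mlx4_write_mtt()/mlx4_buf_write_mtt() program the page list, and mlx4_mr_enable() hands the region to the HCA (SW2HW_MPT). A minimal sketch of that flow follows, assuming the Linux-style mlx4 headers this port carries; the pd number, the access-flag names, the struct mlx4_buf field names, and mlx4_mr_free() in the error path are assumptions, not taken from the hits above.

    /* Sketch only: register an existing mlx4_buf as a memory region. */
    static int example_register_buf(struct mlx4_dev *dev, u32 pdn,
                                    struct mlx4_buf *buf, struct mlx4_mr *mr)
    {
        int err;

        /* Reserve MPT + MTT; npages/page_shift describe the buffer. */
        err = mlx4_mr_alloc(dev, pdn, 0 /* iova */,
                            (u64) buf->npages << buf->page_shift,
                            MLX4_PERM_LOCAL_READ | MLX4_PERM_LOCAL_WRITE,
                            buf->npages, buf->page_shift, mr);
        if (err)
            return err;

        /* Write the buffer's pages into the reserved MTT entries. */
        err = mlx4_buf_write_mtt(dev, &mr->mtt, buf);
        if (err)
            goto err_free;

        /* SW2HW_MPT: make the region visible to the hardware. */
        err = mlx4_mr_enable(dev, mr);
        if (err)
            goto err_free;

        return 0;

    err_free:
        mlx4_mr_free(dev, mr);
        return err;
    }
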
mcg.c
54 int mlx4_get_mgm_entry_size(struct mlx4_dev *dev) { argument
55 return 1 << dev->oper_log_mgm_entry_size;
58 int mlx4_get_qp_per_mgm(struct mlx4_dev *dev) { argument
59 return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
62 static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev, argument
67 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0,
77 static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid) { argument
80 err = mlx4_cmd(dev, regid, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
86 static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index, argument
88 return mlx4_cmd_box(dev,
92 mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index, struct mlx4_cmd_mailbox *mailbox) argument
109 mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, u16 *hash, u8 op_mod) argument
122 get_promisc_qp(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, u32 qpn) argument
143 new_steering_entry(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, unsigned int index, u32 qpn) argument
233 existing_steering_entry(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, unsigned int index, u32 qpn) argument
663 find_entry(struct mlx4_dev *dev, u8 port, u8 *gid, enum mlx4_protocol prot, struct mlx4_cmd_mailbox *mgm_mailbox, int *prev, int *index) argument
777 hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id) argument
788 parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec, struct _rule_hw *rule_hw) argument
842 mlx4_err_rule(struct mlx4_dev *dev, char *str, struct mlx4_net_trans_rule *rule) argument
912 mlx4_flow_attach(struct mlx4_dev *dev, struct mlx4_net_trans_rule *rule, u64 *reg_id) argument
952 mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id) argument
964 mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, u32 max_range_qpn) argument
980 mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], int block_mcast_loopback, enum mlx4_protocol prot, enum mlx4_steer_type steer) argument
1205 mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], u8 attach, u8 block_loopback, enum mlx4_protocol prot) argument
1234 mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], u8 port, int block_mcast_loopback, enum mlx4_protocol prot, u64 *reg_id) argument
1271 mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], u8 port, int block_mcast_loopback, enum mlx4_protocol prot, u64 *reg_id) argument
1389 mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], int block_mcast_loopback, enum mlx4_protocol prot) argument
[all...]
en_netdev.c
85 struct net_device *dev = cq->dev;
86 struct mlx4_en_priv *priv = netdev_priv(dev);
96 done = mlx4_en_process_rx_cq(dev, cq, 4);
200 memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
206 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
211 rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
277 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
402 rps_may_expire_flow(priv->dev,
423 static void mlx4_en_vlan_rx_add_vid(void *arg, struct net_device *dev, u1
480 struct mlx4_dev *dev = mdev->dev; local
558 struct mlx4_dev *dev = mdev->dev; local
[all...]
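
The mcg.c hits list the flow-steering entry points (mlx4_flow_attach()/mlx4_flow_detach()), and the en_netdev.c hits show the caller pattern: build an Ethernet spec around a destination MAC, attach it to a QP, and keep the returned reg_id for the later detach. The sketch below reproduces that pattern; the mlx4_net_trans_rule/mlx4_spec_list field names and the MLX4_* constants are taken from the upstream Linux driver and may be spelled differently in this port.

    /* Sketch only: steer frames for one destination MAC on `port` to QP `qpn`. */
    static int example_steer_mac(struct mlx4_dev *dev, u8 port, u32 qpn,
                                 const u8 *mac, u64 *reg_id)
    {
        struct mlx4_spec_list spec_eth = {
            .id = MLX4_NET_TRANS_RULE_ID_ETH,
        };
        struct mlx4_net_trans_rule rule = {
            .queue_mode     = MLX4_NET_TRANS_Q_FIFO,
            .exclusive      = 0,
            .allow_loopback = 1,
            .promisc_mode   = MLX4_FS_REGULAR,
            .port           = port,
            .priority       = MLX4_DOMAIN_NIC,
            .qpn            = qpn,
        };

        INIT_LIST_HEAD(&rule.list);

        /* Match on the destination MAC only (full mask, as in en_netdev.c). */
        memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
        memset(spec_eth.eth.dst_mac_msk, 0xff, ETH_ALEN);
        list_add_tail(&spec_eth.list, &rule.list);

        /* On success, *reg_id identifies the rule for mlx4_flow_detach(). */
        return mlx4_flow_attach(dev, &rule, reg_id);
    }

Tearing the rule down again is a single call: mlx4_flow_detach(dev, reg_id).
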
/barrelfish-2018-10-04/usr/drivers/usb/usb_manager/include/
usb_request.h
51 usb_error_t usb_req_get_descriptor(struct usb_device *dev,
55 usb_error_t usb_req_get_device_descriptor(struct usb_device *dev,
57 usb_error_t usb_req_get_config_descriptor(struct usb_device *dev,
59 usb_error_t usb_req_get_string_desc(struct usb_device *dev, void *sdesc,
61 usb_error_t usb_req_get_string(struct usb_device *dev, char *buf, uint16_t len,
63 usb_error_t usb_req_set_config(struct usb_device *dev, uint8_t config);
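
Of these request helpers only usb_req_set_config() appears with its complete signature; the rest are cut off by the search view, so the sketch below sticks to that one call. It assumes a struct usb_device obtained during enumeration and that USB_ERR_OK is the success value of usb_error_t.

    /* Sketch only: select configuration 1 on an already-enumerated device. */
    static usb_error_t example_configure(struct usb_device *dev)
    {
        usb_error_t err = usb_req_set_config(dev, 1);
        if (err != USB_ERR_OK) {
            return err;        /* control transfer failed or was stalled */
        }
        return USB_ERR_OK;
    }
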
/barrelfish-2018-10-04/tools/mkrelease/
mkrelease.sh
7 echo "This script must be run in the root of a Barrelfish tree" > /dev/stderr
16 echo "Error: $TARFILE already exists" > /dev/stderr
25 echo "Aborted." > /dev/stderr
42 echo "Error: the following files are executable, but not listed in $EXEC_FILES_LIST:" > /dev/stderr
43 echo "$BADPERMS" > /dev/stderr
44 echo "Please fix this before continuing." > /dev/stderr
/barrelfish-2018-10-04/lib/acpica/generate/linux/
linuxize.sh
51 make_acpisrc $SRCDIR $FORCE_ACPISRC > /dev/null
56 $ACPISRC -ldqy $ACPICAFILE $LINUXFILE > /dev/null
/barrelfish-2018-10-04/usr/acpi/arch/x86/
ioapic.h
18 #include <dev/lpc_ioapic_dev.h>
23 lpc_ioapic_t dev; member in struct:ioapic
/barrelfish-2018-10-04/usr/arrakismon/
pci.c
58 struct pci_device *dev = bus->device[device]; local
60 if(dev != NULL) {
61 dev->confspace_write(dev, pci->address, size, val);
99 struct pci_device *dev = bus->device[device]; local
101 if(dev != NULL) {
102 dev->confspace_read(dev, pci->address, size, val);
/barrelfish-2018-10-04/usr/drivers/omap44xx/cm2/
cm2.h
14 #include <dev/omap/omap44xx_l3init_cm2_dev.h>
15 #include <dev/omap/omap44xx_l4per_cm2_dev.h>
/barrelfish-2018-10-04/usr/drivers/virtio/block/
request.c
62 * \param dev vblock device to allocate the requests for
66 errval_t vblock_request_queue_init(struct vblock_device *dev) argument
68 uint16_t ndesc = virtio_virtqueue_get_num_desc(dev->blk.vq);
78 dev->free_queue.length = ndesc;
79 dev->free_queue.head = req;
80 dev->free_queue.tail = req+(ndesc-1);
83 req->queue = &dev->free_queue;
88 dev->requests = req;
96 * \param dev vblock device to free the requests
100 void vblock_request_queue_destroy(struct vblock_device *dev) argument
176 vblock_request_start(struct vblock_device *dev, struct vblock_req *req) argument
239 vblock_request_exec(struct vblock_device *dev, struct vblock_req *req) argument
282 vblock_request_finish_completed(struct vblock_device *dev) argument
294 vblock_request_issue_get_id(struct virtio_device_blk *dev) argument
[all...]
/barrelfish-2018-10-04/usr/vmkitmon/
pci.c
58 struct pci_device *dev = bus->device[device]; local
60 if(dev != NULL) {
61 dev->confspace_write(dev, pci->address, size, val);
102 struct pci_device *dev = bus->device[device]; local
104 if(dev != NULL) {
105 dev->confspace_read(dev, pci->address, size, val);
pci_ethernet.c
23 #include <dev/e10k_dev.h>
24 #include <dev/e10k_q_dev.h>
64 static void confspace_write(struct pci_device *dev, argument
84 static void confspace_read(struct pci_device *dev, argument
158 static void mem_write(struct pci_device *dev, uint32_t addr, int bar, uint32_t val){ argument
159 struct pci_ethernet * eth = (struct pci_ethernet *)dev->state;
238 static void mem_read(struct pci_device *dev, uint32_t addr, int bar, uint32_t *val){ argument
239 struct pci_ethernet * eth = (struct pci_ethernet *)dev->state;
254 struct pci_device *dev = (struct pci_device *)arg; local
255 lpc_pic_assert_irq(dev
260 struct pci_device *dev = calloc(1, sizeof(struct pci_device)); local
[all...]
pci.h
36 typedef void (*pci_device_confspace_write)(struct pci_device *dev,
40 typedef void (*pci_device_confspace_read)(struct pci_device *dev,
44 typedef void (*pci_device_mem_write)(struct pci_device *dev,
47 typedef void (*pci_device_mem_read)(struct pci_device *dev,
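
pci.h declares the callback types that vmkitmon's virtual PCI bus dispatches through (pci.c above calls dev->confspace_write(dev, pci->address, size, val) and the matching read), and pci_ethernet.c shows the full signatures of the memory-space handlers. A sketch of how an emulated device could plug in follows; the struct pci_device member names (mem_write, mem_read) and the "pci.h" include path are assumptions.

    #include <stdint.h>
    #include <stdlib.h>
    #include "pci.h"            /* assumed path for struct pci_device */

    /* Sketch only: a do-nothing device behind the virtual PCI bus. */
    static void dummy_mem_write(struct pci_device *dev, uint32_t addr,
                                int bar, uint32_t val)
    {
        /* A real device would decode addr against the given BAR here. */
        (void) dev; (void) addr; (void) bar; (void) val;
    }

    static void dummy_mem_read(struct pci_device *dev, uint32_t addr,
                               int bar, uint32_t *val)
    {
        (void) dev; (void) addr; (void) bar;
        *val = 0xffffffff;      /* reads as all-ones, like an empty slot */
    }

    static struct pci_device *dummy_device_create(void)
    {
        struct pci_device *dev = calloc(1, sizeof(struct pci_device));
        if (dev == NULL) {
            return NULL;
        }
        dev->mem_write = dummy_mem_write;   /* member names assumed */
        dev->mem_read  = dummy_mem_read;
        return dev;
    }
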
/barrelfish-2018-10-04/lib/devif/backends/net/mlx4/drivers/infiniband/hw/mlx4/
qp.c
143 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) {
144 if (!mlx4_is_master(dev->dev))
147 return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn
149 < dev->dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX;
152 static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) { argument
157 real_sqp = ((mlx4_is_master(dev->dev) || !mlx4_is_mfun
176 is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) argument
382 set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, int is_user, int has_rq, struct mlx4_ib_qp *qp) argument
420 set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp) argument
549 alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) argument
773 alloc_qpn_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, struct ib_qp_init_attr *attr, int *qpn) argument
847 create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp) argument
1259 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr) argument
1463 to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type) argument
1535 mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah, struct mlx4_ib_qp *qp, struct mlx4_qp_path *path, u8 port, int is_primary) argument
1676 update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) argument
1725 struct mlx4_ib_dev *dev = to_mdev(ibqp->device); local
2233 struct mlx4_ib_dev *dev = to_mdev(ibqp->device); local
[all...]
cq.c
99 struct mlx4_ib_dev *dev = to_mdev(cq->device);
101 return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
104 static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, argument
108 err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
114 buf->entry_size = dev->dev
177 struct mlx4_ib_dev *dev = to_mdev(ibdev); local
459 struct mlx4_ib_dev *dev = to_mdev(ibcq->device); local
[all...]
main.c
117 struct mlx4_ib_dev *dev;
123 int dev;
147 struct mlx4_ib_dev *dev = to_mdev(ibdev);
167 props->fw_ver = dev->dev->caps.fw_ver;
175 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
177 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
179 if (dev
244 struct mlx4_dev *dev = to_mdev(device)->dev; local
424 struct mlx4_ib_dev *dev = to_mdev(ibdev); local
475 struct mlx4_ib_dev *dev = to_mdev(ibdev); local
993 flow_spec_to_net_rule(struct ib_device *dev, struct ib_flow_spec *flow_spec, struct list_head *rule_list_h) argument
1253 init_node_data(struct mlx4_ib_dev *dev) argument
1583 mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) argument
1861 mlx4_ib_add(struct mlx4_dev *dev) argument
2151 mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn) argument
[all...]
/barrelfish-2018-10-04/lib/dma/include/client/
dma_client_channel_internal.h
20 * \param dev DMA client device
27 errval_t dma_client_channel_init(struct dma_client_device *dev,
/barrelfish-2018-10-04/lib/dma/include/ioat/
ioat_dma_channel_internal.h
22 * \param dev IOAT DMA device
29 errval_t ioat_dma_channel_init(struct ioat_dma_device *dev,
/barrelfish-2018-10-04/kernel/arch/armv7/
plat_omap44xx_boot.c
19 #include <dev/omap/omap44xx_cortexa9_wugen_dev.h>
/barrelfish-2018-10-04/kernel/include/arch/armv8/
timers.h
29 #include <dev/armv8_dev.h>
/barrelfish-2018-10-04/include/devif/backends/net/
e10k_devif.h
33 uint32_t bus, uint32_t function, uint32_t devid, uint32_t dev,
/barrelfish-2018-10-04/lib/devif/backends/net/e10k/
e10k_devif_vf.h
37 uint32_t dev, uint32_t device_id, bool interrupts);
/barrelfish-2018-10-04/lib/devif/backends/net/mlx4/include/
mlx4ib.h
12 void *mlx4_ib_add(struct mlx4_dev *dev);
/barrelfish-2018-10-04/lib/devif/backends/net/mlx4/drivers/infiniband/hw/mthca/
mthca_provider.c
752 static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq, argument
779 ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
799 struct mthca_dev *dev = to_mdev(ibcq->device); local
806 if (entries < 1 || entries > dev->limits.max_cqes)
818 ret = mthca_alloc_resize_buf(dev, cq, entries);
830 ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries), &status);
836 mthca_free_cq_buf(dev, &cq->resize_buf->buf,
866 mthca_free_cq_buf(dev, &tbuf, tcqe);
1012 struct mthca_dev *dev = to_mdev(pd->device); local
1024 mthca_warn(dev, "Proces
1196 struct mthca_dev *dev = local
1204 struct mthca_dev *dev = local
1214 struct mthca_dev *dev = local
1234 struct mthca_dev *dev = local
1251 mthca_init_node_data(struct mthca_dev *dev) argument
1300 mthca_register_device(struct mthca_dev *dev) argument
1424 mthca_unregister_device(struct mthca_dev *dev) argument
[all...]
/barrelfish-2018-10-04/lib/dma/client/
dma_client_device.c
33 static errval_t dma_client_device_poll(struct dma_device *dev) argument
48 * \param dev returns a pointer to the device structure
54 struct dma_client_device **dev)
137 *dev = cdev;
145 * \param dev IOAT DMA device to shutdown
150 errval_t dma_client_device_shutdown(struct dma_client_device *dev) argument
162 dma_dev_type_t dma_client_get_device_type(struct dma_client_device *dev) argument
164 assert(dev->info.type != DMA_DEV_TYPE_CLIENT);
165 return dev->info.type;
175 void dma_client_device_get_mem_range(struct dma_client_device *dev, argument
53 dma_client_device_init(struct dma_client_info *info, struct dma_client_device **dev) argument
[all...]
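
dma_client_device.c covers the client-device lifecycle: dma_client_device_init() creates a device from a dma_client_info, dma_client_get_device_type() reports the backing device's type (never DMA_DEV_TYPE_CLIENT, per the assert above), and dma_client_device_shutdown() tears it down. A hedged sketch of that sequence, assuming init returns errval_t like the other entry points here and that the caller has filled in the dma_client_info fields:

    /* Sketch only: bring up a DMA client device, query it, shut it down. */
    static errval_t example_dma_client(struct dma_client_info *info)
    {
        struct dma_client_device *dev = NULL;

        errval_t err = dma_client_device_init(info, &dev);
        if (err_is_fail(err)) {
            return err;
        }

        dma_dev_type_t type = dma_client_get_device_type(dev);
        (void) type;    /* e.g. dispatch on the backing device type here */

        return dma_client_device_shutdown(dev);
    }
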

Completed in 203 milliseconds
