Searched refs:dev (Results 26 - 50 of 400) sorted by relevance

/barrelfish-2018-10-04/usr/drivers/ioat_dma/
ioat_mgr_service.c 36 struct ioat_dev_handle *dev = calloc(1, sizeof(*dev)); local
37 if (dev == NULL) {
41 dev->devframe = frame;
43 dev->next = avail_devices;
44 avail_devices = dev;
59 struct ioat_dev_handle *dev = avail_devices; local
60 avail_devices = dev->next;
62 dev->next = used_devices;
63 used_devices = dev;
[all...]
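
The ioat_mgr_service.c hits above show a simple two-list handle manager: a freshly registered device handle is calloc'd and pushed onto avail_devices, and acquiring a device pops it off that list and onto used_devices. A minimal standalone sketch of this singly linked free-list pattern (struct dev_handle, dev_register and dev_acquire are illustrative names, not the Barrelfish originals):

    #include <stdlib.h>
    #include <stdio.h>

    /* Illustrative stand-in for the handle kept per DMA engine. */
    struct dev_handle {
        int devframe;                 /* placeholder for the frame capability */
        struct dev_handle *next;
    };

    static struct dev_handle *avail_devices;   /* free list   */
    static struct dev_handle *used_devices;    /* in-use list */

    /* Register a device: allocate a handle and push it on the free list. */
    static int dev_register(int frame)
    {
        struct dev_handle *dev = calloc(1, sizeof(*dev));
        if (dev == NULL) {
            return -1;
        }
        dev->devframe = frame;
        dev->next = avail_devices;
        avail_devices = dev;
        return 0;
    }

    /* Acquire a device: pop the free list, push the handle on the used list. */
    static struct dev_handle *dev_acquire(void)
    {
        struct dev_handle *dev = avail_devices;
        if (dev == NULL) {
            return NULL;
        }
        avail_devices = dev->next;
        dev->next = used_devices;
        used_devices = dev;
        return dev;
    }

    int main(void)
    {
        dev_register(1);
        dev_register(2);
        struct dev_handle *d = dev_acquire();
        printf("acquired frame %d\n", d ? d->devframe : -1);
        return 0;
    }
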
/barrelfish-2018-10-04/lib/dma/include/xeon_phi/
xeon_phi_dma_device_internal.h 18 #include <dev/xeon_phi/xeon_phi_dma_dev.h>
29 * \param dev Xeon Phi DMA device
32 void xeon_phi_dma_device_get_dstat_addr(struct xeon_phi_dma_device *dev,
38 * \param dev IOAT DMA device
41 errval_t xeon_phi_dma_device_irq_setup(struct xeon_phi_dma_device *dev,
48 * \param dev Xeon Phi DMA device
53 void *xeon_phi_dma_device_get_channel_vbase(struct xeon_phi_dma_device *dev,
63 void xeon_phi_dma_device_set_channel_state(struct xeon_phi_dma_device *dev,
70 * \param dev Xeon Phi DMA device
74 void xeon_phi_dma_device_set_channel_owner(struct xeon_phi_dma_device *dev,
[all...]
/barrelfish-2018-10-04/include/dma/client/
dma_client_device.h 50 static inline struct dma_client_device *dma_device_to_client(struct dma_device *dev) argument
52 return (struct dma_client_device *)dev;
66 * \param dev returns a pointer to the device structure
72 struct dma_client_device **dev);
77 * \param dev IOAT DMA device to shutdown
82 errval_t dma_client_device_shutdown(struct dma_client_device *dev);
93 * \param dev DMA client device
97 dma_dev_type_t dma_client_get_device_type(struct dma_client_device *dev);
106 void dma_client_device_get_mem_range(struct dma_client_device *dev,
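
dma_device_to_client() above is a plain pointer cast, which is only safe if struct dma_client_device embeds its generic struct dma_device as the first member (the same embedding idiom shows up later in these results as "struct virtio_device dev; member in struct:virtio_device_mmio"). A minimal sketch of that container/downcast idiom, with the struct layouts assumed for illustration rather than taken from the library headers:

    #include <assert.h>
    #include <stdio.h>

    /* Illustrative stand-ins; the real definitions live in the DMA library. */
    struct dma_device {
        unsigned id;
    };

    struct dma_client_device {
        struct dma_device common;   /* must be the first member for the cast */
        unsigned svc_binding;       /* hypothetical client-specific state    */
    };

    /* Down-cast from the generic device to the client specialisation.
     * Valid only if 'dev' really is the 'common' field of a client device. */
    static inline struct dma_client_device *dma_device_to_client(struct dma_device *dev)
    {
        return (struct dma_client_device *) dev;
    }

    int main(void)
    {
        struct dma_client_device cdev = { .common = { .id = 7 }, .svc_binding = 1 };
        struct dma_device *generic = &cdev.common;
        assert(dma_device_to_client(generic) == &cdev);
        printf("id = %u\n", dma_device_to_client(generic)->common.id);
        return 0;
    }
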
/barrelfish-2018-10-04/usr/arrakismon/
pc16550d.c 34 pc16550d_mem_initialize(&u->dev, (mackerel_addr_t)u->regs);
41 pc16550d_mem_lsr_thre_wrf(&u->dev, 1);
42 pc16550d_mem_lsr_temt_wrf(&u->dev, 1);
60 if (pc16550d_mem_ier_rd(&u->dev).elsi &&
61 pc16550d_mem_lsr_rd(&u->dev).oe) {
63 pc16550d_mem_iir_iid_wrf(&u->dev, pc16550d_mem_irq_rls);
66 else if (pc16550d_mem_ier_rd(&u->dev).erbfi &&
67 pc16550d_mem_lsr_rd(&u->dev).dr) {
68 pc16550d_mem_iir_iid_wrf(&u->dev, pc16550d_mem_irq_rda);
72 else if (pc16550d_mem_ier_rd(&u->dev)
[all...]
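
The pc16550d.c hits are the UART emulation in arrakismon: it compares enabled bits in IER against status bits in LSR in priority order and latches the matching interrupt ID into IIR. A standalone sketch of that priority chain using the standard 16550 bit positions (mask and ID values are the datasheet encodings, not the Mackerel field definitions):

    #include <stdint.h>
    #include <stdio.h>

    /* Standard 16550 bit positions (datasheet values). */
    #define IER_ERBFI 0x01   /* enable "received data available" interrupt */
    #define IER_ELSI  0x04   /* enable "receiver line status" interrupt    */
    #define LSR_DR    0x01   /* data ready                                  */
    #define LSR_OE    0x02   /* overrun error                               */
    #define IID_RLS   0x3    /* IIR interrupt ID: receiver line status      */
    #define IID_RDA   0x2    /* IIR interrupt ID: received data available   */

    /* Highest-priority pending interrupt, mirroring the if/else-if chain in
     * pc16550d.c: line status (overrun) outranks received data available.
     * Returns -1 when nothing enabled is pending. */
    static int uart_pending_iid(uint8_t ier, uint8_t lsr)
    {
        if ((ier & IER_ELSI) && (lsr & LSR_OE))
            return IID_RLS;
        if ((ier & IER_ERBFI) && (lsr & LSR_DR))
            return IID_RDA;
        return -1;
    }

    int main(void)
    {
        printf("iid = %d\n", uart_pending_iid(IER_ELSI | IER_ERBFI, LSR_DR));
        return 0;
    }
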
/barrelfish-2018-10-04/lib/libc/gen/
devname.c 46 devname_r(dev_t dev, mode_t type, char *buf, int len) argument
51 if (dev == NODEV || !(S_ISCHR(type) || S_ISBLK(dev))) {
58 i = sysctlbyname("kern.devname", buf, &j, &dev, sizeof (dev));
65 S_ISCHR(type) ? 'C' : 'B', (uintmax_t)dev);
70 devname(dev_t dev, mode_t type) argument
74 return (devname_r(dev, type, buf, sizeof(buf)));
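
devname.c is the FreeBSD-derived libc routine that maps a (dev_t, mode) pair back to a device name via the kern.devname sysctl. A small usage sketch, assuming a FreeBSD-style libc where devname_r(3) is declared in <stdlib.h>:

    #include <sys/types.h>
    #include <sys/stat.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(int argc, char *argv[])
    {
        struct stat st;
        char buf[64];

        if (argc < 2 || stat(argv[1], &st) != 0) {
            perror("stat");
            return EXIT_FAILURE;
        }
        /* devname_r() maps the (dev_t, mode) pair to a name; the mode's
         * file-type bits say whether to look up a character or block device. */
        printf("%s -> %s\n", argv[1],
               devname_r(st.st_rdev, st.st_mode & S_IFMT, buf, (int) sizeof(buf)));
        return EXIT_SUCCESS;
    }
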
/barrelfish-2018-10-04/kernel/include/arch/x86/
ia32_spaces.h 18 static inline uint64_t ia32_msr_read_64(ia32_t *dev, size_t offset);
19 static inline void ia32_msr_write_64(ia32_t *dev, size_t offset,
22 static inline uint32_t ia32_msr_read_32(ia32_t *dev, size_t offset);
23 static inline void ia32_msr_write_32(ia32_t *dev, size_t offset,
26 static inline uint64_t ia32_msr_read_64(ia32_t *dev, size_t offset) argument
31 static inline void ia32_msr_write_64(ia32_t *dev, size_t offset, argument
37 static inline uint32_t ia32_msr_read_32(ia32_t *dev, size_t offset) argument
42 static inline void ia32_msr_write_32(ia32_t *dev, size_t offset, argument
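
The ia32_spaces.h accessors are Mackerel "msr" address-space wrappers, where the offset is in effect the MSR number. A hedged sketch of what such a 64-bit MSR read/write typically reduces to on x86 (ring 0 only; this is an illustration, not the Barrelfish/Mackerel implementation):

    #include <stdint.h>

    /* rdmsr returns the value split across EDX:EAX, indexed by ECX. */
    static inline uint64_t msr_read_64(uint32_t msr)
    {
        uint32_t lo, hi;
        __asm__ volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
        return ((uint64_t) hi << 32) | lo;
    }

    /* wrmsr takes the value in EDX:EAX, indexed by ECX. */
    static inline void msr_write_64(uint32_t msr, uint64_t value)
    {
        __asm__ volatile("wrmsr" :: "a"((uint32_t) value),
                                    "d"((uint32_t) (value >> 32)), "c"(msr));
    }
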
/barrelfish-2018-10-04/usr/tests/msun/
lround_test.t 8 make $executable 2>&1 > /dev/null
/barrelfish-2018-10-04/lib/devif/backends/net/mlx4/drivers/infiniband/hw/mthca/
mthca_mr.c 190 static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order, argument
198 if (mthca_is_memfree(dev))
199 if (mthca_table_get_range(dev, dev->mr_table.mtt_table, seg,
208 static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size, argument
223 for (i = dev->limits.mtt_seg_size / 8; i < size; i <<= 1)
226 mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);
235 struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size) argument
237 return __mthca_alloc_mtt(dev, size, &dev
240 mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt) argument
254 __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt, int start_index, u64 *buffer_list, int list_len) argument
306 mthca_write_mtt_size(struct mthca_dev *dev) argument
322 mthca_tavor_write_mtt_seg(struct mthca_dev *dev, struct mthca_mtt *mtt, int start_index, u64 *buffer_list, int list_len) argument
336 mthca_arbel_write_mtt_seg(struct mthca_dev *dev, struct mthca_mtt *mtt, int start_index, u64 *buffer_list, int list_len) argument
361 mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt, int start_index, u64 *buffer_list, int list_len) argument
408 hw_index_to_key(struct mthca_dev *dev, u32 ind) argument
416 key_to_hw_index(struct mthca_dev *dev, u32 key) argument
424 adjust_key(struct mthca_dev *dev, u32 key) argument
432 mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift, u64 iova, u64 total_size, u32 access, struct mthca_mr *mr) argument
522 mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd, u32 access, struct mthca_mr *mr) argument
529 mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd, u64 *buffer_list, int buffer_size_shift, int list_len, u64 iova, u64 total_size, u32 access, struct mthca_mr *mr) argument
555 mthca_free_region(struct mthca_dev *dev, u32 lkey) argument
563 mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr) argument
582 mthca_fmr_alloc(struct mthca_dev *dev, u32 pd, u32 access, struct mthca_fmr *mr) argument
701 mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr) argument
744 struct mthca_dev *dev = to_mdev(ibfmr->device); local
785 struct mthca_dev *dev = to_mdev(ibfmr->device); local
827 mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr) argument
837 mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr) argument
847 mthca_init_mr_table(struct mthca_dev *dev) argument
971 mthca_cleanup_mr_table(struct mthca_dev *dev) argument
[all...]
mthca_mcg.c 63 static int find_mgm(struct mthca_dev *dev, argument
73 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
80 err = mthca_MGID_HASH(dev, mailbox, hash, &status);
84 mthca_err(dev, "MGID_HASH returned status %02x\n", status);
90 mthca_dbg(dev, "Hash for %pI6 is %04x\n", gid, *hash);
96 err = mthca_READ_MGM(dev, *index, mgm_mailbox, &status);
100 mthca_err(dev, "READ_MGM returned status %02x\n", status);
107 mthca_err(dev, "Found zero MGID in AMGM.\n");
123 mthca_free_mailbox(dev, mailbox);
129 struct mthca_dev *dev local
236 struct mthca_dev *dev = to_mdev(ibqp->device); local
352 mthca_init_mcg_table(struct mthca_dev *dev) argument
369 mthca_cleanup_mcg_table(struct mthca_dev *dev) argument
[all...]
mthca_srq.c 95 static void mthca_tavor_init_srq_context(struct mthca_dev *dev, argument
110 context->uar = cpu_to_be32(dev->driver_uar.index);
113 static void mthca_arbel_init_srq_context(struct mthca_dev *dev, argument
136 context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
140 static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq) argument
142 mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
147 static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd, argument
162 err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
199 int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, argument
208 if (attr->max_wr > dev
328 get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq) argument
339 mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) argument
379 struct mthca_dev *dev = to_mdev(ibsrq->device); local
408 struct mthca_dev *dev = to_mdev(ibsrq->device); local
441 mthca_srq_event(struct mthca_dev *dev, u32 srqn, enum ib_event_type event_type) argument
497 struct mthca_dev *dev = to_mdev(ibsrq->device); local
597 struct mthca_dev *dev = to_mdev(ibsrq->device); local
659 mthca_max_srq_sge(struct mthca_dev *dev) argument
684 mthca_init_srq_table(struct mthca_dev *dev) argument
708 mthca_cleanup_srq_table(struct mthca_dev *dev) argument
[all...]
mthca_av.c 94 enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port) argument
96 if (mthca_is_memfree(dev)) {
98 if (dev->limits.stat_rate_support == 0x3 && mthca_rate)
101 return memfree_rate_to_ib(mthca_rate, dev->rate[port - 1]);
103 return tavor_rate_to_ib(mthca_rate, dev->rate[port - 1]);
134 u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u8 port) argument
138 if (!static_rate || ib_rate_to_mult(static_rate) >= dev->rate[port - 1])
141 if (mthca_is_memfree(dev))
143 dev->rate[port - 1]);
147 if (!(dev
153 mthca_create_ah(struct mthca_dev *dev, struct mthca_pd *pd, struct ib_ah_attr *ah_attr, struct mthca_ah *ah) argument
240 mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah) argument
266 mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah, struct ib_ud_header *header) argument
295 struct mthca_dev *dev = to_mdev(ibah->device); local
324 mthca_init_av_table(struct mthca_dev *dev) argument
365 mthca_cleanup_av_table(struct mthca_dev *dev) argument
[all...]
mthca_cmd.h 244 int mthca_cmd_init(struct mthca_dev *dev);
245 void mthca_cmd_cleanup(struct mthca_dev *dev);
246 int mthca_cmd_use_events(struct mthca_dev *dev);
247 void mthca_cmd_use_polling(struct mthca_dev *dev);
248 void mthca_cmd_event(struct mthca_dev *dev, u16 token,
251 struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
253 void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox);
255 int mthca_SYS_EN(struct mthca_dev *dev, u8 *status);
256 int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status);
257 int mthca_MAP_FA(struct mthca_dev *dev, struc
[all...]
/barrelfish-2018-10-04/lib/virtio/devices/
virtio_block.c 18 #include <dev/virtio/virtio_blk_dev.h>
26 * \param dev the virtio block device
33 bool virtio_block_get_topology(struct virtio_device_blk *dev, argument
42 if (!virtio_device_has_feature(dev->vdev, VIRTIO_BLOCK_F_TOPOLOGY)) {
46 topo->alignment_offset = virtio_blk_topo_blocks_offset_aligned_rdf(&dev
48 topo->min_io_size = virtio_blk_topo_io_size_min_rdf(&dev->config_space);
49 topo->opt_io_size = virtio_blk_topo_io_size_opt_rdf(&dev->config_space);
50 topo->num_logic_per_phys = virtio_blk_topo_blocks_logic_per_phys_rdf(&dev
59 * \param dev the virtio block device
66 bool virtio_block_get_geometry(struct virtio_device_blk *dev, argument
91 virtio_block_config_read(struct virtio_device_blk *dev) argument
129 struct virtio_device_blk *dev = virtio_device_get_type_state(vdev); local
171 virtio_block_init_device(struct virtio_device_blk *dev, struct virtio_device_setup *setup) argument
197 virtio_block_init_device_with_cap(struct virtio_device_blk *dev, struct virtio_device_setup *setup, struct capref dev_cap) argument
[all...]
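
virtio_block_get_topology() above gates every config-space read on a negotiated feature bit and returns false rather than garbage if the device never offered VIRTIO_BLK_F_TOPOLOGY. A self-contained sketch of that feature-gated read; the config layout follows the virtio-blk topology fields, but the struct names and helper are illustrative, not the library API:

    #include <stdbool.h>
    #include <stdint.h>

    #define BLK_F_TOPOLOGY (1u << 10)   /* VIRTIO_BLK_F_TOPOLOGY feature bit */

    /* Stand-in for the relevant part of the virtio-blk config space. */
    struct blk_config {
        uint8_t  physical_block_exp;    /* log2(logical blocks per physical) */
        uint8_t  alignment_offset;
        uint16_t min_io_size;
        uint32_t opt_io_size;
    };

    struct blk_topology {
        uint8_t  alignment_offset;
        uint16_t min_io_size;
        uint32_t opt_io_size;
        uint32_t num_logic_per_phys;
    };

    /* Feature-gated config read: without the topology feature the fields in
     * the config space are meaningless, so the caller just gets 'false'. */
    static bool blk_get_topology(uint64_t features, const struct blk_config *cfg,
                                 struct blk_topology *topo)
    {
        if (!(features & BLK_F_TOPOLOGY)) {
            return false;
        }
        topo->alignment_offset   = cfg->alignment_offset;
        topo->min_io_size        = cfg->min_io_size;
        topo->opt_io_size        = cfg->opt_io_size;
        topo->num_logic_per_phys = 1u << cfg->physical_block_exp;
        return true;
    }
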
/barrelfish-2018-10-04/lib/devif/backends/net/mlx4/drivers/net/mlx4/
qp.c 89 static inline int is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp, argument
93 u32 pf_proxy_offset = dev->phys_caps.base_proxy_sqpn
94 + 8 * mlx4_master_func_num(dev);
97 *real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn
98 && qp->qpn <= dev->phys_caps.base_sqpn + 1;
103 static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, argument
132 /*struct mlx4_priv *priv = mlx4_priv(dev);*/
148 ret = mlx4_cmd(dev, 0, qp->qpn, 2, MLX4_CMD_2RST_QP,
150 /*if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR
152 && is_master_qp0(dev, q
210 mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar, int sqd_event, struct mlx4_qp *qp) argument
240 mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base, u8 flags) argument
294 __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn) argument
332 mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn) argument
369 mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp) argument
580 mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, struct mlx4_qp_context *context, struct mlx4_qp *qp, enum mlx4_qp_state *qp_state) argument
[all...]
/barrelfish-2018-10-04/lib/dma/ioat/
ioat_dma_device.c 12 #include <dev/ioat_dma_dev.h>
51 static errval_t device_init_ioat_v1(struct ioat_dma_device *dev) argument
54 dev->common.id);
58 static errval_t device_init_ioat_v2(struct ioat_dma_device *dev) argument
61 dev->common.id);
65 static errval_t device_init_ioat_v3(struct ioat_dma_device *dev) argument
69 IOATDEV_DEBUG("initialize Crystal Beach 3 DMA device\n", dev->common.id);
71 ioat_dma_dmacapability_t cap = ioat_dma_dmacapability_rd(&dev->device);
73 if (ioat_dma_cbver_minor_extract(dev->version) == 2) {
75 dev
177 ioat_dma_device_get_complsts_addr(struct ioat_dma_device *dev, struct dma_mem *mem) argument
202 struct dma_device *dev = arg; local
239 ioat_dma_device_irq_setup(struct ioat_dma_device *dev, dma_irq_t type) argument
360 ioat_dma_device_init(struct capref mmio, struct pci_addr *pci_addr, struct ioat_dma_device **dev) argument
448 ioat_dma_device_shutdown(struct ioat_dma_device *dev) argument
463 ioat_dma_device_acquire(struct ioat_dma_device **dev) argument
485 ioat_dma_device_release(struct ioat_dma_device *dev) argument
505 ioat_dma_device_intr_enable(struct ioat_dma_device *dev, dma_irq_t type, dma_irq_fn_t fn, void *arg) argument
519 ioat_dma_device_intr_disable(struct ioat_dma_device *dev) argument
530 ioat_dma_device_set_intr_delay(struct ioat_dma_device *dev, uint16_t usec) argument
551 ioat_dma_device_poll_channels(struct dma_device *dev) argument
[all...]
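
ioat_dma_device.c selects one of device_init_ioat_v1/v2/v3 based on the device's Crystal Beach version register. A minimal sketch of that dispatch-by-version shape; the errval_t stand-in and the error code are hypothetical:

    #include <stdint.h>

    typedef int errval_t;                    /* stand-in for Barrelfish errval_t */
    #define SYS_ERR_OK                 0
    #define DMA_ERR_DEVICE_UNSUPPORTED 1     /* hypothetical error code          */

    struct ioat_device { uint8_t cbver_major; };

    static errval_t init_v1(struct ioat_device *dev) { (void) dev; return SYS_ERR_OK; }
    static errval_t init_v2(struct ioat_device *dev) { (void) dev; return SYS_ERR_OK; }
    static errval_t init_v3(struct ioat_device *dev) { (void) dev; return SYS_ERR_OK; }

    /* Dispatch on the CBVER major version, the same shape as the
     * device_init_ioat_v1/v2/v3 selection in ioat_dma_device.c. */
    static errval_t ioat_device_init(struct ioat_device *dev)
    {
        switch (dev->cbver_major) {
        case 1:  return init_v1(dev);
        case 2:  return init_v2(dev);
        case 3:  return init_v3(dev);
        default: return DMA_ERR_DEVICE_UNSUPPORTED;
        }
    }
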
/barrelfish-2018-10-04/include/cpuid/
cpuid_spaces.h 21 static inline uint32_t cpuid_eax_read_32(cpuid_t *dev, size_t offset) argument
28 static inline uint32_t cpuid_ebx_read_32(cpuid_t *dev, size_t offset) argument
35 static inline uint32_t cpuid_ecx_read_32(cpuid_t *dev, size_t offset) argument
42 static inline uint32_t cpuid_edx_read_32(cpuid_t *dev, size_t offset) argument
49 static inline uint32_t cpuid_dcpa_read_32(cpuid_t *dev, size_t offset) argument
54 static inline uint32_t cpuid_dcpb_read_32(cpuid_t *dev, size_t offset) argument
59 static inline uint32_t cpuid_dcpc_read_32(cpuid_t *dev, size_t offset) argument
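
cpuid_spaces.h models CPUID as a Mackerel address space: the offset is in effect the leaf, and each accessor returns one of the four result registers. A user-space approximation using the compiler's <cpuid.h> helper (not the Barrelfish accessors themselves):

    #include <cpuid.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;
        char vendor[13];

        /* __get_cpuid() returns 0 if the requested leaf is unsupported. */
        if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx)) {
            return 1;
        }
        /* Leaf 0: EBX, EDX, ECX spell the vendor string, e.g. "GenuineIntel". */
        memcpy(vendor + 0, &ebx, 4);
        memcpy(vendor + 4, &edx, 4);
        memcpy(vendor + 8, &ecx, 4);
        vendor[12] = '\0';
        printf("max leaf %u, vendor %s\n", eax, vendor);
        return 0;
    }
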
/barrelfish-2018-10-04/usr/drivers/omap44xx/twl6030/
i2c.c 14 #include <dev/ti_i2c_dev.h>
32 lpaddr_t i2c_get_pbase(size_t dev) { argument
33 assert(dev < 4);
34 return i2c_pbase[dev];
68 ti_i2c_t *dev = &i2c[i]; local
80 ti_i2c_con_wr(dev, 0x0000);
83 ti_i2c_sysc_srst_wrf(dev, 0x1);
86 ti_i2c_con_en_wrf(dev, 0x1);
88 while (ti_i2c_syss_rdone_rdf(dev) == 0x0) {
93 ti_i2c_con_wr(dev,
144 ti_i2c_poll_stat(ti_i2c_t *dev, ti_i2c_irqstatus_t flags, ti_i2c_irqstatus_t *retflags, int32_t timeout) argument
181 ti_i2c_wait_for_free_bus(ti_i2c_t *dev, int32_t timeout) argument
193 ti_i2c_read(ti_i2c_t *dev, uint8_t *buf, uint16_t length) argument
293 ti_i2c_write(ti_i2c_t *dev, uint8_t *buf, uint16_t length) argument
428 ti_i2c_t *dev = &i2c[devid]; local
[all...]
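
The twl6030 i2c.c hits show the controller bring-up: disable via I2C_CON, request a soft reset through SYSC.SRST, re-enable, then poll SYSS.RDONE until the reset completes. A sketch of that reset-and-poll sequence, with hypothetical MMIO accessors standing in for the Mackerel ti_i2c_* calls and illustrative bit positions:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical MMIO accessors (the real code goes through Mackerel). */
    static inline void     reg_wr(volatile uint32_t *r, uint32_t v) { *r = v; }
    static inline uint32_t reg_rd(volatile uint32_t *r)             { return *r; }

    /* Soft-reset sequence mirroring the shape of the i2c.c init code:
     * disable the controller, request a soft reset, re-enable it, then poll
     * the reset-done flag.  Register layout here is illustrative only. */
    static bool i2c_soft_reset(volatile uint32_t *con, volatile uint32_t *sysc,
                               volatile uint32_t *syss, int max_polls)
    {
        reg_wr(con, 0x0000);                   /* I2C_CON = 0: disable        */
        reg_wr(sysc, reg_rd(sysc) | 0x2);      /* SYSC.SRST = 1: soft reset   */
        reg_wr(con, reg_rd(con) | (1u << 15)); /* CON.EN = 1: clocks running  */

        while (max_polls-- > 0) {              /* wait for SYSS.RDONE          */
            if (reg_rd(syss) & 0x1) {
                return true;
            }
        }
        return false;                          /* timed out                    */
    }
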
/barrelfish-2018-10-04/kernel/include/arch/x86_32/
cpuid_spaces.h 31 static inline uint32_t cpuid_eax_read_32(cpuid_t *dev, size_t offset) argument
38 static inline uint32_t cpuid_ebx_read_32(cpuid_t *dev, size_t offset) argument
56 static inline uint32_t cpuid_ecx_read_32(cpuid_t *dev, size_t offset) argument
63 static inline uint32_t cpuid_edx_read_32(cpuid_t *dev, size_t offset) argument
70 static inline uint32_t cpuid_dcpa_read_32(cpuid_t *dev, size_t offset) argument
75 static inline uint32_t cpuid_dcpb_read_32(cpuid_t *dev, size_t offset) argument
80 static inline uint32_t cpuid_dcpc_read_32(cpuid_t *dev, size_t offset) argument
85 static inline void cpuid_eax_write_32(cpuid_t *dev, size_t offset, argument
90 static inline void cpuid_ebx_write_32(cpuid_t *dev, size_t offset, argument
95 static inline void cpuid_ecx_write_32(cpuid_t *dev, size_ argument
100 cpuid_edx_write_32(cpuid_t *dev, size_t offset, uint32_t value) argument
105 cpuid_dcpa_write_32(cpuid_t *dev, size_t offset, uint32_t value) argument
110 cpuid_dcpb_write_32(cpuid_t *dev, size_t offset, uint32_t value) argument
115 cpuid_dcpc_write_32(cpuid_t *dev, size_t offset, uint32_t value) argument
[all...]
/barrelfish-2018-10-04/usr/vmkitmon/
pc16550d.c 43 pc16550d_mem_initialize(&u->dev, (mackerel_addr_t)u->regs);
51 pc16550d_mem_lsr_thre_wrf(&u->dev, 1);
52 pc16550d_mem_lsr_temt_wrf(&u->dev, 1);
70 if (pc16550d_mem_ier_rd(&u->dev).elsi &&
71 pc16550d_mem_lsr_rd(&u->dev).oe) {
73 pc16550d_mem_iir_iid_wrf(&u->dev, pc16550d_mem_irq_rls);
76 else if (pc16550d_mem_ier_rd(&u->dev).erbfi &&
77 pc16550d_mem_lsr_rd(&u->dev).dr) {
78 pc16550d_mem_iir_iid_wrf(&u->dev, pc16550d_mem_irq_rda);
82 else if (pc16550d_mem_ier_rd(&u->dev)
[all...]
/barrelfish-2018-10-04/lib/acpica/tests/templates/
templates.sh 19 $ASL_COMPILER -T ALL > /dev/null 2>&1
20 $ASL_COMPILER *.asl > /dev/null 2>&1
26 $ASL_COMPILER -vt -T ALL > /dev/null 2>&1
30 $ASL_COMPILER *.asl > /dev/null 2>&1
34 $ASL_COMPILER -d *.aml > /dev/null 2>&1
/barrelfish-2018-10-04/include/virtio/devices/
virtio_block.h 13 #include <dev/virtio/virtio_blk_dev.h>
194 * \param dev the block device to read the configuration space.
198 errval_t virtio_block_config_read(struct virtio_device_blk *dev);
203 * \param dev the virtio block device
207 static inline uint32_t virtio_block_get_block_size(struct virtio_device_blk *dev) argument
209 if (!virtio_device_has_feature(dev->vdev, VIRTIO_BLOCK_F_BLK_SIZE)) {
213 return virtio_blk_block_size_size_rdf(&dev->config_space);
219 * \param dev the virtio block device
223 static inline uint64_t virtio_block_get_capacity(struct virtio_device_blk *dev) argument
225 return virtio_blk_capacity_sectors_rdf(&dev
236 virtio_block_get_segment_num(struct virtio_device_blk *dev) argument
252 virtio_block_get_segment_size(struct virtio_device_blk *dev) argument
[all...]
/barrelfish-2018-10-04/lib/dma/include/ioat/
ioat_dma_device_internal.h 20 #include <dev/ioat_dma_dev.h>
39 * \param dev IOAT DMA device
42 void ioat_dma_device_get_complsts_addr(struct ioat_dma_device *dev,
49 * \param dev IOAT DMA device
52 errval_t ioat_dma_device_irq_setup(struct ioat_dma_device *dev,
/barrelfish-2018-10-04/lib/virtio/backends/
virtio_mmio.h 15 #include <dev/virtio/virtio_mmio_dev.h>
21 struct virtio_device dev; member in struct:virtio_device_mmio
40 * \param dev returns a pointer to the newly allocated device structure
45 errval_t virtio_device_mmio_init(struct virtio_device **dev,
51 * \param dev returns a pointer to the newly allocated device structure
56 errval_t virtio_device_mmio_init_host(struct virtio_device **dev,
/barrelfish-2018-10-04/usr/drivers/e10k/
e10k.h 13 #include <dev/e10k_dev.h>
/barrelfish-2018-10-04/lib/devif/backends/net/mlx4/include/linux/
pci.h 42 #include <dev/pci/pcivar.h>
43 #include <dev/pci/pcireg.h>
44 #include <dev/pci/pci_private.h>
89 #define to_pci_dev(n) container_of(n, struct pci_dev, dev)
131 int (*probe)(struct pci_dev *dev, const struct pci_device_id *id);
132 void (*remove)(struct pci_dev *dev);
133 int (*suspend)(struct pci_dev *dev, pm_message_t state); /*Device suspended*/
134 int (*resume)(struct pci_dev *dev); /* Device woken up*/
147 /*struct device dev;
164 dinfo = device_get_ivars(pdev->dev
[all...]
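
pci.h is the Linux-compatibility shim the mlx4 driver builds against; the interesting part of the hits above is the struct pci_driver ops table with probe/remove hooks. A trimmed sketch of that pattern with placeholder types and IDs (none of them taken from the real shim):

    #include <stdio.h>

    /* Trimmed-down stand-ins for the Linux-compat types in pci.h. */
    struct pci_dev       { unsigned vendor, device; };
    struct pci_device_id { unsigned vendor, device; };

    struct pci_driver {
        const char *name;
        const struct pci_device_id *id_table;
        int  (*probe)(struct pci_dev *dev, const struct pci_device_id *id);
        void (*remove)(struct pci_dev *dev);
    };

    static int my_probe(struct pci_dev *dev, const struct pci_device_id *id)
    {
        (void) id;
        printf("probing %04x:%04x\n", dev->vendor, dev->device);
        return 0;
    }

    static void my_remove(struct pci_dev *dev) { (void) dev; }

    /* Placeholder IDs; the bus core matches these against discovered devices
     * and calls probe() for each match -- the ops-table pattern the mlx4
     * driver relies on through this compatibility header. */
    static const struct pci_device_id my_ids[] = { { 0x1234, 0x5678 }, { 0, 0 } };
    static struct pci_driver my_driver = {
        .name = "example", .id_table = my_ids,
        .probe = my_probe, .remove = my_remove,
    };

    int main(void)
    {
        /* Simulate the bus core matching the first id_table entry. */
        struct pci_dev dev = { my_ids[0].vendor, my_ids[0].device };
        my_driver.probe(&dev, &my_ids[0]);
        my_driver.remove(&dev);
        return 0;
    }
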
