Searched refs:type (Results 101 - 125 of 13790) sorted by last modified time

/linux-master/drivers/net/ethernet/broadcom/bnxt/
bnxt.c
432 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
2152 enum pkt_hash_types type; local
2155 type = bnxt_rss_ext_op(bp, rxcmp);
2163 type = PKT_HASH_TYPE_L3;
2165 type = PKT_HASH_TYPE_L4;
2167 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
2444 u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1); local
2449 switch (type) {
2463 netdev_err(bp->dev, "Unknown Thermal threshold type event\n");
2478 bp->thermal_threshold_type = type;
3134 u16 type; local
3990 bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) argument
4065 u32 type; local
4856 u8 type = fltr->type, flags = fltr->flags; local
6789 u32 type; local
6939 u32 type; local
7972 u16 type; local
7999 u16 type; local
8506 u16 type; local
8529 u16 type; local
14102 bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) argument
14121 bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) argument
[all...]
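
The bnxt.c hits around line 2152 show the usual RX-hash hand-off: the driver decides whether the hardware hash covers only the IP header (PKT_HASH_TYPE_L3) or the full 4-tuple (PKT_HASH_TYPE_L4) and reports it with skb_set_hash(). A minimal kernel-context sketch of that step, not the driver's actual logic; hash_covers_ports is a hypothetical flag standing in for bnxt's RSS-profile check of the RX completion:

#include <linux/skbuff.h>

/*
 * Report the hardware RSS hash to the stack, tagging how much of the
 * flow it covers.  The completion delivers the hash little-endian.
 */
static inline void report_rx_hash(struct sk_buff *skb, __le32 hw_hash,
				  bool hash_covers_ports)
{
	enum pkt_hash_types type = hash_covers_ports ? PKT_HASH_TYPE_L4
						     : PKT_HASH_TYPE_L3;

	skb_set_hash(skb, le32_to_cpu(hw_hash), type);
}
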
/linux-master/drivers/net/dsa/mv88e6xxx/
chip.c
1091 switch (s->type) {
1129 if (stat->type & types) {
1210 if (stat->type & types)
1268 if (!(stat->type & (STATS_TYPE_BANK0 | STATS_TYPE_PORT)))
1280 if (!(stat->type & STATS_TYPE_BANK0))
1292 if (!(stat->type & (STATS_TYPE_BANK0 | STATS_TYPE_BANK1)))
1305 if (!(stat->type & (STATS_TYPE_BANK0 | STATS_TYPE_BANK1)))
1557 if (dp->type == DSA_PORT_TYPE_CPU || dp->type == DSA_PORT_TYPE_DSA)
1573 if (other_dp->type
[all...]
/linux-master/drivers/mtd/nand/raw/
qcom_nandc.c
242 * This data type corresponds to the BAM transaction which will be used for all
282 * This data type corresponds to the nand dma descriptor
548 * This data type corresponds to the NAND controller properties which varies
933 * to be formed in command element type so this function uses the command
2606 switch (instr->type) {
2983 switch (instr->type) {
diskonchip.c
516 switch (instr->type) {
603 switch (instr->type) {
/linux-master/drivers/mtd/nand/raw/brcmnand/
brcmnand.c
1791 enum brcmnand_llop_type type, u32 data,
1799 switch (type) {
2422 last_op = ((i == (op->ninstrs - 1)) && (instr->type != NAND_OP_WAITRDY_INSTR)) ||
2423 ((i == (op->ninstrs - 2)) && (op->instrs[i + 1].type == NAND_OP_WAITRDY_INSTR));
2425 switch (instr->type) {
2457 dev_err(ctrl->dev, "unsupported instruction type: %d\n",
2458 instr->type);
2469 op->instrs[0].type == NAND_OP_CMD_INSTR &&
2471 op->instrs[1].type == NAND_OP_DATA_IN_INSTR)
2480 op->instrs[0].type
1790 brcmnand_low_level_op(struct brcmnand_host *host, enum brcmnand_llop_type type, u32 data, bool last_op) argument
[all...]
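
The NAND drivers above (qcom_nandc.c, diskonchip.c, brcmnand.c) all dispatch on instr->type inside the raw-NAND ->exec_op() hook: walk op->instrs and handle each instruction kind. A skeleton of that loop, assuming hypothetical controller helpers (my_send_cmd(), my_send_addrs(), my_read_data(), my_write_data(), my_wait_ready()) in place of the real register programming:

#include <linux/errno.h>
#include <linux/mtd/rawnand.h>

/* Hypothetical controller helpers -- stand-ins for register programming. */
void my_send_cmd(struct nand_chip *chip, u8 opcode);
void my_send_addrs(struct nand_chip *chip, const u8 *addrs, unsigned int naddrs);
void my_read_data(struct nand_chip *chip, void *buf, unsigned int len);
void my_write_data(struct nand_chip *chip, const void *buf, unsigned int len);
void my_wait_ready(struct nand_chip *chip, unsigned int timeout_ms);

static int my_exec_op(struct nand_chip *chip, const struct nand_operation *op,
		      bool check_only)
{
	unsigned int i;

	if (check_only)
		return 0;	/* every instruction type below is supported */

	for (i = 0; i < op->ninstrs; i++) {
		const struct nand_op_instr *instr = &op->instrs[i];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			my_send_cmd(chip, instr->ctx.cmd.opcode);
			break;
		case NAND_OP_ADDR_INSTR:
			my_send_addrs(chip, instr->ctx.addr.addrs,
				      instr->ctx.addr.naddrs);
			break;
		case NAND_OP_DATA_IN_INSTR:
			my_read_data(chip, instr->ctx.data.buf.in,
				     instr->ctx.data.len);
			break;
		case NAND_OP_DATA_OUT_INSTR:
			my_write_data(chip, instr->ctx.data.buf.out,
				      instr->ctx.data.len);
			break;
		case NAND_OP_WAITRDY_INSTR:
			my_wait_ready(chip, instr->ctx.waitrdy.timeout_ms);
			break;
		default:
			return -EOPNOTSUPP;	/* unsupported instruction type */
		}
	}

	return 0;
}
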
/linux-master/drivers/mtd/
mtdcore.c
140 char *type; local
142 switch (mtd->type) {
144 type = "absent";
147 type = "ram";
150 type = "rom";
153 type = "nor";
156 type = "nand";
159 type = "dataflash";
162 type = "ubi";
165 type
173 MTD_DEVICE_ATTR_RO(type); variable
[all...]
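
The mtdcore.c hit is the sysfs "type" attribute: a switch statement maps the numeric mtd->type to a short, stable string ("absent", "ram", "rom", "nor", "nand", "dataflash", "ubi") that user space reads from /sys/class/mtd/mtdX/type. A self-contained user-space sketch of the same enum-to-string mapping; the enum and function names here are illustrative, not the kernel's MTD constants:

#include <stdio.h>

enum flash_type { FL_ABSENT, FL_RAM, FL_ROM, FL_NOR, FL_NAND, FL_DATAFLASH, FL_UBI };

/* Map a device type to the short name the sysfs attribute reports. */
static const char *flash_type_name(enum flash_type t)
{
	switch (t) {
	case FL_ABSENT:    return "absent";
	case FL_RAM:       return "ram";
	case FL_ROM:       return "rom";
	case FL_NOR:       return "nor";
	case FL_NAND:      return "nand";
	case FL_DATAFLASH: return "dataflash";
	case FL_UBI:       return "ubi";
	}
	return "unknown";
}

int main(void)
{
	printf("%s\n", flash_type_name(FL_NAND));	/* prints "nand" */
	return 0;
}
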
/linux-master/drivers/md/
dm.c
430 if (!ti->type->prepare_ioctl)
436 r = ti->type->prepare_ioctl(ti, bdev);
1110 dm_endio_fn endio = ti->type->end_io;
1246 if (!ti->type->direct_access)
1252 ret = ti->type->direct_access(ti, pgoff, nr_pages, mode, kaddr, pfn);
1273 if (WARN_ON(!ti->type->dax_zero_page_range)) {
1280 ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
1297 if (!ti || !ti->type->dax_recovery_write)
1300 ret = ti->type->dax_recovery_write(ti, pgoff, addr, bytes, i);
1443 if (likely(ti->type
2337 dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) argument
2358 enum dm_queue_mode type = dm_table_get_type(t); local
3183 enum pr_type type; member in struct:dm_pr
3302 dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, u32 flags) argument
3345 dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) argument
3380 dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, enum pr_type type, bool abort) argument
[all...]
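
The dm.c matches all follow one pattern: a target's behaviour lives in an ops table reached through ti->type, and every optional hook (prepare_ioctl, end_io, direct_access, dax_zero_page_range, ...) is NULL-checked before it is called. A self-contained sketch of that optional-callback dispatch; the names are illustrative, not device-mapper's API:

#include <errno.h>
#include <stdio.h>

struct target;

/* Per-type ops table; any hook may be left NULL if unsupported. */
struct target_type_ops {
	int (*prepare_ioctl)(struct target *t);
	int (*end_io)(struct target *t, int error);
};

struct target {
	const struct target_type_ops *type;
};

static int target_ioctl(struct target *t)
{
	if (!t->type->prepare_ioctl)
		return -ENOTTY;		/* hook not implemented by this type */
	return t->type->prepare_ioctl(t);
}

static int linear_prepare_ioctl(struct target *t)
{
	(void)t;
	return 0;
}

static const struct target_type_ops linear_ops = {
	.prepare_ioctl = linear_prepare_ioctl,
	/* .end_io intentionally NULL */
};

int main(void)
{
	struct target t = { .type = &linear_ops };

	printf("ioctl prep: %d\n", target_ioctl(&t));
	return 0;
}
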
/linux-master/drivers/gpu/drm/xe/
xe_guc_ct.c
902 u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]); local
916 if (type == GUC_HXG_TYPE_RESPONSE_FAILURE)
923 type, fence);
938 if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) {
942 } else if (type == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
966 u32 origin, type; local
981 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
982 switch (type) {
993 "G2H channel broken on read, type=%d, reset required\n",
994 type);
[all...]
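
xe_guc_ct.c classifies each GuC-to-host message by pulling a type field out of the first header dword with FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) and switching on it. A self-contained sketch of the same extract-and-dispatch step; the mask layout and enum values are illustrative rather than the real GUC_HXG definitions, and msg_type() is a simplified stand-in for the kernel's FIELD_GET() from <linux/bitfield.h>:

#include <stdint.h>
#include <stdio.h>

/* Illustrative header layout: bits 30..28 carry the message type. */
#define MSG_TYPE_MASK  0x70000000u
#define MSG_TYPE_SHIFT 28

enum msg_type { MSG_REQUEST = 0, MSG_RESPONSE_FAILURE = 2, MSG_NO_RESPONSE_RETRY = 3, MSG_RESPONSE_SUCCESS = 6 };

static unsigned int msg_type(uint32_t hdr)
{
	/* What FIELD_GET(mask, hdr) does: mask, then shift down. */
	return (hdr & MSG_TYPE_MASK) >> MSG_TYPE_SHIFT;
}

static void handle_msg(const uint32_t *hxg)
{
	switch (msg_type(hxg[0])) {
	case MSG_RESPONSE_SUCCESS:
		puts("response: success");
		break;
	case MSG_RESPONSE_FAILURE:
		puts("response: failure");
		break;
	case MSG_NO_RESPONSE_RETRY:
		puts("retry requested");
		break;
	default:
		puts("unexpected message type");	/* channel considered broken */
		break;
	}
}

int main(void)
{
	uint32_t msg[1] = { (uint32_t)MSG_RESPONSE_SUCCESS << MSG_TYPE_SHIFT };

	handle_msg(msg);
	return 0;
}
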
xe_huc.c
78 huc->fw.type = XE_UC_FW_TYPE_HUC;
230 bool xe_huc_is_authenticated(struct xe_huc *huc, enum xe_huc_auth_types type) argument
234 return xe_mmio_read32(gt, huc_auth_modes[type].reg) & huc_auth_modes[type].val;
237 int xe_huc_auth(struct xe_huc *huc, enum xe_huc_auth_types type) argument
248 if (xe_huc_is_authenticated(huc, type)) {
256 switch (type) {
265 XE_WARN_ON(type);
270 huc_auth_modes[type].name, ret);
274 ret = xe_mmio_wait32(gt, huc_auth_modes[type]
[all...]
/linux-master/drivers/gpu/drm/amd/pm/swsmu/smu13/
smu_v13_0_6_ppt.c
116 enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count);
118 enum amdgpu_mca_error_type type, struct mca_bank_entry *entry);
891 * No need to define another data type for the same.
992 enum smu_clk_type type, char *buf)
1011 switch (type) {
1194 enum smu_clk_type type, uint32_t mask)
1204 switch (type) {
1717 enum PP_OD_DPM_TABLE_COMMAND type,
1732 switch (type) {
2518 static int smu_v13_0_6_get_valid_mca_count(struct smu_context *smu, enum amdgpu_mca_error_type type, uint32_ argument
991 smu_v13_0_6_print_clk_levels(struct smu_context *smu, enum smu_clk_type type, char *buf) argument
1193 smu_v13_0_6_force_clk_levels(struct smu_context *smu, enum smu_clk_type type, uint32_t mask) argument
1716 smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) argument
2546 __smu_v13_0_6_mca_dump_bank(struct smu_context *smu, enum amdgpu_mca_error_type type, int idx, int offset, uint32_t *val) argument
2567 smu_v13_0_6_mca_dump_bank(struct smu_context *smu, enum amdgpu_mca_error_type type, int idx, int offset, uint32_t *val, int count) argument
2614 mca_bank_read_reg(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, int idx, int reg_idx, uint64_t *val) argument
2636 mca_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, int idx, struct mca_bank_entry *entry) argument
2678 mca_umc_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev, enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count) argument
2702 mca_pcs_xgmi_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev, enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count) argument
2736 mca_gfx_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev, enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count) argument
2760 mca_smu_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev, enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count) argument
2785 mca_gfx_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev, enum amdgpu_mca_error_type type, struct mca_bank_entry *entry) argument
2804 mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev, enum amdgpu_mca_error_type type, struct mca_bank_entry *entry) argument
2875 mca_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count) argument
2893 mca_bank_is_valid(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras, enum amdgpu_mca_error_type type, struct mca_bank_entry *entry) argument
2905 __mca_smu_get_ras_mca_set(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras, enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set) argument
2937 mca_smu_get_ras_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set) argument
2954 mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count) argument
2974 mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, int idx, struct mca_bank_entry *entry) argument
2980 mca_smu_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count) argument
3003 smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_error_type type, u32 *count) argument
3031 aca_smu_get_valid_aca_count(struct amdgpu_device *adev, enum aca_error_type type, u32 *count) argument
3050 __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_type type, int idx, int offset, u32 *val) argument
3071 smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_type type, int idx, int offset, u32 *val, int count) argument
3088 aca_bank_read_reg(struct amdgpu_device *adev, enum aca_error_type type, int idx, int reg_idx, u64 *val) argument
3110 aca_smu_get_valid_aca_bank(struct amdgpu_device *adev, enum aca_error_type type, int idx, struct aca_bank *bank) argument
[all...]
/linux-master/drivers/gpu/drm/amd/pm/
amdgpu_pm.c
757 uint32_t type; local
768 type = PP_OD_EDIT_SCLK_VDDC_TABLE;
770 type = PP_OD_EDIT_CCLK_VDDC_TABLE;
772 type = PP_OD_EDIT_MCLK_VDDC_TABLE;
774 type = PP_OD_RESTORE_DEFAULT_TABLE;
776 type = PP_OD_COMMIT_DPM_TABLE;
778 type = PP_OD_EDIT_VDDC_CURVE;
780 type = PP_OD_EDIT_VDDGFX_OFFSET;
789 if ((type == PP_OD_EDIT_VDDC_CURVE) ||
790 (type
1016 amdgpu_get_pp_dpm_clock(struct device *dev, enum pp_clock_type type, char *buf) argument
1084 amdgpu_set_pp_dpm_clock(struct device *dev, enum pp_clock_type type, const char *buf, size_t count) argument
3608 parse_input_od_command_lines(const char *buf, size_t count, u32 *type, long *params, uint32_t *num_of_params) argument
[all...]
/linux-master/drivers/gpu/drm/amd/display/amdgpu_dm/
amdgpu_dm.c
724 if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
772 if (notify->type == DMUB_NOTIFICATION_HPD)
774 else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
777 DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
778 notify->type, link_index);
787 if (notify->type == DMUB_NOTIFICATION_HPD)
789 else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
797 * @type: Type of dmub notification
807 enum dmub_notification_type type,
811 if (callback != NULL && type < ARRAY_SIZ
806 register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type, dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload) argument
[all...]
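
amdgpu_dm.c dispatches DMUB notifications through a table of handlers indexed by notification type, and registration bounds-checks the type against the table size before storing the callback. A self-contained sketch of that register/dispatch pair; the names are illustrative, not the amdgpu API:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum notify_type { NOTIFY_AUX_REPLY, NOTIFY_HPD, NOTIFY_HPD_IRQ, NOTIFY_TYPE_MAX };

typedef void (*notify_cb_t)(int link_index);

static notify_cb_t callbacks[NOTIFY_TYPE_MAX];

/* Register a handler for one notification type; reject out-of-range types. */
static bool register_notify_callback(enum notify_type type, notify_cb_t cb)
{
	if (cb == NULL || type >= NOTIFY_TYPE_MAX)
		return false;
	callbacks[type] = cb;
	return true;
}

static void dispatch_notify(enum notify_type type, int link_index)
{
	if (type < NOTIFY_TYPE_MAX && callbacks[type])
		callbacks[type](link_index);
	else
		printf("unknown notification type %d, link_index=%d\n", type, link_index);
}

static void hpd_handler(int link_index)
{
	printf("HPD on link %d\n", link_index);
}

int main(void)
{
	register_notify_callback(NOTIFY_HPD, hpd_handler);
	dispatch_notify(NOTIFY_HPD, 0);
	dispatch_notify(NOTIFY_HPD_IRQ, 1);	/* no handler registered */
	return 0;
}
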
/linux-master/drivers/gpu/drm/amd/amdkfd/
kfd_svm.c
575 bp.type = ttm_bo_type_device;
695 switch (attrs[i].type) {
717 pr_debug("unknown attr type 0x%x\n", attrs[i].type);
743 switch (attrs[i].type) {
758 if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
761 } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
794 switch (attrs[i].type) {
809 if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
813 } else if (attrs[i].type
[all...]
kfd_process.c
152 if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
153 (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
217 if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
218 (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
384 else if (!strcmp(attr->name, "type"))
385 return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
449 .name = "type",
kfd_migrate.c
1039 pgmap->type = MEMORY_DEVICE_COHERENT;
1046 pgmap->type = MEMORY_DEVICE_PRIVATE;
1059 if (pgmap->type == MEMORY_DEVICE_PRIVATE)
1062 pgmap->type = 0;
/linux-master/drivers/gpu/drm/amd/amdgpu/
vpe_v6_1.c
310 unsigned int type,
308 vpe_v6_1_set_trap_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned int type, enum amdgpu_interrupt_state state) argument
sdma_v5_2.c
1378 unsigned type,
1382 u32 reg_offset = sdma_v5_2_get_reg_offset(adev, type, mmSDMA0_CNTL);
1683 .type = AMDGPU_RING_TYPE_SDMA,
1839 .type = AMD_IP_BLOCK_TYPE_SDMA,
1376 sdma_v5_2_set_trap_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, enum amdgpu_interrupt_state state) argument
sdma_v4_4_2.c
1504 unsigned type,
1509 sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL);
1512 WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
1600 unsigned type,
1605 sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL);
1608 WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
1812 .type = AMDGPU_RING_TYPE_SDMA,
1843 .type = AMDGPU_RING_TYPE_SDMA,
2037 .type = AMD_IP_BLOCK_TYPE_SDMA,
2184 struct aca_bank *bank, enum aca_error_type type,
1502 sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, enum amdgpu_interrupt_state state) argument
1598 sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, enum amdgpu_interrupt_state state) argument
2183 sdma_v4_4_2_aca_bank_generate_report(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type, struct aca_bank_report *report, void *data) argument
2210 sdma_v4_4_2_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type, void *data) argument
[all...]
gfx_v9_0.c
791 uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
821 uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
849 uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
1769 /* type 1 wave data */
1992 /* type-2 packets are deprecated on MEC, use type-3 instead */
3235 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
3370 if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
5114 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
5332 int usepfp = (ring->funcs->type
5771 gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, enum amdgpu_interrupt_state state) argument
5790 gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, enum amdgpu_interrupt_state state) argument
5817 gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, enum amdgpu_interrupt_state state) argument
5848 gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, enum amdgpu_interrupt_state state) argument
[all...]
gfx_v11_0.c
167 switch (ring->funcs->type) {
209 uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
241 uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
352 if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
389 ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
814 /* type 3 wave data */
988 /* type-2 packets are deprecated on MEC, use type-3 instead */
4346 * loaded firstly, so in direct type, it has to load smc ucode
5053 && ((ring->funcs->type
5906 gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, enum amdgpu_interrupt_state state) argument
5991 gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, enum amdgpu_interrupt_state state) argument
6010 gfx_v11_0_set_priv_inst_fault_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, enum amdgpu_interrupt_state state) argument
[all...]
gfx_v10_0.c
3523 switch (ring->funcs->type) {
3562 uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
3594 uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
4288 /* type 2 wave data */
4488 /* type-2 packets are deprecated on MEC, use type-3 instead */
7133 * loaded firstly, so in direct type, it has to load smc ucode
8307 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
8446 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
8475 if (ring->funcs->type
8885 gfx_v10_0_set_eop_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned int type, enum amdgpu_interrupt_state state) argument
8982 gfx_v10_0_set_priv_reg_fault_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned int type, enum amdgpu_interrupt_state state) argument
9001 gfx_v10_0_set_priv_inst_fault_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned int type, enum amdgpu_interrupt_state state) argument
9072 gfx_v10_0_kiq_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned int type, enum amdgpu_interrupt_state state) argument
[all...]
amdgpu_vpe.c
863 .type = AMDGPU_RING_TYPE_VPE,
916 .type = AMD_IP_BLOCK_TYPE_VPE,
amdgpu_umsch_mm.c
546 .type = AMDGPU_RING_TYPE_UMSCH_MM,
884 .type = AMD_IP_BLOCK_TYPE_UMSCH_MM,
amdgpu_object.c
265 bp.type = ttm_bo_type_kernel;
540 .interruptible = (bp->type != ttm_bo_type_kernel),
544 .allow_res_evict = bp->type != ttm_bo_type_kernel,
581 if (bp->type != ttm_bo_type_kernel &&
607 if (bp->type == ttm_bo_type_kernel)
615 r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, bp->type,
647 if (bp->type == ttm_bo_type_device)
1124 BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
1146 BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
1173 BUG_ON(bo->tbo.type
[all...]
amdgpu_mes.c
700 "queue type=%d, doorbell=0x%llx\n"
790 queue_input.queue_type = ring->funcs->type;
977 props->queue_type = ring->funcs->type;
1009 switch (ring->funcs->type) {

Completed in 544 milliseconds
