Searched refs:role (Results 1 - 25 of 210) sorted by path


/linux-master/drivers/media/tuners/
tda18271-common.c 720 (state->role == TDA18271_MASTER) ? 'M' : 'S',
tda18271-priv.h 98 enum tda18271_role role; member in struct:tda18271_priv
tda18271.h 82 enum tda18271_role role; member in struct:tda18271_config
/linux-master/drivers/scsi/bfa/
bfa_fcbuild.h 165 u16 ox_id, enum bfa_lport_role role);
217 enum bfa_lport_role role);
bfi_ms.h 594 u8 role; member in struct:bfi_itn_create_req_s
/linux-master/drivers/scsi/csiostor/
csio_attr.c 55 * If remote port is Initiator OR Target OR both, change the role appropriately.
72 if (rn->role & CSIO_RNFR_INITIATOR || rn->role & CSIO_RNFR_TARGET) {
98 if (rn->role & CSIO_RNFR_INITIATOR)
100 if (rn->role & CSIO_RNFR_TARGET)
108 csio_ln_dbg(ln, "Remote port x%x role 0x%x registered\n",
126 rn->role &= ~(CSIO_RNFR_INITIATOR | CSIO_RNFR_TARGET);
csio_rnode.h 81 /* Defines for rnode role */
105 uint32_t role; /* Fabric/Target/ member in struct:csio_rnode
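
The csio_attr.c hits above treat rn->role as a bitmask: the initiator and target bits are tested, set, and cleared independently (lines 72, 98-100, 126). Below is a minimal standalone sketch of that bitmask pattern; the flag names and values are assumptions chosen for the illustration, not the driver's CSIO_RNFR_* definitions.

/*
 * Simplified, standalone illustration of the role-bitmask pattern used by
 * the csiostor hits above.  Flag values are assumed for the sketch.
 */
#include <stdio.h>
#include <stdint.h>

#define RNFR_INITIATOR	(1u << 0)	/* remote node acts as an initiator */
#define RNFR_TARGET	(1u << 1)	/* remote node acts as a target */

static void report_role(uint32_t role)
{
	if (role & RNFR_INITIATOR)
		printf("initiator ");
	if (role & RNFR_TARGET)
		printf("target ");
	if (!(role & (RNFR_INITIATOR | RNFR_TARGET)))
		printf("none ");
	printf("(role mask 0x%x)\n", (unsigned)role);
}

int main(void)
{
	uint32_t role = RNFR_INITIATOR | RNFR_TARGET;

	report_role(role);
	role &= ~(RNFR_INITIATOR | RNFR_TARGET);	/* unregister: clear both bits */
	report_role(role);
	return 0;
}
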
/linux-master/drivers/usb/roles/
Makefile 5 obj-$(CONFIG_USB_ROLES_INTEL_XHCI) += intel-xhci-usb-role-switch.o
/linux-master/arch/arm/mach-omap2/
omap_device.c 103 * form <dev-id=dev_name, con-id=role> if it does not exist already.
105 * This allows drivers to get a pointer to its optional clocks based on its role
106 * by calling clk_get(<dev*>, <role>).
119 _add_clkdev(od, oh->opt_clks[i].role, oh->opt_clks[i].clk);
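
The omap_device.c comment quoted above says each optional clock is registered as a clkdev entry whose con_id is the clock's role, so a driver can fetch it with clk_get(dev, role). A minimal consumer-side sketch follows; it assumes a kernel build context, picks "dbck" (one of the role strings seen in the data files below) as the lookup key, and abbreviates error handling.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Sketch only: fetch an optional clock by its role name ("dbck" here). */
static int example_enable_dbck(struct device *dev)
{
	struct clk *dbck;
	int ret;

	dbck = clk_get(dev, "dbck");	/* con_id is the optional-clock role */
	if (IS_ERR(dbck))
		return PTR_ERR(dbck);

	ret = clk_prepare_enable(dbck);
	if (ret) {
		clk_put(dbck);
		return ret;
	}

	/* ... use the device while the debounce clock is running ... */

	clk_disable_unprepare(dbck);
	clk_put(dbck);
	return 0;
}
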
omap_hwmod.c 948 pr_debug("omap_hwmod: enable %s:%s\n", oc->role,
963 pr_debug("omap_hwmod: disable %s:%s\n", oc->role,
omap_hwmod.h 171 * @role: "sys", "32k", "tv", etc -- for use in clk_get()
179 const char *role; member in struct:omap_hwmod_opt_clk
omap_hwmod_2420_data.c 151 { .role = "pad_fck", .clk = "mcbsp_clks" },
152 { .role = "prcm_fck", .clk = "func_96m_ck" },
omap_hwmod_2430_data.c 208 { .role = "pad_fck", .clk = "mcbsp_clks" },
209 { .role = "prcm_fck", .clk = "func_96m_ck" },
311 { .role = "dbck", .clk = "mmchsdb1_fck" },
337 { .role = "dbck", .clk = "mmchsdb2_fck" },
omap_hwmod_2xxx_ipblock_data.c 411 { .role = "tv_clk", .clk = "dss_54m_fck" },
412 { .role = "sys_clk", .clk = "dss2_fck" },
445 { .role = "ick", .clk = "dss_ick" },
omap_hwmod_3xxx_data.c 409 * UART4 is extremely unclear and opaque; it is unclear what the role
415 { .role = "softreset_uart1_fck", .clk = "uart1_fck" },
446 { .role = "sys_clk", .clk = "dss2_alwon_fck" },
447 { .role = "tv_clk", .clk = "dss_tv_fck" },
449 { .role = "tv_dac_clk", .clk = "dss_96m_fck" },
541 { .role = "sys_clk", .clk = "dss2_alwon_fck" },
559 { .role = "ick", .clk = "dss_ick" },
578 { .role = "tv_dac_clk", .clk = "dss_96m_fck" },
663 { .role = "dbclk", .clk = "gpio1_dbck", },
684 { .role
[all...]
omap_hwmod_81xx_data.c 474 { .role = "dbclk", .clk = "sysclk18_ck" },
500 { .role = "dbclk", .clk = "sysclk18_ck" },
526 { .role = "dbclk", .clk = "sysclk18_ck" },
552 { .role = "dbclk", .clk = "sysclk18_ck" },
915 { .role = "dbck", .clk = "sysclk18_ck", },
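
The omap_hwmod data hits above are all entries in per-module optional-clock tables pairing a role string with a real clock name; omap_device.c line 119 walks such a table and registers one clkdev alias per pair. Below is a standalone sketch of that table shape; the struct and entry are illustrative only, not the kernel's struct omap_hwmod_opt_clk.

#include <stdio.h>

/* Toy version of a role/clock pair, mirroring the tables quoted above. */
struct opt_clk {
	const char *role;	/* con_id a driver passes to clk_get() */
	const char *clk;	/* name of the underlying clock */
};

static const struct opt_clk gpio1_opt_clks[] = {
	{ .role = "dbclk", .clk = "gpio1_dbck" },
};

int main(void)
{
	for (size_t i = 0; i < sizeof(gpio1_opt_clks) / sizeof(gpio1_opt_clks[0]); i++)
		printf("register alias: con_id=%s -> clk=%s\n",
		       gpio1_opt_clks[i].role, gpio1_opt_clks[i].clk);
	return 0;
}
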
/linux-master/arch/x86/include/asm/
kvm_host.h 309 * cannot be reused. The ability to reuse a SP is tracked by its role, which
312 * is the number of bits that are used to compute the role.
365 * including on nested transitions, if nothing in the full role changes then
369 * The properties that are tracked in the extended role but not the page role
371 * or (b) are indirectly reflected in the shadow page's role. For example,
374 * CR0.PG, CR4.PAE, and CR4.PSE are indirectly reflected in role.level.
376 * Note, SMEP and SMAP are not redundant with sm*p_andnot_wp in the page role.
2213 # define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role)
[all...]
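
The kvm_host.h comment above describes the shadow-page role as a set of properties packed into a single word, so two pages can be compared (and one reused) by comparing a single integer. The sketch below illustrates that union-of-bitfields-with-a-word pattern; the field names and widths are a toy subset picked for the example, not KVM's actual kvm_mmu_page_role layout.

/*
 * Standalone illustration of the "role as a packed word" idea.  The fields
 * here are invented for the sketch; requires C11 anonymous structs.
 */
#include <stdint.h>
#include <stdio.h>

union toy_page_role {
	uint32_t word;			/* compare/hash the whole role at once */
	struct {
		uint32_t level:4;	/* paging level of the shadow page */
		uint32_t direct:1;	/* no guest page table backing it */
		uint32_t access:3;	/* r/w/x access bits */
		uint32_t smm:1;		/* which address space the page belongs to */
	};
};

int main(void)
{
	union toy_page_role a = { .word = 0 }, b = { .word = 0 };

	a.level = 3; a.direct = 1;
	b.level = 3; b.direct = 1;

	/* Two pages with identical role words are interchangeable. */
	printf("reusable: %s\n", a.word == b.word ? "yes" : "no");
	return 0;
}
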
/linux-master/arch/x86/kvm/mmu/
mmu.c 218 * The MMU itself (with a valid role) is the single source of truth for the
219 * MMU. Do not use the regs used to build the MMU/role, nor the vCPU. The
289 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
565 int level = sptep_to_sp(sptep)->role.level;
719 if (sp->role.passthrough)
722 if (!sp->role.direct)
725 return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS));
749 * In both cases, sp->role.access contains the correct access bits.
751 return sp->role.access;
764 sp->role
2151 kvm_mmu_find_shadow_page(struct kvm *kvm, struct kvm_vcpu *vcpu, gfn_t gfn, struct hlist_head *sp_list, union kvm_mmu_page_role role) argument
2236 kvm_mmu_alloc_shadow_page(struct kvm *kvm, struct shadow_page_caches *caches, gfn_t gfn, struct hlist_head *sp_list, union kvm_mmu_page_role role) argument
2272 __kvm_mmu_get_shadow_page(struct kvm *kvm, struct kvm_vcpu *vcpu, struct shadow_page_caches *caches, gfn_t gfn, union kvm_mmu_page_role role) argument
2294 kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu, gfn_t gfn, union kvm_mmu_page_role role) argument
2311 union kvm_mmu_page_role role; local
2357 union kvm_mmu_page_role role; local
3686 union kvm_mmu_page_role role = vcpu->arch.mmu->root_role; local
4669 is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd, union kvm_mmu_page_role role) argument
5260 union kvm_cpu_role role = {0}; local
5329 union kvm_mmu_page_role role = {0}; local
5450 union kvm_cpu_role role = {0}; local
6494 union kvm_mmu_page_role role; local
[all...]
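
The mmu.c signatures above (kvm_mmu_find_shadow_page, kvm_mmu_alloc_shadow_page, __kvm_mmu_get_shadow_page) all key the shadow-page lookup on a gfn plus a kvm_mmu_page_role. The fragment below is a simplified, standalone sketch of that reuse check over a bucket chain; it illustrates the idea only and is not KVM's hash-list code, and the struct is invented for the sketch.

/* Toy shadow page: reusable only if both gfn and packed role match. */
#include <stdint.h>
#include <stddef.h>

struct toy_shadow_page {
	uint64_t gfn;
	uint32_t role_word;		/* packed role, compared as one value */
	struct toy_shadow_page *next;	/* bucket chain */
};

struct toy_shadow_page *
toy_find_shadow_page(struct toy_shadow_page *bucket, uint64_t gfn, uint32_t role_word)
{
	for (struct toy_shadow_page *sp = bucket; sp; sp = sp->next) {
		if (sp->gfn == gfn && sp->role_word == role_word)
			return sp;	/* same gfn and same role: safe to reuse */
	}
	return NULL;			/* caller allocates a fresh page instead */
}
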
mmu_internal.h 80 union kvm_mmu_page_role role; member in struct:kvm_mmu_page
138 static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role) argument
140 return role.smm ? 1 : 0;
145 return kvm_mmu_role_as_id(sp->role);
158 return kvm_x86_ops.cpu_dirty_log_size && sp->role.guest_mode;
mmutrace.h 14 __field(__u32, role) \
21 __entry->role = sp->role.word; \
30 union kvm_mmu_page_role role; \
32 role.word = __entry->role; \
37 __entry->gfn, role.level, \
38 role.has_4_byte_gpte ? 4 : 8, \
39 role.quadrant, \
40 role
[all...]
paging_tmpl.h 545 pte_access = sp->role.access & FNAME(gpte_access)(gpte);
594 if (sp->role.level > PG_LEVEL_4K)
604 if (sp->role.direct)
859 WARN_ON_ONCE(sp->role.level != PG_LEVEL_4K);
862 offset = sp->role.quadrant << SPTE_LEVEL_BITS;
928 pte_access = sp->role.access;
spte.c 143 int level = sp->role.level;
149 if (sp->role.ad_disabled)
274 u64 make_huge_page_split_spte(struct kvm *kvm, u64 huge_spte, union kvm_mmu_page_role role, argument
292 child_spte |= (index * KVM_PAGES_PER_HPAGE(role.level)) << PAGE_SHIFT;
294 if (role.level == PG_LEVEL_4K) {
302 if ((role.access & ACC_EXEC_MASK) && is_nx_huge_page_enabled(kvm))
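
spte.c line 292 above computes each child's offset when a huge SPTE is split: index * KVM_PAGES_PER_HPAGE(role.level), shifted into a byte address by PAGE_SHIFT. The standalone sketch below reproduces that arithmetic for 4 KiB base pages and 9 bits per level (so a level-2 child covers 512 pages, i.e. 2 MiB); the helper is a stand-in written for the example, not the kernel macro.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define LEVEL_BITS	9

/* Pages covered by one huge-page entry at the given level (4 KiB base). */
static uint64_t pages_per_hpage(int level)
{
	return 1ULL << ((level - 1) * LEVEL_BITS);
}

int main(void)
{
	int child_level = 2;	/* splitting a 1 GiB (level 3) mapping into 2 MiB children */

	for (unsigned int index = 0; index < 4; index++) {
		uint64_t byte_off = (index * pages_per_hpage(child_level)) << PAGE_SHIFT;
		printf("child %u starts at +0x%llx\n", index,
		       (unsigned long long)byte_off);
	}
	return 0;
}
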
spte.h 276 return sp->role.ad_disabled;
480 union kvm_mmu_page_role role, int index);
tdp_iter.c 42 if (WARN_ON_ONCE(!root || (root->role.level < 1) ||
43 (root->role.level > PT64_ROOT_MAX_LEVEL))) {
49 iter->root_level = root->role.level;
tdp_mmu.c 86 KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm);
112 * role.invalid are protected by mmu_lock.
127 if ((!only_valid || !next_root->role.invalid) &&
178 ((_only_valid) && (_root)->role.invalid))) { \
197 tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep, gfn_t gfn, union kvm_mmu_page_role role) argument
198 gfn_t gfn, union kvm_mmu_page_role role)
204 sp->role = role;
216 union kvm_mmu_page_role role; local
220 role = parent_sp->role;
229 union kvm_mmu_page_role role = mmu->root_role; local
[all...]

Completed in 361 milliseconds
